diff --git a/.github/ISSUE_TEMPLATE/proposal.md b/.github/ISSUE_TEMPLATE/proposal.md new file mode 100644 index 0000000000..45f0bff42f --- /dev/null +++ b/.github/ISSUE_TEMPLATE/proposal.md @@ -0,0 +1,37 @@ +--- +name: Protocol Change Proposal +about: Create a proposal to request a change to the protocol + +--- + + + +# Protocol Change Proposal + +## Summary + + + +## Problem Definition + + + +## Proposal + + + +____ + +#### For Admin Use + +- [ ] Not duplicate issue +- [ ] Appropriate labels applied +- [ ] Appropriate contributors tagged +- [ ] Contributor assigned/self-assigned diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 9960106ffd..4bd765afd2 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -3,8 +3,7 @@ updates: - package-ecosystem: github-actions directory: "/" schedule: - interval: daily - time: "11:00" + interval: weekly open-pull-requests-limit: 10 # - package-ecosystem: npm # directory: "/docs" @@ -18,7 +17,7 @@ updates: directory: "/" schedule: interval: daily - time: "11:00" + target-branch: "v0.35.x" open-pull-requests-limit: 10 reviewers: - shotonoff diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index de908193c6..ebc0f58ee2 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -1,4 +1,3 @@ ---- name: Build # Tests runs different tests (test_abci_apps, test_abci_cli, test_apps) # This workflow runs on every push to master or release branch and every pull requests @@ -43,7 +42,7 @@ jobs: arch: ${{ matrix.goarch }} - name: install-gcc run: sudo apt-get update -qq && sudo apt-get install -qq --yes gcc-10-arm-linux-gnueabi g++-10-arm-linux-gnueabi - if: "matrix.goarch == 'arm'" + if: "matrix.goarch == 'arm'" - name: install run: | GOOS=${{ matrix.goos }} GOARCH=${{ matrix.goarch }} make build-binary @@ -55,11 +54,11 @@ jobs: needs: build timeout-minutes: 5 steps: - - uses: actions/setup-go@v2 + - uses: actions/setup-go@v3 with: go-version: "1.17" - - uses: actions/checkout@v2.4.0 - - uses: technote-space/get-diff-action@v5 + - uses: actions/checkout@v3 + - uses: technote-space/get-diff-action@v6 with: PATTERNS: | **/**.go @@ -80,11 +79,11 @@ jobs: needs: build timeout-minutes: 5 steps: - - uses: actions/setup-go@v2 + - uses: actions/setup-go@v3 with: go-version: "1.17" - - uses: actions/checkout@v2.4.0 - - uses: technote-space/get-diff-action@v5 + - uses: actions/checkout@v3 + - uses: technote-space/get-diff-action@v6 with: PATTERNS: | **/**.go diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index c8ff2e1b35..65f07c43ab 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -19,7 +19,7 @@ jobs: platforms: all - name: Set up Docker Build - uses: docker/setup-buildx-action@v1.6.0 + uses: docker/setup-buildx-action@v1.7.0 - name: Login to DockerHub if: ${{ github.event_name != 'pull_request' }} @@ -50,7 +50,7 @@ jobs: suffix=${{ steps.suffix.outputs.result }} - name: Publish to Docker Hub - uses: docker/build-push-action@v2.9.0 + uses: docker/build-push-action@v2.10.0 with: context: . file: ./DOCKER/Dockerfile diff --git a/.github/workflows/e2e-manual.yml b/.github/workflows/e2e-manual.yml index acff89af10..bab3fcf62d 100644 --- a/.github/workflows/e2e-manual.yml +++ b/.github/workflows/e2e-manual.yml @@ -1,4 +1,5 @@ -# Manually run randomly generated E2E testnets (as nightly). 
+# Runs randomly generated E2E testnets nightly on master +# manually run e2e tests name: e2e-manual on: workflow_dispatch: @@ -10,16 +11,15 @@ jobs: strategy: fail-fast: false matrix: - p2p: ['legacy', 'new', 'hybrid'] group: ['00', '01', '02', '03'] runs-on: ubuntu-latest timeout-minutes: 60 steps: - - uses: actions/setup-go@v2 + - uses: actions/setup-go@v3 with: go-version: '1.17' - - uses: actions/checkout@v2.4.0 + - uses: actions/checkout@v3 - name: Build working-directory: test/e2e @@ -29,8 +29,8 @@ jobs: - name: Generate testnets working-directory: test/e2e # When changing -g, also change the matrix groups above - run: ./build/generator -g 4 -d networks/nightly/${{ matrix.p2p }} -p ${{ matrix.p2p }} + run: ./build/generator -g 4 -d networks/nightly/ - name: Run ${{ matrix.p2p }} p2p testnets working-directory: test/e2e - run: ./run-multiple.sh networks/nightly/${{ matrix.p2p }}/*-group${{ matrix.group }}-*.toml + run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml diff --git a/.github/workflows/e2e-nightly-34x.yml b/.github/workflows/e2e-nightly-34x.yml index 0160718359..ec92cb112b 100644 --- a/.github/workflows/e2e-nightly-34x.yml +++ b/.github/workflows/e2e-nightly-34x.yml @@ -6,7 +6,7 @@ name: e2e-nightly-34x on: - workflow_dispatch: # allow running workflow manually, in theory + workflow_dispatch: # allow running workflow manually, in theory schedule: - cron: '0 2 * * *' @@ -21,11 +21,11 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 60 steps: - - uses: actions/setup-go@v2 + - uses: actions/setup-go@v3 with: go-version: '1.17' - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v3 with: ref: 'v0.34.x' @@ -59,7 +59,7 @@ jobs: SLACK_MESSAGE: Nightly E2E tests failed on v0.34.x SLACK_FOOTER: '' - e2e-nightly-success: # may turn this off once they seem to pass consistently + e2e-nightly-success: # may turn this off once they seem to pass consistently needs: e2e-nightly-test if: ${{ success() }} runs-on: ubuntu-latest diff --git a/.github/workflows/e2e-nightly-35x.yml b/.github/workflows/e2e-nightly-35x.yml new file mode 100644 index 0000000000..c397ead9c0 --- /dev/null +++ b/.github/workflows/e2e-nightly-35x.yml @@ -0,0 +1,75 @@ +# Runs randomly generated E2E testnets nightly on v0.35.x. + +# !! If you change something in this file, you probably want +# to update the e2e-nightly-master workflow as well! + +name: e2e-nightly-35x +on: + schedule: + - cron: '0 2 * * *' + +jobs: + e2e-nightly-test: + # Run parallel jobs for the listed testnet groups (must match the + # ./build/generator -g flag) + strategy: + fail-fast: false + matrix: + p2p: ['legacy', 'new', 'hybrid'] + group: ['00', '01', '02', '03'] + runs-on: ubuntu-latest + timeout-minutes: 60 + steps: + - uses: actions/setup-go@v3 + with: + go-version: '1.17' + + - uses: actions/checkout@v3 + with: + ref: 'v0.35.x' + + - name: Build + working-directory: test/e2e + # Run make jobs in parallel, since we can't run steps in parallel. 
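+      # (-j2 allows make to build at most two of these targets concurrently.)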
+ run: make -j2 docker generator runner tests + + - name: Generate testnets + working-directory: test/e2e + # When changing -g, also change the matrix groups above + run: ./build/generator -g 4 -d networks/nightly/${{ matrix.p2p }} -p ${{ matrix.p2p }} + + - name: Run ${{ matrix.p2p }} p2p testnets in group ${{ matrix.group }} + working-directory: test/e2e + run: ./run-multiple.sh networks/nightly/${{ matrix.p2p }}/*-group${{ matrix.group }}-*.toml + + e2e-nightly-fail-2: + needs: e2e-nightly-test + if: ${{ failure() }} + runs-on: ubuntu-latest + steps: + - name: Notify Slack on failure + uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} + SLACK_CHANNEL: tendermint-internal + SLACK_USERNAME: Nightly E2E Tests + SLACK_ICON_EMOJI: ':skull:' + SLACK_COLOR: danger + SLACK_MESSAGE: Nightly E2E tests failed on v0.35.x + SLACK_FOOTER: '' + + e2e-nightly-success: # may turn this off once they seem to pass consistently + needs: e2e-nightly-test + if: ${{ success() }} + runs-on: ubuntu-latest + steps: + - name: Notify Slack on success + uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} + SLACK_CHANNEL: tendermint-internal + SLACK_USERNAME: Nightly E2E Tests + SLACK_ICON_EMOJI: ':white_check_mark:' + SLACK_COLOR: good + SLACK_MESSAGE: Nightly E2E tests passed on v0.35.x + SLACK_FOOTER: '' diff --git a/.github/workflows/e2e-nightly-master.yml b/.github/workflows/e2e-nightly-master.yml index 30479bf8de..58ffa81c17 100644 --- a/.github/workflows/e2e-nightly-master.yml +++ b/.github/workflows/e2e-nightly-master.yml @@ -5,27 +5,26 @@ name: e2e-nightly-master on: - workflow_dispatch: # allow running workflow manually + workflow_dispatch: # allow running workflow manually schedule: - cron: '0 2 * * *' jobs: - e2e-nightly-test-2: + e2e-nightly-test: # Run parallel jobs for the listed testnet groups (must match the # ./build/generator -g flag) strategy: fail-fast: false matrix: - p2p: ['legacy', 'new', 'hybrid'] group: ['00', '01', '02', '03'] runs-on: ubuntu-latest timeout-minutes: 60 steps: - - uses: actions/setup-go@v2 + - uses: actions/setup-go@v3 with: go-version: '1.17' - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v3 - name: Build working-directory: test/e2e @@ -35,14 +34,14 @@ jobs: - name: Generate testnets working-directory: test/e2e # When changing -g, also change the matrix groups above - run: ./build/generator -g 4 -d networks/nightly/${{ matrix.p2p }} -p ${{ matrix.p2p }} + run: ./build/generator -g 4 -d networks/nightly/ - - name: Run ${{ matrix.p2p }} p2p testnets in group ${{ matrix.group }} + - name: Run ${{ matrix.p2p }} p2p testnets working-directory: test/e2e - run: ./run-multiple.sh networks/nightly/${{ matrix.p2p }}/*-group${{ matrix.group }}-*.toml + run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml e2e-nightly-fail-2: - needs: e2e-nightly-test-2 + needs: e2e-nightly-test if: ${{ failure() }} runs-on: ubuntu-latest steps: diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index a44473e647..de13a03302 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -20,13 +20,13 @@ jobs: env: FULLNODE_PUBKEY_KEEP: false steps: - - uses: actions/setup-go@v2 + - uses: actions/setup-go@v3 with: go-version: '1.17' - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v3 with: submodules: true - - uses: technote-space/get-diff-action@v5 + - uses: 
technote-space/get-diff-action@v6 with: PATTERNS: | **/**.go diff --git a/.github/workflows/fuzz-nightly.yml b/.github/workflows/fuzz-nightly.yml index 57a0962084..0fcab9ae5b 100644 --- a/.github/workflows/fuzz-nightly.yml +++ b/.github/workflows/fuzz-nightly.yml @@ -13,34 +13,19 @@ jobs: fuzz-nightly-test: runs-on: ubuntu-latest steps: - - uses: actions/setup-go@v2 + - uses: actions/setup-go@v3 with: go-version: '1.17' - - uses: actions/checkout@v2.3.4 + - uses: actions/checkout@v3 - name: Install go-fuzz working-directory: test/fuzz - run: go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build + run: go install github.com/dvyukov/go-fuzz/go-fuzz@latest github.com/dvyukov/go-fuzz/go-fuzz-build@latest - - name: Fuzz mempool-v1 + - name: Fuzz mempool working-directory: test/fuzz - run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool-v1 - continue-on-error: true - - - name: Fuzz mempool-v0 - working-directory: test/fuzz - run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool-v0 - continue-on-error: true - - - name: Fuzz p2p-addrbook - working-directory: test/fuzz - run: timeout -s SIGINT --preserve-status 10m make fuzz-p2p-addrbook - continue-on-error: true - - - name: Fuzz p2p-pex - working-directory: test/fuzz - run: timeout -s SIGINT --preserve-status 10m make fuzz-p2p-pex + run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool continue-on-error: true - name: Fuzz p2p-sc @@ -54,14 +39,14 @@ jobs: continue-on-error: true - name: Archive crashers - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: crashers path: test/fuzz/**/crashers retention-days: 3 - name: Archive suppressions - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: suppressions path: test/fuzz/**/suppressions diff --git a/.github/workflows/jepsen.yml b/.github/workflows/jepsen.yml index 0e358af6e4..04e599564a 100644 --- a/.github/workflows/jepsen.yml +++ b/.github/workflows/jepsen.yml @@ -46,7 +46,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout the Jepsen repository - uses: actions/checkout@v2.3.4 + uses: actions/checkout@v3 with: repository: 'tendermint/jepsen' @@ -58,7 +58,7 @@ jobs: run: docker exec -i jepsen-control bash -c 'source /root/.bashrc; cd /jepsen/tendermint; lein run test --nemesis ${{ github.event.inputs.nemesis }} --workload ${{ github.event.inputs.workload }} --concurrency ${{ github.event.inputs.concurrency }} --tendermint-url ${{ github.event.inputs.tendermintUrl }} --merkleeyes-url ${{ github.event.inputs.merkleeyesUrl }} --time-limit ${{ github.event.inputs.timeLimit }} ${{ github.event.inputs.dupOrSuperByzValidators }}' - name: Archive results - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: results path: tendermint/store/latest diff --git a/.github/workflows/linkchecker.yml b/.github/workflows/linkchecker.yml index 6633c2c441..e2ba808617 100644 --- a/.github/workflows/linkchecker.yml +++ b/.github/workflows/linkchecker.yml @@ -6,7 +6,7 @@ jobs: markdown-link-check: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2.3.4 - - uses: gaurav-nelson/github-action-markdown-link-check@1.0.13 + - uses: actions/checkout@v3 + - uses: creachadair/github-action-markdown-link-check@master with: folder-path: "docs" diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 3f99b9f808..cfe8dde29b 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -1,7 +1,11 @@ -name: Lint -# Lint runs golangci-lint over the 
entire Tendermint repository -# This workflow is run on every pull request and push to master -# The `golangci` job will pass without running if no *.{go, mod, sum} files have been modified. +name: Golang Linter +# Lint runs golangci-lint over the entire Tendermint repository. +# +# This workflow is run on every pull request and push to master. +# +# The `golangci` job will pass without running if no *.{go, mod, sum} +# files have been modified. + on: pull_request: push: @@ -13,13 +17,13 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 8 steps: - - uses: actions/checkout@v2.4.0 + - uses: actions/checkout@v3 with: submodules: true - - uses: actions/setup-go@v2 + - uses: actions/setup-go@v3 with: go-version: '^1.17' - - uses: technote-space/get-diff-action@v5 + - uses: technote-space/get-diff-action@v6 with: PATTERNS: | **/**.go @@ -32,7 +36,9 @@ jobs: - uses: golangci/golangci-lint-action@v3.1.0 with: - # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version. + # Required: the version of golangci-lint is required and + # must be specified without patch version: we always use the + # latest patch version. version: v1.45 args: --timeout 10m github-token: ${{ secrets.github_token }} diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml index 628b1af69e..badae8c1f8 100644 --- a/.github/workflows/linter.yml +++ b/.github/workflows/linter.yml @@ -1,4 +1,4 @@ -name: Lint +name: Markdown Linter on: push: branches: @@ -19,7 +19,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout Code - uses: actions/checkout@v2.4.0 + uses: actions/checkout@v3 - name: Lint Code Base uses: docker://github/super-linter:v4 env: diff --git a/.github/workflows/markdown-links.yml b/.github/workflows/markdown-links.yml new file mode 100644 index 0000000000..7af7e3ce90 --- /dev/null +++ b/.github/workflows/markdown-links.yml @@ -0,0 +1,23 @@ +name: Check Markdown links + +on: + push: + branches: + - master + pull_request: + branches: [master] + +jobs: + markdown-link-check: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: technote-space/get-diff-action@v6 + with: + PATTERNS: | + **/**.md + - uses: creachadair/github-action-markdown-link-check@master + with: + check-modified-files-only: 'yes' + config-file: '.md-link-check.json' + if: env.GIT_DIFF diff --git a/.github/workflows/proto-docker.yml b/.github/workflows/proto-docker.yml deleted file mode 100644 index 340a1b78b2..0000000000 --- a/.github/workflows/proto-docker.yml +++ /dev/null @@ -1,51 +0,0 @@ -name: Build & Push TM Proto Builder -on: - pull_request: - paths: - - "tools/proto/*" - push: - branches: - - master - paths: - - "tools/proto/*" - schedule: - # run this job once a month to recieve any go or buf updates - - cron: "* * 1 * *" - -jobs: - build: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2.3.4 - - name: Prepare - id: prep - run: | - DOCKER_IMAGE=tendermintdev/docker-build-proto - VERSION=noop - if [[ $GITHUB_REF == refs/tags/* ]]; then - VERSION=${GITHUB_REF#refs/tags/} - elif [[ $GITHUB_REF == refs/heads/* ]]; then - VERSION=$(echo ${GITHUB_REF#refs/heads/} | sed -r 's#/+#-#g') - if [ "${{ github.event.repository.default_branch }}" = "$VERSION" ]; then - VERSION=latest - fi - fi - TAGS="${DOCKER_IMAGE}:${VERSION}" - echo ::set-output name=tags::${TAGS} - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v1.6.0 - - - name: Login to DockerHub - uses: docker/login-action@v1.14.1 - with: - username: 
${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Publish to Docker Hub - uses: docker/build-push-action@v2.9.0 - with: - context: ./tools/proto - file: ./tools/proto/Dockerfile - push: ${{ github.event_name != 'pull_request' }} - tags: ${{ steps.prep.outputs.tags }} diff --git a/.github/workflows/proto-lint.yml b/.github/workflows/proto-lint.yml new file mode 100644 index 0000000000..b1fbeab9df --- /dev/null +++ b/.github/workflows/proto-lint.yml @@ -0,0 +1,21 @@ +name: Protobuf Lint +on: + pull_request: + paths: + - 'proto/**' + push: + branches: + - master + paths: + - 'proto/**' + +jobs: + lint: + runs-on: ubuntu-latest + timeout-minutes: 5 + steps: + - uses: actions/checkout@v3 + - uses: bufbuild/buf-setup-action@v1.4.0 + - uses: bufbuild/buf-lint-action@v1 + with: + input: 'proto' diff --git a/.github/workflows/proto.yml b/.github/workflows/proto.yml deleted file mode 100644 index 2eeb3dfd55..0000000000 --- a/.github/workflows/proto.yml +++ /dev/null @@ -1,23 +0,0 @@ -name: Protobuf -# Protobuf runs buf (https://buf.build/) lint and check-breakage -# This workflow is only run when a .proto file has been modified -on: - workflow_dispatch: # allow running workflow manually - pull_request: - paths: - - "**.proto" -jobs: - proto-lint: - runs-on: ubuntu-latest - timeout-minutes: 4 - steps: - - uses: actions/checkout@v2.3.4 - - name: lint - run: make proto-lint - proto-breakage: - runs-on: ubuntu-latest - timeout-minutes: 4 - steps: - - uses: actions/checkout@v2.3.4 - - name: check-breakage - run: "make BASE_BRANCH='${{ github.base_ref }}' proto-check-breaking-ci" diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 2939c567b0..fdd466fd57 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -8,11 +8,11 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v2.3.4 + uses: actions/checkout@v3 with: fetch-depth: 0 - - uses: actions/setup-go@v2 + - uses: actions/setup-go@v3 with: go-version: '1.17' @@ -23,11 +23,13 @@ jobs: version: latest args: build --skip-validate # skip validate skips initial sanity checks in order to be able to fully run + - run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG.md#${GITHUB_REF#refs/tags/} > ../release_notes.md + - name: Release uses: goreleaser/goreleaser-action@v2 if: startsWith(github.ref, 'refs/tags/') with: version: latest - args: release --rm-dist + args: release --rm-dist --release-notes=../release_notes.md env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 1109f09c1c..4089abfbc3 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -7,7 +7,7 @@ jobs: stale: runs-on: ubuntu-latest steps: - - uses: actions/stale@v4 + - uses: actions/stale@v5 with: repo-token: ${{ secrets.GITHUB_TOKEN }} stale-pr-message: "This pull request has been automatically marked as stale because it has not had diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index d389ddbf8e..2fcde2c30a 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -16,11 +16,11 @@ jobs: matrix: part: ["00", "01", "02", "03", "04", "05"] steps: - - uses: actions/setup-go@v2 + - uses: actions/setup-go@v3 with: go-version: "1.17" - - uses: actions/checkout@v2.3.4 - - uses: technote-space/get-diff-action@v5 + - uses: actions/checkout@v3 + - uses: technote-space/get-diff-action@v6 with: PATTERNS: | **/**.go @@ 
-35,7 +35,7 @@ jobs: run: | make test-group-${{ matrix.part }} NUM_SPLIT=6 if: env.GIT_DIFF - - uses: actions/upload-artifact@v2 + - uses: actions/upload-artifact@v3 with: name: "${{ github.sha }}-${{ matrix.part }}-coverage" path: ./build/${{ matrix.part }}.profile.out @@ -44,8 +44,8 @@ jobs: runs-on: ubuntu-latest needs: tests steps: - - uses: actions/checkout@v2.4.0 - - uses: technote-space/get-diff-action@v5 + - uses: actions/checkout@v3 + - uses: technote-space/get-diff-action@v6 with: PATTERNS: | **/**.go @@ -53,26 +53,26 @@ jobs: go.mod go.sum Makefile - - uses: actions/download-artifact@v2 + - uses: actions/download-artifact@v3 with: name: "${{ github.sha }}-00-coverage" if: env.GIT_DIFF - - uses: actions/download-artifact@v2 + - uses: actions/download-artifact@v3 with: name: "${{ github.sha }}-01-coverage" if: env.GIT_DIFF - - uses: actions/download-artifact@v2 + - uses: actions/download-artifact@v3 with: name: "${{ github.sha }}-02-coverage" if: env.GIT_DIFF - - uses: actions/download-artifact@v2 + - uses: actions/download-artifact@v3 with: name: "${{ github.sha }}-03-coverage" if: env.GIT_DIFF - run: | cat ./*profile.out | grep -v "mode: set" >> coverage.txt if: env.GIT_DIFF - - uses: codecov/codecov-action@v2.1.0 + - uses: codecov/codecov-action@v3.1.0 with: file: ./coverage.txt if: env.GIT_DIFF diff --git a/.gitignore b/.gitignore index 1846a354cc..7cfe62dece 100644 --- a/.gitignore +++ b/.gitignore @@ -55,3 +55,11 @@ test/fuzz/**/corpus test/fuzz/**/crashers test/fuzz/**/suppressions test/fuzz/**/*.zip +proto/spec/**/*.pb.go +*.aux +*.bbl +*.blg +*.log +*.pdf +*.gz +*.dvi diff --git a/.markdownlint.yml b/.markdownlint.yml new file mode 100644 index 0000000000..80e3be4edb --- /dev/null +++ b/.markdownlint.yml @@ -0,0 +1,11 @@ +default: true +MD001: false +MD007: {indent: 4} +MD013: false +MD024: {siblings_only: true} +MD025: false +MD033: false +MD036: false +MD010: false +MD012: false +MD028: false diff --git a/.md-link-check.json b/.md-link-check.json new file mode 100644 index 0000000000..6f47fa2c94 --- /dev/null +++ b/.md-link-check.json @@ -0,0 +1,6 @@ +{ + "retryOn429": true, + "retryCount": 5, + "fallbackRetryDelay": "30s", + "aliveStatusCodes": [200, 206, 503] +} diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 7a51fb59ce..442cca2165 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -12,16 +12,77 @@ Special thanks to external contributors on this release: - CLI/RPC/Config + - [rpc] \#7121 Remove the deprecated gRPC interface to the RPC service. (@creachadair) + - [blocksync] \#7159 Remove support for disabling blocksync in any circumstance. (@tychoish) + - [mempool] \#7171 Remove legacy mempool implementation. (@tychoish) + - [rpc] \#7575 Rework how RPC responses are written back via HTTP. (@creachadair) + - [rpc] \#7713 Remove unused options for websocket clients. (@creachadair) + - [config] \#7930 Add new event subscription options and defaults. (@creachadair) + - [rpc] \#7982 Add new Events interface and deprecate Subscribe. (@creachadair) + - [cli] \#8081 Make the reset command safe to use by introducing the `reset-state` command. Fixed by \#8259. (@marbar3778, @cmwaters) + - [config] \#8222 Default the indexer configuration to null. (@creachadair) + - Apps + - [tendermint/spec] \#7804 Migrate spec from [spec repo](https://github.com/tendermint/spec). + - [abci] \#7984 Remove the locks preventing concurrent use of ABCI applications by Tendermint.
(@tychoish) + - P2P Protocol + - [p2p] \#7035 Remove legacy P2P routing implementation and associated configuration options. (@tychoish) + - [p2p] \#7265 Peer manager reduces a peer's score for each failed dial attempt for peers that have not successfully dialed. (@tychoish) + - [p2p] [\#7594](https://github.com/tendermint/tendermint/pull/7594) Always advertise self, to enable mutual address discovery. (@altergui) + - Go API + - [rpc] \#7474 Remove the "URI" RPC client. (@creachadair) + - [libs/pubsub] \#7451 Internalize the pubsub packages. (@creachadair) + - [libs/sync] \#7450 Internalize and remove the library. (@creachadair) + - [libs/async] \#7449 Move library to internal. (@creachadair) + - [pubsub] \#7231 Remove unbuffered subscriptions and rework the Subscription interface. (@creachadair) + - [eventbus] \#7231 Move the EventBus type to the internal/eventbus package. (@creachadair) + - [blocksync] \#7046 Remove v2 implementation of the blocksync service and reactor, which was disabled in the previous release. (@tychoish) + - [p2p] \#7064 Remove WDRR queue implementation. (@tychoish) + - [config] \#7169 `WriteConfigFile` now returns an error. (@tychoish) + - [libs/service] \#7288 Remove SetLogger method on `service.Service` interface. (@tychoish) + - [abci/client] \#7607 Simplify client interface (removes most "async" methods). (@creachadair) + - [libs/json] \#7673 Remove the libs/json (tmjson) library. (@creachadair) + - [crypto] \#8412 \#8432 Remove `crypto/tmhash` package in favor of small functions in the `crypto` package and clean up unused functions. (@tychoish) + - Blockchain Protocol ### FEATURES +- [rpc] [\#7270](https://github.com/tendermint/tendermint/pull/7270) Add `header` and `header_by_hash` RPC Client queries. (@fedekunze) +- [rpc] [\#7701] Add `ApplicationInfo` to `status` rpc call which contains the application version. (@jonasbostoen) +- [cli] [#7033](https://github.com/tendermint/tendermint/pull/7033) Add a `rollback` command to roll back to the previous tendermint state in the event of a non-deterministic app hash or reverting an upgrade. +- [mempool, rpc] \#7041 Add removeTx operation to the RPC layer. (@tychoish) +- [consensus] \#7354 Add a new `synchrony` field to the `ConsensusParameter` struct for controlling the parameters of the proposer-based timestamp algorithm. (@williambanfield) +- [consensus] \#7376 Update the proposal logic per the proposer-based timestamps specification so that the proposer will wait for the previous block time to occur before proposing the next block. (@williambanfield) +- [consensus] \#7391 Use the proposed block timestamp as the proposal timestamp. Update the block validation logic to ensure that the proposed block's timestamp matches the timestamp in the proposal message. (@williambanfield) +- [consensus] \#7415 Update proposal validation logic to Prevote nil if a proposal does not meet the conditions for Timeliness per the proposer-based timestamp specification. (@anca) +- [consensus] \#7382 Update block validation to no longer require the block timestamp to be the median of the timestamps of the previous commit. (@anca) +- [consensus] \#7711 Use the proposer timestamp for the first height instead of the genesis time. Chains will still start consensus at the genesis time. (@anca) +- [cli] \#8281 Add a tool to update old config files to the latest version. (@creachadair) + ### IMPROVEMENTS +- [internal/protoio] \#7325 Optimized `MarshalDelimited` by inlining the common case and using a `sync.Pool` in the worst case.
(@odeke-em) +- [consensus] \#6969 Remove logic to 'unlock' a locked block. +- [evidence] \#7700 Evidence messages contain a single Evidence instead of an EvidenceList. (@jmalicevic) +- [evidence] \#7802 Evidence pool emits events when evidence is validated and updates a metric when the amount of evidence in the evidence pool changes. (@jmalicevic) +- [pubsub] \#7319 Performance improvements for the event query API. (@creachadair) +- [node] \#7521 Define a concrete type for the seed node implementation. (@spacech1mp) +- [rpc] \#7612 Paginate the mempool `/unconfirmed_txs` RPC endpoint. (@spacech1mp) +- [light] [\#7536](https://github.com/tendermint/tendermint/pull/7536) The `/status` RPC call returns info about the light client. (@jmalicevic) +- [types] \#7765 Replace EvidenceData with EvidenceList to avoid unnecessary nesting of evidence fields within a block. (@jmalicevic) + ### BUG FIXES + +- fix: assignment copies lock value in `BitArray.UnmarshalJSON()` (@lklimek) +- [light] \#7640 Light Client: fix absence proof verification (@ashcherbakov) +- [light] \#7641 Light Client: fix querying against the latest height (@ashcherbakov) +- [cli] [#7837](https://github.com/tendermint/tendermint/pull/7837) fix app hash in state rollback. (@yihuang) +- [cli] \#8276 scmigrate: ensure target key is correctly renamed. (@creachadair) +- [cli] \#8294 keymigrate: ensure block hash keys are correctly translated. (@creachadair) +- [cli] \#8352 keymigrate: ensure transaction hash keys are correctly translated. (@creachadair) diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 8c5a992032..ec1477adcc 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -20,7 +20,7 @@ This code of conduct applies to all projects run by the Tendermint/COSMOS team a * Please keep unstructured critique to a minimum. If you have solid ideas you want to experiment with, make a fork and see how it works. -* We will exclude you from interaction if you insult, demean or harass anyone. That is not welcome behaviour. We interpret the term “harassment” as including the definition in the [Citizen Code of Conduct](http://citizencodeofconduct.org/); if you have any lack of clarity about what might be included in that concept, please read their definition. In particular, we don’t tolerate behavior that excludes people in socially marginalized groups. +* We will exclude you from interaction if you insult, demean or harass anyone. That is not welcome behaviour. We interpret the term “harassment” as including the definition in the [Citizen Code of Conduct](https://github.com/stumpsyn/policies/blob/master/citizen_code_of_conduct.md); if you have any lack of clarity about what might be included in that concept, please read their definition. In particular, we don’t tolerate behavior that excludes people in socially marginalized groups. * Private harassment is also unacceptable. No matter who you are, if you feel you have been or are being harassed or made uncomfortable by a community member, please contact one of the channel admins or the person mentioned above immediately. Whether you’re a regular contributor or a newcomer, we care about making this community a safe place for you and we’ve got your back. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 33b8cf6a78..bfa56bea64 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -105,11 +105,33 @@ specify exactly the dependency you want to update, eg.
## Protobuf -We use [Protocol Buffers](https://developers.google.com/protocol-buffers) along with [gogoproto](https://github.com/gogo/protobuf) to generate code for use across Tendermint Core. +We use [Protocol Buffers](https://developers.google.com/protocol-buffers) along +with [`gogoproto`](https://github.com/gogo/protobuf) to generate code for use +across Tendermint Core. -For linting, checking breaking changes and generating proto stubs, we use [buf](https://buf.build/). If you would like to run linting and check if the changes you have made are breaking then you will need to have docker running locally. Then the linting cmd will be `make proto-lint` and the breaking changes check will be `make proto-check-breaking`. +To generate proto stubs, lint, and check protos for breaking changes, you will +need to install [buf](https://buf.build/) and `gogoproto`. Then, from the root +of the repository, run: -We use [Docker](https://www.docker.com/) to generate the protobuf stubs. To generate the stubs yourself, make sure docker is running then run `make proto-gen`. +```bash +# Lint all of the .proto files in proto/tendermint +make proto-lint + +# Check if any of your local changes (prior to committing to the Git repository) +# are breaking +make proto-check-breaking + +# Generate Go code from the .proto files in proto/tendermint +make proto-gen +``` + +To automatically format `.proto` files, you will need +[`clang-format`](https://clang.llvm.org/docs/ClangFormat.html) installed. Once +installed, you can run: + +```bash +make proto-format +``` ### Visual Studio Code @@ -227,150 +249,6 @@ Fixes #nnnn Each PR should have one commit once it lands on `master`; this can be accomplished by using the "squash and merge" button on Github. Be sure to edit your commit message, though! -### Release procedure - -#### A note about backport branches -Tendermint's `master` branch is under active development. -Releases are specified using tags and are built from long-lived "backport" branches. -Each release "line" (e.g. 0.34 or 0.33) has its own long-lived backport branch, -and the backport branches have names like `v0.34.x` or `v0.33.x` -(literally, `x`; it is not a placeholder in this case). - -As non-breaking changes land on `master`, they should also be backported (cherry-picked) -to these backport branches. - -We use Mergify's [backport feature](https://mergify.io/features/backports) to automatically backport -to the needed branch. There should be a label for any backport branch that you'll be targeting. -To notify the bot to backport a pull request, mark the pull request with -the label `S:backport-to-`. -Once the original pull request is merged, the bot will try to cherry-pick the pull request -to the backport branch. If the bot fails to backport, it will open a pull request. -The author of the original pull request is responsible for solving the conflicts and -merging the pull request. - -#### Creating a backport branch - -If this is the first release candidate for a major release, you get to have the honor of creating -the backport branch! - -Note that, after creating the backport branch, you'll also need to update the tags on `master` -so that `go mod` is able to order the branches correctly. You should tag `master` with a "dev" tag -that is "greater than" the backport branches tags. See #6072 for more context. - -In the following example, we'll assume that we're making a backport branch for -the 0.35.x line. - -1. Start on `master` -2. Create the backport branch: - `git checkout -b v0.35.x` -3. 
Go back to master and tag it as the dev branch for the _next_ major release and push it back up: - `git tag -a v0.36.0-dev; git push v0.36.0-dev` -4. Create a new workflow to run the e2e nightlies for this backport branch. - (See https://github.com/tendermint/tendermint/blob/master/.github/workflows/e2e-nightly-34x.yml - for an example.) - -#### Release candidates - -Before creating an official release, especially a major release, we may want to create a -release candidate (RC) for our friends and partners to test out. We use git tags to -create RCs, and we build them off of backport branches. - -Tags for RCs should follow the "standard" release naming conventions, with `-rcX` at the end -(for example, `v0.35.0-rc0`). - -(Note that branches and tags _cannot_ have the same names, so it's important that these branches -have distinct names from the tags/release names.) - -If this is the first RC for a major release, you'll have to make a new backport branch (see above). -Otherwise: - -1. Start from the backport branch (e.g. `v0.35.x`). -1. Run the integration tests and the e2e nightlies - (which can be triggered from the Github UI; - e.g., https://github.com/tendermint/tendermint/actions/workflows/e2e-nightly-34x.yml). -1. Prepare the changelog: - - Move the changes included in `CHANGELOG_PENDING.md` into `CHANGELOG.md`. - - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for - all PRs - - Ensure that UPGRADING.md is up-to-date and includes notes on any breaking changes - or other upgrading flows. - - Bump TMVersionDefault version in `version.go` - - Bump P2P and block protocol versions in `version.go`, if necessary - - Bump ABCI protocol version in `version.go`, if necessary -1. Open a PR with these changes against the backport branch. -1. Once these changes have landed on the backport branch, be sure to pull them back down locally. -2. Once you have the changes locally, create the new tag, specifying a name and a tag "message": - `git tag -a v0.35.0-rc0 -m "Release Candidate v0.35.0-rc0` -3. Push the tag back up to origin: - `git push origin v0.35.0-rc0` - Now the tag should be available on the repo's releases page. -4. Future RCs will continue to be built off of this branch. - -Note that this process should only be used for "true" RCs-- -release candidates that, if successful, will be the next release. -For more experimental "RCs," create a new, short-lived branch and tag that instead. - -#### Major release - -This major release process assumes that this release was preceded by release candidates. -If there were no release candidates, begin by creating a backport branch, as described above. - -1. Start on the backport branch (e.g. `v0.35.x`) -2. Run integration tests and the e2e nightlies. -3. Prepare the release: - - "Squash" changes from the changelog entries for the RCs into a single entry, - and add all changes included in `CHANGELOG_PENDING.md`. - (Squashing includes both combining all entries, as well as removing or simplifying - any intra-RC changes. It may also help to alphabetize the entries by package name.) - - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for - all PRs - - Ensure that UPGRADING.md is up-to-date and includes notes on any breaking changes - or other upgrading flows. - - Bump TMVersionDefault version in `version.go` - - Bump P2P and block protocol versions in `version.go`, if necessary - - Bump ABCI protocol version in `version.go`, if necessary -4. Open a PR with these changes against the backport branch. -5. 
Once these changes are on the backport branch, push a tag with prepared release details. - This will trigger the actual release `v0.35.0`. - - `git tag -a v0.35.0 -m 'Release v0.35.0'` - - `git push origin v0.35.0` -7. Make sure that `master` is updated with the latest `CHANGELOG.md`, `CHANGELOG_PENDING.md`, and `UPGRADING.md`. -8. Add the release to the documentation site generator config (see - [DOCS_README.md](./docs/DOCS_README.md) for more details). In summary: - - Start on branch `master`. - - Add a new line at the bottom of [`docs/versions`](./docs/versions) to - ensure the newest release is the default for the landing page. - - Add a new entry to `themeConfig.versions` in - [`docs/.vuepress/config.js`](./docs/.vuepress/config.js) to include the - release in the dropdown versions menu. - -#### Minor release (point releases) - -Minor releases are done differently from major releases: They are built off of long-lived backport branches, rather than from master. -As non-breaking changes land on `master`, they should also be backported (cherry-picked) to these backport branches. - -Minor releases don't have release candidates by default, although any tricky changes may merit a release candidate. - -To create a minor release: - -1. Checkout the long-lived backport branch: `git checkout v0.35.x` -2. Run integration tests (`make test_integrations`) and the nightlies. -3. Check out a new branch and prepare the release: - - Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md` - - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all issues - - Run `bash ./scripts/authors.sh` to get a list of authors since the latest release, and add the GitHub aliases of external contributors to the top of the CHANGELOG. To lookup an alias from an email, try `bash ./scripts/authors.sh ` - - Reset the `CHANGELOG_PENDING.md` - - Bump the ABCI version number, if necessary. - (Note that ABCI follows semver, and that ABCI versions are the only versions - which can change during minor releases, and only field additions are valid minor changes.) -4. Open a PR with these changes that will land them back on `v0.35.x` -5. Once this change has landed on the backport branch, make sure to pull it locally, then push a tag. - - `git tag -a v0.35.1 -m 'Release v0.35.1'` - - `git push origin v0.35.1` -6. Create a pull request back to master with the CHANGELOG & version changes from the latest release. - - Remove all `R:minor` labels from the pull requests that were included in the release. - - Do not merge the backport branch into master. 
- ## Testing ### Unit tests diff --git a/DOCKER/.gitignore b/DOCKER/.gitignore deleted file mode 100644 index 9059c68485..0000000000 --- a/DOCKER/.gitignore +++ /dev/null @@ -1 +0,0 @@ -tendermint diff --git a/DOCKER/Dockerfile.build_c-amazonlinux b/DOCKER/Dockerfile.build_c-amazonlinux deleted file mode 100644 index 6ec9d539c6..0000000000 --- a/DOCKER/Dockerfile.build_c-amazonlinux +++ /dev/null @@ -1,27 +0,0 @@ -FROM amazonlinux:2 - -RUN yum -y update && \ - yum -y install wget - -RUN wget http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm && \ - rpm -ivh epel-release-latest-7.noarch.rpm - -RUN yum -y groupinstall "Development Tools" -RUN yum -y install leveldb-devel which - -ENV GOVERSION=1.16.5 - -RUN cd /tmp && \ - wget https://dl.google.com/go/go${GOVERSION}.linux-amd64.tar.gz && \ - tar -C /usr/local -xf go${GOVERSION}.linux-amd64.tar.gz && \ - mkdir -p /go/src && \ - mkdir -p /go/bin - -ENV PATH=$PATH:/usr/local/go/bin:/go/bin -ENV GOBIN=/go/bin -ENV GOPATH=/go/src - -RUN mkdir -p /tenderdash -WORKDIR /tenderdash - -CMD ["/usr/bin/make", "build", "TENDERMINT_BUILD_OPTIONS=cleveldb"] diff --git a/DOCKER/Dockerfile.testing b/DOCKER/Dockerfile.testing deleted file mode 100644 index 7f86ee1800..0000000000 --- a/DOCKER/Dockerfile.testing +++ /dev/null @@ -1,16 +0,0 @@ -FROM golang:latest - -# Grab deps (jq, hexdump, xxd, killall) -RUN apt-get update && \ - apt-get install -y --no-install-recommends \ - jq bsdmainutils vim-common psmisc netcat - -# Add testing deps for curl -RUN echo 'deb http://httpredir.debian.org/debian testing main non-free contrib' >> /etc/apt/sources.list && \ - apt-get update && \ - apt-get install -y --no-install-recommends curl - -VOLUME /go - -EXPOSE 26656 -EXPOSE 26657 diff --git a/DOCKER/Makefile b/DOCKER/Makefile deleted file mode 100644 index 082e52225e..0000000000 --- a/DOCKER/Makefile +++ /dev/null @@ -1,13 +0,0 @@ -build: - @sh -c "'$(CURDIR)/build.sh'" - -push: - @sh -c "'$(CURDIR)/push.sh'" - -build_testing: - docker build --tag dashpay/tenderdash:testing -f ./Dockerfile.testing .. - -build_amazonlinux_buildimage: - docker build -t "dashpay/tenderdash:build_c-amazonlinux" -f Dockerfile.build_c-amazonlinux .. - -.PHONY: build push build_testing build_amazonlinux_buildimage diff --git a/DOCKER/README.md b/DOCKER/README.md index b670a06d4e..671b646ad0 100644 --- a/DOCKER/README.md +++ b/DOCKER/README.md @@ -8,7 +8,7 @@ Official releases can be found [here](https://github.com/tendermint/tendermint/r The Dockerfile for tendermint is not expected to change in the near future. The master file used for all builds can be found [here](https://raw.githubusercontent.com/tendermint/tendermint/master/DOCKER/Dockerfile). -Respective versioned files can be found (replace the Xs with the version number). +Respective versioned files can be found at `https://raw.githubusercontent.com/tendermint/tendermint/vX.XX.XX/DOCKER/Dockerfile` (replace the Xs with the version number). ## Quick reference diff --git a/DOCKER/build.sh b/DOCKER/build.sh deleted file mode 100755 index 193deb3383..0000000000 --- a/DOCKER/build.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env bash -set -e - -# Get the tag from the version, or try to figure it out. -if [ -z "$TAG" ]; then - TAG=$(awk -F\" '/TMCoreSemVer =/ { print $2; exit }' < ../version/version.go) -fi -if [ -z "$TAG" ]; then - echo "Please specify a tag." - exit 1 -fi - -TAG_NO_PATCH=${TAG%.*} - -read -p "==> Build 3 docker images with the following tags (latest, $TAG, $TAG_NO_PATCH)? 
y/n" -n 1 -r -echo -if [[ $REPLY =~ ^[Yy]$ ]] -then - docker build -t "dashpay/tenderdash" -t "dashpay/tenderdash:$TAG" -t "dashpay/tenderdash:$TAG_NO_PATCH" .. -fi diff --git a/DOCKER/push.sh b/DOCKER/push.sh deleted file mode 100755 index 5456967a7a..0000000000 --- a/DOCKER/push.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env bash -set -e - -# Get the tag from the version, or try to figure it out. -if [ -z "$TAG" ]; then - TAG=$(awk -F\" '/TMCoreSemVer =/ { print $2; exit }' < ../version/version.go) -fi -if [ -z "$TAG" ]; then - echo "Please specify a tag." - exit 1 -fi - -TAG_NO_PATCH=${TAG%.*} - -read -p "==> Push 3 docker images with the following tags (latest, $TAG, $TAG_NO_PATCH)? y/n" -n 1 -r -echo -if [[ $REPLY =~ ^[Yy]$ ]] -then - docker push "dashpay/tenderdash:latest" - docker push "dashpay/tenderdash:$TAG" - docker push "dashpay/tenderdash:$TAG_NO_PATCH" -fi diff --git a/Makefile b/Makefile index 4c8f774b04..537c8d2491 100644 --- a/Makefile +++ b/Makefile @@ -109,34 +109,47 @@ $(BUILDDIR)/: ### Protobuf ### ############################################################################### -proto-all: proto-gen proto-lint proto-check-breaking -.PHONY: proto-all +check-proto-deps: +ifeq (,$(shell which buf)) + $(error "buf is required for Protobuf building, linting and breakage checking. See https://docs.buf.build/installation for installation instructions.") +endif +ifeq (,$(shell which protoc-gen-gogofaster)) + $(error "gogofaster plugin for protoc is required. Run 'go install github.com/gogo/protobuf/protoc-gen-gogofaster@latest' to install") +endif +.PHONY: check-proto-deps -proto-gen: - @echo "Generating Go packages for .proto files" - @$(DOCKER_PROTO) sh ./scripts/protocgen.sh +check-proto-format-deps: +ifeq (,$(shell which clang-format)) + $(error "clang-format is required for Protobuf formatting. See instructions for your platform on how to install it.") +endif +.PHONY: check-proto-format-deps + +proto-gen: check-proto-deps + @echo "Generating Protobuf files" + @buf generate + @mv ./proto/tendermint/abci/types.pb.go ./abci/types/ .PHONY: proto-gen -proto-lint: - @echo "Running lint checks for .proto files" - @$(DOCKER_PROTO) buf lint --error-format=json +# These targets are provided for convenience and are intended for local +# execution only. +proto-lint: check-proto-deps + @echo "Linting Protobuf files" + @buf lint .PHONY: proto-lint -proto-format: - @echo "Formatting .proto files" - @$(DOCKER_PROTO) find ./ -not -path "./third_party/*" -name '*.proto' -exec clang-format -i {} \; +proto-format: check-proto-format-deps + @echo "Formatting Protobuf files" + @find . -name '*.proto' -path "./proto/*" -exec clang-format -i {} \; .PHONY: proto-format -proto-check-breaking: - @echo "Checking for breaking changes in .proto files" - @$(DOCKER_PROTO) buf breaking --against .git#branch=$(BASE_BRANCH) +proto-check-breaking: check-proto-deps + @echo "Checking for breaking changes in Protobuf files against local branch" + @echo "Note: This is only useful if your changes have not yet been committed." 
+ @echo " Otherwise read up on buf's \"breaking\" command usage:" + @echo " https://docs.buf.build/breaking/usage" + @buf breaking --against ".git" .PHONY: proto-check-breaking -proto-check-breaking-ci: - @echo "Checking for breaking changes in .proto files" - $(DOCKER_PROTO) buf breaking --against $(HTTPS_GIT)#branch=$(BASE_BRANCH) -.PHONY: proto-check-breaking-ci - ############################################################################### ### Build ABCI ### ############################################################################### @@ -192,7 +205,7 @@ go.sum: go.mod draw_deps: @# requires brew install graphviz or apt-get install graphviz - go get github.com/RobotsAndPencils/goviz + go install github.com/RobotsAndPencils/goviz@latest @goviz -i ${REPO_NAME}/cmd/tendermint -d 3 | dot -Tpng -o dependency-graph.png .PHONY: draw_deps @@ -359,4 +372,4 @@ $(BUILDDIR)/packages.txt:$(GO_TEST_FILES) $(BUILDDIR) split-test-packages:$(BUILDDIR)/packages.txt split -d -n l/$(NUM_SPLIT) $< $<. test-group-%:split-test-packages - cat $(BUILDDIR)/packages.txt.$* | xargs go test -mod=readonly -timeout=15m -race -coverprofile=$(BUILDDIR)/$*.profile.out + cat $(BUILDDIR)/packages.txt.$* | xargs go test -mod=readonly -timeout=5m -race -coverprofile=$(BUILDDIR)/$*.profile.out diff --git a/README.md b/README.md index 711f4dc3d2..b3b166f0f4 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@ ![banner](docs/tendermint-core-image.jpg) [Byzantine-Fault Tolerant](https://en.wikipedia.org/wiki/Byzantine_fault_tolerance) -[State Machines](https://en.wikipedia.org/wiki/State_machine_replication). +[State Machine Replication](https://en.wikipedia.org/wiki/State_machine_replication). Or [Blockchain](), for short. [![version](https://img.shields.io/github/tag/tendermint/tendermint.svg)](https://github.com/dashevo/tenderdash/releases/latest) @@ -20,10 +20,14 @@ Or [Blockchain](), for shor Tendermint Core is a Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language - and securely replicates it on many machines. -For protocol details, see [the specification](https://github.com/tendermint/spec). +For protocol details, refer to the [Tendermint Specification](./spec/README.md). For detailed analysis of the consensus protocol, including safety and liveness proofs, -see our recent paper, "[The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)". +read our paper, "[The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)". + +## Documentation + +Complete documentation can be found on the [website](https://docs.tendermint.com/). ## Releases @@ -33,11 +37,14 @@ Tendermint has been in the production of private and public environments, most n See below for more details about [versioning](#versioning). In any case, if you intend to run Tendermint in production, we're happy to help. You can -contact us [over email](mailto:hello@interchain.berlin) or [join the chat](https://discord.gg/cosmosnetwork). +contact us [over email](mailto:hello@interchain.io) or [join the chat](https://discord.gg/cosmosnetwork). + +More on how releases are conducted can be found [here](./RELEASES.md). ## Security -To report a security vulnerability, see our [bug bounty program](https://hackerone.com/cosmos). +To report a security vulnerability, see our [bug bounty +program](https://hackerone.com/cosmos). For examples of the kinds of bugs we're looking for, see [our security policy](SECURITY.md). We also maintain a dedicated mailing list for security updates. 
We will only ever use this mailing list @@ -50,22 +57,17 @@ to notify you of vulnerabilities and fixes in Tendermint Core. You can subscribe | Requirement | Notes | |-------------|------------------| -| Go version | Go1.16 or higher | - -## Documentation - -Complete documentation can be found on the [website](https://docs.tendermint.com/master/). +| Go version | Go1.17 or higher | ### Install -See the [install instructions](/docs/introduction/install.md). +See the [install instructions](./docs/introduction/install.md). ### Quick Start -- [Single node](/docs/introduction/quick-start.md) -- [Local cluster using docker-compose](/docs/tools/docker-compose.md) -- [Remote cluster using Terraform and Ansible](/docs/tools/terraform-and-ansible.md) -- [Join the Cosmos testnet](https://cosmos.network/testnet) +- [Single node](./docs/introduction/quick-start.md) +- [Local cluster using docker-compose](./docs/tools/docker-compose.md) +- [Remote cluster using Terraform and Ansible](./docs/tools/terraform-and-ansible.md) ## Contributing @@ -73,9 +75,9 @@ Please abide by the [Code of Conduct](CODE_OF_CONDUCT.md) in all interactions. Before contributing to the project, please take a look at the [contributing guidelines](CONTRIBUTING.md) and the [style guide](STYLE_GUIDE.md). You may also find it helpful to read the -[specifications](https://github.com/tendermint/spec), watch the [Developer Sessions](/docs/DEV_SESSIONS.md), +[specifications](./spec/README.md), and familiarize yourself with our -[Architectural Decision Records](https://github.com/tendermint/tendermint/tree/master/docs/architecture). +[Architectural Decision Records (ADRs)](./docs/architecture/README.md) and [Request For Comments (RFCs)](./docs/rfc/README.md). ## Versioning @@ -111,24 +113,23 @@ in [UPGRADING.md](./UPGRADING.md). ## Resources -### Tendermint Core - -For details about the blockchain data structures and the p2p protocols, see the -[Tendermint specification](https://docs.tendermint.com/master/spec/). +### Roadmap -For details on using the software, see the [documentation](/docs/) which is also -hosted at: +We keep a public up-to-date version of our roadmap [here](./docs/roadmap/roadmap.md) -### Tools +### Libraries -Benchmarking is provided by [`tm-load-test`](https://github.com/informalsystems/tm-load-test). -Additional tooling can be found in [/docs/tools](/docs/tools). +- [Cosmos SDK](http://github.com/cosmos/cosmos-sdk); A framework for building applications in Golang +- [Tendermint in Rust](https://github.com/informalsystems/tendermint-rs) +- [ABCI Tower](https://github.com/penumbra-zone/tower-abci) ### Applications -- [Cosmos SDK](http://github.com/cosmos/cosmos-sdk); a cryptocurrency application framework -- [Ethermint](http://github.com/cosmos/ethermint); Ethereum on Tendermint -- [Many more](https://tendermint.com/ecosystem) +- [Cosmos Hub](https://hub.cosmos.network/) +- [Terra](https://www.terra.money/) +- [Celestia](https://celestia.org/) +- [Anoma](https://anoma.network/) +- [Vocdoni](https://docs.vocdoni.io/) ### Research @@ -144,7 +145,7 @@ Tenderdash is maintained by [Dash Core Group](https://www.dash.org/dcg/). If you'd like to work full-time on Tenderdash, [see our Jobs page](https://www.dash.org/dcg/jobs/). Tendermint Core is maintained by [Interchain GmbH](https://interchain.berlin). -If you'd like to work full-time on Tendermint Core, [we're hiring](https://interchain-gmbh.breezy.hr/p/682fb7e8a6f601-software-engineer-tendermint-core)! 
+If you'd like to work full-time on Tendermint Core, [we're hiring](https://interchain-gmbh.breezy.hr/)! Funding for Tendermint Core development comes primarily from the [Interchain Foundation](https://interchain.io), a Swiss non-profit. The Tendermint trademark is owned by [Tendermint Inc.](https://tendermint.com), the for-profit entity diff --git a/RELEASES.md b/RELEASES.md new file mode 100644 index 0000000000..f3bfd20d5c --- /dev/null +++ b/RELEASES.md @@ -0,0 +1,207 @@ +# Releases + +Tendermint uses [semantic versioning](https://semver.org/) with each release following +a `vX.Y.Z` format. The `master` branch is used for active development and thus it's +advisable not to build against it. + +The latest changes are always initially merged into `master`. +Releases are specified using tags and are built from long-lived "backport" branches +that are cut from `master` when the release process begins. +Each release "line" (e.g. 0.34 or 0.33) has its own long-lived backport branch, +and the backport branches have names like `v0.34.x` or `v0.33.x` +(literally, `x`; it is not a placeholder in this case). Tendermint only +maintains the last two releases at a time (the oldest release is predominantly +just security patches). + +## Backporting + +As non-breaking changes land on `master`, they should also be backported +to these backport branches. + +We use Mergify's [backport feature](https://mergify.io/features/backports) to automatically backport +to the needed branch. There should be a label for any backport branch that you'll be targeting. +To notify the bot to backport a pull request, mark the pull request with the label corresponding +to the correct backport branch. For example, to backport to v0.35.x, add the label `S:backport-to-v0.35.x`. +Once the original pull request is merged, the bot will try to cherry-pick the pull request +to the backport branch. If the bot fails to backport, it will open a pull request. +The author of the original pull request is responsible for solving the conflicts and +merging the pull request. + +### Creating a backport branch + +If this is the first release candidate for a major release, you get to have the +honor of creating the backport branch! + +Note that, after creating the backport branch, you'll also need to update the +tags on `master` so that `go mod` is able to order the branches correctly. You +should tag `master` with a "dev" tag that is "greater than" the backport +branches tags. See [#6072](https://github.com/tendermint/tendermint/pull/6072) +for more context. + +In the following example, we'll assume that we're making a backport branch for +the 0.35.x line. + +1. Start on `master` + +2. Create and push the backport branch: + ```sh + git checkout -b v0.35.x + git push origin v0.35.x + ``` + +3. Create a PR to update the documentation directory for the backport branch. + + We only maintain RFC and ADR documents on master, to avoid confusion. + In addition, we rewrite Markdown URLs pointing to master to point to the + backport branch, so that generated documentation will link to the correct + versions of files elsewhere in the repository. For context on the latter, + see https://github.com/tendermint/tendermint/issues/7675. + + To prepare the PR: + ```sh + # Remove the RFC and ADR documents from the backport. + # We only maintain these on master to avoid confusion. + git rm -r docs/rfc docs/architecture + + # Update absolute links to point to the backport. 
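+ # (Per the note above, linkpatch rewrites Markdown links pointing at master
+ # so they point at the backport branch; judging by its name, -skip-path
+ # leaves the listed files untouched.)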
+ go run ./scripts/linkpatch -recur -target v0.35.x -skip-path docs/DOCS_README.md,docs/README.md docs + + # Create and push the PR. + git checkout -b update-docs-v035x + git commit -m "Update docs for v0.35.x backport branch." docs + git push -u origin update-docs-v035x + ``` + + Be sure to merge this PR before making other changes on the newly-created + backport branch. + +After doing these steps, go back to `master` and do the following: + +1. Tag `master` as the dev branch for the _next_ major release and push it up to GitHub. + For example: + ```sh + git tag -a v0.36.0-dev -m "Development base for Tendermint v0.36." + git push origin v0.36.0-dev + ``` + +2. Create a new workflow to run e2e nightlies for the new backport branch. + (See [e2e-nightly-master.yml][e2e] for an example.) + +3. Add a new section to the Mergify config (`.github/mergify.yml`) to enable the + backport bot to work on this branch, and add a corresponding `S:backport-to-v0.35.x` + [label](https://github.com/tendermint/tendermint/labels) so the bot can be triggered. + +4. Add a new section to the Dependabot config (`.github/dependabot.yml`) to + enable automatic update of Go dependencies on this branch. Copy and edit one + of the existing branch configurations to set the correct `target-branch`. + +[e2e]: https://github.com/tendermint/tendermint/blob/master/.github/workflows/e2e-nightly-master.yml + +## Release candidates + +Before creating an official release, especially a major release, we may want to create a +release candidate (RC) for our friends and partners to test out. We use git tags to +create RCs, and we build them off of backport branches. + +Tags for RCs should follow the "standard" release naming conventions, with `-rcX` at the end +(for example, `v0.35.0-rc0`). + +(Note that branches and tags _cannot_ have the same names, so it's important that these branches +have distinct names from the tags/release names.) + +If this is the first RC for a major release, you'll have to make a new backport branch (see above). +Otherwise: + +1. Start from the backport branch (e.g. `v0.35.x`). +2. Run the integration tests and the e2e nightlies + (which can be triggered from the Github UI; + e.g., https://github.com/tendermint/tendermint/actions/workflows/e2e-nightly-34x.yml). +3. Prepare the changelog: + - Move the changes included in `CHANGELOG_PENDING.md` into `CHANGELOG.md`. Each RC should have + it's own changelog section. These will be squashed when the final candidate is released. + - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for + all PRs + - Ensure that `UPGRADING.md` is up-to-date and includes notes on any breaking changes + or other upgrading flows. + - Bump TMVersionDefault version in `version.go` + - Bump P2P and block protocol versions in `version.go`, if necessary. + Check the changelog for breaking changes in these components. + - Bump ABCI protocol version in `version.go`, if necessary +4. Open a PR with these changes against the backport branch. +5. Once these changes have landed on the backport branch, be sure to pull them back down locally. +6. Once you have the changes locally, create the new tag, specifying a name and a tag "message": + `git tag -a v0.35.0-rc0 -m "Release Candidate v0.35.0-rc0` +7. Push the tag back up to origin: + `git push origin v0.35.0-rc0` + Now the tag should be available on the repo's releases page. +8. Future RCs will continue to be built off of this branch. 
+ +Note that this process should only be used for "true" RCs-- +release candidates that, if successful, will be the next release. +For more experimental "RCs," create a new, short-lived branch and tag that instead. + +## Major release + +This major release process assumes that this release was preceded by release candidates. +If there were no release candidates, begin by creating a backport branch, as described above. + +1. Start on the backport branch (e.g. `v0.35.x`) +2. Run integration tests (`make test_integrations`) and the e2e nightlies. +3. Prepare the release: + - "Squash" changes from the changelog entries for the RCs into a single entry, + and add all changes included in `CHANGELOG_PENDING.md`. + (Squashing includes both combining all entries, as well as removing or simplifying + any intra-RC changes. It may also help to alphabetize the entries by package name.) + - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for + all PRs + - Ensure that `UPGRADING.md` is up-to-date and includes notes on any breaking changes + or other upgrading flows. + - Bump TMVersionDefault version in `version.go` + - Bump P2P and block protocol versions in `version.go`, if necessary + - Bump ABCI protocol version in `version.go`, if necessary +4. Open a PR with these changes against the backport branch. +5. Once these changes are on the backport branch, push a tag with prepared release details. + This will trigger the actual release `v0.35.0`. + - `git tag -a v0.35.0 -m 'Release v0.35.0'` + - `git push origin v0.35.0` +6. Make sure that `master` is updated with the latest `CHANGELOG.md`, `CHANGELOG_PENDING.md`, and `UPGRADING.md`. +7. Add the release to the documentation site generator config (see + [DOCS_README.md](./docs/DOCS_README.md) for more details). In summary: + - Start on branch `master`. + - Add a new line at the bottom of [`docs/versions`](./docs/versions) to + ensure the newest release is the default for the landing page. + - Add a new entry to `themeConfig.versions` in + [`docs/.vuepress/config.js`](./docs/.vuepress/config.js) to include the + release in the dropdown versions menu. + - Commit these changes to `master` and backport them into the backport + branch for this release. + +## Minor release (point releases) + +Minor releases are done differently from major releases: They are built off of +long-lived backport branches, rather than from master. As non-breaking changes +land on `master`, they should also be backported into these backport branches. + +Minor releases don't have release candidates by default, although any tricky +changes may merit a release candidate. + +To create a minor release: + +1. Checkout the long-lived backport branch: `git checkout v0.35.x` +2. Run integration tests (`make test_integrations`) and the nightlies. +3. Check out a new branch and prepare the release: + - Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md` + - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all issues + - Run `bash ./scripts/authors.sh` to get a list of authors since the latest release, and add the GitHub aliases of external contributors to the top of the CHANGELOG. To lookup an alias from an email, try `bash ./scripts/authors.sh ` + - Reset the `CHANGELOG_PENDING.md` + - Bump the TMDefaultVersion in `version.go` + - Bump the ABCI version number, if necessary. + (Note that ABCI follows semver, and that ABCI versions are the only versions + which can change during minor releases, and only field additions are valid minor changes.) +4. 
Open a PR with these changes that will land them back on `v0.35.x` +5. Once this change has landed on the backport branch, make sure to pull it locally, then push a tag. + - `git tag -a v0.35.1 -m 'Release v0.35.1'` + - `git push origin v0.35.1` +6. Create a pull request back to master with the CHANGELOG & version changes from the latest release. + - Remove all `R:minor` labels from the pull requests that were included in the release. + - Do not merge the backport branch into master. diff --git a/SECURITY.md b/SECURITY.md index 57d13e565a..133e993c41 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -4,7 +4,7 @@ As part of our [Coordinated Vulnerability Disclosure Policy](https://tendermint.com/security), we operate a [bug -bounty](https://hackerone.com/tendermint). +bounty](https://hackerone.com/cosmos). See the policy for more details on submissions and rewards, and see "Example Vulnerabilities" (below) for examples of the kinds of bugs we're most interested in. ### Guidelines @@ -86,7 +86,7 @@ If you are running older versions of Tendermint Core, we encourage you to upgrad ## Scope -The full scope of our bug bounty program is outlined on our [Hacker One program page](https://hackerone.com/tendermint). Please also note that, in the interest of the safety of our users and staff, a few things are explicitly excluded from scope: +The full scope of our bug bounty program is outlined on our [Hacker One program page](https://hackerone.com/cosmos). Please also note that, in the interest of the safety of our users and staff, a few things are explicitly excluded from scope: * Any third-party services * Findings from physical testing, such as office access diff --git a/UPGRADING.md b/UPGRADING.md index 8972ca6beb..28e44e58c0 100644 --- a/UPGRADING.md +++ b/UPGRADING.md @@ -2,6 +2,170 @@ This guide provides instructions for upgrading to specific versions of Tendermint Core. +## v0.36 + +### ABCI Changes + +#### ABCI++ + +Coming soon... + +#### ABCI Mutex + +In previous versions of ABCI, Tendermint was prevented from making +concurrent calls to ABCI implementations by virtue of mutexes in the +implementation of Tendermint's ABCI infrastructure. These mutexes have +been removed from the current implementation and applications will now +be responsible for managing their own concurrency control. + +To replicate the prior semantics, ensure that ABCI applications have a +single mutex that protects all ABCI method calls from concurrent +access. You can relax these requirements if your application can +provide safe concurrent access via other means. This safety is an +application concern so be very sure to test the application thoroughly +using realistic workloads and the race detector to ensure your +applications remains correct. + +### Config Changes + +- We have added a new, experimental tool to help operators migrate + configuration files created by previous versions of Tendermint. + To try this tool, run: + + ```shell + # Install the tool. + go install github.com/tendermint/tendermint/scripts/confix@latest + + # Run the tool with the old configuration file as input. + # Replace the -config argument with your path. + confix -config ~/.tendermint/config/config.toml -out updated.toml + ``` + + This tool should be able to update configurations from v0.34 and v0.35. We + plan to extend it to handle older configuration files in the future. For now, + it will report an error (without making any changes) if it does not recognize + the version that created the file. 
+ +- The default configuration for a newly-created node now disables indexing for + ABCI event metadata. Existing node configurations that already have indexing + turned on are not affected. Operators who wish to enable indexing for a new + node, however, must now edit the `config.toml` explicitly. + +### RPC Changes + +Tendermint v0.36 adds a new RPC event subscription API. The existing event +subscription API based on websockets is now deprecated. It will continue to +work throughout the v0.36 release, but the `subscribe`, `unsubscribe`, and +`unsubscribe_all` methods, along with websocket support, will be removed in +Tendermint v0.37. Callers currently using these features should migrate as +soon as is practical to the new API. + +To enable the new API, node operators set a new `event-log-window-size` +parameter in the `[rpc]` section of the `config.toml` file. This defines a +duration of time during which the node will log all events published to the +event bus for use by RPC consumers. + +Consumers use the new `events` JSON-RPC method to poll for events matching +their query in the log. Unlike the streaming API, events are not discarded if +the caller is slow, loses its connection, or crashes. As long as the client +recovers before its events expire from the log window, it will be able to +replay and catch up after recovering. Also unlike the streaming API, the client +can tell if it has truly missed events because they have expired from the log. + +The `events` method is a normal JSON-RPC method, and does not require any +non-standard response processing (in contrast with the old `subscribe`). +Clients can modify their query at any time, and no longer need to coordinate +subscribe and unsubscribe calls to handle multiple queries. + +The Go client implementations in the Tendermint Core repository have all been +updated to add a new `Events` method, including the light client proxy. + +A new `rpc/client/eventstream` package has also been added to make it easier +for users to update existing use of the streaming API to use the polling API +The `eventstream` package handles polling and delivers matching events to a +callback. + +For more detailed information, see [ADR 075](https://tinyurl.com/adr075) which +defines and describes the new API in detail. + +### Timeout Parameter Changes + +Tendermint v0.36 updates how the Tendermint consensus timing parameters are +configured. These parameters, `timeout-propose`, `timeout-propose-delta`, +`timeout-prevote`, `timeout-prevote-delta`, `timeout-precommit`, +`timeout-precommit-delta`, `timeout-commit`, and `skip-timeout-commit`, were +previously configured in `config.toml`. These timing parameters have moved and +are no longer configured in the `config.toml` file. These parameters have been +migrated into the `ConsensusParameters`. Nodes with these parameters set in the +local configuration file will see a warning logged on startup indicating that +these parameters are no longer used. + +These parameters have also been pared-down. There are no longer separate +parameters for both the `prevote` and `precommit` phases of Tendermint. The +separate `timeout-prevote` and `timeout-precommit` parameters have been merged +into a single `timeout-vote` parameter that configures both of these similar +phases of the consensus protocol. + +A set of reasonable defaults have been put in place for these new parameters +that will take effect when the node starts up in version v0.36. 
New chains +created using v0.36 and beyond will be able to configure these parameters in the +chain's `genesis.json` file. Chains that upgrade to v0.36 from a previous +compatible version of Tendermint will begin running with the default values. +Upgrading applications that wish to use different values from the defaults for +these parameters may do so by setting the `ConsensusParams.Timeout` field of the +`FinalizeBlock` `ABCI` response. + +As a safety measure in case of unusual timing issues during the upgrade to +v0.36, an operator may override the consensus timeout values for a single node. +Note, however, that these overrides will be removed in Tendermint v0.37. See +[configuration](https://github.com/tendermint/tendermint/blob/master/docs/nodes/configuration.md) +for more information about these overrides. + +For more discussion of this, see [ADR 074](https://tinyurl.com/adr074), which +lays out the reasoning for the changes as well as [RFC +009](https://tinyurl.com/rfc009) for a discussion of the complexities of +upgrading consensus parameters. + +### CLI Changes + +The functionality around resetting a node has been extended to make it safer. The +`unsafe-reset-all` command has been replaced by a `reset` command with four +subcommands: `blockchain`, `peers`, `unsafe-signer` and `unsafe-all`. + +- `tendermint reset blockchain`: Clears a node of all blocks, consensus state, evidence, + and indexed transactions. NOTE: This command does not reset application state. + If you need to rollback the last application state (to recover from application + nondeterminism), see instead the `tendermint rollback` command. +- `tendermint reset peers`: Clears the peer store, which persists information on peers used + by the networking layer. This can be used to get rid of stale addresses or to switch + to a predefined set of static peers. +- `tendermint reset unsafe-signer`: Resets the watermark level of the PrivVal File signer + allowing it to sign votes from the genesis height. This should only be used in testing as + it can lead to the node double signing. +- `tendermint reset unsafe-all`: A summation of the other three commands. This will delete + the entire `data` directory which may include application data as well. + +### Go API Changes + +#### `crypto` Package Cleanup + +The `github.com/tendermint/tendermint/crypto/tmhash` package was removed +to improve clarity. Users are encouraged to use the standard library +`crypto/sha256` package directly. However, as a convenience, some constants +and one function have moved to the Tendermint `crypto` package: + +- The `crypto.Checksum` function returns the sha256 checksum of a + byteslice. This is a wrapper around `sha256.Sum265` from the + standard libary, but provided as a function to ease type + requirements (the library function returns a `[32]byte` rather than + a `[]byte`). +- `tmhash.TruncatedSize` is now `crypto.AddressSize` which was + previously an alias for the same value. +- `tmhash.Size` and `tmhash.BlockSize` are now `crypto.HashSize` and + `crypto.HashSize`. +- `tmhash.SumTruncated` is now available via `crypto.AddressHash` or by + `crypto.Checksum(<...>)[:crypto.AddressSize]` + ## v0.35 ### ABCI Changes @@ -116,11 +280,13 @@ the full RPC interface provided as direct function calls. Import the the node service as in the following: ```go - node := node.NewDefault() //construct the node object - // start and set up the node service +logger := log.NewNopLogger() + +// Construct and start up a node with default settings. 
+node := node.NewDefault(logger) - client := local.New(node.(local.NodeService)) - // use client object to interact with the node +// Construct a local (in-memory) RPC client to the node. +client := local.New(logger, node.(local.NodeService)) ``` ### gRPC Support @@ -217,7 +383,7 @@ Note also that Tendermint 0.34 also requires Go 1.16 or higher. were added to support the new State Sync feature. Previously, syncing a new node to a preexisting network could take days; but with State Sync, new nodes are able to join a network in a matter of seconds. - Read [the spec](https://docs.tendermint.com/master/spec/abci/apps.html#state-sync) + Read [the spec](https://github.com/tendermint/tendermint/blob/master/spec/abci/apps.md) if you want to learn more about State Sync, or if you'd like your application to use it. (If you don't want to support State Sync in your application, you can just implement these new ABCI methods as no-ops, leaving them empty.) @@ -342,7 +508,6 @@ The `bech32` package has moved to the Cosmos SDK: ### CLI The `tendermint lite` command has been renamed to `tendermint light` and has a slightly different API. -See [the docs](https://docs.tendermint.com/master/tendermint-core/light-client-protocol.html#http-proxy) for details. ### Light Client @@ -617,7 +782,7 @@ the compilation tag: Use `cleveldb` tag instead of `gcc` to compile Tendermint with CLevelDB or use `make build_c` / `make install_c` (full instructions can be found at -) + 0 { msg = args[0] } - res, err := client.EchoSync(ctx, msg) + res, err := client.Echo(cmd.Context(), msg) if err != nil { return err } + printResponse(cmd, args, response{ Data: []byte(res.Message), }) + return nil } @@ -465,7 +490,7 @@ func cmdInfo(cmd *cobra.Command, args []string) error { if len(args) == 1 { version = args[0] } - res, err := client.InfoSync(ctx, types.RequestInfo{Version: version}) + res, err := client.Info(cmd.Context(), &types.RequestInfo{Version: version}) if err != nil { return err } @@ -478,28 +503,34 @@ func cmdInfo(cmd *cobra.Command, args []string) error { const codeBad uint32 = 10 // Append a new tx to application -func cmdDeliverTx(cmd *cobra.Command, args []string) error { +func cmdFinalizeBlock(cmd *cobra.Command, args []string) error { if len(args) == 0 { printResponse(cmd, args, response{ Code: codeBad, - Log: "want the tx", + Log: "Must provide at least one transaction", }) return nil } - txBytes, err := stringOrHexToBytes(args[0]) - if err != nil { - return err + txs := make([][]byte, len(args)) + for i, arg := range args { + txBytes, err := stringOrHexToBytes(arg) + if err != nil { + return err + } + txs[i] = txBytes } - res, err := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes}) + res, err := client.FinalizeBlock(cmd.Context(), &types.RequestFinalizeBlock{Txs: txs}) if err != nil { return err } - printResponse(cmd, args, response{ - Code: res.Code, - Data: res.Data, - Info: res.Info, - Log: res.Log, - }) + for _, tx := range res.TxResults { + printResponse(cmd, args, response{ + Code: tx.Code, + Data: tx.Data, + Info: tx.Info, + Log: tx.Log, + }) + } return nil } @@ -516,7 +547,7 @@ func cmdCheckTx(cmd *cobra.Command, args []string) error { if err != nil { return err } - res, err := client.CheckTxSync(ctx, types.RequestCheckTx{Tx: txBytes}) + res, err := client.CheckTx(cmd.Context(), &types.RequestCheckTx{Tx: txBytes}) if err != nil { return err } @@ -531,7 +562,7 @@ func cmdCheckTx(cmd *cobra.Command, args []string) error { // Get application Merkle root hash func cmdCommit(cmd *cobra.Command, 
args []string) error { - res, err := client.CommitSync(ctx) + res, err := client.Commit(cmd.Context()) if err != nil { return err } @@ -556,7 +587,7 @@ func cmdQuery(cmd *cobra.Command, args []string) error { return err } - resQuery, err := client.QuerySync(ctx, types.RequestQuery{ + resQuery, err := client.Query(cmd.Context(), &types.RequestQuery{ Data: queryBytes, Path: flagPath, Height: int64(flagHeight), @@ -579,38 +610,34 @@ func cmdQuery(cmd *cobra.Command, args []string) error { return nil } -func cmdKVStore(cmd *cobra.Command, args []string) error { - logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false) +func makeKVStoreCmd(logger log.Logger) func(*cobra.Command, []string) error { + return func(cmd *cobra.Command, args []string) error { + // Create the application - in memory or persisted to disk + var app types.Application + if flagPersist == "" { + app = kvstore.NewApplication() + } else { + app = kvstore.NewPersistentKVStoreApplication(logger, flagPersist) + } - // Create the application - in memory or persisted to disk - var app types.Application - if flagPersist == "" { - app = kvstore.NewApplication() - } else { - app = kvstore.NewPersistentKVStoreApplication(flagPersist) - app.(*kvstore.PersistentKVStoreApplication).SetLogger(logger.With("module", "kvstore")) - } + // Start the listener + srv, err := server.NewServer(logger.With("module", "abci-server"), flagAddress, flagAbci, app) + if err != nil { + return err + } - // Start the listener - srv, err := server.NewServer(flagAddress, flagAbci, app) - if err != nil { - return err - } - srv.SetLogger(logger.With("module", "abci-server")) - if err := srv.Start(); err != nil { - return err - } + ctx, cancel := signal.NotifyContext(cmd.Context(), syscall.SIGTERM) + defer cancel() - // Stop upon receiving SIGTERM or CTRL-C. - tmos.TrapSignal(logger, func() { - // Cleanup - if err := srv.Stop(); err != nil { - logger.Error("Error while stopping server", "err", err) + if err := srv.Start(ctx); err != nil { + return err } - }) - // Run forever. - select {} + // Run forever. + <-ctx.Done() + return nil + } + } //-------------------------------------------------------------------------------- @@ -618,7 +645,7 @@ func cmdKVStore(cmd *cobra.Command, args []string) error { func printResponse(cmd *cobra.Command, args []string, rsp response) { if flagVerbose { - fmt.Println(">", cmd.Use, strings.Join(args, " ")) + fmt.Println(">", strings.Join(append([]string{cmd.Use}, args...), " ")) } // Always print the status code. 
diff --git a/abci/example/counter/counter.go b/abci/example/counter/counter.go index e1675eaf8e..4bb6f5b404 100644 --- a/abci/example/counter/counter.go +++ b/abci/example/counter/counter.go @@ -1,6 +1,7 @@ package counter import ( + "context" "encoding/binary" "fmt" @@ -25,80 +26,82 @@ func NewApplication(serial bool) *Application { return &Application{serial: serial, CoreChainLockStep: 1} } -func (app *Application) Info(req types.RequestInfo) types.ResponseInfo { - return types.ResponseInfo{Data: fmt.Sprintf("{\"hashes\":%v,\"txs\":%v}", app.hashCount, app.txCount)} +func (app *Application) Info(_ context.Context, _ *types.RequestInfo) (*types.ResponseInfo, error) { + return &types.ResponseInfo{Data: fmt.Sprintf("{\"hashes\":%v,\"txs\":%v}", app.hashCount, app.txCount)}, nil } -func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx { +func (app *Application) CheckTx(_ context.Context, req *types.RequestCheckTx) (*types.ResponseCheckTx, error) { if app.serial { if len(req.Tx) > 8 { - return types.ResponseDeliverTx{ + return &types.ResponseCheckTx{ Code: code.CodeTypeEncodingError, - Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(req.Tx))} - } - tx8 := make([]byte, 8) - copy(tx8[len(tx8)-len(req.Tx):], req.Tx) - txValue := binary.BigEndian.Uint64(tx8) - if txValue != uint64(app.txCount) { - return types.ResponseDeliverTx{ - Code: code.CodeTypeBadNonce, - Log: fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, txValue)} - } - } - app.txCount++ - return types.ResponseDeliverTx{Code: code.CodeTypeOK} -} - -func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx { - if app.serial { - if len(req.Tx) > 8 { - return types.ResponseCheckTx{ - Code: code.CodeTypeEncodingError, - Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(req.Tx))} + Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(req.Tx)), + }, nil } tx8 := make([]byte, 8) copy(tx8[len(tx8)-len(req.Tx):], req.Tx) txValue := binary.BigEndian.Uint64(tx8) if txValue < uint64(app.txCount) { - return types.ResponseCheckTx{ + return &types.ResponseCheckTx{ Code: code.CodeTypeBadNonce, - Log: fmt.Sprintf("Invalid nonce. Expected >= %v, got %v", app.txCount, txValue)} + Log: fmt.Sprintf("Invalid nonce. Expected >= %v, got %v", app.txCount, txValue), + }, nil } } - return types.ResponseCheckTx{Code: code.CodeTypeOK} + return &types.ResponseCheckTx{Code: code.CodeTypeOK}, nil } -func (app *Application) Commit() (resp types.ResponseCommit) { +func (app *Application) Commit(_ context.Context) (*types.ResponseCommit, error) { app.hashCount++ if app.txCount == 0 { - return types.ResponseCommit{} + return &types.ResponseCommit{}, nil } hash := make([]byte, 24) endHash := make([]byte, 8) binary.BigEndian.PutUint64(endHash, uint64(app.txCount)) hash = append(hash, endHash...) 
- return types.ResponseCommit{Data: hash} + return &types.ResponseCommit{Data: hash}, nil } -func (app *Application) Query(reqQuery types.RequestQuery) types.ResponseQuery { +func (app *Application) Query(_ context.Context, reqQuery *types.RequestQuery) (*types.ResponseQuery, error) { switch reqQuery.Path { case "verify-chainlock": - return types.ResponseQuery{Code: 0} + return &types.ResponseQuery{Code: 0}, nil case "hash": - return types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.hashCount))} + return &types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.hashCount))}, nil case "tx": - return types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.txCount))} + return &types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.txCount))}, nil default: - return types.ResponseQuery{Log: fmt.Sprintf("Invalid query path. Expected hash or tx, got %v", reqQuery.Path)} + return &types.ResponseQuery{Log: fmt.Sprintf("Invalid query path. Expected hash or tx, got %v", reqQuery.Path)}, nil } } -func (app *Application) EndBlock(reqEndBlock types.RequestEndBlock) types.ResponseEndBlock { - var resp types.ResponseEndBlock +func (app *Application) FinalizeBlock(_ context.Context, req *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) { + var resp types.ResponseFinalizeBlock + for _, tx := range req.Txs { + if app.serial { + if len(tx) > 8 { + resp.TxResults = append(resp.TxResults, &types.ExecTxResult{ + Code: code.CodeTypeEncodingError, + Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(tx)), + }) + } + tx8 := make([]byte, 8) + copy(tx8[len(tx8)-len(tx):], tx) + txValue := binary.BigEndian.Uint64(tx8) + if txValue != uint64(app.txCount) { + resp.TxResults = append(resp.TxResults, &types.ExecTxResult{ + Code: code.CodeTypeBadNonce, + Log: fmt.Sprintf("Invalid nonce. 
Expected %v, got %v", app.txCount, txValue), + }) + } + } + app.txCount++ + } if app.HasCoreChainLocks { app.CurrentCoreChainLockHeight = app.CurrentCoreChainLockHeight + uint32(app.CoreChainLockStep) coreChainLock := tmtypes.NewMockChainLock(app.CurrentCoreChainLockHeight) resp.NextCoreChainLockUpdate = coreChainLock.ToProto() } - return resp + return &resp, nil } diff --git a/abci/example/example_test.go b/abci/example/example_test.go index 8b8691e371..066d4071d7 100644 --- a/abci/example/example_test.go +++ b/abci/example/example_test.go @@ -6,7 +6,6 @@ import ( "math/rand" "net" "os" - "reflect" "testing" "time" @@ -30,95 +29,69 @@ func init() { } func TestKVStore(t *testing.T) { - fmt.Println("### Testing KVStore") - testStream(t, kvstore.NewApplication()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + logger := log.NewNopLogger() + + t.Log("### Testing KVStore") + testBulk(ctx, t, logger, kvstore.NewApplication()) } func TestBaseApp(t *testing.T) { - fmt.Println("### Testing BaseApp") - testStream(t, types.NewBaseApplication()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + logger := log.NewNopLogger() + + t.Log("### Testing BaseApp") + testBulk(ctx, t, logger, types.NewBaseApplication()) } func TestGRPC(t *testing.T) { - fmt.Println("### Testing GRPC") - testGRPCSync(t, types.NewGRPCApplication(types.NewBaseApplication())) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + + t.Log("### Testing GRPC") + testGRPCSync(ctx, t, logger, types.NewBaseApplication()) } -func testStream(t *testing.T, app types.Application) { - const numDeliverTxs = 20000 +func testBulk(ctx context.Context, t *testing.T, logger log.Logger, app types.Application) { + t.Helper() + + const numDeliverTxs = 700000 socketFile := fmt.Sprintf("test-%08x.sock", rand.Int31n(1<<30)) defer os.Remove(socketFile) socket := fmt.Sprintf("unix://%v", socketFile) - // Start the listener - server := abciserver.NewSocketServer(socket, app) - server.SetLogger(log.TestingLogger().With("module", "abci-server")) - err := server.Start() + server := abciserver.NewSocketServer(logger.With("module", "abci-server"), socket, app) + t.Cleanup(server.Wait) + err := server.Start(ctx) require.NoError(t, err) - t.Cleanup(func() { - if err := server.Stop(); err != nil { - t.Error(err) - } - }) // Connect to the socket - client := abciclient.NewSocketClient(socket, false) - client.SetLogger(log.TestingLogger().With("module", "abci-client")) - err = client.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := client.Stop(); err != nil { - t.Error(err) - } - }) - - done := make(chan struct{}) - counter := 0 - client.SetResponseCallback(func(req *types.Request, res *types.Response) { - // Process response - switch r := res.Value.(type) { - case *types.Response_DeliverTx: - counter++ - if r.DeliverTx.Code != code.CodeTypeOK { - t.Error("DeliverTx failed with ret_code", r.DeliverTx.Code) - } - if counter > numDeliverTxs { - t.Fatalf("Too many DeliverTx responses. 
Got %d, expected %d", counter, numDeliverTxs) - } - if counter == numDeliverTxs { - go func() { - time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow - close(done) - }() - return - } - case *types.Response_Flush: - // ignore - default: - t.Error("Unexpected response type", reflect.TypeOf(res.Value)) - } - }) + client := abciclient.NewSocketClient(logger.With("module", "abci-client"), socket, false) + t.Cleanup(client.Wait) - ctx := context.Background() + err = client.Start(ctx) + require.NoError(t, err) - // Write requests + // Construct request + rfb := &types.RequestFinalizeBlock{Txs: make([][]byte, numDeliverTxs)} for counter := 0; counter < numDeliverTxs; counter++ { - // Send request - _, err = client.DeliverTxAsync(ctx, types.RequestDeliverTx{Tx: []byte("test")}) - require.NoError(t, err) - - // Sometimes send flush messages - if counter%128 == 0 { - err = client.FlushSync(context.Background()) - require.NoError(t, err) - } + rfb.Txs[counter] = []byte("test") + } + // Send bulk request + res, err := client.FinalizeBlock(ctx, rfb) + require.NoError(t, err) + require.Equal(t, numDeliverTxs, len(res.TxResults), "Number of txs doesn't match") + for _, tx := range res.TxResults { + require.Equal(t, tx.Code, code.CodeTypeOK, "Tx failed") } // Send final flush message - _, err = client.FlushAsync(ctx) + err = client.Flush(ctx) require.NoError(t, err) - - <-done } //------------------------- @@ -128,33 +101,25 @@ func dialerFunc(ctx context.Context, addr string) (net.Conn, error) { return tmnet.Connect(addr) } -func testGRPCSync(t *testing.T, app types.ABCIApplicationServer) { - numDeliverTxs := 2000 +func testGRPCSync(ctx context.Context, t *testing.T, logger log.Logger, app types.Application) { + t.Helper() + numDeliverTxs := 680000 socketFile := fmt.Sprintf("/tmp/test-%08x.sock", rand.Int31n(1<<30)) defer os.Remove(socketFile) socket := fmt.Sprintf("unix://%v", socketFile) // Start the listener - server := abciserver.NewGRPCServer(socket, app) - server.SetLogger(log.TestingLogger().With("module", "abci-server")) - if err := server.Start(); err != nil { - t.Fatalf("Error starting GRPC server: %v", err.Error()) - } + server := abciserver.NewGRPCServer(logger.With("module", "abci-server"), socket, app) - t.Cleanup(func() { - if err := server.Stop(); err != nil { - t.Error(err) - } - }) + require.NoError(t, server.Start(ctx)) + t.Cleanup(server.Wait) // Connect to the socket conn, err := grpc.Dial(socket, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithContextDialer(dialerFunc), ) - if err != nil { - t.Fatalf("Error dialing GRPC server: %v", err.Error()) - } + require.NoError(t, err, "Error dialing GRPC server") t.Cleanup(func() { if err := conn.Close(); err != nil { @@ -164,26 +129,17 @@ func testGRPCSync(t *testing.T, app types.ABCIApplicationServer) { client := types.NewABCIApplicationClient(conn) - // Write requests + // Construct request + rfb := types.RequestFinalizeBlock{Txs: make([][]byte, numDeliverTxs)} for counter := 0; counter < numDeliverTxs; counter++ { - // Send request - response, err := client.DeliverTx(context.Background(), &types.RequestDeliverTx{Tx: []byte("test")}) - if err != nil { - t.Fatalf("Error in GRPC DeliverTx: %v", err.Error()) - } - counter++ - if response.Code != code.CodeTypeOK { - t.Error("DeliverTx failed with ret_code", response.Code) - } - if counter > numDeliverTxs { - t.Fatal("Too many DeliverTx responses") - } - t.Log("response", counter) - if counter == numDeliverTxs { - go func() { - time.Sleep(time.Second * 1) 
// Wait for a bit to allow counter overflow - }() - } + rfb.Txs[counter] = []byte("test") + } + // Send request + response, err := client.FinalizeBlock(ctx, &rfb) + require.NoError(t, err, "Error in GRPC FinalizeBlock") + require.Equal(t, numDeliverTxs, len(response.TxResults), "Number of txs returned via GRPC doesn't match") + for _, tx := range response.TxResults { + require.Equal(t, tx.Code, code.CodeTypeOK, "Tx failed") } } diff --git a/abci/example/kvstore/README.md b/abci/example/kvstore/README.md index fee6e35dca..5eed47050d 100644 --- a/abci/example/kvstore/README.md +++ b/abci/example/kvstore/README.md @@ -12,7 +12,7 @@ The app has no replay protection (other than what the mempool provides). ## PersistentKVStoreApplication The PersistentKVStoreApplication wraps the KVStoreApplication -and provides two additional features: +and provides three additional features: 1) persistence of state across app restarts (using Tendermint's ABCI-Handshake mechanism) 2) validator set changes diff --git a/abci/example/kvstore/helpers.go b/abci/example/kvstore/helpers.go index b70b541ea7..ae60f5d202 100644 --- a/abci/example/kvstore/helpers.go +++ b/abci/example/kvstore/helpers.go @@ -1,6 +1,8 @@ package kvstore import ( + "context" + "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/dash/llmq" @@ -26,11 +28,12 @@ func RandValidatorSetUpdate(cnt int) types.ValidatorSetUpdate { // InitKVStore initializes the kvstore app with some data, // which allows tests to pass and is fine as long as you // don't make any tx that modify the validator state -func InitKVStore(app *PersistentKVStoreApplication) { +func InitKVStore(ctx context.Context, app *PersistentKVStoreApplication) error { val := RandValidatorSetUpdate(1) - app.InitChain(types.RequestInitChain{ + _, err := app.InitChain(ctx, &types.RequestInitChain{ ValidatorSet: &val, }) + return err } func randNodeAddrs(n int) []string { diff --git a/abci/example/kvstore/kvstore.go b/abci/example/kvstore/kvstore.go index e2f7f34d28..c1ea46108c 100644 --- a/abci/example/kvstore/kvstore.go +++ b/abci/example/kvstore/kvstore.go @@ -2,17 +2,27 @@ package kvstore import ( "bytes" + "context" + "encoding/base64" "encoding/binary" "encoding/json" "fmt" + "strings" + "sync" + "github.com/gogo/protobuf/proto" dbm "github.com/tendermint/tm-db" "github.com/tendermint/tendermint/abci/example/code" "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/internal/libs/protoio" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/version" ) +const ValidatorSetUpdatePrefix string = "vsu:" + var ( stateKey = []byte("stateKey") kvPairPrefixKey = []byte("kvPairKey:") @@ -65,35 +75,72 @@ var _ types.Application = (*Application)(nil) type Application struct { types.BaseApplication - + mu sync.Mutex state State RetainBlocks int64 // blocks to retain after commit (via ResponseCommit.RetainHeight) + logger log.Logger + + // validator set update + valUpdatesRepo *repository + valSetUpdate types.ValidatorSetUpdate + valsIndex map[string]*types.ValidatorUpdate } func NewApplication() *Application { - state := loadState(dbm.NewMemDB()) - return &Application{state: state} + db := dbm.NewMemDB() + return &Application{ + logger: log.NewNopLogger(), + state: loadState(db), + valsIndex: make(map[string]*types.ValidatorUpdate), + valUpdatesRepo: &repository{db}, + } +} + +func (app *Application) InitChain(_ context.Context, req 
*types.RequestInitChain) (*types.ResponseInitChain, error) { + app.mu.Lock() + defer app.mu.Unlock() + err := app.setValSetUpdate(req.ValidatorSet) + if err != nil { + return nil, err + } + return &types.ResponseInitChain{}, nil } -func (app *Application) Info(req types.RequestInfo) (resInfo types.ResponseInfo) { - return types.ResponseInfo{ +func (app *Application) Info(_ context.Context, req *types.RequestInfo) (*types.ResponseInfo, error) { + app.mu.Lock() + defer app.mu.Unlock() + return &types.ResponseInfo{ Data: fmt.Sprintf("{\"size\":%v}", app.state.Size), Version: version.ABCIVersion, AppVersion: ProtocolVersion, LastBlockHeight: app.state.Height, LastBlockAppHash: app.state.AppHash, - } + }, nil } -// tx is either "key=value" or just arbitrary bytes -func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx { - var key, value string +// tx is either "val:pubkey!power" or "key=value" or just arbitrary bytes +func (app *Application) handleTx(tx []byte) *types.ExecTxResult { + if isValidatorSetUpdateTx(tx) { + err := app.execValidatorSetTx(tx) + if err != nil { + return &types.ExecTxResult{ + Code: code.CodeTypeUnknownError, + Log: err.Error(), + } + } + return &types.ExecTxResult{Code: code.CodeTypeOK} + } + + if isPrepareTx(tx) { + return app.execPrepareTx(tx) + } - parts := bytes.Split(req.Tx, []byte("=")) + var key, value string + parts := bytes.Split(tx, []byte("=")) if len(parts) == 2 { key, value = string(parts[0]), string(parts[1]) } else { - key, value = string(req.Tx), string(req.Tx) + key, value = string(tx), string(tx) } err := app.state.db.Set(prefixKey([]byte(key)), []byte(value)) @@ -114,14 +161,56 @@ func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeli }, } - return types.ResponseDeliverTx{Code: code.CodeTypeOK, Events: events} + return &types.ExecTxResult{Code: code.CodeTypeOK, Events: events} } -func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx { - return types.ResponseCheckTx{Code: code.CodeTypeOK, GasWanted: 1} +func (app *Application) Close() error { + app.mu.Lock() + defer app.mu.Unlock() + + return app.state.db.Close() } -func (app *Application) Commit() types.ResponseCommit { +func (app *Application) FinalizeBlock(_ context.Context, req *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) { + app.mu.Lock() + defer app.mu.Unlock() + + // reset valset changes + app.valSetUpdate = types.ValidatorSetUpdate{} + app.valSetUpdate.ValidatorUpdates = make([]types.ValidatorUpdate, 0) + + // Punish validators who committed equivocation. 
+ for _, ev := range req.ByzantineValidators { + // TODO it seems this code is not needed to keep here + if ev.Type == types.MisbehaviorType_DUPLICATE_VOTE { + proTxHash := crypto.ProTxHash(ev.Validator.ProTxHash) + v, ok := app.valsIndex[proTxHash.String()] + if !ok { + return nil, fmt.Errorf("wanted to punish val %q but can't find it", proTxHash.ShortString()) + } + v.Power = ev.Validator.Power - 1 + } + } + + respTxs := make([]*types.ExecTxResult, len(req.Txs)) + for i, tx := range req.Txs { + respTxs[i] = app.handleTx(tx) + } + + return &types.ResponseFinalizeBlock{ + TxResults: respTxs, + ValidatorSetUpdate: proto.Clone(&app.valSetUpdate).(*types.ValidatorSetUpdate), + }, nil +} + +func (*Application) CheckTx(_ context.Context, req *types.RequestCheckTx) (*types.ResponseCheckTx, error) { + return &types.ResponseCheckTx{Code: code.CodeTypeOK, GasWanted: 1}, nil +} + +func (app *Application) Commit(_ context.Context) (*types.ResponseCommit, error) { + app.mu.Lock() + defer app.mu.Unlock() + // Using a memdb - just return the big endian size of the db appHash := make([]byte, 32) binary.PutVarint(appHash, app.state.Size) @@ -129,52 +218,276 @@ func (app *Application) Commit() types.ResponseCommit { app.state.Height++ saveState(app.state) - resp := types.ResponseCommit{Data: appHash} + resp := &types.ResponseCommit{Data: appHash} if app.RetainBlocks > 0 && app.state.Height >= app.RetainBlocks { resp.RetainHeight = app.state.Height - app.RetainBlocks + 1 } - return resp + return resp, nil } -// Returns an associated value or nil if missing. -func (app *Application) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) { +// Query returns an associated value or nil if missing. +func (app *Application) Query(_ context.Context, reqQuery *types.RequestQuery) (*types.ResponseQuery, error) { + app.mu.Lock() + defer app.mu.Unlock() + switch reqQuery.Path { + case "/vsu": + vsu, err := app.valUpdatesRepo.get() + if err != nil { + return &types.ResponseQuery{ + Code: code.CodeTypeUnknownError, + Log: err.Error(), + }, nil + } + data, err := encodeMsg(vsu) + if err != nil { + return &types.ResponseQuery{ + Code: code.CodeTypeEncodingError, + Log: err.Error(), + }, nil + } + return &types.ResponseQuery{ + Key: reqQuery.Data, + Value: data, + }, nil case "/verify-chainlock": - resQuery.Code = 0 - - return resQuery - default: - if reqQuery.Prove { - value, err := app.state.db.Get(prefixKey(reqQuery.Data)) - if err != nil { - panic(err) - } - if value == nil { - resQuery.Log = "does not exist" - } else { - resQuery.Log = "exists" - } - resQuery.Index = -1 // TODO make Proof return index - resQuery.Key = reqQuery.Data - resQuery.Value = value - resQuery.Height = app.state.Height - - return + return &types.ResponseQuery{ + Code: 0, + }, nil + case "/val": + vu, err := app.valUpdatesRepo.findBy(reqQuery.Data) + if err != nil { + return &types.ResponseQuery{ + Code: code.CodeTypeUnknownError, + Log: err.Error(), + }, nil + } + value, err := encodeMsg(vu) + if err != nil { + return &types.ResponseQuery{ + Code: code.CodeTypeEncodingError, + Log: err.Error(), + }, nil } + return &types.ResponseQuery{ + Key: reqQuery.Data, + Value: value, + }, nil + } - resQuery.Key = reqQuery.Data + if reqQuery.Prove { value, err := app.state.db.Get(prefixKey(reqQuery.Data)) if err != nil { panic(err) } + + resQuery := types.ResponseQuery{ + Index: -1, + Key: reqQuery.Data, + Value: value, + Height: app.state.Height, + } + if value == nil { resQuery.Log = "does not exist" } else { resQuery.Log = "exists" } - 
resQuery.Value = value - resQuery.Height = app.state.Height - return resQuery + return &resQuery, nil } + + value, err := app.state.db.Get(prefixKey(reqQuery.Data)) + if err != nil { + panic(err) + } + + resQuery := types.ResponseQuery{ + Key: reqQuery.Data, + Value: value, + Height: app.state.Height, + } + + if value == nil { + resQuery.Log = "does not exist" + } else { + resQuery.Log = "exists" + } + + return &resQuery, nil +} + +func (app *Application) PrepareProposal(_ context.Context, req *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) { + app.mu.Lock() + defer app.mu.Unlock() + + return &types.ResponsePrepareProposal{ + TxRecords: app.substPrepareTx(req.Txs, req.MaxTxBytes), + }, nil +} + +func (*Application) ProcessProposal(_ context.Context, req *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) { + for _, tx := range req.Txs { + if len(tx) == 0 { + return &types.ResponseProcessProposal{Status: types.ResponseProcessProposal_REJECT}, nil + } + } + return &types.ResponseProcessProposal{Status: types.ResponseProcessProposal_ACCEPT}, nil +} + +//--------------------------------------------- +// update validators + +func (app *Application) ValidatorSet() (*types.ValidatorSetUpdate, error) { + return app.valUpdatesRepo.get() +} + +func (app *Application) execValidatorSetTx(tx []byte) error { + vsu, err := UnmarshalValidatorSetUpdate(tx) + if err != nil { + return err + } + err = app.setValSetUpdate(vsu) + if err != nil { + return err + } + app.valSetUpdate = *vsu + return nil +} + +// MarshalValidatorSetUpdate encodes validator-set-update into protobuf, encode into base64 and add "vsu:" prefix +func MarshalValidatorSetUpdate(vsu *types.ValidatorSetUpdate) ([]byte, error) { + pbData, err := proto.Marshal(vsu) + if err != nil { + return nil, err + } + return []byte(ValidatorSetUpdatePrefix + base64.StdEncoding.EncodeToString(pbData)), nil +} + +// UnmarshalValidatorSetUpdate removes "vsu:" prefix and unmarshal a string into validator-set-update +func UnmarshalValidatorSetUpdate(data []byte) (*types.ValidatorSetUpdate, error) { + l := len(ValidatorSetUpdatePrefix) + data, err := base64.StdEncoding.DecodeString(string(data[l:])) + if err != nil { + return nil, err + } + vsu := new(types.ValidatorSetUpdate) + err = proto.Unmarshal(data, vsu) + return vsu, err +} + +type repository struct { + db dbm.DB +} + +func (r *repository) set(vsu *types.ValidatorSetUpdate) error { + data, err := proto.Marshal(vsu) + if err != nil { + return err + } + return r.db.Set([]byte(ValidatorSetUpdatePrefix), data) +} + +func (r *repository) get() (*types.ValidatorSetUpdate, error) { + data, err := r.db.Get([]byte(ValidatorSetUpdatePrefix)) + if err != nil { + return nil, err + } + vsu := new(types.ValidatorSetUpdate) + err = proto.Unmarshal(data, vsu) + if err != nil { + return nil, err + } + return vsu, nil +} + +func (r *repository) findBy(proTxHash crypto.ProTxHash) (*types.ValidatorUpdate, error) { + vsu, err := r.get() + if err != nil { + return nil, err + } + for _, vu := range vsu.ValidatorUpdates { + if bytes.Equal(vu.ProTxHash, proTxHash) { + return &vu, nil + } + } + return nil, err +} + +func isValidatorSetUpdateTx(tx []byte) bool { + return strings.HasPrefix(string(tx), ValidatorSetUpdatePrefix) +} + +func encodeMsg(data proto.Message) ([]byte, error) { + buf := bytes.NewBufferString("") + w := protoio.NewDelimitedWriter(buf) + _, err := w.WriteMsg(data) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// ----------------------------- 
+// prepare proposal machinery + +const PreparePrefix = "prepare" + +func isPrepareTx(tx []byte) bool { + return bytes.HasPrefix(tx, []byte(PreparePrefix)) +} + +// execPrepareTx is noop. tx data is considered as placeholder +// and is substitute at the PrepareProposal. +func (app *Application) execPrepareTx(tx []byte) *types.ExecTxResult { + // noop + return &types.ExecTxResult{} +} + +// substPrepareTx substitutes all the transactions prefixed with 'prepare' in the +// proposal for transactions with the prefix stripped. +// It marks all of the original transactions as 'REMOVED' so that +// Tendermint will remove them from its mempool. +func (app *Application) substPrepareTx(blockData [][]byte, maxTxBytes int64) []*types.TxRecord { + trs := make([]*types.TxRecord, 0, len(blockData)) + var removed []*types.TxRecord + var totalBytes int64 + for _, tx := range blockData { + txMod := tx + action := types.TxRecord_UNMODIFIED + if isPrepareTx(tx) { + removed = append(removed, &types.TxRecord{ + Tx: tx, + Action: types.TxRecord_REMOVED, + }) + txMod = bytes.TrimPrefix(tx, []byte(PreparePrefix)) + action = types.TxRecord_ADDED + } + totalBytes += int64(len(txMod)) + if totalBytes > maxTxBytes { + break + } + trs = append(trs, &types.TxRecord{ + Tx: txMod, + Action: action, + }) + } + + return append(trs, removed...) +} + +func (app *Application) setValSetUpdate(valSetUpdate *types.ValidatorSetUpdate) error { + err := app.valUpdatesRepo.set(valSetUpdate) + if err != nil { + return err + } + app.valsIndex = make(map[string]*types.ValidatorUpdate) + for i, v := range valSetUpdate.ValidatorUpdates { + app.valsIndex[proTxHashString(v.ProTxHash)] = &valSetUpdate.ValidatorUpdates[i] + } + return nil +} + +func proTxHashString(proTxHash crypto.ProTxHash) string { + return proTxHash.String() } diff --git a/abci/example/kvstore/kvstore_test.go b/abci/example/kvstore/kvstore_test.go index a06c8a7912..922e3b25d4 100644 --- a/abci/example/kvstore/kvstore_test.go +++ b/abci/example/kvstore/kvstore_test.go @@ -4,10 +4,10 @@ import ( "bytes" "context" "fmt" - "io/ioutil" "sort" "testing" + "github.com/fortytw2/leaktest" "github.com/stretchr/testify/require" abciclient "github.com/tendermint/tendermint/abci/client" @@ -16,7 +16,6 @@ import ( "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" - tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) const ( @@ -24,38 +23,44 @@ const ( testValue = "def" ) -var ctx = context.Background() - -func testKVStore(t *testing.T, app types.Application, tx []byte, key, value string) { - req := types.RequestDeliverTx{Tx: tx} - ar := app.DeliverTx(req) - require.False(t, ar.IsErr(), ar) +func testKVStore(ctx context.Context, t *testing.T, app types.Application, tx []byte, key, value string) { + req := &types.RequestFinalizeBlock{Txs: [][]byte{tx}} + ar, err := app.FinalizeBlock(ctx, req) + require.NoError(t, err) + require.Equal(t, 1, len(ar.TxResults)) + require.False(t, ar.TxResults[0].IsErr()) // repeating tx doesn't raise error - ar = app.DeliverTx(req) - require.False(t, ar.IsErr(), ar) + ar, err = app.FinalizeBlock(ctx, req) + require.NoError(t, err) + require.Equal(t, 1, len(ar.TxResults)) + require.False(t, ar.TxResults[0].IsErr()) // commit - app.Commit() + _, err = app.Commit(ctx) + require.NoError(t, err) - info := app.Info(types.RequestInfo{}) + info, err := app.Info(ctx, &types.RequestInfo{}) + require.NoError(t, err) require.NotZero(t, info.LastBlockHeight) // make 
sure query is fine - resQuery := app.Query(types.RequestQuery{ + resQuery, err := app.Query(ctx, &types.RequestQuery{ Path: "/store", Data: []byte(key), }) + require.NoError(t, err) require.Equal(t, code.CodeTypeOK, resQuery.Code) require.Equal(t, key, string(resQuery.Key)) require.Equal(t, value, string(resQuery.Value)) require.EqualValues(t, info.LastBlockHeight, resQuery.Height) // make sure proof is fine - resQuery = app.Query(types.RequestQuery{ + resQuery, err = app.Query(ctx, &types.RequestQuery{ Path: "/store", Data: []byte(key), Prove: true, }) + require.NoError(t, err) require.EqualValues(t, code.CodeTypeOK, resQuery.Code) require.Equal(t, key, string(resQuery.Key)) require.Equal(t, value, string(resQuery.Value)) @@ -63,43 +68,55 @@ func testKVStore(t *testing.T, app types.Application, tx []byte, key, value stri } func TestKVStoreKV(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + kvstore := NewApplication() key := testKey value := key tx := []byte(key) - testKVStore(t, kvstore, tx, key, value) + testKVStore(ctx, t, kvstore, tx, key, value) value = testValue tx = []byte(key + "=" + value) - testKVStore(t, kvstore, tx, key, value) + testKVStore(ctx, t, kvstore, tx, key, value) } func TestPersistentKVStoreKV(t *testing.T) { - dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO - if err != nil { - t.Fatal(err) - } - kvstore := NewPersistentKVStoreApplication(dir) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + dir := t.TempDir() + logger := log.NewNopLogger() + + kvstore := NewPersistentKVStoreApplication(logger, dir) key := testKey value := key tx := []byte(key) - testKVStore(t, kvstore, tx, key, value) + testKVStore(ctx, t, kvstore, tx, key, value) value = testValue tx = []byte(key + "=" + value) - testKVStore(t, kvstore, tx, key, value) + testKVStore(ctx, t, kvstore, tx, key, value) } func TestPersistentKVStoreInfo(t *testing.T) { - dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO - if err != nil { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + dir := t.TempDir() + logger := log.NewNopLogger() + + kvstore := NewPersistentKVStoreApplication(logger, dir) + if err := InitKVStore(ctx, kvstore); err != nil { t.Fatal(err) } - kvstore := NewPersistentKVStoreApplication(dir) - InitKVStore(kvstore) height := int64(0) - resInfo := kvstore.Info(types.RequestInfo{}) + resInfo, err := kvstore.Info(ctx, &types.RequestInfo{}) + if err != nil { + t.Fatal(err) + } + if resInfo.LastBlockHeight != height { t.Fatalf("expected height of %d, got %d", height, resInfo.LastBlockHeight) } @@ -107,14 +124,19 @@ func TestPersistentKVStoreInfo(t *testing.T) { // make and apply block height = int64(1) hash := []byte("foo") - header := tmproto.Header{ - Height: height, + if _, err := kvstore.FinalizeBlock(ctx, &types.RequestFinalizeBlock{Hash: hash, Height: height}); err != nil { + t.Fatal(err) } - kvstore.BeginBlock(types.RequestBeginBlock{Hash: hash, Header: header}) - kvstore.EndBlock(types.RequestEndBlock{Height: header.Height}) - kvstore.Commit() - resInfo = kvstore.Info(types.RequestInfo{}) + if _, err := kvstore.Commit(ctx); err != nil { + t.Fatal(err) + + } + + resInfo, err = kvstore.Info(ctx, &types.RequestInfo{}) + if err != nil { + t.Fatal(err) + } if resInfo.LastBlockHeight != height { t.Fatalf("expected height of %d, got %d", height, resInfo.LastBlockHeight) } @@ -123,11 +145,10 @@ func TestPersistentKVStoreInfo(t *testing.T) { // add a validator, remove a validator, 
update a validator func TestValUpdates(t *testing.T) { - dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO - if err != nil { - t.Fatal(err) - } - kvstore := NewPersistentKVStoreApplication(dir) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + kvstore := NewApplication() // init with some validators total := 10 @@ -136,26 +157,30 @@ func TestValUpdates(t *testing.T) { initVals := RandValidatorSetUpdate(nInit) // initialize with the first nInit - kvstore.InitChain(types.RequestInitChain{ + _, err := kvstore.InitChain(ctx, &types.RequestInitChain{ ValidatorSet: &initVals, }) + if err != nil { + t.Fatal(err) + } kvVals, err := kvstore.ValidatorSet() require.NoError(t, err) - valSetEqualTest(t, *kvVals, initVals) + valSetEqualTest(t, kvVals, &initVals) tx, err := MarshalValidatorSetUpdate(&fullVals) require.NoError(t, err) // change the validator set to the full validator set - makeApplyBlock(t, kvstore, 1, fullVals, tx) + makeApplyBlock(ctx, t, kvstore, 1, fullVals, tx) kvVals, err = kvstore.ValidatorSet() require.NoError(t, err) - valSetEqualTest(t, *kvVals, fullVals) + valSetEqualTest(t, kvVals, &fullVals) } func makeApplyBlock( + ctx context.Context, t *testing.T, kvstore types.Application, heightInt int, @@ -164,24 +189,23 @@ func makeApplyBlock( // make and apply block height := int64(heightInt) hash := []byte("foo") - header := tmproto.Header{ + resFinalizeBlock, err := kvstore.FinalizeBlock(ctx, &types.RequestFinalizeBlock{ + Hash: hash, Height: height, - } - - kvstore.BeginBlock(types.RequestBeginBlock{Hash: hash, Header: header}) - for i, tx := range txs { - r := kvstore.DeliverTx(types.RequestDeliverTx{Tx: tx}) - require.False(t, r.IsErr(), "i=%d, tx=%s, err=%s", i, tx, r.String()) - } - resEndBlock := kvstore.EndBlock(types.RequestEndBlock{Height: header.Height}) - kvstore.Commit() + Txs: txs, + }) + require.NoError(t, err) - valSetEqualTest(t, diff, *resEndBlock.ValidatorSetUpdate) + _, err = kvstore.Commit(ctx) + require.NoError(t, err) + valSetEqualTest(t, &diff, resFinalizeBlock.ValidatorSetUpdate) } // order doesn't matter func valsEqualTest(t *testing.T, vals1, vals2 []types.ValidatorUpdate) { + t.Helper() + require.Equal(t, len(vals1), len(vals2), "vals dont match in len. got %d, expected %d", len(vals2), len(vals1)) sort.Sort(types.ValidatorUpdates(vals1)) sort.Sort(types.ValidatorUpdates(vals2)) @@ -189,153 +213,162 @@ func valsEqualTest(t *testing.T, vals1, vals2 []types.ValidatorUpdate) { v2 := vals2[i] if !v1.PubKey.Equal(v2.PubKey) || v1.Power != v2.Power { - t.Fatalf("vals dont match at index %d. got %X/%d , expected %X/%d", i, *v2.PubKey, v2.Power, *v1.PubKey, v1.Power) + t.Fatalf("vals dont match at index %d. got %X/%d , expected %X/%d", i, v2.PubKey, v2.Power, v1.PubKey, v1.Power) } } } -func valSetEqualTest(t *testing.T, vals1, vals2 types.ValidatorSetUpdate) { +func valSetEqualTest(t *testing.T, vals1, vals2 *types.ValidatorSetUpdate) { + t.Helper() + valsEqualTest(t, vals1.ValidatorUpdates, vals2.ValidatorUpdates) - if !vals1.ThresholdPublicKey.Equal(vals2.ThresholdPublicKey) { - t.Fatalf("val set threshold public key did not match. got %X, expected %X", - vals1.ThresholdPublicKey, vals2.ThresholdPublicKey) - } - if !bytes.Equal(vals1.QuorumHash, vals2.QuorumHash) { - t.Fatalf("val set quorum hash did not match. got %X, expected %X", - vals1.QuorumHash, vals2.QuorumHash) - } + require.True(t, + vals1.ThresholdPublicKey.Equal(vals2.ThresholdPublicKey), + "val set threshold public key did not match. 
got %X, expected %X", + vals1.ThresholdPublicKey, vals2.ThresholdPublicKey, + ) + require.True(t, + bytes.Equal(vals1.QuorumHash, vals2.QuorumHash), + "val set quorum hash did not match. got %X, expected %X", + vals1.QuorumHash, vals2.QuorumHash, + ) } -func makeSocketClientServer(app types.Application, name string) (abciclient.Client, service.Service, error) { +func makeSocketClientServer( + ctx context.Context, + t *testing.T, + logger log.Logger, + app types.Application, + name string, +) (abciclient.Client, service.Service, error) { + t.Helper() + + ctx, cancel := context.WithCancel(ctx) + t.Cleanup(cancel) + t.Cleanup(leaktest.Check(t)) + // Start the listener socket := fmt.Sprintf("unix://%s.sock", name) - logger := log.TestingLogger() - server := abciserver.NewSocketServer(socket, app) - server.SetLogger(logger.With("module", "abci-server")) - if err := server.Start(); err != nil { + server := abciserver.NewSocketServer(logger.With("module", "abci-server"), socket, app) + if err := server.Start(ctx); err != nil { + cancel() return nil, nil, err } // Connect to the socket - client := abciclient.NewSocketClient(socket, false) - client.SetLogger(logger.With("module", "abci-client")) - if err := client.Start(); err != nil { - if err = server.Stop(); err != nil { - return nil, nil, err - } + client := abciclient.NewSocketClient(logger.With("module", "abci-client"), socket, false) + if err := client.Start(ctx); err != nil { + cancel() return nil, nil, err } return client, server, nil } -func makeGRPCClientServer(app types.Application, name string) (abciclient.Client, service.Service, error) { +func makeGRPCClientServer( + ctx context.Context, + t *testing.T, + logger log.Logger, + app types.Application, + name string, +) (abciclient.Client, service.Service, error) { + ctx, cancel := context.WithCancel(ctx) + t.Cleanup(cancel) + t.Cleanup(leaktest.Check(t)) + // Start the listener socket := fmt.Sprintf("unix://%s.sock", name) - logger := log.TestingLogger() - gapp := types.NewGRPCApplication(app) - server := abciserver.NewGRPCServer(socket, gapp) - server.SetLogger(logger.With("module", "abci-server")) - if err := server.Start(); err != nil { + server := abciserver.NewGRPCServer(logger.With("module", "abci-server"), socket, app) + + if err := server.Start(ctx); err != nil { + cancel() return nil, nil, err } - client := abciclient.NewGRPCClient(socket, true) - client.SetLogger(logger.With("module", "abci-client")) - if err := client.Start(); err != nil { - if err := server.Stop(); err != nil { - return nil, nil, err - } + client := abciclient.NewGRPCClient(logger.With("module", "abci-client"), socket, true) + + if err := client.Start(ctx); err != nil { + cancel() return nil, nil, err } return client, server, nil } func TestClientServer(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + logger := log.NewNopLogger() + // set up socket app kvstore := NewApplication() - client, server, err := makeSocketClientServer(kvstore, "kvstore-socket") + client, server, err := makeSocketClientServer(ctx, t, logger, kvstore, "kvstore-socket") require.NoError(t, err) - t.Cleanup(func() { - if err := server.Stop(); err != nil { - t.Error(err) - } - }) - t.Cleanup(func() { - if err := client.Stop(); err != nil { - t.Error(err) - } - }) + t.Cleanup(func() { cancel(); server.Wait() }) + t.Cleanup(func() { cancel(); client.Wait() }) - runClientTests(t, client) + runClientTests(ctx, t, client) // set up grpc app kvstore = NewApplication() - gclient, gserver, err := 
makeGRPCClientServer(kvstore, "/tmp/kvstore-grpc") + gclient, gserver, err := makeGRPCClientServer(ctx, t, logger, kvstore, "/tmp/kvstore-grpc") require.NoError(t, err) - t.Cleanup(func() { - if err := gserver.Stop(); err != nil { - t.Error(err) - } - }) - t.Cleanup(func() { - if err := gclient.Stop(); err != nil { - t.Error(err) - } - }) + t.Cleanup(func() { cancel(); gserver.Wait() }) + t.Cleanup(func() { cancel(); gclient.Wait() }) - runClientTests(t, gclient) + runClientTests(ctx, t, gclient) } -func runClientTests(t *testing.T, client abciclient.Client) { +func runClientTests(ctx context.Context, t *testing.T, client abciclient.Client) { // run some tests.... key := testKey value := key tx := []byte(key) - testClient(t, client, tx, key, value) + testClient(ctx, t, client, tx, key, value) value = testValue tx = []byte(key + "=" + value) - testClient(t, client, tx, key, value) + testClient(ctx, t, client, tx, key, value) } -func testClient(t *testing.T, app abciclient.Client, tx []byte, key, value string) { - ar, err := app.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: tx}) +func testClient(ctx context.Context, t *testing.T, app abciclient.Client, tx []byte, key, value string) { + ar, err := app.FinalizeBlock(ctx, &types.RequestFinalizeBlock{Txs: [][]byte{tx}}) require.NoError(t, err) - require.False(t, ar.IsErr(), ar) - // repeating tx doesn't raise error - ar, err = app.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: tx}) + require.Equal(t, 1, len(ar.TxResults)) + require.False(t, ar.TxResults[0].IsErr()) + // repeating FinalizeBlock doesn't raise error + ar, err = app.FinalizeBlock(ctx, &types.RequestFinalizeBlock{Txs: [][]byte{tx}}) require.NoError(t, err) - require.False(t, ar.IsErr(), ar) + require.Equal(t, 1, len(ar.TxResults)) + require.False(t, ar.TxResults[0].IsErr()) // commit - _, err = app.CommitSync(ctx) + _, err = app.Commit(ctx) require.NoError(t, err) - info, err := app.InfoSync(ctx, types.RequestInfo{}) + info, err := app.Info(ctx, &types.RequestInfo{}) require.NoError(t, err) require.NotZero(t, info.LastBlockHeight) // make sure query is fine - resQuery, err := app.QuerySync(ctx, types.RequestQuery{ + resQuery, err := app.Query(ctx, &types.RequestQuery{ Path: "/store", Data: []byte(key), }) - require.Nil(t, err) + require.NoError(t, err) require.Equal(t, code.CodeTypeOK, resQuery.Code) require.Equal(t, key, string(resQuery.Key)) require.Equal(t, value, string(resQuery.Value)) require.EqualValues(t, info.LastBlockHeight, resQuery.Height) // make sure proof is fine - resQuery, err = app.QuerySync(ctx, types.RequestQuery{ + resQuery, err = app.Query(ctx, &types.RequestQuery{ Path: "/store", Data: []byte(key), Prove: true, }) - require.Nil(t, err) + require.NoError(t, err) require.Equal(t, code.CodeTypeOK, resQuery.Code) require.Equal(t, key, string(resQuery.Key)) require.Equal(t, value, string(resQuery.Value)) diff --git a/abci/example/kvstore/persistent_kvstore.go b/abci/example/kvstore/persistent_kvstore.go index 7e418beb47..f3d39d49a0 100644 --- a/abci/example/kvstore/persistent_kvstore.go +++ b/abci/example/kvstore/persistent_kvstore.go @@ -1,245 +1,42 @@ package kvstore import ( - "bytes" - "encoding/base64" - "strings" + "context" - "github.com/gogo/protobuf/proto" dbm "github.com/tendermint/tm-db" - "github.com/tendermint/tendermint/abci/example/code" "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/internal/libs/protoio" - "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/log" ) -const 
ValidatorSetUpdatePrefix string = "vsu:" - //----------------------------------------- var _ types.Application = (*PersistentKVStoreApplication)(nil) type PersistentKVStoreApplication struct { - mtx sync.Mutex - app *Application - logger log.Logger - - valUpdatesRepo *repository - ValidatorSetUpdates types.ValidatorSetUpdate + *Application } -func NewPersistentKVStoreApplication(dbDir string) *PersistentKVStoreApplication { - const name = "kvstore" - db, err := dbm.NewGoLevelDB(name, dbDir) +func NewPersistentKVStoreApplication(logger log.Logger, dbDir string) *PersistentKVStoreApplication { + db, err := dbm.NewGoLevelDB("kvstore", dbDir) if err != nil { panic(err) } - return &PersistentKVStoreApplication{ - app: &Application{state: loadState(db)}, - logger: log.NewNopLogger(), - - valUpdatesRepo: &repository{db: db}, - } -} - -func (app *PersistentKVStoreApplication) Close() error { - return app.app.state.db.Close() -} - -func (app *PersistentKVStoreApplication) SetLogger(l log.Logger) { - app.logger = l -} - -func (app *PersistentKVStoreApplication) Info(req types.RequestInfo) types.ResponseInfo { - res := app.app.Info(req) - res.LastBlockHeight = app.app.state.Height - res.LastBlockAppHash = app.app.state.AppHash - return res -} - -// DeliverTx will deliver a tx which is either "val:proTxHash!pubkey!power" or "key=value" or just arbitrary bytes -func (app *PersistentKVStoreApplication) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx { - app.mtx.Lock() - defer app.mtx.Unlock() - if isValidatorSetUpdateTx(req.Tx) { - err := app.execValidatorSetTx(req.Tx) - if err != nil { - return types.ResponseDeliverTx{ - Code: code.CodeTypeUnknownError, - Log: err.Error(), - } - } - return types.ResponseDeliverTx{Code: code.CodeTypeOK} - } - return app.app.DeliverTx(req) -} - -func (app *PersistentKVStoreApplication) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx { - return app.app.CheckTx(req) -} -// Commit makes a commit in application's state -func (app *PersistentKVStoreApplication) Commit() types.ResponseCommit { - return app.app.Commit() -} - -// Query when path=/val and data={validator address}, returns the validator update (types.ValidatorUpdate) varint encoded. -// For any other path, returns an associated value or nil if missing. 
-func (app *PersistentKVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) { - switch reqQuery.Path { - case "/vsu": - vsu, err := app.valUpdatesRepo.get() - if err != nil { - return types.ResponseQuery{ - Code: code.CodeTypeUnknownError, - Log: err.Error(), - } - } - data, err := encodeMsg(vsu) - if err != nil { - return types.ResponseQuery{ - Code: code.CodeTypeEncodingError, - Log: err.Error(), - } - } - resQuery.Key = reqQuery.Data - resQuery.Value = data - return - case "/verify-chainlock": - resQuery.Code = 0 - return resQuery - default: - return app.app.Query(reqQuery) - } -} - -// InitChain saves the validators in the merkle tree -func (app *PersistentKVStoreApplication) InitChain(req types.RequestInitChain) types.ResponseInitChain { - err := app.valUpdatesRepo.set(req.ValidatorSet) - if err != nil { - app.logger.Error("error updating validators", "err", err) - return types.ResponseInitChain{} - } - return types.ResponseInitChain{} -} - -// BeginBlock tracks the block hash and header information -func (app *PersistentKVStoreApplication) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock { - app.mtx.Lock() - defer app.mtx.Unlock() - - // reset valset changes - app.ValidatorSetUpdates.ValidatorUpdates = make([]types.ValidatorUpdate, 0) - - return types.ResponseBeginBlock{} -} - -// EndBlock updates the validator set -func (app *PersistentKVStoreApplication) EndBlock(_ types.RequestEndBlock) types.ResponseEndBlock { - app.mtx.Lock() - defer app.mtx.Unlock() - c := proto.Clone(&app.ValidatorSetUpdates).(*types.ValidatorSetUpdate) - return types.ResponseEndBlock{ValidatorSetUpdate: c} -} - -func (app *PersistentKVStoreApplication) ListSnapshots( - req types.RequestListSnapshots) types.ResponseListSnapshots { - return types.ResponseListSnapshots{} -} - -func (app *PersistentKVStoreApplication) LoadSnapshotChunk( - req types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk { - return types.ResponseLoadSnapshotChunk{} -} - -func (app *PersistentKVStoreApplication) OfferSnapshot( - req types.RequestOfferSnapshot) types.ResponseOfferSnapshot { - return types.ResponseOfferSnapshot{Result: types.ResponseOfferSnapshot_ABORT} -} - -func (app *PersistentKVStoreApplication) ApplySnapshotChunk( - req types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk { - return types.ResponseApplySnapshotChunk{Result: types.ResponseApplySnapshotChunk_ABORT} -} - -//--------------------------------------------- -// update validators - -func (app *PersistentKVStoreApplication) ValidatorSet() (*types.ValidatorSetUpdate, error) { - return app.valUpdatesRepo.get() -} - -func (app *PersistentKVStoreApplication) execValidatorSetTx(tx []byte) error { - vsu, err := UnmarshalValidatorSetUpdate(tx) - if err != nil { - return err - } - err = app.valUpdatesRepo.set(vsu) - if err != nil { - return err - } - app.ValidatorSetUpdates = *vsu - return nil -} - -// MarshalValidatorSetUpdate encodes validator-set-update into protobuf, encode into base64 and add "vsu:" prefix -func MarshalValidatorSetUpdate(vsu *types.ValidatorSetUpdate) ([]byte, error) { - pbData, err := proto.Marshal(vsu) - if err != nil { - return nil, err - } - return []byte(ValidatorSetUpdatePrefix + base64.StdEncoding.EncodeToString(pbData)), nil -} - -// UnmarshalValidatorSetUpdate removes "vsu:" prefix and unmarshal a string into validator-set-update -func UnmarshalValidatorSetUpdate(data []byte) (*types.ValidatorSetUpdate, error) { - l := len(ValidatorSetUpdatePrefix) - data, err := 
base64.StdEncoding.DecodeString(string(data[l:])) - if err != nil { - return nil, err - } - vsu := new(types.ValidatorSetUpdate) - err = proto.Unmarshal(data, vsu) - return vsu, err -} - -type repository struct { - db dbm.DB -} - -func (r *repository) set(vsu *types.ValidatorSetUpdate) error { - data, err := proto.Marshal(vsu) - if err != nil { - return err - } - return r.db.Set([]byte(ValidatorSetUpdatePrefix), data) -} - -func (r *repository) get() (*types.ValidatorSetUpdate, error) { - data, err := r.db.Get([]byte(ValidatorSetUpdatePrefix)) - if err != nil { - return nil, err - } - vsu := new(types.ValidatorSetUpdate) - err = proto.Unmarshal(data, vsu) - if err != nil { - return nil, err + return &PersistentKVStoreApplication{ + Application: &Application{ + state: loadState(db), + logger: logger, + valsIndex: make(map[string]*types.ValidatorUpdate), + valUpdatesRepo: &repository{db}, + }, } - return vsu, nil } -func isValidatorSetUpdateTx(tx []byte) bool { - return strings.HasPrefix(string(tx), ValidatorSetUpdatePrefix) +func (app *PersistentKVStoreApplication) OfferSnapshot(_ context.Context, req *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { + return &types.ResponseOfferSnapshot{Result: types.ResponseOfferSnapshot_ABORT}, nil } -func encodeMsg(data proto.Message) ([]byte, error) { - buf := bytes.NewBufferString("") - w := protoio.NewDelimitedWriter(buf) - _, err := w.WriteMsg(data) - if err != nil { - return nil, err - } - return buf.Bytes(), nil +func (app *PersistentKVStoreApplication) ApplySnapshotChunk(_ context.Context, req *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { + return &types.ResponseApplySnapshotChunk{Result: types.ResponseApplySnapshotChunk_ABORT}, nil } diff --git a/abci/server/grpc_server.go b/abci/server/grpc_server.go index 503f0b64f1..0dfee8169d 100644 --- a/abci/server/grpc_server.go +++ b/abci/server/grpc_server.go @@ -1,61 +1,83 @@ package server import ( + "context" "net" "google.golang.org/grpc" "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" "github.com/tendermint/tendermint/libs/service" ) type GRPCServer struct { service.BaseService + logger log.Logger - proto string - addr string - listener net.Listener - server *grpc.Server + proto string + addr string + server *grpc.Server - app types.ABCIApplicationServer + app types.Application } // NewGRPCServer returns a new gRPC ABCI server -func NewGRPCServer(protoAddr string, app types.ABCIApplicationServer) service.Service { +func NewGRPCServer(logger log.Logger, protoAddr string, app types.Application) service.Service { proto, addr := tmnet.ProtocolAndAddress(protoAddr) s := &GRPCServer{ - proto: proto, - addr: addr, - listener: nil, - app: app, + logger: logger, + proto: proto, + addr: addr, + app: app, } - s.BaseService = *service.NewBaseService(nil, "ABCIServer", s) + s.BaseService = *service.NewBaseService(logger, "ABCIServer", s) return s } // OnStart starts the gRPC service. 
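+// The server is served from a background goroutine and shuts down gracefully
+// when the context passed to OnStart is canceled.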
-func (s *GRPCServer) OnStart() error { - +func (s *GRPCServer) OnStart(ctx context.Context) error { ln, err := net.Listen(s.proto, s.addr) if err != nil { return err } - s.listener = ln s.server = grpc.NewServer() - types.RegisterABCIApplicationServer(s.server, s.app) + types.RegisterABCIApplicationServer(s.server, &gRPCApplication{Application: s.app}) - s.Logger.Info("Listening", "proto", s.proto, "addr", s.addr) + s.logger.Info("Listening", "proto", s.proto, "addr", s.addr) go func() { - if err := s.server.Serve(s.listener); err != nil { - s.Logger.Error("Error serving gRPC server", "err", err) + go func() { + <-ctx.Done() + s.server.GracefulStop() + }() + + if err := s.server.Serve(ln); err != nil { + s.logger.Error("error serving gRPC server", "err", err) } }() return nil } // OnStop stops the gRPC server. -func (s *GRPCServer) OnStop() { - s.server.Stop() +func (s *GRPCServer) OnStop() { s.server.Stop() } + +//------------------------------------------------------- + +// gRPCApplication is a gRPC shim for Application +type gRPCApplication struct { + types.Application +} + +func (app *gRPCApplication) Echo(_ context.Context, req *types.RequestEcho) (*types.ResponseEcho, error) { + return &types.ResponseEcho{Message: req.Message}, nil +} + +func (app *gRPCApplication) Flush(_ context.Context, req *types.RequestFlush) (*types.ResponseFlush, error) { + return &types.ResponseFlush{}, nil +} + +func (app *gRPCApplication) Commit(ctx context.Context, req *types.RequestCommit) (*types.ResponseCommit, error) { + return app.Application.Commit(ctx) } diff --git a/abci/server/server.go b/abci/server/server.go index 6dd13ad020..0e0173ca65 100644 --- a/abci/server/server.go +++ b/abci/server/server.go @@ -12,17 +12,18 @@ import ( "fmt" "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" ) -func NewServer(protoAddr, transport string, app types.Application) (service.Service, error) { +func NewServer(logger log.Logger, protoAddr, transport string, app types.Application) (service.Service, error) { var s service.Service var err error switch transport { case "socket": - s = NewSocketServer(protoAddr, app) + s = NewSocketServer(logger, protoAddr, app) case "grpc": - s = NewGRPCServer(protoAddr, types.NewGRPCApplication(app)) + s = NewGRPCServer(logger, protoAddr, app) default: err = fmt.Errorf("unknown server type %s", transport) } diff --git a/abci/server/socket_server.go b/abci/server/socket_server.go index 85539645bf..570ecfb4e7 100644 --- a/abci/server/socket_server.go +++ b/abci/server/socket_server.go @@ -2,15 +2,16 @@ package server import ( "bufio" + "context" + "errors" "fmt" "io" "net" - "os" "runtime" + "sync" "github.com/tendermint/tendermint/abci/types" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - tmlog "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" "github.com/tendermint/tendermint/libs/service" ) @@ -19,236 +20,298 @@ import ( type SocketServer struct { service.BaseService - isLoggerSet bool + logger log.Logger proto string addr string listener net.Listener - connsMtx tmsync.Mutex - conns map[int]net.Conn + connsMtx sync.Mutex + connsClose map[int]func() nextConnID int - appMtx tmsync.Mutex - app types.Application + app types.Application } -func NewSocketServer(protoAddr string, app types.Application) service.Service { +func NewSocketServer(logger log.Logger, protoAddr string, app 
types.Application) service.Service { proto, addr := tmnet.ProtocolAndAddress(protoAddr) s := &SocketServer{ - proto: proto, - addr: addr, - listener: nil, - app: app, - conns: make(map[int]net.Conn), + logger: logger, + proto: proto, + addr: addr, + listener: nil, + app: app, + connsClose: make(map[int]func()), } - s.BaseService = *service.NewBaseService(nil, "ABCIServer", s) + s.BaseService = *service.NewBaseService(logger, "ABCIServer", s) return s } -func (s *SocketServer) SetLogger(l tmlog.Logger) { - s.BaseService.SetLogger(l) - s.isLoggerSet = true -} - -func (s *SocketServer) OnStart() error { +func (s *SocketServer) OnStart(ctx context.Context) error { ln, err := net.Listen(s.proto, s.addr) if err != nil { return err } s.listener = ln - go s.acceptConnectionsRoutine() + go s.acceptConnectionsRoutine(ctx) return nil } func (s *SocketServer) OnStop() { if err := s.listener.Close(); err != nil { - s.Logger.Error("Error closing listener", "err", err) + s.logger.Error("error closing listener", "err", err) } s.connsMtx.Lock() defer s.connsMtx.Unlock() - for id, conn := range s.conns { - delete(s.conns, id) - if err := conn.Close(); err != nil { - s.Logger.Error("Error closing connection", "id", id, "conn", conn, "err", err) - } + + for _, closer := range s.connsClose { + closer() } } -func (s *SocketServer) addConn(conn net.Conn) int { +func (s *SocketServer) addConn(closer func()) int { s.connsMtx.Lock() defer s.connsMtx.Unlock() connID := s.nextConnID s.nextConnID++ - s.conns[connID] = conn - + s.connsClose[connID] = closer return connID } // deletes conn even if close errs -func (s *SocketServer) rmConn(connID int) error { +func (s *SocketServer) rmConn(connID int) { s.connsMtx.Lock() defer s.connsMtx.Unlock() - - conn, ok := s.conns[connID] - if !ok { - return fmt.Errorf("connection %d does not exist", connID) + if closer, ok := s.connsClose[connID]; ok { + closer() + delete(s.connsClose, connID) } - - delete(s.conns, connID) - return conn.Close() } -func (s *SocketServer) acceptConnectionsRoutine() { +func (s *SocketServer) acceptConnectionsRoutine(ctx context.Context) { for { + if ctx.Err() != nil { + return + } + // Accept a connection - s.Logger.Info("Waiting for new connection...") + s.logger.Info("Waiting for new connection...") conn, err := s.listener.Accept() if err != nil { if !s.IsRunning() { return // Ignore error from listener closing. 
}
- s.Logger.Error("Failed to accept connection", "err", err)
+ s.logger.Error("Failed to accept connection", "err", err)
continue
}

- s.Logger.Info("Accepted a new connection")
+ cctx, ccancel := context.WithCancel(ctx)
+ connID := s.addConn(ccancel)

- connID := s.addConn(conn)
+ s.logger.Info("Accepted a new connection", "id", connID)

- closeConn := make(chan error, 2) // Push to signal connection closed
responses := make(chan *types.Response, 1000) // A channel to buffer responses

+ once := &sync.Once{}
+ closer := func(err error) {
+ ccancel()
+ once.Do(func() {
+ if cerr := conn.Close(); cerr != nil {
+ s.logger.Error("error closing connection",
+ "id", connID,
+ "close_err", cerr,
+ "err", err)
+ }
+ s.rmConn(connID)
+
+ switch {
+ case errors.Is(err, context.Canceled):
+ s.logger.Error("Connection terminated",
+ "id", connID,
+ "err", err)
+ case errors.Is(err, context.DeadlineExceeded):
+ s.logger.Error("Connection encountered timeout",
+ "id", connID,
+ "err", err)
+ case errors.Is(err, io.EOF):
+ s.logger.Error("Connection was closed by client",
+ "id", connID)
+ case err != nil:
+ s.logger.Error("Connection error",
+ "id", connID,
+ "err", err)
+ default:
+ s.logger.Error("Connection was closed",
+ "id", connID)
+ }
+ })
+ }
+
// Read requests from conn and deal with them
- go s.handleRequests(closeConn, conn, responses)
+ go s.handleRequests(cctx, closer, conn, responses)
// Pull responses from 'responses' and write them to conn.
- go s.handleResponses(closeConn, conn, responses)
-
- // Wait until signal to close connection
- go s.waitForClose(closeConn, connID)
- }
-}
-
-func (s *SocketServer) waitForClose(closeConn chan error, connID int) {
- err := <-closeConn
- switch {
- case err == io.EOF:
- s.Logger.Error("Connection was closed by client")
- case err != nil:
- s.Logger.Error("Connection error", "err", err)
- default:
- // never happens
- s.Logger.Error("Connection was closed")
- }
-
- // Close the connection
- if err := s.rmConn(connID); err != nil {
- s.Logger.Error("Error closing connection", "err", err)
+ go s.handleResponses(cctx, closer, conn, responses)
}
}

// Read requests from conn and deal with them
-func (s *SocketServer) handleRequests(closeConn chan error, conn io.Reader, responses chan<- *types.Response) {
- var count int
+func (s *SocketServer) handleRequests(ctx context.Context, closer func(error), conn io.Reader, responses chan<- *types.Response) {
var bufReader = bufio.NewReader(conn)

defer func() {
// make sure to recover from any app-related panics to allow proper socket cleanup
- r := recover()
- if r != nil {
+ if r := recover(); r != nil {
const size = 64 << 10
buf := make([]byte, size)
buf = buf[:runtime.Stack(buf, false)]
- err := fmt.Errorf("recovered from panic: %v\n%s", r, buf)
- if !s.isLoggerSet {
- fmt.Fprintln(os.Stderr, err)
- }
- closeConn <- err
- s.appMtx.Unlock()
+ closer(fmt.Errorf("recovered from panic: %v\n%s", r, buf))
}
}()

for {
+ req := &types.Request{}
+ if err := types.ReadMessage(bufReader, req); err != nil {
+ closer(fmt.Errorf("error reading message: %w", err))
+ return
+ }

- var req = &types.Request{}
- err := types.ReadMessage(bufReader, req)
+ resp, err := s.processRequest(ctx, req)
if err != nil {
- if err == io.EOF {
- closeConn <- err
- } else {
- closeConn <- fmt.Errorf("error reading message: %w", err)
- }
+ closer(err)
return
}
- s.appMtx.Lock()
- count++
- s.handleRequest(req, responses)
- s.appMtx.Unlock()
+
+ select {
+ case <-ctx.Done():
+ closer(ctx.Err())
+ return
+ case responses <- resp:
+ }
}
}

-func (s 
*SocketServer) handleRequest(req *types.Request, responses chan<- *types.Response) { +func (s *SocketServer) processRequest(ctx context.Context, req *types.Request) (*types.Response, error) { switch r := req.Value.(type) { case *types.Request_Echo: - responses <- types.ToResponseEcho(r.Echo.Message) + return types.ToResponseEcho(r.Echo.Message), nil case *types.Request_Flush: - responses <- types.ToResponseFlush() + return types.ToResponseFlush(), nil case *types.Request_Info: - res := s.app.Info(*r.Info) - responses <- types.ToResponseInfo(res) - case *types.Request_DeliverTx: - res := s.app.DeliverTx(*r.DeliverTx) - responses <- types.ToResponseDeliverTx(res) + res, err := s.app.Info(ctx, r.Info) + if err != nil { + return nil, err + } + + return types.ToResponseInfo(res), nil case *types.Request_CheckTx: - res := s.app.CheckTx(*r.CheckTx) - responses <- types.ToResponseCheckTx(res) + res, err := s.app.CheckTx(ctx, r.CheckTx) + if err != nil { + return nil, err + } + return types.ToResponseCheckTx(res), nil case *types.Request_Commit: - res := s.app.Commit() - responses <- types.ToResponseCommit(res) + res, err := s.app.Commit(ctx) + if err != nil { + return nil, err + } + return types.ToResponseCommit(res), nil case *types.Request_Query: - res := s.app.Query(*r.Query) - responses <- types.ToResponseQuery(res) + res, err := s.app.Query(ctx, r.Query) + if err != nil { + return nil, err + } + return types.ToResponseQuery(res), nil case *types.Request_InitChain: - res := s.app.InitChain(*r.InitChain) - responses <- types.ToResponseInitChain(res) - case *types.Request_BeginBlock: - res := s.app.BeginBlock(*r.BeginBlock) - responses <- types.ToResponseBeginBlock(res) - case *types.Request_EndBlock: - res := s.app.EndBlock(*r.EndBlock) - responses <- types.ToResponseEndBlock(res) + res, err := s.app.InitChain(ctx, r.InitChain) + if err != nil { + return nil, err + } + return types.ToResponseInitChain(res), nil case *types.Request_ListSnapshots: - res := s.app.ListSnapshots(*r.ListSnapshots) - responses <- types.ToResponseListSnapshots(res) + res, err := s.app.ListSnapshots(ctx, r.ListSnapshots) + if err != nil { + return nil, err + } + return types.ToResponseListSnapshots(res), nil case *types.Request_OfferSnapshot: - res := s.app.OfferSnapshot(*r.OfferSnapshot) - responses <- types.ToResponseOfferSnapshot(res) + res, err := s.app.OfferSnapshot(ctx, r.OfferSnapshot) + if err != nil { + return nil, err + } + return types.ToResponseOfferSnapshot(res), nil + case *types.Request_PrepareProposal: + res, err := s.app.PrepareProposal(ctx, r.PrepareProposal) + if err != nil { + return nil, err + } + return types.ToResponsePrepareProposal(res), nil + case *types.Request_ProcessProposal: + res, err := s.app.ProcessProposal(ctx, r.ProcessProposal) + if err != nil { + return nil, err + } + return types.ToResponseProcessProposal(res), nil case *types.Request_LoadSnapshotChunk: - res := s.app.LoadSnapshotChunk(*r.LoadSnapshotChunk) - responses <- types.ToResponseLoadSnapshotChunk(res) + res, err := s.app.LoadSnapshotChunk(ctx, r.LoadSnapshotChunk) + if err != nil { + return nil, err + } + return types.ToResponseLoadSnapshotChunk(res), nil case *types.Request_ApplySnapshotChunk: - res := s.app.ApplySnapshotChunk(*r.ApplySnapshotChunk) - responses <- types.ToResponseApplySnapshotChunk(res) + res, err := s.app.ApplySnapshotChunk(ctx, r.ApplySnapshotChunk) + if err != nil { + return nil, err + } + return types.ToResponseApplySnapshotChunk(res), nil + case *types.Request_ExtendVote: + res, err := 
s.app.ExtendVote(ctx, r.ExtendVote) + if err != nil { + return nil, err + } + return types.ToResponseExtendVote(res), nil + case *types.Request_VerifyVoteExtension: + res, err := s.app.VerifyVoteExtension(ctx, r.VerifyVoteExtension) + if err != nil { + return nil, err + } + return types.ToResponseVerifyVoteExtension(res), nil + case *types.Request_FinalizeBlock: + res, err := s.app.FinalizeBlock(ctx, r.FinalizeBlock) + if err != nil { + return nil, err + } + return types.ToResponseFinalizeBlock(res), nil default: - responses <- types.ToResponseException("Unknown request") + return types.ToResponseException("Unknown request"), errors.New("unknown request type") } } // Pull responses from 'responses' and write them to conn. -func (s *SocketServer) handleResponses(closeConn chan error, conn io.Writer, responses <-chan *types.Response) { +func (s *SocketServer) handleResponses( + ctx context.Context, + closer func(error), + conn io.Writer, + responses <-chan *types.Response, +) { bw := bufio.NewWriter(conn) - for res := range responses { - if err := types.WriteMessage(res, bw); err != nil { - closeConn <- fmt.Errorf("error writing message: %w", err) - return - } - if err := bw.Flush(); err != nil { - closeConn <- fmt.Errorf("error flushing write buffer: %w", err) + for { + select { + case <-ctx.Done(): + closer(ctx.Err()) return + case res := <-responses: + if err := types.WriteMessage(res, bw); err != nil { + closer(fmt.Errorf("error writing message: %w", err)) + return + } + if err := bw.Flush(); err != nil { + closer(fmt.Errorf("error writing message: %w", err)) + return + } } } } diff --git a/abci/tests/client_server_test.go b/abci/tests/client_server_test.go index 62dc6e07e4..a97c0c7c4c 100644 --- a/abci/tests/client_server_test.go +++ b/abci/tests/client_server_test.go @@ -1,27 +1,40 @@ package tests import ( + "context" "testing" + "github.com/fortytw2/leaktest" "github.com/stretchr/testify/assert" abciclientent "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" abciserver "github.com/tendermint/tendermint/abci/server" + "github.com/tendermint/tendermint/libs/log" ) func TestClientServerNoAddrPrefix(t *testing.T) { - addr := "localhost:26658" - transport := "socket" + t.Cleanup(leaktest.Check(t)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + const ( + addr = "localhost:26658" + transport = "socket" + ) app := kvstore.NewApplication() + logger := log.NewTestingLogger(t) - server, err := abciserver.NewServer(addr, transport, app) + server, err := abciserver.NewServer(logger, addr, transport, app) assert.NoError(t, err, "expected no error on NewServer") - err = server.Start() + err = server.Start(ctx) assert.NoError(t, err, "expected no error on server.Start") + t.Cleanup(server.Wait) - client, err := abciclientent.NewClient(addr, transport, true) + client, err := abciclientent.NewClient(logger, addr, transport, true) assert.NoError(t, err, "expected no error on NewClient") - err = client.Start() + err = client.Start(ctx) assert.NoError(t, err, "expected no error on client.Start") + t.Cleanup(client.Wait) } diff --git a/abci/tests/server/client.go b/abci/tests/server/client.go index 55af386861..ed20d3cb07 100644 --- a/abci/tests/server/client.go +++ b/abci/tests/server/client.go @@ -6,15 +6,13 @@ import ( "errors" "fmt" - abcicli "github.com/tendermint/tendermint/abci/client" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/types" 
"github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/dash/llmq" ) -var ctx = context.Background() - -func InitChain(client abcicli.Client) error { +func InitChain(ctx context.Context, client abciclient.Client) error { const total = 10 ld, err := llmq.Generate(crypto.RandProTxHashes(total)) if err != nil { @@ -24,7 +22,7 @@ func InitChain(client abcicli.Client) error { if err != nil { return err } - _, err = client.InitChainSync(context.Background(), types.RequestInitChain{ + _, err = client.InitChain(ctx, &types.RequestInitChain{ ValidatorSet: validatorSet, }) if err != nil { @@ -35,8 +33,8 @@ func InitChain(client abcicli.Client) error { return nil } -func Commit(client abcicli.Client, hashExp []byte) error { - res, err := client.CommitSync(ctx) +func Commit(ctx context.Context, client abciclient.Client, hashExp []byte) error { + res, err := client.Commit(ctx) data := res.Data if err != nil { fmt.Println("Failed test: Commit") @@ -52,27 +50,29 @@ func Commit(client abcicli.Client, hashExp []byte) error { return nil } -func DeliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error { - res, _ := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes}) - code, data, log := res.Code, res.Data, res.Log - if code != codeExp { - fmt.Println("Failed test: DeliverTx") - fmt.Printf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v\n", - code, codeExp, log) - return errors.New("deliverTx error") - } - if !bytes.Equal(data, dataExp) { - fmt.Println("Failed test: DeliverTx") - fmt.Printf("DeliverTx response data was unexpected. Got %X expected %X\n", - data, dataExp) - return errors.New("deliverTx error") +func FinalizeBlock(ctx context.Context, client abciclient.Client, txBytes [][]byte, codeExp []uint32, dataExp []byte) error { + res, _ := client.FinalizeBlock(ctx, &types.RequestFinalizeBlock{Txs: txBytes}) + for i, tx := range res.TxResults { + code, data, log := tx.Code, tx.Data, tx.Log + if code != codeExp[i] { + fmt.Println("Failed test: FinalizeBlock") + fmt.Printf("FinalizeBlock response code was unexpected. Got %v expected %v. Log: %v\n", + code, codeExp, log) + return errors.New("FinalizeBlock error") + } + if !bytes.Equal(data, dataExp) { + fmt.Println("Failed test: FinalizeBlock") + fmt.Printf("FinalizeBlock response data was unexpected. 
Got %X expected %X\n", + data, dataExp) + return errors.New("FinalizeBlock error") + } } - fmt.Println("Passed test: DeliverTx") + fmt.Println("Passed test: FinalizeBlock") return nil } -func CheckTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error { - res, _ := client.CheckTxSync(ctx, types.RequestCheckTx{Tx: txBytes}) +func CheckTx(ctx context.Context, client abciclient.Client, txBytes []byte, codeExp uint32, dataExp []byte) error { + res, _ := client.CheckTx(ctx, &types.RequestCheckTx{Tx: txBytes}) code, data, log := res.Code, res.Data, res.Log if code != codeExp { fmt.Println("Failed test: CheckTx") diff --git a/abci/tests/test_cli/ex1.abci b/abci/tests/test_cli/ex1.abci index e909266ecf..09457189ed 100644 --- a/abci/tests/test_cli/ex1.abci +++ b/abci/tests/test_cli/ex1.abci @@ -1,10 +1,10 @@ echo hello info commit -deliver_tx "abc" +finalize_block "abc" info commit query "abc" -deliver_tx "def=xyz" +finalize_block "def=xyz" "ghi=123" commit query "def" diff --git a/abci/tests/test_cli/ex1.abci.out b/abci/tests/test_cli/ex1.abci.out index 735e4bea2e..01d0150f0f 100644 --- a/abci/tests/test_cli/ex1.abci.out +++ b/abci/tests/test_cli/ex1.abci.out @@ -3,24 +3,24 @@ -> data: hello -> data.hex: 0x68656C6C6F -> info +> info -> code: OK -> data: {"size":0} -> data.hex: 0x7B2273697A65223A307D -> commit +> commit -> code: OK -> data.hex: 0x0000000000000000000000000000000000000000000000000000000000000000 -> deliver_tx "abc" +> finalize_block "abc" -> code: OK -> info +> info -> code: OK -> data: {"size":1} -> data.hex: 0x7B2273697A65223A317D -> commit +> commit -> code: OK -> data.hex: 0x0200000000000000000000000000000000000000000000000000000000000000 @@ -33,12 +33,14 @@ -> value: abc -> value.hex: 616263 -> deliver_tx "def=xyz" +> finalize_block "def=xyz" "ghi=123" +-> code: OK +> finalize_block "def=xyz" "ghi=123" -> code: OK -> commit +> commit -> code: OK --> data.hex: 0x0400000000000000000000000000000000000000000000000000000000000000 +-> data.hex: 0x0600000000000000000000000000000000000000000000000000000000000000 > query "def" -> code: OK diff --git a/abci/tests/test_cli/ex2.abci b/abci/tests/test_cli/ex2.abci index 965ca842c7..90e99c2f90 100644 --- a/abci/tests/test_cli/ex2.abci +++ b/abci/tests/test_cli/ex2.abci @@ -1,7 +1,7 @@ check_tx 0x00 check_tx 0xff -deliver_tx 0x00 +finalize_block 0x00 check_tx 0x00 -deliver_tx 0x01 -deliver_tx 0x04 +finalize_block 0x01 +finalize_block 0x04 info diff --git a/abci/tests/test_cli/ex2.abci.out b/abci/tests/test_cli/ex2.abci.out index 7ef8abbc45..aab0b1966f 100644 --- a/abci/tests/test_cli/ex2.abci.out +++ b/abci/tests/test_cli/ex2.abci.out @@ -4,20 +4,20 @@ > check_tx 0xff -> code: OK -> deliver_tx 0x00 +> finalize_block 0x00 -> code: OK > check_tx 0x00 -> code: OK -> deliver_tx 0x01 +> finalize_block 0x01 -> code: OK -> deliver_tx 0x04 +> finalize_block 0x04 -> code: OK > info -> code: OK --> data: {"hashes":0,"txs":3} --> data.hex: 0x7B22686173686573223A302C22747873223A337D +-> data: {"size":3} +-> data.hex: 0x7B2273697A65223A337D diff --git a/abci/types/application.go b/abci/types/application.go index 2a3cabd8bb..e74b877438 100644 --- a/abci/types/application.go +++ b/abci/types/application.go @@ -1,33 +1,36 @@ package types -import ( - "context" -) +import "context" +//go:generate ../../scripts/mockery_generate.sh Application // Application is an interface that enables any finite, deterministic state machine // to be driven by a blockchain-based replication engine via the ABCI. 
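+// All methods take a context.Context and a pointer to a RequestXxx message and
+// return a pointer to a ResponseXxx message together with an error; Commit takes
+// only a context.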
-// All methods take a RequestXxx argument and return a ResponseXxx argument,
-// except CheckTx/DeliverTx, which take `tx []byte`, and `Commit`, which takes nothing.
type Application interface {
// Info/Query Connection
- Info(RequestInfo) ResponseInfo // Return application info
- Query(RequestQuery) ResponseQuery // Query for state
+ Info(context.Context, *RequestInfo) (*ResponseInfo, error) // Return application info
+ Query(context.Context, *RequestQuery) (*ResponseQuery, error) // Query for state

// Mempool Connection
- CheckTx(RequestCheckTx) ResponseCheckTx // Validate a tx for the mempool
+ CheckTx(context.Context, *RequestCheckTx) (*ResponseCheckTx, error) // Validate a tx for the mempool

// Consensus Connection
- InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain w validators/other info from TendermintCore
- BeginBlock(RequestBeginBlock) ResponseBeginBlock // Signals the beginning of a block
- DeliverTx(RequestDeliverTx) ResponseDeliverTx // Deliver a tx for full processing
- EndBlock(RequestEndBlock) ResponseEndBlock // Signals the end of a block, returns changes to the validator set
- Commit() ResponseCommit // Commit the state and return the application Merkle root hash
+ InitChain(context.Context, *RequestInitChain) (*ResponseInitChain, error) // Initialize blockchain with validators/other info from Tendermint Core
+ PrepareProposal(context.Context, *RequestPrepareProposal) (*ResponsePrepareProposal, error)
+ ProcessProposal(context.Context, *RequestProcessProposal) (*ResponseProcessProposal, error)
+ // Commit the state and return the application Merkle root hash
+ Commit(context.Context) (*ResponseCommit, error)
+ // Create application specific vote extension
+ ExtendVote(context.Context, *RequestExtendVote) (*ResponseExtendVote, error)
+ // Verify application's vote extension data
+ VerifyVoteExtension(context.Context, *RequestVerifyVoteExtension) (*ResponseVerifyVoteExtension, error)
+ // Deliver the decided block with its txs to the Application
+ FinalizeBlock(context.Context, *RequestFinalizeBlock) (*ResponseFinalizeBlock, error)

// State Sync Connection
- ListSnapshots(RequestListSnapshots) ResponseListSnapshots // List available snapshots
- OfferSnapshot(RequestOfferSnapshot) ResponseOfferSnapshot // Offer a snapshot to the application
- LoadSnapshotChunk(RequestLoadSnapshotChunk) ResponseLoadSnapshotChunk // Load a snapshot chunk
- ApplySnapshotChunk(RequestApplySnapshotChunk) ResponseApplySnapshotChunk // Apply a shapshot chunk
+ ListSnapshots(context.Context, *RequestListSnapshots) (*ResponseListSnapshots, error) // List available snapshots
+ OfferSnapshot(context.Context, *RequestOfferSnapshot) (*ResponseOfferSnapshot, error) // Offer a snapshot to the application
+ LoadSnapshotChunk(context.Context, *RequestLoadSnapshotChunk) (*ResponseLoadSnapshotChunk, error) // Load a snapshot chunk
+ ApplySnapshotChunk(context.Context, *RequestApplySnapshotChunk) (*ResponseApplySnapshotChunk, error) // Apply a snapshot chunk
}

//-------------------------------------------------------
@@ -35,140 +38,84 @@ type Application interface {

var _ Application = (*BaseApplication)(nil)

-type BaseApplication struct {
-}
+type BaseApplication struct{}

func NewBaseApplication() *BaseApplication {
return &BaseApplication{}
}

-func (BaseApplication) Info(req RequestInfo) ResponseInfo {
- return ResponseInfo{}
-}
-
-func (BaseApplication) DeliverTx(req RequestDeliverTx) ResponseDeliverTx {
- return ResponseDeliverTx{Code: CodeTypeOK}
-}
-
-func (BaseApplication) CheckTx(req 
RequestCheckTx) ResponseCheckTx { - return ResponseCheckTx{Code: CodeTypeOK} -} - -func (BaseApplication) Commit() ResponseCommit { - return ResponseCommit{} -} - -func (BaseApplication) Query(req RequestQuery) ResponseQuery { - return ResponseQuery{Code: CodeTypeOK} -} - -func (BaseApplication) InitChain(req RequestInitChain) ResponseInitChain { - return ResponseInitChain{} -} - -func (BaseApplication) BeginBlock(req RequestBeginBlock) ResponseBeginBlock { - return ResponseBeginBlock{} -} - -func (BaseApplication) EndBlock(req RequestEndBlock) ResponseEndBlock { - return ResponseEndBlock{} -} - -func (BaseApplication) ListSnapshots(req RequestListSnapshots) ResponseListSnapshots { - return ResponseListSnapshots{} -} - -func (BaseApplication) OfferSnapshot(req RequestOfferSnapshot) ResponseOfferSnapshot { - return ResponseOfferSnapshot{} -} - -func (BaseApplication) LoadSnapshotChunk(req RequestLoadSnapshotChunk) ResponseLoadSnapshotChunk { - return ResponseLoadSnapshotChunk{} -} - -func (BaseApplication) ApplySnapshotChunk(req RequestApplySnapshotChunk) ResponseApplySnapshotChunk { - return ResponseApplySnapshotChunk{} -} - -//------------------------------------------------------- - -// GRPCApplication is a GRPC wrapper for Application -type GRPCApplication struct { - app Application -} - -func NewGRPCApplication(app Application) *GRPCApplication { - return &GRPCApplication{app} -} - -func (app *GRPCApplication) Echo(ctx context.Context, req *RequestEcho) (*ResponseEcho, error) { - return &ResponseEcho{Message: req.Message}, nil +func (BaseApplication) Info(_ context.Context, req *RequestInfo) (*ResponseInfo, error) { + return &ResponseInfo{}, nil } -func (app *GRPCApplication) Flush(ctx context.Context, req *RequestFlush) (*ResponseFlush, error) { - return &ResponseFlush{}, nil +func (BaseApplication) CheckTx(_ context.Context, req *RequestCheckTx) (*ResponseCheckTx, error) { + return &ResponseCheckTx{Code: CodeTypeOK}, nil } -func (app *GRPCApplication) Info(ctx context.Context, req *RequestInfo) (*ResponseInfo, error) { - res := app.app.Info(*req) - return &res, nil +func (BaseApplication) Commit(_ context.Context) (*ResponseCommit, error) { + return &ResponseCommit{}, nil } -func (app *GRPCApplication) DeliverTx(ctx context.Context, req *RequestDeliverTx) (*ResponseDeliverTx, error) { - res := app.app.DeliverTx(*req) - return &res, nil +func (BaseApplication) ExtendVote(_ context.Context, req *RequestExtendVote) (*ResponseExtendVote, error) { + return &ResponseExtendVote{}, nil } -func (app *GRPCApplication) CheckTx(ctx context.Context, req *RequestCheckTx) (*ResponseCheckTx, error) { - res := app.app.CheckTx(*req) - return &res, nil +func (BaseApplication) VerifyVoteExtension(_ context.Context, req *RequestVerifyVoteExtension) (*ResponseVerifyVoteExtension, error) { + return &ResponseVerifyVoteExtension{ + Status: ResponseVerifyVoteExtension_ACCEPT, + }, nil } -func (app *GRPCApplication) Query(ctx context.Context, req *RequestQuery) (*ResponseQuery, error) { - res := app.app.Query(*req) - return &res, nil +func (BaseApplication) Query(_ context.Context, req *RequestQuery) (*ResponseQuery, error) { + return &ResponseQuery{Code: CodeTypeOK}, nil } -func (app *GRPCApplication) Commit(ctx context.Context, req *RequestCommit) (*ResponseCommit, error) { - res := app.app.Commit() - return &res, nil +func (BaseApplication) InitChain(_ context.Context, req *RequestInitChain) (*ResponseInitChain, error) { + return &ResponseInitChain{}, nil } -func (app *GRPCApplication) InitChain(ctx 
context.Context, req *RequestInitChain) (*ResponseInitChain, error) { - res := app.app.InitChain(*req) - return &res, nil +func (BaseApplication) ListSnapshots(_ context.Context, req *RequestListSnapshots) (*ResponseListSnapshots, error) { + return &ResponseListSnapshots{}, nil } -func (app *GRPCApplication) BeginBlock(ctx context.Context, req *RequestBeginBlock) (*ResponseBeginBlock, error) { - res := app.app.BeginBlock(*req) - return &res, nil +func (BaseApplication) OfferSnapshot(_ context.Context, req *RequestOfferSnapshot) (*ResponseOfferSnapshot, error) { + return &ResponseOfferSnapshot{}, nil } -func (app *GRPCApplication) EndBlock(ctx context.Context, req *RequestEndBlock) (*ResponseEndBlock, error) { - res := app.app.EndBlock(*req) - return &res, nil +func (BaseApplication) LoadSnapshotChunk(_ context.Context, _ *RequestLoadSnapshotChunk) (*ResponseLoadSnapshotChunk, error) { + return &ResponseLoadSnapshotChunk{}, nil } -func (app *GRPCApplication) ListSnapshots( - ctx context.Context, req *RequestListSnapshots) (*ResponseListSnapshots, error) { - res := app.app.ListSnapshots(*req) - return &res, nil +func (BaseApplication) ApplySnapshotChunk(_ context.Context, req *RequestApplySnapshotChunk) (*ResponseApplySnapshotChunk, error) { + return &ResponseApplySnapshotChunk{}, nil } -func (app *GRPCApplication) OfferSnapshot( - ctx context.Context, req *RequestOfferSnapshot) (*ResponseOfferSnapshot, error) { - res := app.app.OfferSnapshot(*req) - return &res, nil +func (BaseApplication) PrepareProposal(_ context.Context, req *RequestPrepareProposal) (*ResponsePrepareProposal, error) { + trs := make([]*TxRecord, 0, len(req.Txs)) + var totalBytes int64 + for _, tx := range req.Txs { + totalBytes += int64(len(tx)) + if totalBytes > req.MaxTxBytes { + break + } + trs = append(trs, &TxRecord{ + Action: TxRecord_UNMODIFIED, + Tx: tx, + }) + } + return &ResponsePrepareProposal{TxRecords: trs}, nil } -func (app *GRPCApplication) LoadSnapshotChunk( - ctx context.Context, req *RequestLoadSnapshotChunk) (*ResponseLoadSnapshotChunk, error) { - res := app.app.LoadSnapshotChunk(*req) - return &res, nil +func (BaseApplication) ProcessProposal(_ context.Context, req *RequestProcessProposal) (*ResponseProcessProposal, error) { + return &ResponseProcessProposal{Status: ResponseProcessProposal_ACCEPT}, nil } -func (app *GRPCApplication) ApplySnapshotChunk( - ctx context.Context, req *RequestApplySnapshotChunk) (*ResponseApplySnapshotChunk, error) { - res := app.app.ApplySnapshotChunk(*req) - return &res, nil +func (BaseApplication) FinalizeBlock(_ context.Context, req *RequestFinalizeBlock) (*ResponseFinalizeBlock, error) { + txs := make([]*ExecTxResult, len(req.Txs)) + for i := range req.Txs { + txs[i] = &ExecTxResult{Code: CodeTypeOK} + } + return &ResponseFinalizeBlock{ + TxResults: txs, + }, nil } diff --git a/abci/types/client.go b/abci/types/client.go deleted file mode 100644 index ab1254f4c2..0000000000 --- a/abci/types/client.go +++ /dev/null @@ -1 +0,0 @@ -package types diff --git a/abci/types/messages.go b/abci/types/messages.go index 74f3cc75c8..80ab195259 100644 --- a/abci/types/messages.go +++ b/abci/types/messages.go @@ -4,6 +4,7 @@ import ( "io" "github.com/gogo/protobuf/proto" + "github.com/tendermint/tendermint/internal/libs/protoio" ) @@ -38,75 +39,87 @@ func ToRequestFlush() *Request { } } -func ToRequestInfo(req RequestInfo) *Request { +func ToRequestInfo(req *RequestInfo) *Request { return &Request{ - Value: &Request_Info{&req}, + Value: &Request_Info{req}, } } -func 
ToRequestDeliverTx(req RequestDeliverTx) *Request { +func ToRequestCheckTx(req *RequestCheckTx) *Request { return &Request{ - Value: &Request_DeliverTx{&req}, + Value: &Request_CheckTx{req}, } } -func ToRequestCheckTx(req RequestCheckTx) *Request { +func ToRequestCommit() *Request { return &Request{ - Value: &Request_CheckTx{&req}, + Value: &Request_Commit{&RequestCommit{}}, } } -func ToRequestCommit() *Request { +func ToRequestQuery(req *RequestQuery) *Request { return &Request{ - Value: &Request_Commit{&RequestCommit{}}, + Value: &Request_Query{req}, + } +} + +func ToRequestInitChain(req *RequestInitChain) *Request { + return &Request{ + Value: &Request_InitChain{req}, + } +} + +func ToRequestListSnapshots(req *RequestListSnapshots) *Request { + return &Request{ + Value: &Request_ListSnapshots{req}, } } -func ToRequestQuery(req RequestQuery) *Request { +func ToRequestOfferSnapshot(req *RequestOfferSnapshot) *Request { return &Request{ - Value: &Request_Query{&req}, + Value: &Request_OfferSnapshot{req}, } } -func ToRequestInitChain(req RequestInitChain) *Request { +func ToRequestLoadSnapshotChunk(req *RequestLoadSnapshotChunk) *Request { return &Request{ - Value: &Request_InitChain{&req}, + Value: &Request_LoadSnapshotChunk{req}, } } -func ToRequestBeginBlock(req RequestBeginBlock) *Request { +func ToRequestApplySnapshotChunk(req *RequestApplySnapshotChunk) *Request { return &Request{ - Value: &Request_BeginBlock{&req}, + Value: &Request_ApplySnapshotChunk{req}, } } -func ToRequestEndBlock(req RequestEndBlock) *Request { +func ToRequestExtendVote(req *RequestExtendVote) *Request { return &Request{ - Value: &Request_EndBlock{&req}, + Value: &Request_ExtendVote{req}, } } -func ToRequestListSnapshots(req RequestListSnapshots) *Request { +func ToRequestVerifyVoteExtension(req *RequestVerifyVoteExtension) *Request { return &Request{ - Value: &Request_ListSnapshots{&req}, + Value: &Request_VerifyVoteExtension{req}, } } -func ToRequestOfferSnapshot(req RequestOfferSnapshot) *Request { +func ToRequestPrepareProposal(req *RequestPrepareProposal) *Request { return &Request{ - Value: &Request_OfferSnapshot{&req}, + Value: &Request_PrepareProposal{req}, } } -func ToRequestLoadSnapshotChunk(req RequestLoadSnapshotChunk) *Request { +func ToRequestProcessProposal(req *RequestProcessProposal) *Request { return &Request{ - Value: &Request_LoadSnapshotChunk{&req}, + Value: &Request_ProcessProposal{req}, } } -func ToRequestApplySnapshotChunk(req RequestApplySnapshotChunk) *Request { +func ToRequestFinalizeBlock(req *RequestFinalizeBlock) *Request { return &Request{ - Value: &Request_ApplySnapshotChunk{&req}, + Value: &Request_FinalizeBlock{req}, } } @@ -130,73 +143,86 @@ func ToResponseFlush() *Response { } } -func ToResponseInfo(res ResponseInfo) *Response { +func ToResponseInfo(res *ResponseInfo) *Response { + return &Response{ + Value: &Response_Info{res}, + } +} + +func ToResponseCheckTx(res *ResponseCheckTx) *Response { return &Response{ - Value: &Response_Info{&res}, + Value: &Response_CheckTx{res}, } } -func ToResponseDeliverTx(res ResponseDeliverTx) *Response { + +func ToResponseCommit(res *ResponseCommit) *Response { + return &Response{ + Value: &Response_Commit{res}, + } +} + +func ToResponseQuery(res *ResponseQuery) *Response { return &Response{ - Value: &Response_DeliverTx{&res}, + Value: &Response_Query{res}, } } -func ToResponseCheckTx(res ResponseCheckTx) *Response { +func ToResponseInitChain(res *ResponseInitChain) *Response { return &Response{ - Value: &Response_CheckTx{&res}, + Value: 
&Response_InitChain{res}, } } -func ToResponseCommit(res ResponseCommit) *Response { +func ToResponseListSnapshots(res *ResponseListSnapshots) *Response { return &Response{ - Value: &Response_Commit{&res}, + Value: &Response_ListSnapshots{res}, } } -func ToResponseQuery(res ResponseQuery) *Response { +func ToResponseOfferSnapshot(res *ResponseOfferSnapshot) *Response { return &Response{ - Value: &Response_Query{&res}, + Value: &Response_OfferSnapshot{res}, } } -func ToResponseInitChain(res ResponseInitChain) *Response { +func ToResponseLoadSnapshotChunk(res *ResponseLoadSnapshotChunk) *Response { return &Response{ - Value: &Response_InitChain{&res}, + Value: &Response_LoadSnapshotChunk{res}, } } -func ToResponseBeginBlock(res ResponseBeginBlock) *Response { +func ToResponseApplySnapshotChunk(res *ResponseApplySnapshotChunk) *Response { return &Response{ - Value: &Response_BeginBlock{&res}, + Value: &Response_ApplySnapshotChunk{res}, } } -func ToResponseEndBlock(res ResponseEndBlock) *Response { +func ToResponseExtendVote(res *ResponseExtendVote) *Response { return &Response{ - Value: &Response_EndBlock{&res}, + Value: &Response_ExtendVote{res}, } } -func ToResponseListSnapshots(res ResponseListSnapshots) *Response { +func ToResponseVerifyVoteExtension(res *ResponseVerifyVoteExtension) *Response { return &Response{ - Value: &Response_ListSnapshots{&res}, + Value: &Response_VerifyVoteExtension{res}, } } -func ToResponseOfferSnapshot(res ResponseOfferSnapshot) *Response { +func ToResponsePrepareProposal(res *ResponsePrepareProposal) *Response { return &Response{ - Value: &Response_OfferSnapshot{&res}, + Value: &Response_PrepareProposal{res}, } } -func ToResponseLoadSnapshotChunk(res ResponseLoadSnapshotChunk) *Response { +func ToResponseProcessProposal(res *ResponseProcessProposal) *Response { return &Response{ - Value: &Response_LoadSnapshotChunk{&res}, + Value: &Response_ProcessProposal{res}, } } -func ToResponseApplySnapshotChunk(res ResponseApplySnapshotChunk) *Response { +func ToResponseFinalizeBlock(res *ResponseFinalizeBlock) *Response { return &Response{ - Value: &Response_ApplySnapshotChunk{&res}, + Value: &Response_FinalizeBlock{res}, } } diff --git a/abci/types/messages_test.go b/abci/types/messages_test.go index 491d10c7f8..4f17f9f83c 100644 --- a/abci/types/messages_test.go +++ b/abci/types/messages_test.go @@ -13,8 +13,8 @@ import ( ) func TestMarshalJSON(t *testing.T) { - b, err := json.Marshal(&ResponseDeliverTx{}) - assert.Nil(t, err) + b, err := json.Marshal(&ExecTxResult{Code: 1}) + assert.NoError(t, err) // include empty fields. 
assert.True(t, strings.Contains(string(b), "code")) r1 := ResponseCheckTx{ @@ -31,11 +31,11 @@ func TestMarshalJSON(t *testing.T) { }, } b, err = json.Marshal(&r1) - assert.Nil(t, err) + assert.NoError(t, err) var r2 ResponseCheckTx err = json.Unmarshal(b, &r2) - assert.Nil(t, err) + assert.NoError(t, err) assert.Equal(t, r1, r2) } @@ -49,11 +49,11 @@ func TestWriteReadMessageSimple(t *testing.T) { for _, c := range cases { buf := new(bytes.Buffer) err := WriteMessage(c, buf) - assert.Nil(t, err) + assert.NoError(t, err) msg := new(RequestEcho) err = ReadMessage(buf, msg) - assert.Nil(t, err) + assert.NoError(t, err) assert.True(t, proto.Equal(c, msg)) } @@ -71,11 +71,11 @@ func TestWriteReadMessage(t *testing.T) { for _, c := range cases { buf := new(bytes.Buffer) err := WriteMessage(c, buf) - assert.Nil(t, err) + assert.NoError(t, err) msg := new(tmproto.Header) err = ReadMessage(buf, msg) - assert.Nil(t, err) + assert.NoError(t, err) assert.True(t, proto.Equal(c, msg)) } @@ -103,11 +103,11 @@ func TestWriteReadMessage2(t *testing.T) { for _, c := range cases { buf := new(bytes.Buffer) err := WriteMessage(c, buf) - assert.Nil(t, err) + assert.NoError(t, err) msg := new(ResponseCheckTx) err = ReadMessage(buf, msg) - assert.Nil(t, err) + assert.NoError(t, err) assert.True(t, proto.Equal(c, msg)) } diff --git a/abci/types/mocks/application.go b/abci/types/mocks/application.go new file mode 100644 index 0000000000..2d35c481f0 --- /dev/null +++ b/abci/types/mocks/application.go @@ -0,0 +1,349 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + context "context" + testing "testing" + + mock "github.com/stretchr/testify/mock" + + types "github.com/tendermint/tendermint/abci/types" +) + +// Application is an autogenerated mock type for the Application type +type Application struct { + mock.Mock +} + +// ApplySnapshotChunk provides a mock function with given fields: _a0, _a1 +func (_m *Application) ApplySnapshotChunk(_a0 context.Context, _a1 *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { + ret := _m.Called(_a0, _a1) + + var r0 *types.ResponseApplySnapshotChunk + if rf, ok := ret.Get(0).(func(context.Context, *types.RequestApplySnapshotChunk) *types.ResponseApplySnapshotChunk); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseApplySnapshotChunk) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *types.RequestApplySnapshotChunk) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CheckTx provides a mock function with given fields: _a0, _a1 +func (_m *Application) CheckTx(_a0 context.Context, _a1 *types.RequestCheckTx) (*types.ResponseCheckTx, error) { + ret := _m.Called(_a0, _a1) + + var r0 *types.ResponseCheckTx + if rf, ok := ret.Get(0).(func(context.Context, *types.RequestCheckTx) *types.ResponseCheckTx); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseCheckTx) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *types.RequestCheckTx) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Commit provides a mock function with given fields: _a0 +func (_m *Application) Commit(_a0 context.Context) (*types.ResponseCommit, error) { + ret := _m.Called(_a0) + + var r0 *types.ResponseCommit + if rf, ok := ret.Get(0).(func(context.Context) *types.ResponseCommit); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = 
ret.Get(0).(*types.ResponseCommit) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ExtendVote provides a mock function with given fields: _a0, _a1 +func (_m *Application) ExtendVote(_a0 context.Context, _a1 *types.RequestExtendVote) (*types.ResponseExtendVote, error) { + ret := _m.Called(_a0, _a1) + + var r0 *types.ResponseExtendVote + if rf, ok := ret.Get(0).(func(context.Context, *types.RequestExtendVote) *types.ResponseExtendVote); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseExtendVote) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *types.RequestExtendVote) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// FinalizeBlock provides a mock function with given fields: _a0, _a1 +func (_m *Application) FinalizeBlock(_a0 context.Context, _a1 *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) { + ret := _m.Called(_a0, _a1) + + var r0 *types.ResponseFinalizeBlock + if rf, ok := ret.Get(0).(func(context.Context, *types.RequestFinalizeBlock) *types.ResponseFinalizeBlock); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseFinalizeBlock) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *types.RequestFinalizeBlock) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Info provides a mock function with given fields: _a0, _a1 +func (_m *Application) Info(_a0 context.Context, _a1 *types.RequestInfo) (*types.ResponseInfo, error) { + ret := _m.Called(_a0, _a1) + + var r0 *types.ResponseInfo + if rf, ok := ret.Get(0).(func(context.Context, *types.RequestInfo) *types.ResponseInfo); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseInfo) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *types.RequestInfo) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// InitChain provides a mock function with given fields: _a0, _a1 +func (_m *Application) InitChain(_a0 context.Context, _a1 *types.RequestInitChain) (*types.ResponseInitChain, error) { + ret := _m.Called(_a0, _a1) + + var r0 *types.ResponseInitChain + if rf, ok := ret.Get(0).(func(context.Context, *types.RequestInitChain) *types.ResponseInitChain); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseInitChain) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *types.RequestInitChain) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ListSnapshots provides a mock function with given fields: _a0, _a1 +func (_m *Application) ListSnapshots(_a0 context.Context, _a1 *types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { + ret := _m.Called(_a0, _a1) + + var r0 *types.ResponseListSnapshots + if rf, ok := ret.Get(0).(func(context.Context, *types.RequestListSnapshots) *types.ResponseListSnapshots); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseListSnapshots) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *types.RequestListSnapshots) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LoadSnapshotChunk provides a mock function with given fields: _a0, _a1 
+func (_m *Application) LoadSnapshotChunk(_a0 context.Context, _a1 *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { + ret := _m.Called(_a0, _a1) + + var r0 *types.ResponseLoadSnapshotChunk + if rf, ok := ret.Get(0).(func(context.Context, *types.RequestLoadSnapshotChunk) *types.ResponseLoadSnapshotChunk); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseLoadSnapshotChunk) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *types.RequestLoadSnapshotChunk) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// OfferSnapshot provides a mock function with given fields: _a0, _a1 +func (_m *Application) OfferSnapshot(_a0 context.Context, _a1 *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { + ret := _m.Called(_a0, _a1) + + var r0 *types.ResponseOfferSnapshot + if rf, ok := ret.Get(0).(func(context.Context, *types.RequestOfferSnapshot) *types.ResponseOfferSnapshot); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseOfferSnapshot) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *types.RequestOfferSnapshot) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// PrepareProposal provides a mock function with given fields: _a0, _a1 +func (_m *Application) PrepareProposal(_a0 context.Context, _a1 *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) { + ret := _m.Called(_a0, _a1) + + var r0 *types.ResponsePrepareProposal + if rf, ok := ret.Get(0).(func(context.Context, *types.RequestPrepareProposal) *types.ResponsePrepareProposal); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponsePrepareProposal) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *types.RequestPrepareProposal) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ProcessProposal provides a mock function with given fields: _a0, _a1 +func (_m *Application) ProcessProposal(_a0 context.Context, _a1 *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) { + ret := _m.Called(_a0, _a1) + + var r0 *types.ResponseProcessProposal + if rf, ok := ret.Get(0).(func(context.Context, *types.RequestProcessProposal) *types.ResponseProcessProposal); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseProcessProposal) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *types.RequestProcessProposal) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Query provides a mock function with given fields: _a0, _a1 +func (_m *Application) Query(_a0 context.Context, _a1 *types.RequestQuery) (*types.ResponseQuery, error) { + ret := _m.Called(_a0, _a1) + + var r0 *types.ResponseQuery + if rf, ok := ret.Get(0).(func(context.Context, *types.RequestQuery) *types.ResponseQuery); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseQuery) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *types.RequestQuery) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// VerifyVoteExtension provides a mock function with given fields: _a0, _a1 +func (_m *Application) VerifyVoteExtension(_a0 context.Context, _a1 *types.RequestVerifyVoteExtension) 
(*types.ResponseVerifyVoteExtension, error) {
+	ret := _m.Called(_a0, _a1)
+
+	var r0 *types.ResponseVerifyVoteExtension
+	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestVerifyVoteExtension) *types.ResponseVerifyVoteExtension); ok {
+		r0 = rf(_a0, _a1)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*types.ResponseVerifyVoteExtension)
+		}
+	}
+
+	var r1 error
+	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestVerifyVoteExtension) error); ok {
+		r1 = rf(_a0, _a1)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// NewApplication creates a new instance of Application. It also registers the testing.TB interface on the mock and a cleanup function to assert the mock's expectations.
+func NewApplication(t testing.TB) *Application {
+	mock := &Application{}
+	mock.Mock.Test(t)
+
+	t.Cleanup(func() { mock.AssertExpectations(t) })
+
+	return mock
+}
diff --git a/abci/types/result.go b/abci/types/types.go
similarity index 59%
rename from abci/types/result.go
rename to abci/types/types.go
index dba6bfd159..d13947d1a9 100644
--- a/abci/types/result.go
+++ b/abci/types/types.go
@@ -31,6 +31,16 @@ func (r ResponseDeliverTx) IsErr() bool {
 	return r.Code != CodeTypeOK
 }
 
+// IsOK returns true if Code is OK.
+func (r ExecTxResult) IsOK() bool {
+	return r.Code == CodeTypeOK
+}
+
+// IsErr returns true if Code is something other than OK.
+func (r ExecTxResult) IsErr() bool {
+	return r.Code != CodeTypeOK
+}
+
 // IsOK returns true if Code is OK.
 func (r ResponseQuery) IsOK() bool {
 	return r.Code == CodeTypeOK
@@ -41,6 +51,29 @@ func (r ResponseQuery) IsErr() bool {
 	return r.Code != CodeTypeOK
 }
 
+func (r ResponseProcessProposal) IsAccepted() bool {
+	return r.Status == ResponseProcessProposal_ACCEPT
+}
+
+func (r ResponseProcessProposal) IsStatusUnknown() bool {
+	return r.Status == ResponseProcessProposal_UNKNOWN
+}
+
+// IsStatusUnknown returns true if Status is UNKNOWN.
+func (r ResponseVerifyVoteExtension) IsStatusUnknown() bool {
+	return r.Status == ResponseVerifyVoteExtension_UNKNOWN
+}
+
+// IsOK returns true if Status is ACCEPT.
+func (r ResponseVerifyVoteExtension) IsOK() bool {
+	return r.Status == ResponseVerifyVoteExtension_ACCEPT
+}
+
+// IsErr returns true if Status is something other than ACCEPT.
+func (r ResponseVerifyVoteExtension) IsErr() bool {
+	return r.Status != ResponseVerifyVoteExtension_ACCEPT
+}
+
 //---------------------------------------------------------------------------
 // override JSON marshaling so we emit defaults (ie. disable omitempty)
 
@@ -118,3 +151,44 @@ var _ jsonRoundTripper = (*ResponseDeliverTx)(nil)
 
 var _ jsonRoundTripper = (*ResponseCheckTx)(nil)
 var _ jsonRoundTripper = (*EventAttribute)(nil)
+
+// -----------------------------------------------
+// construct Result data
+
+func RespondVerifyVoteExtension(ok bool) ResponseVerifyVoteExtension {
+	status := ResponseVerifyVoteExtension_REJECT
+	if ok {
+		status = ResponseVerifyVoteExtension_ACCEPT
+	}
+	return ResponseVerifyVoteExtension{
+		Status: status,
+	}
+}
+
+// deterministicExecTxResult constructs a copy of response that omits
+// non-deterministic fields. The input response is not modified.
+func deterministicExecTxResult(response *ExecTxResult) *ExecTxResult {
+	return &ExecTxResult{
+		Code:      response.Code,
+		Data:      response.Data,
+		GasWanted: response.GasWanted,
+		GasUsed:   response.GasUsed,
+	}
+}
+
+// MarshalTxResults encodes the TxResults as a list of byte
+// slices.
It strips off the non-deterministic pieces of the TxResults +// so that the resulting data can be used for hash comparisons and used +// in Merkle proofs. +func MarshalTxResults(r []*ExecTxResult) ([][]byte, error) { + s := make([][]byte, len(r)) + for i, e := range r { + d := deterministicExecTxResult(e) + b, err := d.Marshal() + if err != nil { + return nil, err + } + s[i] = b + } + return s, nil +} diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index 0a290664bc..0d6fc9cd68 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -58,31 +58,31 @@ func (CheckTxType) EnumDescriptor() ([]byte, []int) { return fileDescriptor_252557cfdd89a31a, []int{0} } -type EvidenceType int32 +type MisbehaviorType int32 const ( - EvidenceType_UNKNOWN EvidenceType = 0 - EvidenceType_DUPLICATE_VOTE EvidenceType = 1 - EvidenceType_LIGHT_CLIENT_ATTACK EvidenceType = 2 + MisbehaviorType_UNKNOWN MisbehaviorType = 0 + MisbehaviorType_DUPLICATE_VOTE MisbehaviorType = 1 + MisbehaviorType_LIGHT_CLIENT_ATTACK MisbehaviorType = 2 ) -var EvidenceType_name = map[int32]string{ +var MisbehaviorType_name = map[int32]string{ 0: "UNKNOWN", 1: "DUPLICATE_VOTE", 2: "LIGHT_CLIENT_ATTACK", } -var EvidenceType_value = map[string]int32{ +var MisbehaviorType_value = map[string]int32{ "UNKNOWN": 0, "DUPLICATE_VOTE": 1, "LIGHT_CLIENT_ATTACK": 2, } -func (x EvidenceType) String() string { - return proto.EnumName(EvidenceType_name, int32(x)) +func (x MisbehaviorType) String() string { + return proto.EnumName(MisbehaviorType_name, int32(x)) } -func (EvidenceType) EnumDescriptor() ([]byte, []int) { +func (MisbehaviorType) EnumDescriptor() ([]byte, []int) { return fileDescriptor_252557cfdd89a31a, []int{1} } @@ -120,7 +120,7 @@ func (x ResponseOfferSnapshot_Result) String() string { } func (ResponseOfferSnapshot_Result) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{28, 0} + return fileDescriptor_252557cfdd89a31a, []int{33, 0} } type ResponseApplySnapshotChunk_Result int32 @@ -157,7 +157,95 @@ func (x ResponseApplySnapshotChunk_Result) String() string { } func (ResponseApplySnapshotChunk_Result) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{30, 0} + return fileDescriptor_252557cfdd89a31a, []int{35, 0} +} + +type ResponseProcessProposal_ProposalStatus int32 + +const ( + ResponseProcessProposal_UNKNOWN ResponseProcessProposal_ProposalStatus = 0 + ResponseProcessProposal_ACCEPT ResponseProcessProposal_ProposalStatus = 1 + ResponseProcessProposal_REJECT ResponseProcessProposal_ProposalStatus = 2 +) + +var ResponseProcessProposal_ProposalStatus_name = map[int32]string{ + 0: "UNKNOWN", + 1: "ACCEPT", + 2: "REJECT", +} + +var ResponseProcessProposal_ProposalStatus_value = map[string]int32{ + "UNKNOWN": 0, + "ACCEPT": 1, + "REJECT": 2, +} + +func (x ResponseProcessProposal_ProposalStatus) String() string { + return proto.EnumName(ResponseProcessProposal_ProposalStatus_name, int32(x)) +} + +func (ResponseProcessProposal_ProposalStatus) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{37, 0} +} + +type ResponseVerifyVoteExtension_VerifyStatus int32 + +const ( + ResponseVerifyVoteExtension_UNKNOWN ResponseVerifyVoteExtension_VerifyStatus = 0 + ResponseVerifyVoteExtension_ACCEPT ResponseVerifyVoteExtension_VerifyStatus = 1 + ResponseVerifyVoteExtension_REJECT ResponseVerifyVoteExtension_VerifyStatus = 2 +) + +var ResponseVerifyVoteExtension_VerifyStatus_name = map[int32]string{ + 0: "UNKNOWN", + 1: "ACCEPT", + 2: 
"REJECT", +} + +var ResponseVerifyVoteExtension_VerifyStatus_value = map[string]int32{ + "UNKNOWN": 0, + "ACCEPT": 1, + "REJECT": 2, +} + +func (x ResponseVerifyVoteExtension_VerifyStatus) String() string { + return proto.EnumName(ResponseVerifyVoteExtension_VerifyStatus_name, int32(x)) +} + +func (ResponseVerifyVoteExtension_VerifyStatus) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{39, 0} +} + +// TxAction contains App-provided information on what to do with a transaction that is part of a raw proposal +type TxRecord_TxAction int32 + +const ( + TxRecord_UNKNOWN TxRecord_TxAction = 0 + TxRecord_UNMODIFIED TxRecord_TxAction = 1 + TxRecord_ADDED TxRecord_TxAction = 2 + TxRecord_REMOVED TxRecord_TxAction = 3 +) + +var TxRecord_TxAction_name = map[int32]string{ + 0: "UNKNOWN", + 1: "UNMODIFIED", + 2: "ADDED", + 3: "REMOVED", +} + +var TxRecord_TxAction_value = map[string]int32{ + "UNKNOWN": 0, + "UNMODIFIED": 1, + "ADDED": 2, + "REMOVED": 3, +} + +func (x TxRecord_TxAction) String() string { + return proto.EnumName(TxRecord_TxAction_name, int32(x)) +} + +func (TxRecord_TxAction) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{47, 0} } type Request struct { @@ -176,6 +264,11 @@ type Request struct { // *Request_OfferSnapshot // *Request_LoadSnapshotChunk // *Request_ApplySnapshotChunk + // *Request_PrepareProposal + // *Request_ProcessProposal + // *Request_ExtendVote + // *Request_VerifyVoteExtension + // *Request_FinalizeBlock Value isRequest_Value `protobuf_oneof:"value"` } @@ -260,21 +353,41 @@ type Request_LoadSnapshotChunk struct { type Request_ApplySnapshotChunk struct { ApplySnapshotChunk *RequestApplySnapshotChunk `protobuf:"bytes,14,opt,name=apply_snapshot_chunk,json=applySnapshotChunk,proto3,oneof" json:"apply_snapshot_chunk,omitempty"` } - -func (*Request_Echo) isRequest_Value() {} -func (*Request_Flush) isRequest_Value() {} -func (*Request_Info) isRequest_Value() {} -func (*Request_InitChain) isRequest_Value() {} -func (*Request_Query) isRequest_Value() {} -func (*Request_BeginBlock) isRequest_Value() {} -func (*Request_CheckTx) isRequest_Value() {} -func (*Request_DeliverTx) isRequest_Value() {} -func (*Request_EndBlock) isRequest_Value() {} -func (*Request_Commit) isRequest_Value() {} -func (*Request_ListSnapshots) isRequest_Value() {} -func (*Request_OfferSnapshot) isRequest_Value() {} -func (*Request_LoadSnapshotChunk) isRequest_Value() {} -func (*Request_ApplySnapshotChunk) isRequest_Value() {} +type Request_PrepareProposal struct { + PrepareProposal *RequestPrepareProposal `protobuf:"bytes,15,opt,name=prepare_proposal,json=prepareProposal,proto3,oneof" json:"prepare_proposal,omitempty"` +} +type Request_ProcessProposal struct { + ProcessProposal *RequestProcessProposal `protobuf:"bytes,16,opt,name=process_proposal,json=processProposal,proto3,oneof" json:"process_proposal,omitempty"` +} +type Request_ExtendVote struct { + ExtendVote *RequestExtendVote `protobuf:"bytes,17,opt,name=extend_vote,json=extendVote,proto3,oneof" json:"extend_vote,omitempty"` +} +type Request_VerifyVoteExtension struct { + VerifyVoteExtension *RequestVerifyVoteExtension `protobuf:"bytes,18,opt,name=verify_vote_extension,json=verifyVoteExtension,proto3,oneof" json:"verify_vote_extension,omitempty"` +} +type Request_FinalizeBlock struct { + FinalizeBlock *RequestFinalizeBlock `protobuf:"bytes,19,opt,name=finalize_block,json=finalizeBlock,proto3,oneof" json:"finalize_block,omitempty"` +} + +func (*Request_Echo) isRequest_Value() {} 
+func (*Request_Flush) isRequest_Value() {} +func (*Request_Info) isRequest_Value() {} +func (*Request_InitChain) isRequest_Value() {} +func (*Request_Query) isRequest_Value() {} +func (*Request_BeginBlock) isRequest_Value() {} +func (*Request_CheckTx) isRequest_Value() {} +func (*Request_DeliverTx) isRequest_Value() {} +func (*Request_EndBlock) isRequest_Value() {} +func (*Request_Commit) isRequest_Value() {} +func (*Request_ListSnapshots) isRequest_Value() {} +func (*Request_OfferSnapshot) isRequest_Value() {} +func (*Request_LoadSnapshotChunk) isRequest_Value() {} +func (*Request_ApplySnapshotChunk) isRequest_Value() {} +func (*Request_PrepareProposal) isRequest_Value() {} +func (*Request_ProcessProposal) isRequest_Value() {} +func (*Request_ExtendVote) isRequest_Value() {} +func (*Request_VerifyVoteExtension) isRequest_Value() {} +func (*Request_FinalizeBlock) isRequest_Value() {} func (m *Request) GetValue() isRequest_Value { if m != nil { @@ -318,6 +431,7 @@ func (m *Request) GetQuery() *RequestQuery { return nil } +// Deprecated: Do not use. func (m *Request) GetBeginBlock() *RequestBeginBlock { if x, ok := m.GetValue().(*Request_BeginBlock); ok { return x.BeginBlock @@ -332,6 +446,7 @@ func (m *Request) GetCheckTx() *RequestCheckTx { return nil } +// Deprecated: Do not use. func (m *Request) GetDeliverTx() *RequestDeliverTx { if x, ok := m.GetValue().(*Request_DeliverTx); ok { return x.DeliverTx @@ -339,6 +454,7 @@ func (m *Request) GetDeliverTx() *RequestDeliverTx { return nil } +// Deprecated: Do not use. func (m *Request) GetEndBlock() *RequestEndBlock { if x, ok := m.GetValue().(*Request_EndBlock); ok { return x.EndBlock @@ -381,6 +497,41 @@ func (m *Request) GetApplySnapshotChunk() *RequestApplySnapshotChunk { return nil } +func (m *Request) GetPrepareProposal() *RequestPrepareProposal { + if x, ok := m.GetValue().(*Request_PrepareProposal); ok { + return x.PrepareProposal + } + return nil +} + +func (m *Request) GetProcessProposal() *RequestProcessProposal { + if x, ok := m.GetValue().(*Request_ProcessProposal); ok { + return x.ProcessProposal + } + return nil +} + +func (m *Request) GetExtendVote() *RequestExtendVote { + if x, ok := m.GetValue().(*Request_ExtendVote); ok { + return x.ExtendVote + } + return nil +} + +func (m *Request) GetVerifyVoteExtension() *RequestVerifyVoteExtension { + if x, ok := m.GetValue().(*Request_VerifyVoteExtension); ok { + return x.VerifyVoteExtension + } + return nil +} + +func (m *Request) GetFinalizeBlock() *RequestFinalizeBlock { + if x, ok := m.GetValue().(*Request_FinalizeBlock); ok { + return x.FinalizeBlock + } + return nil +} + // XXX_OneofWrappers is for the internal use of the proto package. 
func (*Request) XXX_OneofWrappers() []interface{} { return []interface{}{ @@ -398,6 +549,11 @@ func (*Request) XXX_OneofWrappers() []interface{} { (*Request_OfferSnapshot)(nil), (*Request_LoadSnapshotChunk)(nil), (*Request_ApplySnapshotChunk)(nil), + (*Request_PrepareProposal)(nil), + (*Request_ProcessProposal)(nil), + (*Request_ExtendVote)(nil), + (*Request_VerifyVoteExtension)(nil), + (*Request_FinalizeBlock)(nil), } } @@ -710,10 +866,10 @@ func (m *RequestQuery) GetProve() bool { } type RequestBeginBlock struct { - Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` - Header types1.Header `protobuf:"bytes,2,opt,name=header,proto3" json:"header"` - LastCommitInfo LastCommitInfo `protobuf:"bytes,3,opt,name=last_commit_info,json=lastCommitInfo,proto3" json:"last_commit_info"` - ByzantineValidators []Evidence `protobuf:"bytes,4,rep,name=byzantine_validators,json=byzantineValidators,proto3" json:"byzantine_validators"` + Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` + Header types1.Header `protobuf:"bytes,2,opt,name=header,proto3" json:"header"` + LastCommitInfo CommitInfo `protobuf:"bytes,3,opt,name=last_commit_info,json=lastCommitInfo,proto3" json:"last_commit_info"` + ByzantineValidators []Misbehavior `protobuf:"bytes,4,rep,name=byzantine_validators,json=byzantineValidators,proto3" json:"byzantine_validators"` } func (m *RequestBeginBlock) Reset() { *m = RequestBeginBlock{} } @@ -763,14 +919,14 @@ func (m *RequestBeginBlock) GetHeader() types1.Header { return types1.Header{} } -func (m *RequestBeginBlock) GetLastCommitInfo() LastCommitInfo { +func (m *RequestBeginBlock) GetLastCommitInfo() CommitInfo { if m != nil { return m.LastCommitInfo } - return LastCommitInfo{} + return CommitInfo{} } -func (m *RequestBeginBlock) GetByzantineValidators() []Evidence { +func (m *RequestBeginBlock) GetByzantineValidators() []Misbehavior { if m != nil { return m.ByzantineValidators } @@ -1165,38 +1321,32 @@ func (m *RequestApplySnapshotChunk) GetSender() string { return "" } -type Response struct { - // Types that are valid to be assigned to Value: - // *Response_Exception - // *Response_Echo - // *Response_Flush - // *Response_Info - // *Response_InitChain - // *Response_Query - // *Response_BeginBlock - // *Response_CheckTx - // *Response_DeliverTx - // *Response_EndBlock - // *Response_Commit - // *Response_ListSnapshots - // *Response_OfferSnapshot - // *Response_LoadSnapshotChunk - // *Response_ApplySnapshotChunk - Value isResponse_Value `protobuf_oneof:"value"` -} - -func (m *Response) Reset() { *m = Response{} } -func (m *Response) String() string { return proto.CompactTextString(m) } -func (*Response) ProtoMessage() {} -func (*Response) Descriptor() ([]byte, []int) { +type RequestPrepareProposal struct { + // the modified transactions cannot exceed this size. + MaxTxBytes int64 `protobuf:"varint,1,opt,name=max_tx_bytes,json=maxTxBytes,proto3" json:"max_tx_bytes,omitempty"` + // txs is an array of transactions that will be included in a block, + // sent to the app for possible modifications. 
+ Txs [][]byte `protobuf:"bytes,2,rep,name=txs,proto3" json:"txs,omitempty"` + LocalLastCommit ExtendedCommitInfo `protobuf:"bytes,3,opt,name=local_last_commit,json=localLastCommit,proto3" json:"local_last_commit"` + ByzantineValidators []Misbehavior `protobuf:"bytes,4,rep,name=byzantine_validators,json=byzantineValidators,proto3" json:"byzantine_validators"` + Height int64 `protobuf:"varint,5,opt,name=height,proto3" json:"height,omitempty"` + Time time.Time `protobuf:"bytes,6,opt,name=time,proto3,stdtime" json:"time"` + NextValidatorsHash []byte `protobuf:"bytes,7,opt,name=next_validators_hash,json=nextValidatorsHash,proto3" json:"next_validators_hash,omitempty"` + ProposerProTxHash []byte `protobuf:"bytes,8,opt,name=proposer_pro_tx_hash,json=proposerProTxHash,proto3" json:"proposer_pro_tx_hash,omitempty"` +} + +func (m *RequestPrepareProposal) Reset() { *m = RequestPrepareProposal{} } +func (m *RequestPrepareProposal) String() string { return proto.CompactTextString(m) } +func (*RequestPrepareProposal) ProtoMessage() {} +func (*RequestPrepareProposal) Descriptor() ([]byte, []int) { return fileDescriptor_252557cfdd89a31a, []int{15} } -func (m *Response) XXX_Unmarshal(b []byte) error { +func (m *RequestPrepareProposal) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *RequestPrepareProposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_Response.Marshal(b, m, deterministic) + return xxx_messageInfo_RequestPrepareProposal.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1206,236 +1356,193 @@ func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return b[:n], nil } } -func (m *Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_Response.Merge(m, src) +func (m *RequestPrepareProposal) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestPrepareProposal.Merge(m, src) } -func (m *Response) XXX_Size() int { +func (m *RequestPrepareProposal) XXX_Size() int { return m.Size() } -func (m *Response) XXX_DiscardUnknown() { - xxx_messageInfo_Response.DiscardUnknown(m) +func (m *RequestPrepareProposal) XXX_DiscardUnknown() { + xxx_messageInfo_RequestPrepareProposal.DiscardUnknown(m) } -var xxx_messageInfo_Response proto.InternalMessageInfo - -type isResponse_Value interface { - isResponse_Value() - MarshalTo([]byte) (int, error) - Size() int -} +var xxx_messageInfo_RequestPrepareProposal proto.InternalMessageInfo -type Response_Exception struct { - Exception *ResponseException `protobuf:"bytes,1,opt,name=exception,proto3,oneof" json:"exception,omitempty"` -} -type Response_Echo struct { - Echo *ResponseEcho `protobuf:"bytes,2,opt,name=echo,proto3,oneof" json:"echo,omitempty"` -} -type Response_Flush struct { - Flush *ResponseFlush `protobuf:"bytes,3,opt,name=flush,proto3,oneof" json:"flush,omitempty"` -} -type Response_Info struct { - Info *ResponseInfo `protobuf:"bytes,4,opt,name=info,proto3,oneof" json:"info,omitempty"` -} -type Response_InitChain struct { - InitChain *ResponseInitChain `protobuf:"bytes,5,opt,name=init_chain,json=initChain,proto3,oneof" json:"init_chain,omitempty"` -} -type Response_Query struct { - Query *ResponseQuery `protobuf:"bytes,6,opt,name=query,proto3,oneof" json:"query,omitempty"` -} -type Response_BeginBlock struct { - BeginBlock *ResponseBeginBlock `protobuf:"bytes,7,opt,name=begin_block,json=beginBlock,proto3,oneof" 
json:"begin_block,omitempty"` -} -type Response_CheckTx struct { - CheckTx *ResponseCheckTx `protobuf:"bytes,8,opt,name=check_tx,json=checkTx,proto3,oneof" json:"check_tx,omitempty"` -} -type Response_DeliverTx struct { - DeliverTx *ResponseDeliverTx `protobuf:"bytes,9,opt,name=deliver_tx,json=deliverTx,proto3,oneof" json:"deliver_tx,omitempty"` -} -type Response_EndBlock struct { - EndBlock *ResponseEndBlock `protobuf:"bytes,10,opt,name=end_block,json=endBlock,proto3,oneof" json:"end_block,omitempty"` -} -type Response_Commit struct { - Commit *ResponseCommit `protobuf:"bytes,11,opt,name=commit,proto3,oneof" json:"commit,omitempty"` -} -type Response_ListSnapshots struct { - ListSnapshots *ResponseListSnapshots `protobuf:"bytes,12,opt,name=list_snapshots,json=listSnapshots,proto3,oneof" json:"list_snapshots,omitempty"` -} -type Response_OfferSnapshot struct { - OfferSnapshot *ResponseOfferSnapshot `protobuf:"bytes,13,opt,name=offer_snapshot,json=offerSnapshot,proto3,oneof" json:"offer_snapshot,omitempty"` -} -type Response_LoadSnapshotChunk struct { - LoadSnapshotChunk *ResponseLoadSnapshotChunk `protobuf:"bytes,14,opt,name=load_snapshot_chunk,json=loadSnapshotChunk,proto3,oneof" json:"load_snapshot_chunk,omitempty"` -} -type Response_ApplySnapshotChunk struct { - ApplySnapshotChunk *ResponseApplySnapshotChunk `protobuf:"bytes,15,opt,name=apply_snapshot_chunk,json=applySnapshotChunk,proto3,oneof" json:"apply_snapshot_chunk,omitempty"` +func (m *RequestPrepareProposal) GetMaxTxBytes() int64 { + if m != nil { + return m.MaxTxBytes + } + return 0 } -func (*Response_Exception) isResponse_Value() {} -func (*Response_Echo) isResponse_Value() {} -func (*Response_Flush) isResponse_Value() {} -func (*Response_Info) isResponse_Value() {} -func (*Response_InitChain) isResponse_Value() {} -func (*Response_Query) isResponse_Value() {} -func (*Response_BeginBlock) isResponse_Value() {} -func (*Response_CheckTx) isResponse_Value() {} -func (*Response_DeliverTx) isResponse_Value() {} -func (*Response_EndBlock) isResponse_Value() {} -func (*Response_Commit) isResponse_Value() {} -func (*Response_ListSnapshots) isResponse_Value() {} -func (*Response_OfferSnapshot) isResponse_Value() {} -func (*Response_LoadSnapshotChunk) isResponse_Value() {} -func (*Response_ApplySnapshotChunk) isResponse_Value() {} - -func (m *Response) GetValue() isResponse_Value { +func (m *RequestPrepareProposal) GetTxs() [][]byte { if m != nil { - return m.Value + return m.Txs } return nil } -func (m *Response) GetException() *ResponseException { - if x, ok := m.GetValue().(*Response_Exception); ok { - return x.Exception +func (m *RequestPrepareProposal) GetLocalLastCommit() ExtendedCommitInfo { + if m != nil { + return m.LocalLastCommit } - return nil + return ExtendedCommitInfo{} } -func (m *Response) GetEcho() *ResponseEcho { - if x, ok := m.GetValue().(*Response_Echo); ok { - return x.Echo +func (m *RequestPrepareProposal) GetByzantineValidators() []Misbehavior { + if m != nil { + return m.ByzantineValidators } return nil } -func (m *Response) GetFlush() *ResponseFlush { - if x, ok := m.GetValue().(*Response_Flush); ok { - return x.Flush +func (m *RequestPrepareProposal) GetHeight() int64 { + if m != nil { + return m.Height } - return nil + return 0 } -func (m *Response) GetInfo() *ResponseInfo { - if x, ok := m.GetValue().(*Response_Info); ok { - return x.Info +func (m *RequestPrepareProposal) GetTime() time.Time { + if m != nil { + return m.Time } - return nil + return time.Time{} } -func (m *Response) GetInitChain() 
*ResponseInitChain { - if x, ok := m.GetValue().(*Response_InitChain); ok { - return x.InitChain +func (m *RequestPrepareProposal) GetNextValidatorsHash() []byte { + if m != nil { + return m.NextValidatorsHash } return nil } -func (m *Response) GetQuery() *ResponseQuery { - if x, ok := m.GetValue().(*Response_Query); ok { - return x.Query +func (m *RequestPrepareProposal) GetProposerProTxHash() []byte { + if m != nil { + return m.ProposerProTxHash } return nil } -func (m *Response) GetBeginBlock() *ResponseBeginBlock { - if x, ok := m.GetValue().(*Response_BeginBlock); ok { - return x.BeginBlock +type RequestProcessProposal struct { + Txs [][]byte `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"` + ProposedLastCommit CommitInfo `protobuf:"bytes,2,opt,name=proposed_last_commit,json=proposedLastCommit,proto3" json:"proposed_last_commit"` + ByzantineValidators []Misbehavior `protobuf:"bytes,3,rep,name=byzantine_validators,json=byzantineValidators,proto3" json:"byzantine_validators"` + // hash is the merkle root hash of the fields of the proposed block. + Hash []byte `protobuf:"bytes,4,opt,name=hash,proto3" json:"hash,omitempty"` + Height int64 `protobuf:"varint,5,opt,name=height,proto3" json:"height,omitempty"` + Time time.Time `protobuf:"bytes,6,opt,name=time,proto3,stdtime" json:"time"` + NextValidatorsHash []byte `protobuf:"bytes,7,opt,name=next_validators_hash,json=nextValidatorsHash,proto3" json:"next_validators_hash,omitempty"` + ProposerProTxHash []byte `protobuf:"bytes,8,opt,name=proposer_pro_tx_hash,json=proposerProTxHash,proto3" json:"proposer_pro_tx_hash,omitempty"` +} + +func (m *RequestProcessProposal) Reset() { *m = RequestProcessProposal{} } +func (m *RequestProcessProposal) String() string { return proto.CompactTextString(m) } +func (*RequestProcessProposal) ProtoMessage() {} +func (*RequestProcessProposal) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{16} +} +func (m *RequestProcessProposal) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestProcessProposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestProcessProposal.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - return nil +} +func (m *RequestProcessProposal) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestProcessProposal.Merge(m, src) +} +func (m *RequestProcessProposal) XXX_Size() int { + return m.Size() +} +func (m *RequestProcessProposal) XXX_DiscardUnknown() { + xxx_messageInfo_RequestProcessProposal.DiscardUnknown(m) } -func (m *Response) GetCheckTx() *ResponseCheckTx { - if x, ok := m.GetValue().(*Response_CheckTx); ok { - return x.CheckTx +var xxx_messageInfo_RequestProcessProposal proto.InternalMessageInfo + +func (m *RequestProcessProposal) GetTxs() [][]byte { + if m != nil { + return m.Txs } return nil } -func (m *Response) GetDeliverTx() *ResponseDeliverTx { - if x, ok := m.GetValue().(*Response_DeliverTx); ok { - return x.DeliverTx +func (m *RequestProcessProposal) GetProposedLastCommit() CommitInfo { + if m != nil { + return m.ProposedLastCommit } - return nil + return CommitInfo{} } -func (m *Response) GetEndBlock() *ResponseEndBlock { - if x, ok := m.GetValue().(*Response_EndBlock); ok { - return x.EndBlock +func (m *RequestProcessProposal) GetByzantineValidators() []Misbehavior { + if m != nil { + return m.ByzantineValidators } return nil } -func (m 
*Response) GetCommit() *ResponseCommit { - if x, ok := m.GetValue().(*Response_Commit); ok { - return x.Commit +func (m *RequestProcessProposal) GetHash() []byte { + if m != nil { + return m.Hash } return nil } -func (m *Response) GetListSnapshots() *ResponseListSnapshots { - if x, ok := m.GetValue().(*Response_ListSnapshots); ok { - return x.ListSnapshots +func (m *RequestProcessProposal) GetHeight() int64 { + if m != nil { + return m.Height } - return nil + return 0 } -func (m *Response) GetOfferSnapshot() *ResponseOfferSnapshot { - if x, ok := m.GetValue().(*Response_OfferSnapshot); ok { - return x.OfferSnapshot +func (m *RequestProcessProposal) GetTime() time.Time { + if m != nil { + return m.Time } - return nil + return time.Time{} } -func (m *Response) GetLoadSnapshotChunk() *ResponseLoadSnapshotChunk { - if x, ok := m.GetValue().(*Response_LoadSnapshotChunk); ok { - return x.LoadSnapshotChunk +func (m *RequestProcessProposal) GetNextValidatorsHash() []byte { + if m != nil { + return m.NextValidatorsHash } return nil } -func (m *Response) GetApplySnapshotChunk() *ResponseApplySnapshotChunk { - if x, ok := m.GetValue().(*Response_ApplySnapshotChunk); ok { - return x.ApplySnapshotChunk +func (m *RequestProcessProposal) GetProposerProTxHash() []byte { + if m != nil { + return m.ProposerProTxHash } return nil } -// XXX_OneofWrappers is for the internal use of the proto package. -func (*Response) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*Response_Exception)(nil), - (*Response_Echo)(nil), - (*Response_Flush)(nil), - (*Response_Info)(nil), - (*Response_InitChain)(nil), - (*Response_Query)(nil), - (*Response_BeginBlock)(nil), - (*Response_CheckTx)(nil), - (*Response_DeliverTx)(nil), - (*Response_EndBlock)(nil), - (*Response_Commit)(nil), - (*Response_ListSnapshots)(nil), - (*Response_OfferSnapshot)(nil), - (*Response_LoadSnapshotChunk)(nil), - (*Response_ApplySnapshotChunk)(nil), - } -} - -// nondeterministic -type ResponseException struct { - Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` +// Extends a vote with application-side injection +type RequestExtendVote struct { + Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` + Height int64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` } -func (m *ResponseException) Reset() { *m = ResponseException{} } -func (m *ResponseException) String() string { return proto.CompactTextString(m) } -func (*ResponseException) ProtoMessage() {} -func (*ResponseException) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{16} +func (m *RequestExtendVote) Reset() { *m = RequestExtendVote{} } +func (m *RequestExtendVote) String() string { return proto.CompactTextString(m) } +func (*RequestExtendVote) ProtoMessage() {} +func (*RequestExtendVote) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{17} } -func (m *ResponseException) XXX_Unmarshal(b []byte) error { +func (m *RequestExtendVote) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ResponseException) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *RequestExtendVote) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_ResponseException.Marshal(b, m, deterministic) + return xxx_messageInfo_RequestExtendVote.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1445,126 +1552,52 @@ func (m *ResponseException) XXX_Marshal(b 
[]byte, deterministic bool) ([]byte, e return b[:n], nil } } -func (m *ResponseException) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseException.Merge(m, src) +func (m *RequestExtendVote) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestExtendVote.Merge(m, src) } -func (m *ResponseException) XXX_Size() int { +func (m *RequestExtendVote) XXX_Size() int { return m.Size() } -func (m *ResponseException) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseException.DiscardUnknown(m) +func (m *RequestExtendVote) XXX_DiscardUnknown() { + xxx_messageInfo_RequestExtendVote.DiscardUnknown(m) } -var xxx_messageInfo_ResponseException proto.InternalMessageInfo +var xxx_messageInfo_RequestExtendVote proto.InternalMessageInfo -func (m *ResponseException) GetError() string { +func (m *RequestExtendVote) GetHash() []byte { if m != nil { - return m.Error - } - return "" -} - -type ResponseEcho struct { - Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` -} - -func (m *ResponseEcho) Reset() { *m = ResponseEcho{} } -func (m *ResponseEcho) String() string { return proto.CompactTextString(m) } -func (*ResponseEcho) ProtoMessage() {} -func (*ResponseEcho) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{17} -} -func (m *ResponseEcho) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResponseEcho) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResponseEcho.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil + return m.Hash } + return nil } -func (m *ResponseEcho) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseEcho.Merge(m, src) -} -func (m *ResponseEcho) XXX_Size() int { - return m.Size() -} -func (m *ResponseEcho) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseEcho.DiscardUnknown(m) -} - -var xxx_messageInfo_ResponseEcho proto.InternalMessageInfo -func (m *ResponseEcho) GetMessage() string { +func (m *RequestExtendVote) GetHeight() int64 { if m != nil { - return m.Message + return m.Height } - return "" + return 0 } -type ResponseFlush struct { +// Verify the vote extension +type RequestVerifyVoteExtension struct { + Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` + ValidatorProTxHash []byte `protobuf:"bytes,2,opt,name=validator_pro_tx_hash,json=validatorProTxHash,proto3" json:"validator_pro_tx_hash,omitempty"` + Height int64 `protobuf:"varint,3,opt,name=height,proto3" json:"height,omitempty"` + VoteExtension []byte `protobuf:"bytes,4,opt,name=vote_extension,json=voteExtension,proto3" json:"vote_extension,omitempty"` } -func (m *ResponseFlush) Reset() { *m = ResponseFlush{} } -func (m *ResponseFlush) String() string { return proto.CompactTextString(m) } -func (*ResponseFlush) ProtoMessage() {} -func (*ResponseFlush) Descriptor() ([]byte, []int) { +func (m *RequestVerifyVoteExtension) Reset() { *m = RequestVerifyVoteExtension{} } +func (m *RequestVerifyVoteExtension) String() string { return proto.CompactTextString(m) } +func (*RequestVerifyVoteExtension) ProtoMessage() {} +func (*RequestVerifyVoteExtension) Descriptor() ([]byte, []int) { return fileDescriptor_252557cfdd89a31a, []int{18} } -func (m *ResponseFlush) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResponseFlush) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return 
xxx_messageInfo_ResponseFlush.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResponseFlush) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseFlush.Merge(m, src) -} -func (m *ResponseFlush) XXX_Size() int { - return m.Size() -} -func (m *ResponseFlush) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseFlush.DiscardUnknown(m) -} - -var xxx_messageInfo_ResponseFlush proto.InternalMessageInfo - -type ResponseInfo struct { - Data string `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` - // this is the software version of the application. TODO: remove? - Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` - AppVersion uint64 `protobuf:"varint,3,opt,name=app_version,json=appVersion,proto3" json:"app_version,omitempty"` - LastBlockHeight int64 `protobuf:"varint,4,opt,name=last_block_height,json=lastBlockHeight,proto3" json:"last_block_height,omitempty"` - LastBlockAppHash []byte `protobuf:"bytes,5,opt,name=last_block_app_hash,json=lastBlockAppHash,proto3" json:"last_block_app_hash,omitempty"` -} - -func (m *ResponseInfo) Reset() { *m = ResponseInfo{} } -func (m *ResponseInfo) String() string { return proto.CompactTextString(m) } -func (*ResponseInfo) ProtoMessage() {} -func (*ResponseInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{19} -} -func (m *ResponseInfo) XXX_Unmarshal(b []byte) error { +func (m *RequestVerifyVoteExtension) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ResponseInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *RequestVerifyVoteExtension) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_ResponseInfo.Marshal(b, m, deterministic) + return xxx_messageInfo_RequestVerifyVoteExtension.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1574,73 +1607,70 @@ func (m *ResponseInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return b[:n], nil } } -func (m *ResponseInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseInfo.Merge(m, src) +func (m *RequestVerifyVoteExtension) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestVerifyVoteExtension.Merge(m, src) } -func (m *ResponseInfo) XXX_Size() int { +func (m *RequestVerifyVoteExtension) XXX_Size() int { return m.Size() } -func (m *ResponseInfo) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseInfo.DiscardUnknown(m) +func (m *RequestVerifyVoteExtension) XXX_DiscardUnknown() { + xxx_messageInfo_RequestVerifyVoteExtension.DiscardUnknown(m) } -var xxx_messageInfo_ResponseInfo proto.InternalMessageInfo - -func (m *ResponseInfo) GetData() string { - if m != nil { - return m.Data - } - return "" -} +var xxx_messageInfo_RequestVerifyVoteExtension proto.InternalMessageInfo -func (m *ResponseInfo) GetVersion() string { +func (m *RequestVerifyVoteExtension) GetHash() []byte { if m != nil { - return m.Version + return m.Hash } - return "" + return nil } -func (m *ResponseInfo) GetAppVersion() uint64 { +func (m *RequestVerifyVoteExtension) GetValidatorProTxHash() []byte { if m != nil { - return m.AppVersion + return m.ValidatorProTxHash } - return 0 + return nil } -func (m *ResponseInfo) GetLastBlockHeight() int64 { +func (m *RequestVerifyVoteExtension) GetHeight() int64 { if m != nil { - return m.LastBlockHeight + return m.Height } return 0 } -func (m *ResponseInfo) 
GetLastBlockAppHash() []byte { +func (m *RequestVerifyVoteExtension) GetVoteExtension() []byte { if m != nil { - return m.LastBlockAppHash + return m.VoteExtension } return nil } -type ResponseInitChain struct { - ConsensusParams *types1.ConsensusParams `protobuf:"bytes,1,opt,name=consensus_params,json=consensusParams,proto3" json:"consensus_params,omitempty"` - AppHash []byte `protobuf:"bytes,3,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` - ValidatorSetUpdate ValidatorSetUpdate `protobuf:"bytes,100,opt,name=validator_set_update,json=validatorSetUpdate,proto3" json:"validator_set_update"` - NextCoreChainLockUpdate *types1.CoreChainLock `protobuf:"bytes,101,opt,name=next_core_chain_lock_update,json=nextCoreChainLockUpdate,proto3" json:"next_core_chain_lock_update,omitempty"` - InitialCoreHeight uint32 `protobuf:"varint,102,opt,name=initial_core_height,json=initialCoreHeight,proto3" json:"initial_core_height,omitempty"` +type RequestFinalizeBlock struct { + Txs [][]byte `protobuf:"bytes,1,rep,name=txs,proto3" json:"txs,omitempty"` + DecidedLastCommit CommitInfo `protobuf:"bytes,2,opt,name=decided_last_commit,json=decidedLastCommit,proto3" json:"decided_last_commit"` + ByzantineValidators []Misbehavior `protobuf:"bytes,3,rep,name=byzantine_validators,json=byzantineValidators,proto3" json:"byzantine_validators"` + // hash is the merkle root hash of the fields of the proposed block. + Hash []byte `protobuf:"bytes,4,opt,name=hash,proto3" json:"hash,omitempty"` + Height int64 `protobuf:"varint,5,opt,name=height,proto3" json:"height,omitempty"` + Time time.Time `protobuf:"bytes,6,opt,name=time,proto3,stdtime" json:"time"` + NextValidatorsHash []byte `protobuf:"bytes,7,opt,name=next_validators_hash,json=nextValidatorsHash,proto3" json:"next_validators_hash,omitempty"` + ProposerProTxHash []byte `protobuf:"bytes,8,opt,name=proposer_pro_tx_hash,json=proposerProTxHash,proto3" json:"proposer_pro_tx_hash,omitempty"` } -func (m *ResponseInitChain) Reset() { *m = ResponseInitChain{} } -func (m *ResponseInitChain) String() string { return proto.CompactTextString(m) } -func (*ResponseInitChain) ProtoMessage() {} -func (*ResponseInitChain) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{20} +func (m *RequestFinalizeBlock) Reset() { *m = RequestFinalizeBlock{} } +func (m *RequestFinalizeBlock) String() string { return proto.CompactTextString(m) } +func (*RequestFinalizeBlock) ProtoMessage() {} +func (*RequestFinalizeBlock) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{19} } -func (m *ResponseInitChain) XXX_Unmarshal(b []byte) error { +func (m *RequestFinalizeBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ResponseInitChain) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *RequestFinalizeBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_ResponseInitChain.Marshal(b, m, deterministic) + return xxx_messageInfo_RequestFinalizeBlock.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1650,78 +1680,111 @@ func (m *ResponseInitChain) XXX_Marshal(b []byte, deterministic bool) ([]byte, e return b[:n], nil } } -func (m *ResponseInitChain) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseInitChain.Merge(m, src) +func (m *RequestFinalizeBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestFinalizeBlock.Merge(m, src) } -func (m *ResponseInitChain) XXX_Size() int { +func (m 
*RequestFinalizeBlock) XXX_Size() int { return m.Size() } -func (m *ResponseInitChain) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseInitChain.DiscardUnknown(m) +func (m *RequestFinalizeBlock) XXX_DiscardUnknown() { + xxx_messageInfo_RequestFinalizeBlock.DiscardUnknown(m) } -var xxx_messageInfo_ResponseInitChain proto.InternalMessageInfo +var xxx_messageInfo_RequestFinalizeBlock proto.InternalMessageInfo -func (m *ResponseInitChain) GetConsensusParams() *types1.ConsensusParams { +func (m *RequestFinalizeBlock) GetTxs() [][]byte { if m != nil { - return m.ConsensusParams + return m.Txs } return nil } -func (m *ResponseInitChain) GetAppHash() []byte { +func (m *RequestFinalizeBlock) GetDecidedLastCommit() CommitInfo { if m != nil { - return m.AppHash + return m.DecidedLastCommit } - return nil + return CommitInfo{} } -func (m *ResponseInitChain) GetValidatorSetUpdate() ValidatorSetUpdate { +func (m *RequestFinalizeBlock) GetByzantineValidators() []Misbehavior { if m != nil { - return m.ValidatorSetUpdate + return m.ByzantineValidators } - return ValidatorSetUpdate{} + return nil } -func (m *ResponseInitChain) GetNextCoreChainLockUpdate() *types1.CoreChainLock { +func (m *RequestFinalizeBlock) GetHash() []byte { if m != nil { - return m.NextCoreChainLockUpdate + return m.Hash } return nil } -func (m *ResponseInitChain) GetInitialCoreHeight() uint32 { +func (m *RequestFinalizeBlock) GetHeight() int64 { if m != nil { - return m.InitialCoreHeight + return m.Height } return 0 } -type ResponseQuery struct { - Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` - // bytes data = 2; // use "value" instead. - Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` - Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` - Index int64 `protobuf:"varint,5,opt,name=index,proto3" json:"index,omitempty"` - Key []byte `protobuf:"bytes,6,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,7,opt,name=value,proto3" json:"value,omitempty"` - ProofOps *crypto.ProofOps `protobuf:"bytes,8,opt,name=proof_ops,json=proofOps,proto3" json:"proof_ops,omitempty"` - Height int64 `protobuf:"varint,9,opt,name=height,proto3" json:"height,omitempty"` - Codespace string `protobuf:"bytes,10,opt,name=codespace,proto3" json:"codespace,omitempty"` +func (m *RequestFinalizeBlock) GetTime() time.Time { + if m != nil { + return m.Time + } + return time.Time{} } -func (m *ResponseQuery) Reset() { *m = ResponseQuery{} } -func (m *ResponseQuery) String() string { return proto.CompactTextString(m) } -func (*ResponseQuery) ProtoMessage() {} -func (*ResponseQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{21} +func (m *RequestFinalizeBlock) GetNextValidatorsHash() []byte { + if m != nil { + return m.NextValidatorsHash + } + return nil } -func (m *ResponseQuery) XXX_Unmarshal(b []byte) error { + +func (m *RequestFinalizeBlock) GetProposerProTxHash() []byte { + if m != nil { + return m.ProposerProTxHash + } + return nil +} + +type Response struct { + // Types that are valid to be assigned to Value: + // *Response_Exception + // *Response_Echo + // *Response_Flush + // *Response_Info + // *Response_InitChain + // *Response_Query + // *Response_BeginBlock + // *Response_CheckTx + // *Response_DeliverTx + // *Response_EndBlock + // *Response_Commit + // *Response_ListSnapshots + // *Response_OfferSnapshot + // *Response_LoadSnapshotChunk + // *Response_ApplySnapshotChunk + // *Response_PrepareProposal + // 
*Response_ProcessProposal + // *Response_ExtendVote + // *Response_VerifyVoteExtension + // *Response_FinalizeBlock + Value isResponse_Value `protobuf_oneof:"value"` +} + +func (m *Response) Reset() { *m = Response{} } +func (m *Response) String() string { return proto.CompactTextString(m) } +func (*Response) ProtoMessage() {} +func (*Response) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{20} +} +func (m *Response) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ResponseQuery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_ResponseQuery.Marshal(b, m, deterministic) + return xxx_messageInfo_Response.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -1731,274 +1794,299 @@ func (m *ResponseQuery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error return b[:n], nil } } -func (m *ResponseQuery) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseQuery.Merge(m, src) +func (m *Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_Response.Merge(m, src) } -func (m *ResponseQuery) XXX_Size() int { +func (m *Response) XXX_Size() int { return m.Size() } -func (m *ResponseQuery) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseQuery.DiscardUnknown(m) +func (m *Response) XXX_DiscardUnknown() { + xxx_messageInfo_Response.DiscardUnknown(m) } -var xxx_messageInfo_ResponseQuery proto.InternalMessageInfo +var xxx_messageInfo_Response proto.InternalMessageInfo -func (m *ResponseQuery) GetCode() uint32 { - if m != nil { - return m.Code - } - return 0 +type isResponse_Value interface { + isResponse_Value() + MarshalTo([]byte) (int, error) + Size() int } -func (m *ResponseQuery) GetLog() string { - if m != nil { - return m.Log - } - return "" +type Response_Exception struct { + Exception *ResponseException `protobuf:"bytes,1,opt,name=exception,proto3,oneof" json:"exception,omitempty"` +} +type Response_Echo struct { + Echo *ResponseEcho `protobuf:"bytes,2,opt,name=echo,proto3,oneof" json:"echo,omitempty"` +} +type Response_Flush struct { + Flush *ResponseFlush `protobuf:"bytes,3,opt,name=flush,proto3,oneof" json:"flush,omitempty"` +} +type Response_Info struct { + Info *ResponseInfo `protobuf:"bytes,4,opt,name=info,proto3,oneof" json:"info,omitempty"` +} +type Response_InitChain struct { + InitChain *ResponseInitChain `protobuf:"bytes,5,opt,name=init_chain,json=initChain,proto3,oneof" json:"init_chain,omitempty"` +} +type Response_Query struct { + Query *ResponseQuery `protobuf:"bytes,6,opt,name=query,proto3,oneof" json:"query,omitempty"` +} +type Response_BeginBlock struct { + BeginBlock *ResponseBeginBlock `protobuf:"bytes,7,opt,name=begin_block,json=beginBlock,proto3,oneof" json:"begin_block,omitempty"` +} +type Response_CheckTx struct { + CheckTx *ResponseCheckTx `protobuf:"bytes,8,opt,name=check_tx,json=checkTx,proto3,oneof" json:"check_tx,omitempty"` +} +type Response_DeliverTx struct { + DeliverTx *ResponseDeliverTx `protobuf:"bytes,9,opt,name=deliver_tx,json=deliverTx,proto3,oneof" json:"deliver_tx,omitempty"` +} +type Response_EndBlock struct { + EndBlock *ResponseEndBlock `protobuf:"bytes,10,opt,name=end_block,json=endBlock,proto3,oneof" json:"end_block,omitempty"` +} +type Response_Commit struct { + Commit *ResponseCommit `protobuf:"bytes,11,opt,name=commit,proto3,oneof" json:"commit,omitempty"` +} +type Response_ListSnapshots struct { + ListSnapshots 
*ResponseListSnapshots `protobuf:"bytes,12,opt,name=list_snapshots,json=listSnapshots,proto3,oneof" json:"list_snapshots,omitempty"` +} +type Response_OfferSnapshot struct { + OfferSnapshot *ResponseOfferSnapshot `protobuf:"bytes,13,opt,name=offer_snapshot,json=offerSnapshot,proto3,oneof" json:"offer_snapshot,omitempty"` +} +type Response_LoadSnapshotChunk struct { + LoadSnapshotChunk *ResponseLoadSnapshotChunk `protobuf:"bytes,14,opt,name=load_snapshot_chunk,json=loadSnapshotChunk,proto3,oneof" json:"load_snapshot_chunk,omitempty"` +} +type Response_ApplySnapshotChunk struct { + ApplySnapshotChunk *ResponseApplySnapshotChunk `protobuf:"bytes,15,opt,name=apply_snapshot_chunk,json=applySnapshotChunk,proto3,oneof" json:"apply_snapshot_chunk,omitempty"` } +type Response_PrepareProposal struct { + PrepareProposal *ResponsePrepareProposal `protobuf:"bytes,16,opt,name=prepare_proposal,json=prepareProposal,proto3,oneof" json:"prepare_proposal,omitempty"` +} +type Response_ProcessProposal struct { + ProcessProposal *ResponseProcessProposal `protobuf:"bytes,17,opt,name=process_proposal,json=processProposal,proto3,oneof" json:"process_proposal,omitempty"` +} +type Response_ExtendVote struct { + ExtendVote *ResponseExtendVote `protobuf:"bytes,18,opt,name=extend_vote,json=extendVote,proto3,oneof" json:"extend_vote,omitempty"` +} +type Response_VerifyVoteExtension struct { + VerifyVoteExtension *ResponseVerifyVoteExtension `protobuf:"bytes,19,opt,name=verify_vote_extension,json=verifyVoteExtension,proto3,oneof" json:"verify_vote_extension,omitempty"` +} +type Response_FinalizeBlock struct { + FinalizeBlock *ResponseFinalizeBlock `protobuf:"bytes,20,opt,name=finalize_block,json=finalizeBlock,proto3,oneof" json:"finalize_block,omitempty"` +} + +func (*Response_Exception) isResponse_Value() {} +func (*Response_Echo) isResponse_Value() {} +func (*Response_Flush) isResponse_Value() {} +func (*Response_Info) isResponse_Value() {} +func (*Response_InitChain) isResponse_Value() {} +func (*Response_Query) isResponse_Value() {} +func (*Response_BeginBlock) isResponse_Value() {} +func (*Response_CheckTx) isResponse_Value() {} +func (*Response_DeliverTx) isResponse_Value() {} +func (*Response_EndBlock) isResponse_Value() {} +func (*Response_Commit) isResponse_Value() {} +func (*Response_ListSnapshots) isResponse_Value() {} +func (*Response_OfferSnapshot) isResponse_Value() {} +func (*Response_LoadSnapshotChunk) isResponse_Value() {} +func (*Response_ApplySnapshotChunk) isResponse_Value() {} +func (*Response_PrepareProposal) isResponse_Value() {} +func (*Response_ProcessProposal) isResponse_Value() {} +func (*Response_ExtendVote) isResponse_Value() {} +func (*Response_VerifyVoteExtension) isResponse_Value() {} +func (*Response_FinalizeBlock) isResponse_Value() {} -func (m *ResponseQuery) GetInfo() string { +func (m *Response) GetValue() isResponse_Value { if m != nil { - return m.Info + return m.Value } - return "" + return nil } -func (m *ResponseQuery) GetIndex() int64 { - if m != nil { - return m.Index +func (m *Response) GetException() *ResponseException { + if x, ok := m.GetValue().(*Response_Exception); ok { + return x.Exception } - return 0 + return nil } -func (m *ResponseQuery) GetKey() []byte { - if m != nil { - return m.Key +func (m *Response) GetEcho() *ResponseEcho { + if x, ok := m.GetValue().(*Response_Echo); ok { + return x.Echo } return nil } -func (m *ResponseQuery) GetValue() []byte { - if m != nil { - return m.Value +func (m *Response) GetFlush() *ResponseFlush { + if x, ok := 
m.GetValue().(*Response_Flush); ok { + return x.Flush } return nil } -func (m *ResponseQuery) GetProofOps() *crypto.ProofOps { - if m != nil { - return m.ProofOps +func (m *Response) GetInfo() *ResponseInfo { + if x, ok := m.GetValue().(*Response_Info); ok { + return x.Info } return nil } -func (m *ResponseQuery) GetHeight() int64 { - if m != nil { - return m.Height +func (m *Response) GetInitChain() *ResponseInitChain { + if x, ok := m.GetValue().(*Response_InitChain); ok { + return x.InitChain } - return 0 + return nil } -func (m *ResponseQuery) GetCodespace() string { - if m != nil { - return m.Codespace +func (m *Response) GetQuery() *ResponseQuery { + if x, ok := m.GetValue().(*Response_Query); ok { + return x.Query } - return "" + return nil } -type ResponseBeginBlock struct { - Events []Event `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` +// Deprecated: Do not use. +func (m *Response) GetBeginBlock() *ResponseBeginBlock { + if x, ok := m.GetValue().(*Response_BeginBlock); ok { + return x.BeginBlock + } + return nil } -func (m *ResponseBeginBlock) Reset() { *m = ResponseBeginBlock{} } -func (m *ResponseBeginBlock) String() string { return proto.CompactTextString(m) } -func (*ResponseBeginBlock) ProtoMessage() {} -func (*ResponseBeginBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{22} -} -func (m *ResponseBeginBlock) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResponseBeginBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResponseBeginBlock.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (m *Response) GetCheckTx() *ResponseCheckTx { + if x, ok := m.GetValue().(*Response_CheckTx); ok { + return x.CheckTx } + return nil } -func (m *ResponseBeginBlock) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseBeginBlock.Merge(m, src) -} -func (m *ResponseBeginBlock) XXX_Size() int { - return m.Size() -} -func (m *ResponseBeginBlock) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseBeginBlock.DiscardUnknown(m) -} - -var xxx_messageInfo_ResponseBeginBlock proto.InternalMessageInfo -func (m *ResponseBeginBlock) GetEvents() []Event { - if m != nil { - return m.Events +// Deprecated: Do not use. +func (m *Response) GetDeliverTx() *ResponseDeliverTx { + if x, ok := m.GetValue().(*Response_DeliverTx); ok { + return x.DeliverTx } return nil } -type ResponseCheckTx struct { - Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` - Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` - GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,proto3" json:"gas_wanted,omitempty"` - GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,proto3" json:"gas_used,omitempty"` - Events []Event `protobuf:"bytes,7,rep,name=events,proto3" json:"events,omitempty"` - Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` - Sender string `protobuf:"bytes,9,opt,name=sender,proto3" json:"sender,omitempty"` - Priority int64 `protobuf:"varint,10,opt,name=priority,proto3" json:"priority,omitempty"` - // mempool_error is set by Tendermint. - // ABCI applictions creating a ResponseCheckTX should not set mempool_error. 
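// Illustrative sketch, not generated code: how an ABCI application might fill
// in ResponseCheckTx when validating a transaction. It assumes it lives in this
// same types package; the handler name and the gas/priority heuristics are
// invented. MempoolError is deliberately left unset, since per the comment
// above that field belongs to Tendermint, not the application.
func exampleCheckTx(tx []byte) ResponseCheckTx {
	return ResponseCheckTx{
		Code:      0,              // 0 is the conventional "OK" code
		GasWanted: int64(len(tx)), // made-up heuristic: gas proportional to tx size
		Priority:  1,              // hint for mempool ordering
		Sender:    "example-sender",
	}
}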
- MempoolError string `protobuf:"bytes,11,opt,name=mempool_error,json=mempoolError,proto3" json:"mempool_error,omitempty"` +// Deprecated: Do not use. +func (m *Response) GetEndBlock() *ResponseEndBlock { + if x, ok := m.GetValue().(*Response_EndBlock); ok { + return x.EndBlock + } + return nil } -func (m *ResponseCheckTx) Reset() { *m = ResponseCheckTx{} } -func (m *ResponseCheckTx) String() string { return proto.CompactTextString(m) } -func (*ResponseCheckTx) ProtoMessage() {} -func (*ResponseCheckTx) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{23} -} -func (m *ResponseCheckTx) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResponseCheckTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResponseCheckTx.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (m *Response) GetCommit() *ResponseCommit { + if x, ok := m.GetValue().(*Response_Commit); ok { + return x.Commit } + return nil } -func (m *ResponseCheckTx) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseCheckTx.Merge(m, src) -} -func (m *ResponseCheckTx) XXX_Size() int { - return m.Size() -} -func (m *ResponseCheckTx) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseCheckTx.DiscardUnknown(m) -} - -var xxx_messageInfo_ResponseCheckTx proto.InternalMessageInfo -func (m *ResponseCheckTx) GetCode() uint32 { - if m != nil { - return m.Code +func (m *Response) GetListSnapshots() *ResponseListSnapshots { + if x, ok := m.GetValue().(*Response_ListSnapshots); ok { + return x.ListSnapshots } - return 0 + return nil } -func (m *ResponseCheckTx) GetData() []byte { - if m != nil { - return m.Data +func (m *Response) GetOfferSnapshot() *ResponseOfferSnapshot { + if x, ok := m.GetValue().(*Response_OfferSnapshot); ok { + return x.OfferSnapshot } return nil } -func (m *ResponseCheckTx) GetLog() string { - if m != nil { - return m.Log +func (m *Response) GetLoadSnapshotChunk() *ResponseLoadSnapshotChunk { + if x, ok := m.GetValue().(*Response_LoadSnapshotChunk); ok { + return x.LoadSnapshotChunk } - return "" + return nil } -func (m *ResponseCheckTx) GetInfo() string { - if m != nil { - return m.Info +func (m *Response) GetApplySnapshotChunk() *ResponseApplySnapshotChunk { + if x, ok := m.GetValue().(*Response_ApplySnapshotChunk); ok { + return x.ApplySnapshotChunk } - return "" + return nil } -func (m *ResponseCheckTx) GetGasWanted() int64 { - if m != nil { - return m.GasWanted +func (m *Response) GetPrepareProposal() *ResponsePrepareProposal { + if x, ok := m.GetValue().(*Response_PrepareProposal); ok { + return x.PrepareProposal } - return 0 + return nil } -func (m *ResponseCheckTx) GetGasUsed() int64 { - if m != nil { - return m.GasUsed +func (m *Response) GetProcessProposal() *ResponseProcessProposal { + if x, ok := m.GetValue().(*Response_ProcessProposal); ok { + return x.ProcessProposal } - return 0 + return nil } -func (m *ResponseCheckTx) GetEvents() []Event { - if m != nil { - return m.Events +func (m *Response) GetExtendVote() *ResponseExtendVote { + if x, ok := m.GetValue().(*Response_ExtendVote); ok { + return x.ExtendVote } return nil } -func (m *ResponseCheckTx) GetCodespace() string { - if m != nil { - return m.Codespace - } - return "" -} - -func (m *ResponseCheckTx) GetSender() string { - if m != nil { - return m.Sender +func (m *Response) GetVerifyVoteExtension() *ResponseVerifyVoteExtension { + if x, ok 
:= m.GetValue().(*Response_VerifyVoteExtension); ok { + return x.VerifyVoteExtension } - return "" + return nil } -func (m *ResponseCheckTx) GetPriority() int64 { - if m != nil { - return m.Priority +func (m *Response) GetFinalizeBlock() *ResponseFinalizeBlock { + if x, ok := m.GetValue().(*Response_FinalizeBlock); ok { + return x.FinalizeBlock } - return 0 + return nil } -func (m *ResponseCheckTx) GetMempoolError() string { - if m != nil { - return m.MempoolError +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Response) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Response_Exception)(nil), + (*Response_Echo)(nil), + (*Response_Flush)(nil), + (*Response_Info)(nil), + (*Response_InitChain)(nil), + (*Response_Query)(nil), + (*Response_BeginBlock)(nil), + (*Response_CheckTx)(nil), + (*Response_DeliverTx)(nil), + (*Response_EndBlock)(nil), + (*Response_Commit)(nil), + (*Response_ListSnapshots)(nil), + (*Response_OfferSnapshot)(nil), + (*Response_LoadSnapshotChunk)(nil), + (*Response_ApplySnapshotChunk)(nil), + (*Response_PrepareProposal)(nil), + (*Response_ProcessProposal)(nil), + (*Response_ExtendVote)(nil), + (*Response_VerifyVoteExtension)(nil), + (*Response_FinalizeBlock)(nil), } - return "" } -type ResponseDeliverTx struct { - Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` - Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` - GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,proto3" json:"gas_wanted,omitempty"` - GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,proto3" json:"gas_used,omitempty"` - Events []Event `protobuf:"bytes,7,rep,name=events,proto3" json:"events,omitempty"` - Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` +// nondeterministic +type ResponseException struct { + Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` } -func (m *ResponseDeliverTx) Reset() { *m = ResponseDeliverTx{} } -func (m *ResponseDeliverTx) String() string { return proto.CompactTextString(m) } -func (*ResponseDeliverTx) ProtoMessage() {} -func (*ResponseDeliverTx) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{24} +func (m *ResponseException) Reset() { *m = ResponseException{} } +func (m *ResponseException) String() string { return proto.CompactTextString(m) } +func (*ResponseException) ProtoMessage() {} +func (*ResponseException) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{21} } -func (m *ResponseDeliverTx) XXX_Unmarshal(b []byte) error { +func (m *ResponseException) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ResponseDeliverTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ResponseException) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_ResponseDeliverTx.Marshal(b, m, deterministic) + return xxx_messageInfo_ResponseException.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -2008,93 +2096,84 @@ func (m *ResponseDeliverTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, e return b[:n], nil } } -func (m *ResponseDeliverTx) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseDeliverTx.Merge(m, src) +func (m *ResponseException) XXX_Merge(src proto.Message) 
{ + xxx_messageInfo_ResponseException.Merge(m, src) } -func (m *ResponseDeliverTx) XXX_Size() int { +func (m *ResponseException) XXX_Size() int { return m.Size() } -func (m *ResponseDeliverTx) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseDeliverTx.DiscardUnknown(m) +func (m *ResponseException) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseException.DiscardUnknown(m) } -var xxx_messageInfo_ResponseDeliverTx proto.InternalMessageInfo +var xxx_messageInfo_ResponseException proto.InternalMessageInfo -func (m *ResponseDeliverTx) GetCode() uint32 { +func (m *ResponseException) GetError() string { if m != nil { - return m.Code + return m.Error } - return 0 + return "" } -func (m *ResponseDeliverTx) GetData() []byte { - if m != nil { - return m.Data - } - return nil +type ResponseEcho struct { + Message string `protobuf:"bytes,1,opt,name=message,proto3" json:"message,omitempty"` } -func (m *ResponseDeliverTx) GetLog() string { - if m != nil { - return m.Log - } - return "" +func (m *ResponseEcho) Reset() { *m = ResponseEcho{} } +func (m *ResponseEcho) String() string { return proto.CompactTextString(m) } +func (*ResponseEcho) ProtoMessage() {} +func (*ResponseEcho) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{22} } - -func (m *ResponseDeliverTx) GetInfo() string { - if m != nil { - return m.Info - } - return "" +func (m *ResponseEcho) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } - -func (m *ResponseDeliverTx) GetGasWanted() int64 { - if m != nil { - return m.GasWanted +func (m *ResponseEcho) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseEcho.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - return 0 } - -func (m *ResponseDeliverTx) GetGasUsed() int64 { - if m != nil { - return m.GasUsed - } - return 0 +func (m *ResponseEcho) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseEcho.Merge(m, src) } - -func (m *ResponseDeliverTx) GetEvents() []Event { - if m != nil { - return m.Events - } - return nil +func (m *ResponseEcho) XXX_Size() int { + return m.Size() +} +func (m *ResponseEcho) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseEcho.DiscardUnknown(m) } -func (m *ResponseDeliverTx) GetCodespace() string { +var xxx_messageInfo_ResponseEcho proto.InternalMessageInfo + +func (m *ResponseEcho) GetMessage() string { if m != nil { - return m.Codespace + return m.Message } return "" } -type ResponseEndBlock struct { - ConsensusParamUpdates *types1.ConsensusParams `protobuf:"bytes,2,opt,name=consensus_param_updates,json=consensusParamUpdates,proto3" json:"consensus_param_updates,omitempty"` - Events []Event `protobuf:"bytes,3,rep,name=events,proto3" json:"events,omitempty"` - NextCoreChainLockUpdate *types1.CoreChainLock `protobuf:"bytes,100,opt,name=next_core_chain_lock_update,json=nextCoreChainLockUpdate,proto3" json:"next_core_chain_lock_update,omitempty"` - ValidatorSetUpdate *ValidatorSetUpdate `protobuf:"bytes,101,opt,name=validator_set_update,json=validatorSetUpdate,proto3" json:"validator_set_update,omitempty"` +type ResponseFlush struct { } -func (m *ResponseEndBlock) Reset() { *m = ResponseEndBlock{} } -func (m *ResponseEndBlock) String() string { return proto.CompactTextString(m) } -func (*ResponseEndBlock) ProtoMessage() {} -func (*ResponseEndBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{25} +func (m *ResponseFlush) 
Reset() { *m = ResponseFlush{} } +func (m *ResponseFlush) String() string { return proto.CompactTextString(m) } +func (*ResponseFlush) ProtoMessage() {} +func (*ResponseFlush) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{23} } -func (m *ResponseEndBlock) XXX_Unmarshal(b []byte) error { +func (m *ResponseFlush) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ResponseEndBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ResponseFlush) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_ResponseEndBlock.Marshal(b, m, deterministic) + return xxx_messageInfo_ResponseFlush.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -2104,64 +2183,39 @@ func (m *ResponseEndBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, er return b[:n], nil } } -func (m *ResponseEndBlock) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseEndBlock.Merge(m, src) +func (m *ResponseFlush) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseFlush.Merge(m, src) } -func (m *ResponseEndBlock) XXX_Size() int { +func (m *ResponseFlush) XXX_Size() int { return m.Size() } -func (m *ResponseEndBlock) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseEndBlock.DiscardUnknown(m) -} - -var xxx_messageInfo_ResponseEndBlock proto.InternalMessageInfo - -func (m *ResponseEndBlock) GetConsensusParamUpdates() *types1.ConsensusParams { - if m != nil { - return m.ConsensusParamUpdates - } - return nil -} - -func (m *ResponseEndBlock) GetEvents() []Event { - if m != nil { - return m.Events - } - return nil -} - -func (m *ResponseEndBlock) GetNextCoreChainLockUpdate() *types1.CoreChainLock { - if m != nil { - return m.NextCoreChainLockUpdate - } - return nil +func (m *ResponseFlush) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseFlush.DiscardUnknown(m) } -func (m *ResponseEndBlock) GetValidatorSetUpdate() *ValidatorSetUpdate { - if m != nil { - return m.ValidatorSetUpdate - } - return nil -} +var xxx_messageInfo_ResponseFlush proto.InternalMessageInfo -type ResponseCommit struct { - // reserve 1 - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - RetainHeight int64 `protobuf:"varint,3,opt,name=retain_height,json=retainHeight,proto3" json:"retain_height,omitempty"` +type ResponseInfo struct { + Data string `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + // this is the software version of the application. TODO: remove? 
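// Illustrative sketch, not generated code: a minimal Info response an
// application might return during the ABCI handshake, assuming this types
// package. All concrete values are invented. LastBlockHeight and
// LastBlockAppHash echo the application's last committed state so Tendermint
// can decide whether blocks need to be replayed.
func exampleInfo() ResponseInfo {
	return ResponseInfo{
		Data:             "example-kvstore",
		Version:          "1.0.0", // software version of the application
		AppVersion:       1,
		LastBlockHeight:  0,   // fresh application: nothing committed yet
		LastBlockAppHash: nil, // empty until the first Commit
	}
}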
+ Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + AppVersion uint64 `protobuf:"varint,3,opt,name=app_version,json=appVersion,proto3" json:"app_version,omitempty"` + LastBlockHeight int64 `protobuf:"varint,4,opt,name=last_block_height,json=lastBlockHeight,proto3" json:"last_block_height,omitempty"` + LastBlockAppHash []byte `protobuf:"bytes,5,opt,name=last_block_app_hash,json=lastBlockAppHash,proto3" json:"last_block_app_hash,omitempty"` } -func (m *ResponseCommit) Reset() { *m = ResponseCommit{} } -func (m *ResponseCommit) String() string { return proto.CompactTextString(m) } -func (*ResponseCommit) ProtoMessage() {} -func (*ResponseCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{26} +func (m *ResponseInfo) Reset() { *m = ResponseInfo{} } +func (m *ResponseInfo) String() string { return proto.CompactTextString(m) } +func (*ResponseInfo) ProtoMessage() {} +func (*ResponseInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{24} } -func (m *ResponseCommit) XXX_Unmarshal(b []byte) error { +func (m *ResponseInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ResponseCommit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ResponseInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_ResponseCommit.Marshal(b, m, deterministic) + return xxx_messageInfo_ResponseInfo.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -2171,92 +2225,73 @@ func (m *ResponseCommit) XXX_Marshal(b []byte, deterministic bool) ([]byte, erro return b[:n], nil } } -func (m *ResponseCommit) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseCommit.Merge(m, src) +func (m *ResponseInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseInfo.Merge(m, src) } -func (m *ResponseCommit) XXX_Size() int { +func (m *ResponseInfo) XXX_Size() int { return m.Size() } -func (m *ResponseCommit) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseCommit.DiscardUnknown(m) +func (m *ResponseInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseInfo.DiscardUnknown(m) } -var xxx_messageInfo_ResponseCommit proto.InternalMessageInfo +var xxx_messageInfo_ResponseInfo proto.InternalMessageInfo -func (m *ResponseCommit) GetData() []byte { +func (m *ResponseInfo) GetData() string { if m != nil { return m.Data } - return nil + return "" } -func (m *ResponseCommit) GetRetainHeight() int64 { +func (m *ResponseInfo) GetVersion() string { if m != nil { - return m.RetainHeight + return m.Version } - return 0 + return "" } -type ResponseListSnapshots struct { - Snapshots []*Snapshot `protobuf:"bytes,1,rep,name=snapshots,proto3" json:"snapshots,omitempty"` +func (m *ResponseInfo) GetAppVersion() uint64 { + if m != nil { + return m.AppVersion + } + return 0 } -func (m *ResponseListSnapshots) Reset() { *m = ResponseListSnapshots{} } -func (m *ResponseListSnapshots) String() string { return proto.CompactTextString(m) } -func (*ResponseListSnapshots) ProtoMessage() {} -func (*ResponseListSnapshots) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{27} -} -func (m *ResponseListSnapshots) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResponseListSnapshots) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResponseListSnapshots.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := 
m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (m *ResponseInfo) GetLastBlockHeight() int64 { + if m != nil { + return m.LastBlockHeight } + return 0 } -func (m *ResponseListSnapshots) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseListSnapshots.Merge(m, src) -} -func (m *ResponseListSnapshots) XXX_Size() int { - return m.Size() -} -func (m *ResponseListSnapshots) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseListSnapshots.DiscardUnknown(m) -} - -var xxx_messageInfo_ResponseListSnapshots proto.InternalMessageInfo -func (m *ResponseListSnapshots) GetSnapshots() []*Snapshot { +func (m *ResponseInfo) GetLastBlockAppHash() []byte { if m != nil { - return m.Snapshots + return m.LastBlockAppHash } return nil } -type ResponseOfferSnapshot struct { - Result ResponseOfferSnapshot_Result `protobuf:"varint,1,opt,name=result,proto3,enum=tendermint.abci.ResponseOfferSnapshot_Result" json:"result,omitempty"` +type ResponseInitChain struct { + ConsensusParams *types1.ConsensusParams `protobuf:"bytes,1,opt,name=consensus_params,json=consensusParams,proto3" json:"consensus_params,omitempty"` + AppHash []byte `protobuf:"bytes,3,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` + ValidatorSetUpdate ValidatorSetUpdate `protobuf:"bytes,100,opt,name=validator_set_update,json=validatorSetUpdate,proto3" json:"validator_set_update"` + NextCoreChainLockUpdate *types1.CoreChainLock `protobuf:"bytes,101,opt,name=next_core_chain_lock_update,json=nextCoreChainLockUpdate,proto3" json:"next_core_chain_lock_update,omitempty"` + InitialCoreHeight uint32 `protobuf:"varint,102,opt,name=initial_core_height,json=initialCoreHeight,proto3" json:"initial_core_height,omitempty"` } -func (m *ResponseOfferSnapshot) Reset() { *m = ResponseOfferSnapshot{} } -func (m *ResponseOfferSnapshot) String() string { return proto.CompactTextString(m) } -func (*ResponseOfferSnapshot) ProtoMessage() {} -func (*ResponseOfferSnapshot) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{28} +func (m *ResponseInitChain) Reset() { *m = ResponseInitChain{} } +func (m *ResponseInitChain) String() string { return proto.CompactTextString(m) } +func (*ResponseInitChain) ProtoMessage() {} +func (*ResponseInitChain) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{25} } -func (m *ResponseOfferSnapshot) XXX_Unmarshal(b []byte) error { +func (m *ResponseInitChain) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ResponseOfferSnapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ResponseInitChain) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_ResponseOfferSnapshot.Marshal(b, m, deterministic) + return xxx_messageInfo_ResponseInitChain.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -2266,87 +2301,78 @@ func (m *ResponseOfferSnapshot) XXX_Marshal(b []byte, deterministic bool) ([]byt return b[:n], nil } } -func (m *ResponseOfferSnapshot) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseOfferSnapshot.Merge(m, src) +func (m *ResponseInitChain) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseInitChain.Merge(m, src) } -func (m *ResponseOfferSnapshot) XXX_Size() int { +func (m *ResponseInitChain) XXX_Size() int { return m.Size() } -func (m *ResponseOfferSnapshot) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseOfferSnapshot.DiscardUnknown(m) +func (m *ResponseInitChain) 
XXX_DiscardUnknown() { + xxx_messageInfo_ResponseInitChain.DiscardUnknown(m) } -var xxx_messageInfo_ResponseOfferSnapshot proto.InternalMessageInfo +var xxx_messageInfo_ResponseInitChain proto.InternalMessageInfo -func (m *ResponseOfferSnapshot) GetResult() ResponseOfferSnapshot_Result { +func (m *ResponseInitChain) GetConsensusParams() *types1.ConsensusParams { if m != nil { - return m.Result + return m.ConsensusParams } - return ResponseOfferSnapshot_UNKNOWN + return nil } -type ResponseLoadSnapshotChunk struct { - Chunk []byte `protobuf:"bytes,1,opt,name=chunk,proto3" json:"chunk,omitempty"` +func (m *ResponseInitChain) GetAppHash() []byte { + if m != nil { + return m.AppHash + } + return nil } -func (m *ResponseLoadSnapshotChunk) Reset() { *m = ResponseLoadSnapshotChunk{} } -func (m *ResponseLoadSnapshotChunk) String() string { return proto.CompactTextString(m) } -func (*ResponseLoadSnapshotChunk) ProtoMessage() {} -func (*ResponseLoadSnapshotChunk) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{29} -} -func (m *ResponseLoadSnapshotChunk) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResponseLoadSnapshotChunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResponseLoadSnapshotChunk.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (m *ResponseInitChain) GetValidatorSetUpdate() ValidatorSetUpdate { + if m != nil { + return m.ValidatorSetUpdate } + return ValidatorSetUpdate{} } -func (m *ResponseLoadSnapshotChunk) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseLoadSnapshotChunk.Merge(m, src) -} -func (m *ResponseLoadSnapshotChunk) XXX_Size() int { - return m.Size() -} -func (m *ResponseLoadSnapshotChunk) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseLoadSnapshotChunk.DiscardUnknown(m) -} - -var xxx_messageInfo_ResponseLoadSnapshotChunk proto.InternalMessageInfo -func (m *ResponseLoadSnapshotChunk) GetChunk() []byte { +func (m *ResponseInitChain) GetNextCoreChainLockUpdate() *types1.CoreChainLock { if m != nil { - return m.Chunk + return m.NextCoreChainLockUpdate } return nil } -type ResponseApplySnapshotChunk struct { - Result ResponseApplySnapshotChunk_Result `protobuf:"varint,1,opt,name=result,proto3,enum=tendermint.abci.ResponseApplySnapshotChunk_Result" json:"result,omitempty"` - RefetchChunks []uint32 `protobuf:"varint,2,rep,packed,name=refetch_chunks,json=refetchChunks,proto3" json:"refetch_chunks,omitempty"` - RejectSenders []string `protobuf:"bytes,3,rep,name=reject_senders,json=rejectSenders,proto3" json:"reject_senders,omitempty"` +func (m *ResponseInitChain) GetInitialCoreHeight() uint32 { + if m != nil { + return m.InitialCoreHeight + } + return 0 } -func (m *ResponseApplySnapshotChunk) Reset() { *m = ResponseApplySnapshotChunk{} } -func (m *ResponseApplySnapshotChunk) String() string { return proto.CompactTextString(m) } -func (*ResponseApplySnapshotChunk) ProtoMessage() {} -func (*ResponseApplySnapshotChunk) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{30} +type ResponseQuery struct { + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // bytes data = 2; // use "value" instead. 
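// Illustrative sketch, not generated code: answering a key/value query. Per
// the reserved-field comment above, the payload belongs in Value (field 7),
// not in a "data" field. The map is a hypothetical stand-in for real
// application state.
func exampleQuery(store map[string][]byte, key []byte, height int64) ResponseQuery {
	return ResponseQuery{
		Code:   0,
		Key:    key,
		Value:  store[string(key)],
		Height: height, // state height the answer was read from
	}
}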
+ Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` + Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` + Index int64 `protobuf:"varint,5,opt,name=index,proto3" json:"index,omitempty"` + Key []byte `protobuf:"bytes,6,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,7,opt,name=value,proto3" json:"value,omitempty"` + ProofOps *crypto.ProofOps `protobuf:"bytes,8,opt,name=proof_ops,json=proofOps,proto3" json:"proof_ops,omitempty"` + Height int64 `protobuf:"varint,9,opt,name=height,proto3" json:"height,omitempty"` + Codespace string `protobuf:"bytes,10,opt,name=codespace,proto3" json:"codespace,omitempty"` } -func (m *ResponseApplySnapshotChunk) XXX_Unmarshal(b []byte) error { + +func (m *ResponseQuery) Reset() { *m = ResponseQuery{} } +func (m *ResponseQuery) String() string { return proto.CompactTextString(m) } +func (*ResponseQuery) ProtoMessage() {} +func (*ResponseQuery) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{26} +} +func (m *ResponseQuery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ResponseApplySnapshotChunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ResponseQuery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_ResponseApplySnapshotChunk.Marshal(b, m, deterministic) + return xxx_messageInfo_ResponseQuery.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -2356,127 +2382,97 @@ func (m *ResponseApplySnapshotChunk) XXX_Marshal(b []byte, deterministic bool) ( return b[:n], nil } } -func (m *ResponseApplySnapshotChunk) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseApplySnapshotChunk.Merge(m, src) +func (m *ResponseQuery) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseQuery.Merge(m, src) } -func (m *ResponseApplySnapshotChunk) XXX_Size() int { +func (m *ResponseQuery) XXX_Size() int { return m.Size() } -func (m *ResponseApplySnapshotChunk) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseApplySnapshotChunk.DiscardUnknown(m) +func (m *ResponseQuery) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseQuery.DiscardUnknown(m) } -var xxx_messageInfo_ResponseApplySnapshotChunk proto.InternalMessageInfo +var xxx_messageInfo_ResponseQuery proto.InternalMessageInfo -func (m *ResponseApplySnapshotChunk) GetResult() ResponseApplySnapshotChunk_Result { +func (m *ResponseQuery) GetCode() uint32 { if m != nil { - return m.Result + return m.Code } - return ResponseApplySnapshotChunk_UNKNOWN + return 0 } -func (m *ResponseApplySnapshotChunk) GetRefetchChunks() []uint32 { +func (m *ResponseQuery) GetLog() string { if m != nil { - return m.RefetchChunks + return m.Log } - return nil + return "" } -func (m *ResponseApplySnapshotChunk) GetRejectSenders() []string { +func (m *ResponseQuery) GetInfo() string { if m != nil { - return m.RejectSenders - } - return nil -} - -type LastCommitInfo struct { - Round int32 `protobuf:"varint,1,opt,name=round,proto3" json:"round,omitempty"` - QuorumHash []byte `protobuf:"bytes,3,opt,name=quorum_hash,json=quorumHash,proto3" json:"quorum_hash,omitempty"` - BlockSignature []byte `protobuf:"bytes,4,opt,name=block_signature,json=blockSignature,proto3" json:"block_signature,omitempty"` - StateSignature []byte `protobuf:"bytes,5,opt,name=state_signature,json=stateSignature,proto3" json:"state_signature,omitempty"` -} - -func (m *LastCommitInfo) Reset() { *m = LastCommitInfo{} } -func (m *LastCommitInfo) 
String() string { return proto.CompactTextString(m) } -func (*LastCommitInfo) ProtoMessage() {} -func (*LastCommitInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{31} -} -func (m *LastCommitInfo) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LastCommitInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LastCommitInfo.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil + return m.Info } + return "" } -func (m *LastCommitInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_LastCommitInfo.Merge(m, src) -} -func (m *LastCommitInfo) XXX_Size() int { - return m.Size() -} -func (m *LastCommitInfo) XXX_DiscardUnknown() { - xxx_messageInfo_LastCommitInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_LastCommitInfo proto.InternalMessageInfo -func (m *LastCommitInfo) GetRound() int32 { +func (m *ResponseQuery) GetIndex() int64 { if m != nil { - return m.Round + return m.Index } return 0 } -func (m *LastCommitInfo) GetQuorumHash() []byte { +func (m *ResponseQuery) GetKey() []byte { if m != nil { - return m.QuorumHash + return m.Key } return nil } -func (m *LastCommitInfo) GetBlockSignature() []byte { +func (m *ResponseQuery) GetValue() []byte { if m != nil { - return m.BlockSignature + return m.Value } return nil } -func (m *LastCommitInfo) GetStateSignature() []byte { +func (m *ResponseQuery) GetProofOps() *crypto.ProofOps { if m != nil { - return m.StateSignature + return m.ProofOps } return nil } -// Event allows application developers to attach additional information to -// ResponseBeginBlock, ResponseEndBlock, ResponseCheckTx and ResponseDeliverTx. -// Later, transactions may be queried using these events. 
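// Illustrative sketch, not generated code: attaching an Event (defined just
// below) to a transaction response so the transaction can be found later
// through the event index. The "transfer" type and "recipient" key are
// invented example names.
func exampleTransferEvent(recipient string) Event {
	return Event{
		Type: "transfer",
		Attributes: []EventAttribute{
			{Key: "recipient", Value: recipient, Index: true}, // Index: true marks it queryable
		},
	}
}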
-type Event struct { - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Attributes []EventAttribute `protobuf:"bytes,2,rep,name=attributes,proto3" json:"attributes,omitempty"` +func (m *ResponseQuery) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 } -func (m *Event) Reset() { *m = Event{} } -func (m *Event) String() string { return proto.CompactTextString(m) } -func (*Event) ProtoMessage() {} -func (*Event) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{32} +func (m *ResponseQuery) GetCodespace() string { + if m != nil { + return m.Codespace + } + return "" } -func (m *Event) XXX_Unmarshal(b []byte) error { + +type ResponseBeginBlock struct { + Events []Event `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` +} + +func (m *ResponseBeginBlock) Reset() { *m = ResponseBeginBlock{} } +func (m *ResponseBeginBlock) String() string { return proto.CompactTextString(m) } +func (*ResponseBeginBlock) ProtoMessage() {} +func (*ResponseBeginBlock) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{27} +} +func (m *ResponseBeginBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ResponseBeginBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_Event.Marshal(b, m, deterministic) + return xxx_messageInfo_ResponseBeginBlock.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -2486,51 +2482,52 @@ func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return b[:n], nil } } -func (m *Event) XXX_Merge(src proto.Message) { - xxx_messageInfo_Event.Merge(m, src) +func (m *ResponseBeginBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseBeginBlock.Merge(m, src) } -func (m *Event) XXX_Size() int { +func (m *ResponseBeginBlock) XXX_Size() int { return m.Size() } -func (m *Event) XXX_DiscardUnknown() { - xxx_messageInfo_Event.DiscardUnknown(m) +func (m *ResponseBeginBlock) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseBeginBlock.DiscardUnknown(m) } -var xxx_messageInfo_Event proto.InternalMessageInfo - -func (m *Event) GetType() string { - if m != nil { - return m.Type - } - return "" -} +var xxx_messageInfo_ResponseBeginBlock proto.InternalMessageInfo -func (m *Event) GetAttributes() []EventAttribute { +func (m *ResponseBeginBlock) GetEvents() []Event { if m != nil { - return m.Attributes + return m.Events } return nil } -// EventAttribute is a single key-value pair, associated with an event. 
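// Illustration only: once an attribute like the one above is indexed, clients
// can locate the transaction with Tendermint's "<type>.<key>='<value>'" event
// query syntax, e.g. through the tx_search RPC. The concrete query string
// below is a hypothetical example.
const exampleEventQuery = `transfer.recipient='cosmos1exampleaddress'`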
-type EventAttribute struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - Index bool `protobuf:"varint,3,opt,name=index,proto3" json:"index,omitempty"` +type ResponseCheckTx struct { + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` + Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` + GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,json=gasWanted,proto3" json:"gas_wanted,omitempty"` + GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,json=gasUsed,proto3" json:"gas_used,omitempty"` + Events []Event `protobuf:"bytes,7,rep,name=events,proto3" json:"events,omitempty"` + Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` + Sender string `protobuf:"bytes,9,opt,name=sender,proto3" json:"sender,omitempty"` + Priority int64 `protobuf:"varint,10,opt,name=priority,proto3" json:"priority,omitempty"` + // ABCI applications creating a ResponseCheckTx should not set mempool_error. + MempoolError string `protobuf:"bytes,11,opt,name=mempool_error,json=mempoolError,proto3" json:"mempool_error,omitempty"` } -func (m *EventAttribute) Reset() { *m = EventAttribute{} } -func (m *EventAttribute) String() string { return proto.CompactTextString(m) } -func (*EventAttribute) ProtoMessage() {} -func (*EventAttribute) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{33} +func (m *ResponseCheckTx) Reset() { *m = ResponseCheckTx{} } +func (m *ResponseCheckTx) String() string { return proto.CompactTextString(m) } +func (*ResponseCheckTx) ProtoMessage() {} +func (*ResponseCheckTx) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{28} } -func (m *EventAttribute) XXX_Unmarshal(b []byte) error { +func (m *ResponseCheckTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *EventAttribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ResponseCheckTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_EventAttribute.Marshal(b, m, deterministic) + return xxx_messageInfo_ResponseCheckTx.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -2540,130 +2537,118 @@ func (m *EventAttribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, erro return b[:n], nil } } -func (m *EventAttribute) XXX_Merge(src proto.Message) { - xxx_messageInfo_EventAttribute.Merge(m, src) +func (m *ResponseCheckTx) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseCheckTx.Merge(m, src) } -func (m *EventAttribute) XXX_Size() int { +func (m *ResponseCheckTx) XXX_Size() int { return m.Size() } -func (m *EventAttribute) XXX_DiscardUnknown() { - xxx_messageInfo_EventAttribute.DiscardUnknown(m) +func (m *ResponseCheckTx) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseCheckTx.DiscardUnknown(m) } -var xxx_messageInfo_EventAttribute proto.InternalMessageInfo +var xxx_messageInfo_ResponseCheckTx proto.InternalMessageInfo -func (m *EventAttribute) GetKey() string { +func (m *ResponseCheckTx) GetCode() uint32 { if m != nil { - return m.Key + return m.Code } - return "" + return 0 } -func (m *EventAttribute) GetValue() string { +func (m *ResponseCheckTx) GetData() []byte { if m != nil { - return m.Value 
+ return m.Data } - return "" + return nil } -func (m *EventAttribute) GetIndex() bool { +func (m *ResponseCheckTx) GetLog() string { if m != nil { - return m.Index + return m.Log } - return false + return "" } -// TxResult contains results of executing the transaction. -// -// One usage is indexing transaction results. -type TxResult struct { - Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` - Index uint32 `protobuf:"varint,2,opt,name=index,proto3" json:"index,omitempty"` - Tx []byte `protobuf:"bytes,3,opt,name=tx,proto3" json:"tx,omitempty"` - Result ResponseDeliverTx `protobuf:"bytes,4,opt,name=result,proto3" json:"result"` +func (m *ResponseCheckTx) GetInfo() string { + if m != nil { + return m.Info + } + return "" } -func (m *TxResult) Reset() { *m = TxResult{} } -func (m *TxResult) String() string { return proto.CompactTextString(m) } -func (*TxResult) ProtoMessage() {} -func (*TxResult) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{34} -} -func (m *TxResult) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TxResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TxResult.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (m *ResponseCheckTx) GetGasWanted() int64 { + if m != nil { + return m.GasWanted } + return 0 } -func (m *TxResult) XXX_Merge(src proto.Message) { - xxx_messageInfo_TxResult.Merge(m, src) -} -func (m *TxResult) XXX_Size() int { - return m.Size() -} -func (m *TxResult) XXX_DiscardUnknown() { - xxx_messageInfo_TxResult.DiscardUnknown(m) + +func (m *ResponseCheckTx) GetGasUsed() int64 { + if m != nil { + return m.GasUsed + } + return 0 } -var xxx_messageInfo_TxResult proto.InternalMessageInfo +func (m *ResponseCheckTx) GetEvents() []Event { + if m != nil { + return m.Events + } + return nil +} -func (m *TxResult) GetHeight() int64 { +func (m *ResponseCheckTx) GetCodespace() string { if m != nil { - return m.Height + return m.Codespace } - return 0 + return "" } -func (m *TxResult) GetIndex() uint32 { +func (m *ResponseCheckTx) GetSender() string { if m != nil { - return m.Index + return m.Sender } - return 0 + return "" } -func (m *TxResult) GetTx() []byte { +func (m *ResponseCheckTx) GetPriority() int64 { if m != nil { - return m.Tx + return m.Priority } - return nil + return 0 } -func (m *TxResult) GetResult() ResponseDeliverTx { +func (m *ResponseCheckTx) GetMempoolError() string { if m != nil { - return m.Result + return m.MempoolError } - return ResponseDeliverTx{} + return "" } -// Validator -type Validator struct { - // bytes address = 1; // The first 20 bytes of SHA256(public key) - // PubKey pub_key = 2 [(gogoproto.nullable)=false]; - Power int64 `protobuf:"varint,3,opt,name=power,proto3" json:"power,omitempty"` - ProTxHash []byte `protobuf:"bytes,4,opt,name=pro_tx_hash,json=proTxHash,proto3" json:"pro_tx_hash,omitempty"` +type ResponseDeliverTx struct { + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` + Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` + GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,proto3" json:"gas_wanted,omitempty"` + GasUsed int64 
`protobuf:"varint,6,opt,name=gas_used,proto3" json:"gas_used,omitempty"` + Events []Event `protobuf:"bytes,7,rep,name=events,proto3" json:"events,omitempty"` + Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` } -func (m *Validator) Reset() { *m = Validator{} } -func (m *Validator) String() string { return proto.CompactTextString(m) } -func (*Validator) ProtoMessage() {} -func (*Validator) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{35} +func (m *ResponseDeliverTx) Reset() { *m = ResponseDeliverTx{} } +func (m *ResponseDeliverTx) String() string { return proto.CompactTextString(m) } +func (*ResponseDeliverTx) ProtoMessage() {} +func (*ResponseDeliverTx) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{29} } -func (m *Validator) XXX_Unmarshal(b []byte) error { +func (m *ResponseDeliverTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *Validator) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ResponseDeliverTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_Validator.Marshal(b, m, deterministic) + return xxx_messageInfo_ResponseDeliverTx.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -2673,119 +2658,93 @@ func (m *Validator) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return b[:n], nil } } -func (m *Validator) XXX_Merge(src proto.Message) { - xxx_messageInfo_Validator.Merge(m, src) +func (m *ResponseDeliverTx) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseDeliverTx.Merge(m, src) } -func (m *Validator) XXX_Size() int { +func (m *ResponseDeliverTx) XXX_Size() int { return m.Size() } -func (m *Validator) XXX_DiscardUnknown() { - xxx_messageInfo_Validator.DiscardUnknown(m) +func (m *ResponseDeliverTx) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseDeliverTx.DiscardUnknown(m) } -var xxx_messageInfo_Validator proto.InternalMessageInfo +var xxx_messageInfo_ResponseDeliverTx proto.InternalMessageInfo -func (m *Validator) GetPower() int64 { +func (m *ResponseDeliverTx) GetCode() uint32 { if m != nil { - return m.Power + return m.Code } return 0 } -func (m *Validator) GetProTxHash() []byte { +func (m *ResponseDeliverTx) GetData() []byte { if m != nil { - return m.ProTxHash + return m.Data } return nil } -// ValidatorUpdate -type ValidatorUpdate struct { - PubKey *crypto.PublicKey `protobuf:"bytes,1,opt,name=pub_key,json=pubKey,proto3" json:"pub_key,omitempty"` - Power int64 `protobuf:"varint,2,opt,name=power,proto3" json:"power,omitempty"` - ProTxHash []byte `protobuf:"bytes,3,opt,name=pro_tx_hash,json=proTxHash,proto3" json:"pro_tx_hash,omitempty"` - NodeAddress string `protobuf:"bytes,4,opt,name=node_address,json=nodeAddress,proto3" json:"node_address,omitempty"` +func (m *ResponseDeliverTx) GetLog() string { + if m != nil { + return m.Log + } + return "" } -func (m *ValidatorUpdate) Reset() { *m = ValidatorUpdate{} } -func (m *ValidatorUpdate) String() string { return proto.CompactTextString(m) } -func (*ValidatorUpdate) ProtoMessage() {} -func (*ValidatorUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{36} +func (m *ResponseDeliverTx) GetInfo() string { + if m != nil { + return m.Info + } + return "" } -func (m *ValidatorUpdate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ValidatorUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if 
deterministic { - return xxx_messageInfo_ValidatorUpdate.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ValidatorUpdate) XXX_Merge(src proto.Message) { - xxx_messageInfo_ValidatorUpdate.Merge(m, src) -} -func (m *ValidatorUpdate) XXX_Size() int { - return m.Size() -} -func (m *ValidatorUpdate) XXX_DiscardUnknown() { - xxx_messageInfo_ValidatorUpdate.DiscardUnknown(m) -} - -var xxx_messageInfo_ValidatorUpdate proto.InternalMessageInfo -func (m *ValidatorUpdate) GetPubKey() *crypto.PublicKey { +func (m *ResponseDeliverTx) GetGasWanted() int64 { if m != nil { - return m.PubKey + return m.GasWanted } - return nil + return 0 } -func (m *ValidatorUpdate) GetPower() int64 { +func (m *ResponseDeliverTx) GetGasUsed() int64 { if m != nil { - return m.Power + return m.GasUsed } return 0 } -func (m *ValidatorUpdate) GetProTxHash() []byte { +func (m *ResponseDeliverTx) GetEvents() []Event { if m != nil { - return m.ProTxHash + return m.Events } return nil } -func (m *ValidatorUpdate) GetNodeAddress() string { +func (m *ResponseDeliverTx) GetCodespace() string { if m != nil { - return m.NodeAddress + return m.Codespace } return "" } -type ValidatorSetUpdate struct { - ValidatorUpdates []ValidatorUpdate `protobuf:"bytes,1,rep,name=validator_updates,json=validatorUpdates,proto3" json:"validator_updates"` - ThresholdPublicKey crypto.PublicKey `protobuf:"bytes,2,opt,name=threshold_public_key,json=thresholdPublicKey,proto3" json:"threshold_public_key"` - QuorumHash []byte `protobuf:"bytes,3,opt,name=quorum_hash,json=quorumHash,proto3" json:"quorum_hash,omitempty"` +type ResponseEndBlock struct { + ConsensusParamUpdates *types1.ConsensusParams `protobuf:"bytes,2,opt,name=consensus_param_updates,json=consensusParamUpdates,proto3" json:"consensus_param_updates,omitempty"` + Events []Event `protobuf:"bytes,3,rep,name=events,proto3" json:"events,omitempty"` + NextCoreChainLockUpdate *types1.CoreChainLock `protobuf:"bytes,100,opt,name=next_core_chain_lock_update,json=nextCoreChainLockUpdate,proto3" json:"next_core_chain_lock_update,omitempty"` + ValidatorSetUpdate *ValidatorSetUpdate `protobuf:"bytes,101,opt,name=validator_set_update,json=validatorSetUpdate,proto3" json:"validator_set_update,omitempty"` } -func (m *ValidatorSetUpdate) Reset() { *m = ValidatorSetUpdate{} } -func (m *ValidatorSetUpdate) String() string { return proto.CompactTextString(m) } -func (*ValidatorSetUpdate) ProtoMessage() {} -func (*ValidatorSetUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{37} +func (m *ResponseEndBlock) Reset() { *m = ResponseEndBlock{} } +func (m *ResponseEndBlock) String() string { return proto.CompactTextString(m) } +func (*ResponseEndBlock) ProtoMessage() {} +func (*ResponseEndBlock) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{30} } -func (m *ValidatorSetUpdate) XXX_Unmarshal(b []byte) error { +func (m *ResponseEndBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ValidatorSetUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ResponseEndBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_ValidatorSetUpdate.Marshal(b, m, deterministic) + return xxx_messageInfo_ResponseEndBlock.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -2795,55 +2754,64 @@ func (m *ValidatorSetUpdate) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, return b[:n], nil } } -func (m *ValidatorSetUpdate) XXX_Merge(src proto.Message) { - xxx_messageInfo_ValidatorSetUpdate.Merge(m, src) +func (m *ResponseEndBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseEndBlock.Merge(m, src) } -func (m *ValidatorSetUpdate) XXX_Size() int { +func (m *ResponseEndBlock) XXX_Size() int { return m.Size() } -func (m *ValidatorSetUpdate) XXX_DiscardUnknown() { - xxx_messageInfo_ValidatorSetUpdate.DiscardUnknown(m) +func (m *ResponseEndBlock) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseEndBlock.DiscardUnknown(m) } -var xxx_messageInfo_ValidatorSetUpdate proto.InternalMessageInfo +var xxx_messageInfo_ResponseEndBlock proto.InternalMessageInfo -func (m *ValidatorSetUpdate) GetValidatorUpdates() []ValidatorUpdate { +func (m *ResponseEndBlock) GetConsensusParamUpdates() *types1.ConsensusParams { if m != nil { - return m.ValidatorUpdates + return m.ConsensusParamUpdates } return nil } -func (m *ValidatorSetUpdate) GetThresholdPublicKey() crypto.PublicKey { +func (m *ResponseEndBlock) GetEvents() []Event { if m != nil { - return m.ThresholdPublicKey + return m.Events } - return crypto.PublicKey{} + return nil } -func (m *ValidatorSetUpdate) GetQuorumHash() []byte { +func (m *ResponseEndBlock) GetNextCoreChainLockUpdate() *types1.CoreChainLock { if m != nil { - return m.QuorumHash + return m.NextCoreChainLockUpdate } return nil } -type ThresholdPublicKeyUpdate struct { - ThresholdPublicKey crypto.PublicKey `protobuf:"bytes,1,opt,name=threshold_public_key,json=thresholdPublicKey,proto3" json:"threshold_public_key"` +func (m *ResponseEndBlock) GetValidatorSetUpdate() *ValidatorSetUpdate { + if m != nil { + return m.ValidatorSetUpdate + } + return nil } -func (m *ThresholdPublicKeyUpdate) Reset() { *m = ThresholdPublicKeyUpdate{} } -func (m *ThresholdPublicKeyUpdate) String() string { return proto.CompactTextString(m) } -func (*ThresholdPublicKeyUpdate) ProtoMessage() {} -func (*ThresholdPublicKeyUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{38} +type ResponseCommit struct { + // reserve 1 + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + RetainHeight int64 `protobuf:"varint,3,opt,name=retain_height,json=retainHeight,proto3" json:"retain_height,omitempty"` } -func (m *ThresholdPublicKeyUpdate) XXX_Unmarshal(b []byte) error { + +func (m *ResponseCommit) Reset() { *m = ResponseCommit{} } +func (m *ResponseCommit) String() string { return proto.CompactTextString(m) } +func (*ResponseCommit) ProtoMessage() {} +func (*ResponseCommit) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{31} +} +func (m *ResponseCommit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *ThresholdPublicKeyUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ResponseCommit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_ThresholdPublicKeyUpdate.Marshal(b, m, deterministic) + return xxx_messageInfo_ResponseCommit.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -2853,41 +2821,48 @@ func (m *ThresholdPublicKeyUpdate) XXX_Marshal(b []byte, deterministic bool) ([] return b[:n], nil } } -func (m *ThresholdPublicKeyUpdate) XXX_Merge(src proto.Message) { - xxx_messageInfo_ThresholdPublicKeyUpdate.Merge(m, src) +func (m *ResponseCommit) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_ResponseCommit.Merge(m, src) } -func (m *ThresholdPublicKeyUpdate) XXX_Size() int { +func (m *ResponseCommit) XXX_Size() int { return m.Size() } -func (m *ThresholdPublicKeyUpdate) XXX_DiscardUnknown() { - xxx_messageInfo_ThresholdPublicKeyUpdate.DiscardUnknown(m) +func (m *ResponseCommit) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseCommit.DiscardUnknown(m) } -var xxx_messageInfo_ThresholdPublicKeyUpdate proto.InternalMessageInfo +var xxx_messageInfo_ResponseCommit proto.InternalMessageInfo -func (m *ThresholdPublicKeyUpdate) GetThresholdPublicKey() crypto.PublicKey { +func (m *ResponseCommit) GetData() []byte { if m != nil { - return m.ThresholdPublicKey + return m.Data } - return crypto.PublicKey{} + return nil } -type QuorumHashUpdate struct { - QuorumHash []byte `protobuf:"bytes,1,opt,name=quorum_hash,json=quorumHash,proto3" json:"quorum_hash,omitempty"` +func (m *ResponseCommit) GetRetainHeight() int64 { + if m != nil { + return m.RetainHeight + } + return 0 } -func (m *QuorumHashUpdate) Reset() { *m = QuorumHashUpdate{} } -func (m *QuorumHashUpdate) String() string { return proto.CompactTextString(m) } -func (*QuorumHashUpdate) ProtoMessage() {} -func (*QuorumHashUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{39} +type ResponseListSnapshots struct { + Snapshots []*Snapshot `protobuf:"bytes,1,rep,name=snapshots,proto3" json:"snapshots,omitempty"` } -func (m *QuorumHashUpdate) XXX_Unmarshal(b []byte) error { + +func (m *ResponseListSnapshots) Reset() { *m = ResponseListSnapshots{} } +func (m *ResponseListSnapshots) String() string { return proto.CompactTextString(m) } +func (*ResponseListSnapshots) ProtoMessage() {} +func (*ResponseListSnapshots) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{32} +} +func (m *ResponseListSnapshots) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *QuorumHashUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ResponseListSnapshots) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_QuorumHashUpdate.Marshal(b, m, deterministic) + return xxx_messageInfo_ResponseListSnapshots.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -2897,43 +2872,41 @@ func (m *QuorumHashUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, er return b[:n], nil } } -func (m *QuorumHashUpdate) XXX_Merge(src proto.Message) { - xxx_messageInfo_QuorumHashUpdate.Merge(m, src) +func (m *ResponseListSnapshots) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseListSnapshots.Merge(m, src) } -func (m *QuorumHashUpdate) XXX_Size() int { +func (m *ResponseListSnapshots) XXX_Size() int { return m.Size() } -func (m *QuorumHashUpdate) XXX_DiscardUnknown() { - xxx_messageInfo_QuorumHashUpdate.DiscardUnknown(m) +func (m *ResponseListSnapshots) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseListSnapshots.DiscardUnknown(m) } -var xxx_messageInfo_QuorumHashUpdate proto.InternalMessageInfo +var xxx_messageInfo_ResponseListSnapshots proto.InternalMessageInfo -func (m *QuorumHashUpdate) GetQuorumHash() []byte { +func (m *ResponseListSnapshots) GetSnapshots() []*Snapshot { if m != nil { - return m.QuorumHash + return m.Snapshots } return nil } -// VoteInfo -type VoteInfo struct { - Validator Validator `protobuf:"bytes,1,opt,name=validator,proto3" json:"validator"` - SignedLastBlock bool `protobuf:"varint,2,opt,name=signed_last_block,json=signedLastBlock,proto3" 
json:"signed_last_block,omitempty"` +type ResponseOfferSnapshot struct { + Result ResponseOfferSnapshot_Result `protobuf:"varint,1,opt,name=result,proto3,enum=tendermint.abci.ResponseOfferSnapshot_Result" json:"result,omitempty"` } -func (m *VoteInfo) Reset() { *m = VoteInfo{} } -func (m *VoteInfo) String() string { return proto.CompactTextString(m) } -func (*VoteInfo) ProtoMessage() {} -func (*VoteInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{40} +func (m *ResponseOfferSnapshot) Reset() { *m = ResponseOfferSnapshot{} } +func (m *ResponseOfferSnapshot) String() string { return proto.CompactTextString(m) } +func (*ResponseOfferSnapshot) ProtoMessage() {} +func (*ResponseOfferSnapshot) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{33} } -func (m *VoteInfo) XXX_Unmarshal(b []byte) error { +func (m *ResponseOfferSnapshot) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *VoteInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ResponseOfferSnapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_VoteInfo.Marshal(b, m, deterministic) + return xxx_messageInfo_ResponseOfferSnapshot.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -2943,58 +2916,41 @@ func (m *VoteInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return b[:n], nil } } -func (m *VoteInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_VoteInfo.Merge(m, src) +func (m *ResponseOfferSnapshot) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseOfferSnapshot.Merge(m, src) } -func (m *VoteInfo) XXX_Size() int { +func (m *ResponseOfferSnapshot) XXX_Size() int { return m.Size() } -func (m *VoteInfo) XXX_DiscardUnknown() { - xxx_messageInfo_VoteInfo.DiscardUnknown(m) +func (m *ResponseOfferSnapshot) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseOfferSnapshot.DiscardUnknown(m) } -var xxx_messageInfo_VoteInfo proto.InternalMessageInfo - -func (m *VoteInfo) GetValidator() Validator { - if m != nil { - return m.Validator - } - return Validator{} -} +var xxx_messageInfo_ResponseOfferSnapshot proto.InternalMessageInfo -func (m *VoteInfo) GetSignedLastBlock() bool { +func (m *ResponseOfferSnapshot) GetResult() ResponseOfferSnapshot_Result { if m != nil { - return m.SignedLastBlock + return m.Result } - return false + return ResponseOfferSnapshot_UNKNOWN } -type Evidence struct { - Type EvidenceType `protobuf:"varint,1,opt,name=type,proto3,enum=tendermint.abci.EvidenceType" json:"type,omitempty"` - // The offending validator - Validator Validator `protobuf:"bytes,2,opt,name=validator,proto3" json:"validator"` - // The height when the offense occurred - Height int64 `protobuf:"varint,3,opt,name=height,proto3" json:"height,omitempty"` - // The corresponding time where the offense occurred - Time time.Time `protobuf:"bytes,4,opt,name=time,proto3,stdtime" json:"time"` - // Total voting power of the validator set in case the ABCI application does - // not store historical validators. 
- // https://github.com/tendermint/tendermint/issues/4581 - TotalVotingPower int64 `protobuf:"varint,5,opt,name=total_voting_power,json=totalVotingPower,proto3" json:"total_voting_power,omitempty"` +type ResponseLoadSnapshotChunk struct { + Chunk []byte `protobuf:"bytes,1,opt,name=chunk,proto3" json:"chunk,omitempty"` } -func (m *Evidence) Reset() { *m = Evidence{} } -func (m *Evidence) String() string { return proto.CompactTextString(m) } -func (*Evidence) ProtoMessage() {} -func (*Evidence) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{41} +func (m *ResponseLoadSnapshotChunk) Reset() { *m = ResponseLoadSnapshotChunk{} } +func (m *ResponseLoadSnapshotChunk) String() string { return proto.CompactTextString(m) } +func (*ResponseLoadSnapshotChunk) ProtoMessage() {} +func (*ResponseLoadSnapshotChunk) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{34} } -func (m *Evidence) XXX_Unmarshal(b []byte) error { +func (m *ResponseLoadSnapshotChunk) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *Evidence) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ResponseLoadSnapshotChunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_Evidence.Marshal(b, m, deterministic) + return xxx_messageInfo_ResponseLoadSnapshotChunk.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -3004,74 +2960,105 @@ func (m *Evidence) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return b[:n], nil } } -func (m *Evidence) XXX_Merge(src proto.Message) { - xxx_messageInfo_Evidence.Merge(m, src) +func (m *ResponseLoadSnapshotChunk) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseLoadSnapshotChunk.Merge(m, src) } -func (m *Evidence) XXX_Size() int { +func (m *ResponseLoadSnapshotChunk) XXX_Size() int { return m.Size() } -func (m *Evidence) XXX_DiscardUnknown() { - xxx_messageInfo_Evidence.DiscardUnknown(m) -} +func (m *ResponseLoadSnapshotChunk) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseLoadSnapshotChunk.DiscardUnknown(m) +} -var xxx_messageInfo_Evidence proto.InternalMessageInfo +var xxx_messageInfo_ResponseLoadSnapshotChunk proto.InternalMessageInfo -func (m *Evidence) GetType() EvidenceType { +func (m *ResponseLoadSnapshotChunk) GetChunk() []byte { if m != nil { - return m.Type + return m.Chunk } - return EvidenceType_UNKNOWN + return nil } -func (m *Evidence) GetValidator() Validator { - if m != nil { - return m.Validator +type ResponseApplySnapshotChunk struct { + Result ResponseApplySnapshotChunk_Result `protobuf:"varint,1,opt,name=result,proto3,enum=tendermint.abci.ResponseApplySnapshotChunk_Result" json:"result,omitempty"` + RefetchChunks []uint32 `protobuf:"varint,2,rep,packed,name=refetch_chunks,json=refetchChunks,proto3" json:"refetch_chunks,omitempty"` + RejectSenders []string `protobuf:"bytes,3,rep,name=reject_senders,json=rejectSenders,proto3" json:"reject_senders,omitempty"` +} + +func (m *ResponseApplySnapshotChunk) Reset() { *m = ResponseApplySnapshotChunk{} } +func (m *ResponseApplySnapshotChunk) String() string { return proto.CompactTextString(m) } +func (*ResponseApplySnapshotChunk) ProtoMessage() {} +func (*ResponseApplySnapshotChunk) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{35} +} +func (m *ResponseApplySnapshotChunk) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseApplySnapshotChunk) XXX_Marshal(b []byte, deterministic 
bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseApplySnapshotChunk.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - return Validator{} } +func (m *ResponseApplySnapshotChunk) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseApplySnapshotChunk.Merge(m, src) +} +func (m *ResponseApplySnapshotChunk) XXX_Size() int { + return m.Size() +} +func (m *ResponseApplySnapshotChunk) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseApplySnapshotChunk.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseApplySnapshotChunk proto.InternalMessageInfo -func (m *Evidence) GetHeight() int64 { +func (m *ResponseApplySnapshotChunk) GetResult() ResponseApplySnapshotChunk_Result { if m != nil { - return m.Height + return m.Result } - return 0 + return ResponseApplySnapshotChunk_UNKNOWN } -func (m *Evidence) GetTime() time.Time { +func (m *ResponseApplySnapshotChunk) GetRefetchChunks() []uint32 { if m != nil { - return m.Time + return m.RefetchChunks } - return time.Time{} + return nil } -func (m *Evidence) GetTotalVotingPower() int64 { +func (m *ResponseApplySnapshotChunk) GetRejectSenders() []string { if m != nil { - return m.TotalVotingPower + return m.RejectSenders } - return 0 + return nil } -type Snapshot struct { - Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` - Format uint32 `protobuf:"varint,2,opt,name=format,proto3" json:"format,omitempty"` - Chunks uint32 `protobuf:"varint,3,opt,name=chunks,proto3" json:"chunks,omitempty"` - Hash []byte `protobuf:"bytes,4,opt,name=hash,proto3" json:"hash,omitempty"` - Metadata []byte `protobuf:"bytes,5,opt,name=metadata,proto3" json:"metadata,omitempty"` - CoreChainLockedHeight uint32 `protobuf:"varint,100,opt,name=core_chain_locked_height,json=coreChainLockedHeight,proto3" json:"core_chain_locked_height,omitempty"` +type ResponsePrepareProposal struct { + TxRecords []*TxRecord `protobuf:"bytes,1,rep,name=tx_records,json=txRecords,proto3" json:"tx_records,omitempty"` + AppHash []byte `protobuf:"bytes,2,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` + TxResults []*ExecTxResult `protobuf:"bytes,3,rep,name=tx_results,json=txResults,proto3" json:"tx_results,omitempty"` + ValidatorUpdates []*ValidatorUpdate `protobuf:"bytes,4,rep,name=validator_updates,json=validatorUpdates,proto3" json:"validator_updates,omitempty"` + ConsensusParamUpdates *types1.ConsensusParams `protobuf:"bytes,5,opt,name=consensus_param_updates,json=consensusParamUpdates,proto3" json:"consensus_param_updates,omitempty"` } -func (m *Snapshot) Reset() { *m = Snapshot{} } -func (m *Snapshot) String() string { return proto.CompactTextString(m) } -func (*Snapshot) ProtoMessage() {} -func (*Snapshot) Descriptor() ([]byte, []int) { - return fileDescriptor_252557cfdd89a31a, []int{42} +func (m *ResponsePrepareProposal) Reset() { *m = ResponsePrepareProposal{} } +func (m *ResponsePrepareProposal) String() string { return proto.CompactTextString(m) } +func (*ResponsePrepareProposal) ProtoMessage() {} +func (*ResponsePrepareProposal) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{36} } -func (m *Snapshot) XXX_Unmarshal(b []byte) error { +func (m *ResponsePrepareProposal) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ResponsePrepareProposal) XXX_Marshal(b []byte, deterministic bool) 
([]byte, error) { if deterministic { - return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic) + return xxx_messageInfo_ResponsePrepareProposal.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -3081,2589 +3068,2260 @@ func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return b[:n], nil } } -func (m *Snapshot) XXX_Merge(src proto.Message) { - xxx_messageInfo_Snapshot.Merge(m, src) +func (m *ResponsePrepareProposal) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponsePrepareProposal.Merge(m, src) } -func (m *Snapshot) XXX_Size() int { +func (m *ResponsePrepareProposal) XXX_Size() int { return m.Size() } -func (m *Snapshot) XXX_DiscardUnknown() { - xxx_messageInfo_Snapshot.DiscardUnknown(m) +func (m *ResponsePrepareProposal) XXX_DiscardUnknown() { + xxx_messageInfo_ResponsePrepareProposal.DiscardUnknown(m) } -var xxx_messageInfo_Snapshot proto.InternalMessageInfo +var xxx_messageInfo_ResponsePrepareProposal proto.InternalMessageInfo -func (m *Snapshot) GetHeight() uint64 { +func (m *ResponsePrepareProposal) GetTxRecords() []*TxRecord { if m != nil { - return m.Height + return m.TxRecords } - return 0 + return nil } -func (m *Snapshot) GetFormat() uint32 { +func (m *ResponsePrepareProposal) GetAppHash() []byte { if m != nil { - return m.Format + return m.AppHash } - return 0 + return nil } -func (m *Snapshot) GetChunks() uint32 { +func (m *ResponsePrepareProposal) GetTxResults() []*ExecTxResult { if m != nil { - return m.Chunks + return m.TxResults } - return 0 + return nil } -func (m *Snapshot) GetHash() []byte { +func (m *ResponsePrepareProposal) GetValidatorUpdates() []*ValidatorUpdate { if m != nil { - return m.Hash + return m.ValidatorUpdates } return nil } -func (m *Snapshot) GetMetadata() []byte { +func (m *ResponsePrepareProposal) GetConsensusParamUpdates() *types1.ConsensusParams { if m != nil { - return m.Metadata + return m.ConsensusParamUpdates } return nil } -func (m *Snapshot) GetCoreChainLockedHeight() uint32 { - if m != nil { - return m.CoreChainLockedHeight - } - return 0 +type ResponseProcessProposal struct { + Status ResponseProcessProposal_ProposalStatus `protobuf:"varint,1,opt,name=status,proto3,enum=tendermint.abci.ResponseProcessProposal_ProposalStatus" json:"status,omitempty"` + AppHash []byte `protobuf:"bytes,2,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` + TxResults []*ExecTxResult `protobuf:"bytes,3,rep,name=tx_results,json=txResults,proto3" json:"tx_results,omitempty"` + ValidatorUpdates []*ValidatorUpdate `protobuf:"bytes,4,rep,name=validator_updates,json=validatorUpdates,proto3" json:"validator_updates,omitempty"` + ConsensusParamUpdates *types1.ConsensusParams `protobuf:"bytes,5,opt,name=consensus_param_updates,json=consensusParamUpdates,proto3" json:"consensus_param_updates,omitempty"` } -func init() { - proto.RegisterEnum("tendermint.abci.CheckTxType", CheckTxType_name, CheckTxType_value) - proto.RegisterEnum("tendermint.abci.EvidenceType", EvidenceType_name, EvidenceType_value) - proto.RegisterEnum("tendermint.abci.ResponseOfferSnapshot_Result", ResponseOfferSnapshot_Result_name, ResponseOfferSnapshot_Result_value) - proto.RegisterEnum("tendermint.abci.ResponseApplySnapshotChunk_Result", ResponseApplySnapshotChunk_Result_name, ResponseApplySnapshotChunk_Result_value) - proto.RegisterType((*Request)(nil), "tendermint.abci.Request") - proto.RegisterType((*RequestEcho)(nil), "tendermint.abci.RequestEcho") - proto.RegisterType((*RequestFlush)(nil), 
"tendermint.abci.RequestFlush") - proto.RegisterType((*RequestInfo)(nil), "tendermint.abci.RequestInfo") - proto.RegisterType((*RequestInitChain)(nil), "tendermint.abci.RequestInitChain") - proto.RegisterType((*RequestQuery)(nil), "tendermint.abci.RequestQuery") - proto.RegisterType((*RequestBeginBlock)(nil), "tendermint.abci.RequestBeginBlock") - proto.RegisterType((*RequestCheckTx)(nil), "tendermint.abci.RequestCheckTx") - proto.RegisterType((*RequestDeliverTx)(nil), "tendermint.abci.RequestDeliverTx") - proto.RegisterType((*RequestEndBlock)(nil), "tendermint.abci.RequestEndBlock") - proto.RegisterType((*RequestCommit)(nil), "tendermint.abci.RequestCommit") - proto.RegisterType((*RequestListSnapshots)(nil), "tendermint.abci.RequestListSnapshots") - proto.RegisterType((*RequestOfferSnapshot)(nil), "tendermint.abci.RequestOfferSnapshot") - proto.RegisterType((*RequestLoadSnapshotChunk)(nil), "tendermint.abci.RequestLoadSnapshotChunk") - proto.RegisterType((*RequestApplySnapshotChunk)(nil), "tendermint.abci.RequestApplySnapshotChunk") - proto.RegisterType((*Response)(nil), "tendermint.abci.Response") - proto.RegisterType((*ResponseException)(nil), "tendermint.abci.ResponseException") - proto.RegisterType((*ResponseEcho)(nil), "tendermint.abci.ResponseEcho") - proto.RegisterType((*ResponseFlush)(nil), "tendermint.abci.ResponseFlush") - proto.RegisterType((*ResponseInfo)(nil), "tendermint.abci.ResponseInfo") - proto.RegisterType((*ResponseInitChain)(nil), "tendermint.abci.ResponseInitChain") - proto.RegisterType((*ResponseQuery)(nil), "tendermint.abci.ResponseQuery") - proto.RegisterType((*ResponseBeginBlock)(nil), "tendermint.abci.ResponseBeginBlock") - proto.RegisterType((*ResponseCheckTx)(nil), "tendermint.abci.ResponseCheckTx") - proto.RegisterType((*ResponseDeliverTx)(nil), "tendermint.abci.ResponseDeliverTx") - proto.RegisterType((*ResponseEndBlock)(nil), "tendermint.abci.ResponseEndBlock") - proto.RegisterType((*ResponseCommit)(nil), "tendermint.abci.ResponseCommit") - proto.RegisterType((*ResponseListSnapshots)(nil), "tendermint.abci.ResponseListSnapshots") - proto.RegisterType((*ResponseOfferSnapshot)(nil), "tendermint.abci.ResponseOfferSnapshot") - proto.RegisterType((*ResponseLoadSnapshotChunk)(nil), "tendermint.abci.ResponseLoadSnapshotChunk") - proto.RegisterType((*ResponseApplySnapshotChunk)(nil), "tendermint.abci.ResponseApplySnapshotChunk") - proto.RegisterType((*LastCommitInfo)(nil), "tendermint.abci.LastCommitInfo") - proto.RegisterType((*Event)(nil), "tendermint.abci.Event") - proto.RegisterType((*EventAttribute)(nil), "tendermint.abci.EventAttribute") - proto.RegisterType((*TxResult)(nil), "tendermint.abci.TxResult") - proto.RegisterType((*Validator)(nil), "tendermint.abci.Validator") - proto.RegisterType((*ValidatorUpdate)(nil), "tendermint.abci.ValidatorUpdate") - proto.RegisterType((*ValidatorSetUpdate)(nil), "tendermint.abci.ValidatorSetUpdate") - proto.RegisterType((*ThresholdPublicKeyUpdate)(nil), "tendermint.abci.ThresholdPublicKeyUpdate") - proto.RegisterType((*QuorumHashUpdate)(nil), "tendermint.abci.QuorumHashUpdate") - proto.RegisterType((*VoteInfo)(nil), "tendermint.abci.VoteInfo") - proto.RegisterType((*Evidence)(nil), "tendermint.abci.Evidence") - proto.RegisterType((*Snapshot)(nil), "tendermint.abci.Snapshot") +func (m *ResponseProcessProposal) Reset() { *m = ResponseProcessProposal{} } +func (m *ResponseProcessProposal) String() string { return proto.CompactTextString(m) } +func (*ResponseProcessProposal) ProtoMessage() {} +func (*ResponseProcessProposal) 
Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{37} } - -func init() { proto.RegisterFile("tendermint/abci/types.proto", fileDescriptor_252557cfdd89a31a) } - -var fileDescriptor_252557cfdd89a31a = []byte{ - // 2912 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5a, 0x4b, 0x73, 0xe3, 0xc6, - 0xf1, 0x27, 0xf8, 0x66, 0xf3, 0x21, 0x6a, 0x56, 0x5e, 0xd3, 0xf4, 0xae, 0xb4, 0x86, 0xcb, 0xf6, - 0x7a, 0x6d, 0x4b, 0x7f, 0x6b, 0xcb, 0xaf, 0xfa, 0xe7, 0x61, 0x92, 0xe6, 0x86, 0xf2, 0x2a, 0x92, - 0x3c, 0xe2, 0xae, 0xcb, 0x71, 0xbc, 0x30, 0x48, 0x8c, 0x44, 0x78, 0x49, 0x00, 0x06, 0x86, 0xb2, - 0xe4, 0xb3, 0x73, 0xf1, 0xc9, 0xc7, 0xe4, 0xe0, 0xaa, 0x7c, 0x81, 0x54, 0x3e, 0x40, 0xaa, 0x72, - 0xf6, 0x25, 0x55, 0x3e, 0xe6, 0x90, 0x38, 0x2e, 0xef, 0x25, 0x95, 0x6b, 0x0e, 0x39, 0xa5, 0x2a, - 0x35, 0x0f, 0x80, 0x00, 0x49, 0x90, 0x54, 0xf6, 0x98, 0xdb, 0x4c, 0x4f, 0x77, 0x63, 0xa6, 0x07, - 0xf3, 0xeb, 0xdf, 0x34, 0x00, 0x4f, 0x53, 0x62, 0x19, 0xc4, 0x1d, 0x99, 0x16, 0xdd, 0xd1, 0x7b, - 0x7d, 0x73, 0x87, 0x5e, 0x38, 0xc4, 0xdb, 0x76, 0x5c, 0x9b, 0xda, 0x68, 0x6d, 0x32, 0xb8, 0xcd, - 0x06, 0xeb, 0xd7, 0x43, 0xda, 0x7d, 0xf7, 0xc2, 0xa1, 0xf6, 0x8e, 0xe3, 0xda, 0xf6, 0x89, 0xd0, - 0xaf, 0x5f, 0x0b, 0x0d, 0x73, 0x3f, 0x61, 0x6f, 0x91, 0x51, 0x69, 0xfc, 0x90, 0x5c, 0xf8, 0xa3, - 0xd7, 0x67, 0x6c, 0x1d, 0xdd, 0xd5, 0x47, 0xfe, 0xf0, 0xd6, 0xa9, 0x6d, 0x9f, 0x0e, 0xc9, 0x0e, - 0xef, 0xf5, 0xc6, 0x27, 0x3b, 0xd4, 0x1c, 0x11, 0x8f, 0xea, 0x23, 0x47, 0x2a, 0x6c, 0x9c, 0xda, - 0xa7, 0x36, 0x6f, 0xee, 0xb0, 0x96, 0x90, 0xaa, 0x7f, 0xca, 0x41, 0x0e, 0x93, 0x4f, 0xc7, 0xc4, - 0xa3, 0x68, 0x17, 0xd2, 0xa4, 0x3f, 0xb0, 0x6b, 0xca, 0x0d, 0xe5, 0x66, 0x71, 0xf7, 0xda, 0xf6, - 0xd4, 0xe2, 0xb6, 0xa5, 0x5e, 0xbb, 0x3f, 0xb0, 0x3b, 0x09, 0xcc, 0x75, 0xd1, 0x6b, 0x90, 0x39, - 0x19, 0x8e, 0xbd, 0x41, 0x2d, 0xc9, 0x8d, 0xae, 0xc7, 0x19, 0xdd, 0x61, 0x4a, 0x9d, 0x04, 0x16, - 0xda, 0xec, 0x51, 0xa6, 0x75, 0x62, 0xd7, 0x52, 0x8b, 0x1f, 0xb5, 0x67, 0x9d, 0xf0, 0x47, 0x31, - 0x5d, 0xd4, 0x04, 0x30, 0x2d, 0x93, 0x6a, 0xfd, 0x81, 0x6e, 0x5a, 0xb5, 0x34, 0xb7, 0x7c, 0x26, - 0xde, 0xd2, 0xa4, 0x2d, 0xa6, 0xd8, 0x49, 0xe0, 0x82, 0xe9, 0x77, 0xd8, 0x74, 0x3f, 0x1d, 0x13, - 0xf7, 0xa2, 0x96, 0x59, 0x3c, 0xdd, 0xf7, 0x98, 0x12, 0x9b, 0x2e, 0xd7, 0x46, 0x6d, 0x28, 0xf6, - 0xc8, 0xa9, 0x69, 0x69, 0xbd, 0xa1, 0xdd, 0x7f, 0x58, 0xcb, 0x72, 0x63, 0x35, 0xce, 0xb8, 0xc9, - 0x54, 0x9b, 0x4c, 0xb3, 0x93, 0xc0, 0xd0, 0x0b, 0x7a, 0xe8, 0x47, 0x90, 0xef, 0x0f, 0x48, 0xff, - 0xa1, 0x46, 0xcf, 0x6b, 0x39, 0xee, 0x63, 0x2b, 0xce, 0x47, 0x8b, 0xe9, 0x75, 0xcf, 0x3b, 0x09, - 0x9c, 0xeb, 0x8b, 0x26, 0x5b, 0xbf, 0x41, 0x86, 0xe6, 0x19, 0x71, 0x99, 0x7d, 0x7e, 0xf1, 0xfa, - 0xdf, 0x11, 0x9a, 0xdc, 0x43, 0xc1, 0xf0, 0x3b, 0xe8, 0xa7, 0x50, 0x20, 0x96, 0x21, 0x97, 0x51, - 0xe0, 0x2e, 0x6e, 0xc4, 0xee, 0xb3, 0x65, 0xf8, 0x8b, 0xc8, 0x13, 0xd9, 0x46, 0x6f, 0x42, 0xb6, - 0x6f, 0x8f, 0x46, 0x26, 0xad, 0x01, 0xb7, 0xde, 0x8c, 0x5d, 0x00, 0xd7, 0xea, 0x24, 0xb0, 0xd4, - 0x47, 0x07, 0x50, 0x19, 0x9a, 0x1e, 0xd5, 0x3c, 0x4b, 0x77, 0xbc, 0x81, 0x4d, 0xbd, 0x5a, 0x91, - 0x7b, 0x78, 0x2e, 0xce, 0xc3, 0xbe, 0xe9, 0xd1, 0x63, 0x5f, 0xb9, 0x93, 0xc0, 0xe5, 0x61, 0x58, - 0xc0, 0xfc, 0xd9, 0x27, 0x27, 0xc4, 0x0d, 0x1c, 0xd6, 0x4a, 0x8b, 0xfd, 0x1d, 0x32, 0x6d, 0xdf, - 0x9e, 0xf9, 0xb3, 0xc3, 0x02, 0xf4, 0x21, 0x5c, 0x19, 0xda, 0xba, 0x11, 0xb8, 0xd3, 0xfa, 0x83, - 0xb1, 0xf5, 0xb0, 0x56, 0xe6, 0x4e, 0x5f, 0x8c, 0x9d, 0xa4, 0xad, 0x1b, 0xbe, 0x8b, 0x16, 0x33, - 0xe8, 0x24, 0xf0, 0xfa, 0x70, 
0x5a, 0x88, 0x1e, 0xc0, 0x86, 0xee, 0x38, 0xc3, 0x8b, 0x69, 0xef, - 0x15, 0xee, 0xfd, 0x56, 0x9c, 0xf7, 0x06, 0xb3, 0x99, 0x76, 0x8f, 0xf4, 0x19, 0x69, 0x33, 0x07, - 0x99, 0x33, 0x7d, 0x38, 0x26, 0xea, 0x0b, 0x50, 0x0c, 0x1d, 0x53, 0x54, 0x83, 0xdc, 0x88, 0x78, - 0x9e, 0x7e, 0x4a, 0xf8, 0xa9, 0x2e, 0x60, 0xbf, 0xab, 0x56, 0xa0, 0x14, 0x3e, 0x9a, 0xea, 0x57, - 0x4a, 0x60, 0xc9, 0x4e, 0x1d, 0xb3, 0x3c, 0x23, 0xae, 0x67, 0xda, 0x96, 0x6f, 0x29, 0xbb, 0xe8, - 0x59, 0x28, 0xf3, 0xf7, 0x47, 0xf3, 0xc7, 0xd9, 0xd1, 0x4f, 0xe3, 0x12, 0x17, 0xde, 0x97, 0x4a, - 0x5b, 0x50, 0x74, 0x76, 0x9d, 0x40, 0x25, 0xc5, 0x55, 0xc0, 0xd9, 0x75, 0x7c, 0x85, 0x67, 0xa0, - 0xc4, 0x56, 0x1a, 0x68, 0xa4, 0xf9, 0x43, 0x8a, 0x4c, 0x26, 0x55, 0xd4, 0x2f, 0x52, 0x50, 0x9d, - 0x3e, 0xce, 0xe8, 0x4d, 0x48, 0x33, 0x64, 0x93, 0x20, 0x55, 0xdf, 0x16, 0xb0, 0xb7, 0xed, 0xc3, - 0xde, 0x76, 0xd7, 0x87, 0xbd, 0x66, 0xfe, 0x9b, 0xef, 0xb6, 0x12, 0x5f, 0xfd, 0x6d, 0x4b, 0xc1, - 0xdc, 0x02, 0x3d, 0xc5, 0x4e, 0x9f, 0x6e, 0x5a, 0x9a, 0x69, 0xf0, 0x29, 0x17, 0xd8, 0xd1, 0xd2, - 0x4d, 0x6b, 0xcf, 0x40, 0xfb, 0x50, 0xed, 0xdb, 0x96, 0x47, 0x2c, 0x6f, 0xec, 0x69, 0x02, 0x56, - 0x25, 0x34, 0x45, 0x0e, 0x98, 0x00, 0xeb, 0x96, 0xaf, 0x79, 0xc4, 0x15, 0xf1, 0x5a, 0x3f, 0x2a, - 0x40, 0x07, 0x50, 0x3e, 0xd3, 0x87, 0xa6, 0xa1, 0x53, 0xdb, 0xd5, 0x3c, 0x42, 0x25, 0x56, 0x3d, - 0x3b, 0xb3, 0xcb, 0xf7, 0x7d, 0xad, 0x63, 0x42, 0xef, 0x39, 0x86, 0x4e, 0x49, 0x33, 0xfd, 0xcd, - 0x77, 0x5b, 0x0a, 0x2e, 0x9d, 0x85, 0x46, 0xd0, 0xf3, 0xb0, 0xa6, 0x3b, 0x8e, 0xe6, 0x51, 0x9d, - 0x12, 0xad, 0x77, 0x41, 0x89, 0xc7, 0xe1, 0xab, 0x84, 0xcb, 0xba, 0xe3, 0x1c, 0x33, 0x69, 0x93, - 0x09, 0xd1, 0x73, 0x50, 0x61, 0x48, 0x67, 0xea, 0x43, 0x6d, 0x40, 0xcc, 0xd3, 0x01, 0xe5, 0x40, - 0x95, 0xc2, 0x65, 0x29, 0xed, 0x70, 0x21, 0xda, 0x86, 0x2b, 0xbe, 0x5a, 0xdf, 0x76, 0x89, 0xaf, - 0xcb, 0x00, 0xa9, 0x8c, 0xd7, 0xe5, 0x50, 0xcb, 0x76, 0x89, 0xd0, 0x57, 0x8d, 0xe0, 0x4d, 0xe1, - 0xa8, 0x88, 0x10, 0xa4, 0x0d, 0x9d, 0xea, 0x7c, 0x07, 0x4a, 0x98, 0xb7, 0x99, 0xcc, 0xd1, 0xe9, - 0x40, 0xc6, 0x95, 0xb7, 0xd1, 0x55, 0xc8, 0x4a, 0xd7, 0x29, 0x3e, 0x0d, 0xd9, 0x43, 0x1b, 0x90, - 0x71, 0x5c, 0xfb, 0x8c, 0xf0, 0xb0, 0xe4, 0xb1, 0xe8, 0xa8, 0x5f, 0x24, 0x61, 0x7d, 0x06, 0x3f, - 0x99, 0xdf, 0x81, 0xee, 0x0d, 0xfc, 0x67, 0xb1, 0x36, 0x7a, 0x9d, 0xf9, 0xd5, 0x0d, 0xe2, 0xca, - 0x9c, 0x53, 0x9b, 0xdd, 0xa2, 0x0e, 0x1f, 0xe7, 0xc1, 0x4c, 0x60, 0xa9, 0x8d, 0x0e, 0xa1, 0x3a, - 0xd4, 0x3d, 0xaa, 0x09, 0x3c, 0xd2, 0x42, 0xf9, 0x67, 0x16, 0x85, 0xf7, 0x75, 0x1f, 0xc1, 0xd8, - 0x61, 0x90, 0x8e, 0x2a, 0xc3, 0x88, 0x14, 0x61, 0xd8, 0xe8, 0x5d, 0x7c, 0xae, 0x5b, 0xd4, 0xb4, - 0x88, 0x16, 0xec, 0x98, 0x57, 0x4b, 0xdf, 0x48, 0xdd, 0x2c, 0xee, 0x3e, 0x35, 0xe3, 0xb4, 0x7d, - 0x66, 0x1a, 0xc4, 0xea, 0x13, 0xe9, 0xee, 0x4a, 0x60, 0x1c, 0xbc, 0x07, 0x9e, 0x8a, 0xa1, 0x12, - 0xcd, 0x00, 0xa8, 0x02, 0x49, 0x7a, 0x2e, 0x03, 0x90, 0xa4, 0xe7, 0xe8, 0xff, 0x20, 0xcd, 0x16, - 0xc9, 0x17, 0x5f, 0x99, 0x93, 0x3a, 0xa5, 0x5d, 0xf7, 0xc2, 0x21, 0x98, 0x6b, 0xaa, 0x6a, 0x70, - 0x8c, 0x82, 0xac, 0x30, 0xed, 0x55, 0x7d, 0x11, 0xd6, 0xa6, 0x60, 0x3f, 0xb4, 0x7f, 0x4a, 0x78, - 0xff, 0xd4, 0x35, 0x28, 0x47, 0x30, 0x5e, 0xbd, 0x0a, 0x1b, 0xf3, 0x20, 0x5b, 0x1d, 0x04, 0xf2, - 0x08, 0xf4, 0xa2, 0xd7, 0x20, 0x1f, 0x60, 0xb6, 0x38, 0xc6, 0xb3, 0xb1, 0xf2, 0x95, 0x71, 0xa0, - 0xca, 0xce, 0x2f, 0x3b, 0x06, 0xfc, 0x7d, 0x48, 0xf2, 0x89, 0xe7, 0x74, 0xc7, 0xe9, 0xe8, 0xde, - 0x40, 0xfd, 0x18, 0x6a, 0x71, 0x78, 0x3c, 0xb5, 0x8c, 0x74, 0xf0, 0x1a, 0x5e, 0x85, 0xec, 0x89, - 0xed, 0x8e, 0x74, 0xca, 0x9d, 0x95, 0xb1, 0xec, 0xb1, 
0xd7, 0x53, 0x60, 0x73, 0x8a, 0x8b, 0x45, - 0x47, 0xd5, 0xe0, 0xa9, 0x58, 0x4c, 0x66, 0x26, 0xa6, 0x65, 0x10, 0x11, 0xcf, 0x32, 0x16, 0x9d, - 0x89, 0x23, 0x31, 0x59, 0xd1, 0x61, 0x8f, 0xf5, 0xf8, 0x5a, 0xb9, 0xff, 0x02, 0x96, 0x3d, 0xf5, - 0xb7, 0x79, 0xc8, 0x63, 0xe2, 0x39, 0x0c, 0x4b, 0x50, 0x13, 0x0a, 0xe4, 0xbc, 0x4f, 0x1c, 0xea, - 0xc3, 0xef, 0x7c, 0xb6, 0x21, 0xb4, 0xdb, 0xbe, 0x26, 0x4b, 0xf5, 0x81, 0x19, 0xba, 0x2d, 0xd9, - 0x5c, 0x3c, 0x31, 0x93, 0xe6, 0x61, 0x3a, 0xf7, 0xba, 0x4f, 0xe7, 0x52, 0xb1, 0xd9, 0x5d, 0x58, - 0x4d, 0xf1, 0xb9, 0xdb, 0x92, 0xcf, 0xa5, 0x97, 0x3c, 0x2c, 0x42, 0xe8, 0x5a, 0x11, 0x42, 0x97, - 0x59, 0xb2, 0xcc, 0x18, 0x46, 0xf7, 0xba, 0xcf, 0xe8, 0xb2, 0x4b, 0x66, 0x3c, 0x45, 0xe9, 0xee, - 0x44, 0x29, 0x5d, 0x2e, 0x06, 0xa2, 0x7d, 0xeb, 0x58, 0x4e, 0xf7, 0xe3, 0x10, 0xa7, 0xcb, 0xc7, - 0x12, 0x2a, 0xe1, 0x64, 0x0e, 0xa9, 0x6b, 0x45, 0x48, 0x5d, 0x61, 0x49, 0x0c, 0x62, 0x58, 0xdd, - 0xdb, 0x61, 0x56, 0x07, 0xb1, 0xc4, 0x50, 0xee, 0xf7, 0x3c, 0x5a, 0xf7, 0x56, 0x40, 0xeb, 0x8a, - 0xb1, 0xbc, 0x54, 0xae, 0x61, 0x9a, 0xd7, 0x1d, 0xce, 0xf0, 0x3a, 0xc1, 0xc3, 0x9e, 0x8f, 0x75, - 0xb1, 0x84, 0xd8, 0x1d, 0xce, 0x10, 0xbb, 0xf2, 0x12, 0x87, 0x4b, 0x98, 0xdd, 0x2f, 0xe7, 0x33, - 0xbb, 0x78, 0xee, 0x25, 0xa7, 0xb9, 0x1a, 0xb5, 0xd3, 0x62, 0xa8, 0xdd, 0x1a, 0x77, 0xff, 0x52, - 0xac, 0xfb, 0xcb, 0x73, 0xbb, 0x17, 0x59, 0x86, 0x9c, 0x3a, 0xf3, 0x0c, 0x65, 0x88, 0xeb, 0xda, - 0xae, 0x64, 0x69, 0xa2, 0xa3, 0xde, 0x64, 0x39, 0x7b, 0x72, 0xbe, 0x17, 0xf0, 0x40, 0x8e, 0xe6, - 0xa1, 0x33, 0xad, 0xfe, 0x55, 0x99, 0xd8, 0xf2, 0x34, 0x17, 0xce, 0xf7, 0x05, 0x99, 0xef, 0x43, - 0xec, 0x30, 0x19, 0x65, 0x87, 0x5b, 0x50, 0x64, 0x28, 0x3d, 0x45, 0xfc, 0x74, 0x27, 0x20, 0x7e, - 0xb7, 0x60, 0x9d, 0xa7, 0x61, 0xc1, 0x21, 0x25, 0x34, 0xa7, 0x79, 0x86, 0x59, 0x63, 0x03, 0xe2, - 0xe5, 0x14, 0x18, 0xfd, 0x0a, 0x5c, 0x09, 0xe9, 0x06, 0xe8, 0x2f, 0xd8, 0x4f, 0x35, 0xd0, 0x6e, - 0x88, 0x34, 0xf0, 0x6e, 0x3a, 0x6f, 0x54, 0x09, 0xbe, 0x2e, 0xb3, 0xbc, 0x4b, 0x04, 0xb2, 0x68, - 0x4c, 0x85, 0x18, 0xf2, 0x51, 0xea, 0xdf, 0x93, 0x93, 0x30, 0x4e, 0x68, 0xe5, 0x3c, 0x06, 0xa8, - 0xfc, 0xd7, 0x0c, 0x30, 0x9c, 0xaa, 0x52, 0x91, 0x54, 0x85, 0x3e, 0x84, 0x8d, 0x08, 0x39, 0xd4, - 0xc6, 0x9c, 0xf8, 0xd5, 0x8c, 0xcb, 0x71, 0xc4, 0x04, 0x46, 0x67, 0x33, 0x23, 0xe8, 0x23, 0x78, - 0xda, 0x22, 0xe7, 0x33, 0x8b, 0xf7, 0x9f, 0x41, 0x66, 0xcf, 0xb6, 0xbf, 0x20, 0x97, 0xf0, 0x38, - 0xec, 0xdb, 0xfd, 0x87, 0xf8, 0x49, 0xe6, 0x23, 0x22, 0x92, 0xee, 0x63, 0x98, 0xe3, 0x49, 0x1c, - 0x73, 0xfc, 0x97, 0x32, 0x79, 0xb9, 0x02, 0xee, 0xd8, 0xb7, 0x0d, 0x22, 0x13, 0x25, 0x6f, 0xa3, - 0x2a, 0xa4, 0x86, 0xf6, 0xa9, 0x4c, 0x87, 0xac, 0xc9, 0xb4, 0x82, 0x6c, 0x52, 0x90, 0xc9, 0x22, - 0xc8, 0xb1, 0x19, 0xfe, 0xaa, 0xc8, 0x1c, 0x5b, 0x85, 0xd4, 0x43, 0x22, 0xb0, 0xbf, 0x84, 0x59, - 0x93, 0xe9, 0xf1, 0xd3, 0xc2, 0x11, 0xbd, 0x84, 0x45, 0x07, 0xbd, 0x09, 0x05, 0x5e, 0x87, 0xd1, - 0x6c, 0xc7, 0x93, 0x30, 0xfd, 0x74, 0x38, 0x0c, 0xa2, 0xdc, 0xb2, 0x7d, 0xc4, 0x74, 0x0e, 0x1d, - 0x0f, 0xe7, 0x1d, 0xd9, 0x0a, 0xd1, 0x87, 0x42, 0x84, 0xc5, 0x5e, 0x83, 0x02, 0x9b, 0xbd, 0xe7, - 0xe8, 0x7d, 0xc2, 0x31, 0xb7, 0x80, 0x27, 0x02, 0xf5, 0x01, 0xa0, 0xd9, 0xcc, 0x81, 0x3a, 0x90, - 0x25, 0x67, 0xc4, 0xa2, 0xec, 0xd5, 0x62, 0x14, 0xf1, 0xea, 0x1c, 0x8a, 0x48, 0x2c, 0xda, 0xac, - 0xb1, 0x0d, 0xfe, 0xc7, 0x77, 0x5b, 0x55, 0xa1, 0xfd, 0xb2, 0x3d, 0x32, 0x29, 0x19, 0x39, 0xf4, - 0x02, 0x4b, 0x7b, 0xf5, 0x2f, 0x49, 0xc6, 0xd7, 0x22, 0x59, 0x65, 0x6e, 0x6c, 0xfd, 0xb3, 0x9b, - 0x0c, 0x71, 0xf5, 0xd5, 0xe2, 0xbd, 0x09, 0x70, 0xaa, 0x7b, 0xda, 0x67, 0xba, 
0x45, 0x89, 0x21, - 0x83, 0x1e, 0x92, 0xa0, 0x3a, 0xe4, 0x59, 0x6f, 0xec, 0x11, 0x43, 0x5e, 0x33, 0x82, 0x7e, 0x68, - 0x9d, 0xb9, 0xc7, 0x5b, 0x67, 0x34, 0xca, 0xf9, 0xa9, 0x28, 0x87, 0xb8, 0x54, 0x21, 0xcc, 0xa5, - 0xd8, 0xdc, 0x1c, 0xd7, 0xb4, 0x5d, 0x93, 0x5e, 0xf0, 0xad, 0x49, 0xe1, 0xa0, 0xcf, 0x6e, 0xaf, - 0x23, 0x32, 0x72, 0x6c, 0x7b, 0xa8, 0x09, 0xdc, 0x2c, 0x72, 0xd3, 0x92, 0x14, 0xb6, 0x39, 0x7c, - 0xfe, 0x2a, 0x84, 0x11, 0x13, 0xce, 0xfc, 0x3f, 0x17, 0x60, 0xf5, 0x9f, 0x49, 0x76, 0x75, 0x88, - 0xf2, 0x06, 0xf4, 0x01, 0x3c, 0x39, 0x05, 0x95, 0x12, 0x5f, 0x3c, 0xc9, 0x35, 0x57, 0x40, 0xcc, - 0x27, 0xa2, 0x88, 0x29, 0xf0, 0xc5, 0x0b, 0xad, 0x2b, 0xf5, 0x98, 0xeb, 0x5a, 0x82, 0x84, 0xc6, - 0x63, 0x22, 0x61, 0x1c, 0x8a, 0x93, 0xcb, 0xde, 0xf4, 0xe7, 0xa0, 0xb8, 0xba, 0xc7, 0xee, 0x80, - 0x61, 0xb6, 0x35, 0xf7, 0x2d, 0x7b, 0x16, 0xca, 0x2e, 0xa1, 0x6c, 0x61, 0x91, 0x5b, 0x76, 0x49, - 0x08, 0x25, 0x02, 0x1f, 0xc1, 0x13, 0x73, 0x59, 0x17, 0x7a, 0x03, 0x0a, 0x13, 0xc2, 0xa6, 0xc4, - 0x5c, 0x58, 0x83, 0x4b, 0xd8, 0x44, 0x57, 0xfd, 0xa3, 0x32, 0x71, 0x19, 0xbd, 0xd6, 0xb5, 0x21, - 0xeb, 0x12, 0x6f, 0x3c, 0x14, 0x17, 0xad, 0xca, 0xee, 0x2b, 0xab, 0xf1, 0x35, 0x26, 0x1d, 0x0f, - 0x29, 0x96, 0xc6, 0xea, 0x03, 0xc8, 0x0a, 0x09, 0x2a, 0x42, 0xee, 0xde, 0xc1, 0xdd, 0x83, 0xc3, - 0xf7, 0x0f, 0xaa, 0x09, 0x04, 0x90, 0x6d, 0xb4, 0x5a, 0xed, 0xa3, 0x6e, 0x55, 0x41, 0x05, 0xc8, - 0x34, 0x9a, 0x87, 0xb8, 0x5b, 0x4d, 0x32, 0x31, 0x6e, 0xbf, 0xdb, 0x6e, 0x75, 0xab, 0x29, 0xb4, - 0x0e, 0x65, 0xd1, 0xd6, 0xee, 0x1c, 0xe2, 0x9f, 0x37, 0xba, 0xd5, 0x74, 0x48, 0x74, 0xdc, 0x3e, - 0x78, 0xa7, 0x8d, 0xab, 0x19, 0xf5, 0x55, 0x76, 0x93, 0x8b, 0x61, 0x78, 0x93, 0x3b, 0x9b, 0x12, - 0xba, 0xb3, 0xa9, 0xbf, 0x4e, 0x42, 0x3d, 0x9e, 0xb6, 0xa1, 0x77, 0xa7, 0x16, 0xbe, 0x7b, 0x09, - 0xce, 0x37, 0xb5, 0x7a, 0xf4, 0x1c, 0x54, 0x5c, 0x72, 0x42, 0x68, 0x7f, 0x20, 0x68, 0x24, 0x3b, - 0x53, 0xa9, 0x9b, 0x65, 0x5c, 0x96, 0x52, 0x6e, 0xe4, 0x09, 0xb5, 0x4f, 0x48, 0x9f, 0x6a, 0x02, - 0xf2, 0xc4, 0x81, 0x29, 0x30, 0x35, 0x26, 0x3d, 0x16, 0x42, 0xf5, 0xe3, 0x4b, 0xc5, 0xb2, 0x00, - 0x19, 0xdc, 0xee, 0xe2, 0x0f, 0xaa, 0x29, 0x84, 0xa0, 0xc2, 0x9b, 0xda, 0xf1, 0x41, 0xe3, 0xe8, - 0xb8, 0x73, 0xc8, 0x62, 0x79, 0x05, 0xd6, 0xfc, 0x58, 0xfa, 0xc2, 0x8c, 0xfa, 0x1b, 0x05, 0x2a, - 0xd1, 0x62, 0x09, 0x8b, 0xa1, 0x6b, 0x8f, 0x2d, 0x83, 0x47, 0x23, 0x83, 0x45, 0x87, 0xf1, 0xc2, - 0x4f, 0xc7, 0xb6, 0x3b, 0x1e, 0x85, 0x59, 0x11, 0x08, 0x11, 0x27, 0x46, 0x2f, 0xc0, 0x9a, 0xa0, - 0x79, 0x9e, 0x79, 0x6a, 0xe9, 0x74, 0xec, 0x8a, 0x02, 0x51, 0x09, 0x57, 0xb8, 0xf8, 0xd8, 0x97, - 0x32, 0x45, 0x51, 0x0a, 0x9b, 0x28, 0x0a, 0x42, 0x58, 0xe1, 0xe2, 0x40, 0x51, 0xfd, 0x1c, 0x32, - 0x1c, 0x2e, 0xd8, 0xf1, 0xe1, 0x25, 0x13, 0xc9, 0x60, 0x59, 0x1b, 0x7d, 0x04, 0xa0, 0x53, 0xea, - 0x9a, 0xbd, 0xb1, 0x00, 0xae, 0xd4, 0xdc, 0x5b, 0x0f, 0xb7, 0x6f, 0xf8, 0x7a, 0xcd, 0x6b, 0x12, - 0x77, 0x36, 0x26, 0xa6, 0x21, 0xec, 0x09, 0x39, 0x54, 0x0f, 0xa0, 0x12, 0xb5, 0xf5, 0xa9, 0x8a, - 0x98, 0x43, 0x94, 0xaa, 0x08, 0x0a, 0x2d, 0xa9, 0x4a, 0x40, 0x74, 0x52, 0xa2, 0x3c, 0xc6, 0x3b, - 0xea, 0x97, 0x0a, 0xe4, 0xbb, 0xe7, 0x72, 0x33, 0x63, 0x2a, 0x33, 0x13, 0xd3, 0x64, 0xb8, 0x0e, - 0x21, 0x4a, 0x3d, 0xa9, 0xa0, 0x80, 0xf4, 0x76, 0xf0, 0xba, 0xa6, 0x57, 0xbd, 0x6e, 0xfa, 0x95, - 0x34, 0x79, 0x44, 0x1b, 0x50, 0x08, 0x00, 0x8d, 0x97, 0xf3, 0xec, 0xcf, 0x64, 0x3d, 0x23, 0x85, - 0x45, 0x07, 0x6d, 0x42, 0xd1, 0x71, 0x6d, 0x8d, 0x9e, 0x8b, 0xed, 0x16, 0x3b, 0xc9, 0x38, 0x58, - 0xf7, 0x9c, 0x57, 0x6c, 0x7e, 0xa7, 0xc0, 0x5a, 0xe0, 0x43, 0x82, 0xea, 0xff, 0x43, 0xce, 0x19, - 0xf7, 
0x34, 0x3f, 0x4a, 0x53, 0xdf, 0x85, 0x7c, 0x8a, 0x36, 0xee, 0x0d, 0xcd, 0xfe, 0x5d, 0x72, - 0x21, 0x01, 0x34, 0xeb, 0x8c, 0x7b, 0x77, 0x45, 0x30, 0xc5, 0x34, 0x92, 0x0b, 0xa6, 0x91, 0x9a, - 0x9a, 0x06, 0x7a, 0x01, 0x4a, 0x96, 0x6d, 0x10, 0x4d, 0x37, 0x0c, 0x97, 0x78, 0x9e, 0x48, 0xd0, - 0xd2, 0x73, 0x91, 0x8d, 0x34, 0xc4, 0x80, 0xfa, 0xbd, 0x02, 0x68, 0x16, 0xc4, 0xd1, 0x31, 0xac, - 0x4f, 0xf2, 0x80, 0x9f, 0x05, 0x05, 0x9c, 0xde, 0x88, 0x4f, 0x02, 0x11, 0x1e, 0x5f, 0x3d, 0x8b, - 0x8a, 0x3d, 0xd4, 0x85, 0x0d, 0x3a, 0x70, 0x89, 0x37, 0xb0, 0x87, 0x86, 0xe6, 0xf0, 0xf5, 0xf2, - 0xa0, 0x24, 0x57, 0x0c, 0x4a, 0x02, 0xa3, 0xc0, 0x3e, 0x18, 0x59, 0x7a, 0x00, 0x55, 0x07, 0x6a, - 0xdd, 0x19, 0x33, 0xb9, 0xce, 0xb8, 0x29, 0x29, 0x8f, 0x33, 0x25, 0xf5, 0x36, 0x54, 0xdf, 0x0b, - 0x9e, 0x2f, 0x9f, 0x34, 0x35, 0x4d, 0x65, 0x66, 0x9a, 0x67, 0x90, 0xbf, 0x6f, 0x53, 0x71, 0x35, - 0xfd, 0x09, 0x14, 0x82, 0xe8, 0x05, 0x5f, 0x04, 0x62, 0xc3, 0x2e, 0x67, 0x32, 0x31, 0x61, 0x77, - 0x51, 0x06, 0x22, 0xc4, 0xd0, 0x26, 0xd7, 0x4c, 0x1e, 0xe6, 0x3c, 0x5e, 0x13, 0x03, 0xfb, 0xfe, - 0x1d, 0x53, 0xfd, 0xb7, 0x02, 0x79, 0xbf, 0x82, 0x8b, 0x5e, 0x0d, 0x21, 0x4a, 0x65, 0x4e, 0xbd, - 0xcb, 0x57, 0x9c, 0x54, 0x61, 0xa3, 0x73, 0x4d, 0x5e, 0x7e, 0xae, 0x71, 0xe5, 0x74, 0xff, 0x83, - 0x48, 0xfa, 0xd2, 0x1f, 0x44, 0x5e, 0x06, 0x44, 0x6d, 0xaa, 0x0f, 0xb5, 0x33, 0x9b, 0x9a, 0xd6, - 0xa9, 0x26, 0xce, 0x8f, 0x60, 0xa2, 0x55, 0x3e, 0x72, 0x9f, 0x0f, 0x1c, 0x31, 0xb9, 0xfa, 0x07, - 0x05, 0xf2, 0x41, 0xae, 0xbf, 0x6c, 0x51, 0xf5, 0x2a, 0x64, 0x65, 0x3a, 0x13, 0x55, 0x55, 0xd9, - 0x0b, 0xea, 0xfb, 0xe9, 0x50, 0x7d, 0xbf, 0x0e, 0xf9, 0x11, 0xa1, 0x3a, 0x27, 0x3c, 0x02, 0xd8, - 0x83, 0x3e, 0x7a, 0x03, 0x6a, 0x71, 0x17, 0x7b, 0xce, 0xe9, 0xca, 0x8c, 0x59, 0x86, 0xe8, 0x1a, - 0x31, 0x04, 0x11, 0xba, 0xf5, 0x16, 0x14, 0x43, 0x85, 0x71, 0x06, 0xc6, 0x07, 0xed, 0xf7, 0xab, - 0x89, 0x7a, 0xee, 0xcb, 0xaf, 0x6f, 0xa4, 0x0e, 0xc8, 0x67, 0xa8, 0x06, 0x39, 0xdc, 0x6e, 0x75, - 0xda, 0xad, 0xbb, 0x55, 0xa5, 0x5e, 0xfc, 0xf2, 0xeb, 0x1b, 0x39, 0x4c, 0x78, 0x91, 0xee, 0x56, - 0x07, 0x4a, 0xe1, 0xed, 0x8c, 0xa6, 0x52, 0x04, 0x95, 0x77, 0xee, 0x1d, 0xed, 0xef, 0xb5, 0x1a, - 0xdd, 0xb6, 0x76, 0xff, 0xb0, 0xdb, 0xae, 0x2a, 0xe8, 0x49, 0xb8, 0xb2, 0xbf, 0xf7, 0xb3, 0x4e, - 0x57, 0x6b, 0xed, 0xef, 0xb5, 0x0f, 0xba, 0x5a, 0xa3, 0xdb, 0x6d, 0xb4, 0xee, 0x56, 0x93, 0xbb, - 0xbf, 0x2f, 0xc0, 0x5a, 0xa3, 0xd9, 0xda, 0x63, 0x34, 0xc0, 0xec, 0xeb, 0xbc, 0x7e, 0xd3, 0x82, - 0x34, 0xaf, 0xd0, 0x2c, 0xfc, 0xdc, 0x5e, 0x5f, 0x5c, 0xbe, 0x45, 0x77, 0x20, 0xc3, 0x8b, 0x37, - 0x68, 0xf1, 0xf7, 0xf7, 0xfa, 0x92, 0x7a, 0x2e, 0x9b, 0x0c, 0x3f, 0x57, 0x0b, 0x3f, 0xc8, 0xd7, - 0x17, 0x97, 0x77, 0x11, 0x86, 0xc2, 0xe4, 0xce, 0xb4, 0xfc, 0x03, 0x75, 0x7d, 0x85, 0xfc, 0x83, - 0xf6, 0x21, 0xe7, 0x5f, 0x73, 0x97, 0x7d, 0x32, 0xaf, 0x2f, 0xad, 0xbf, 0xb2, 0x70, 0x89, 0x72, - 0xc4, 0xe2, 0xef, 0xff, 0xf5, 0x25, 0xc5, 0x64, 0xb4, 0x07, 0x59, 0x49, 0xd0, 0x97, 0x7c, 0x06, - 0xaf, 0x2f, 0xab, 0xa7, 0xb2, 0xa0, 0x4d, 0x8a, 0x51, 0xcb, 0xff, 0x6a, 0xa8, 0xaf, 0x50, 0x27, - 0x47, 0xf7, 0x00, 0x42, 0xc5, 0x87, 0x15, 0x7e, 0x57, 0xa8, 0xaf, 0x52, 0xff, 0x46, 0x87, 0x90, - 0x0f, 0xee, 0x82, 0x4b, 0x7f, 0x1e, 0xa8, 0x2f, 0x2f, 0x44, 0xa3, 0x07, 0x50, 0x8e, 0x5e, 0x4e, - 0x56, 0xfb, 0x25, 0xa0, 0xbe, 0x62, 0x85, 0x99, 0xf9, 0x8f, 0xde, 0x54, 0x56, 0xfb, 0x45, 0xa0, - 0xbe, 0x62, 0xc1, 0x19, 0x7d, 0x02, 0xeb, 0xb3, 0x37, 0x89, 0xd5, 0xff, 0x18, 0xa8, 0x5f, 0xa2, - 0x04, 0x8d, 0x46, 0x80, 0xe6, 0xdc, 0x40, 0x2e, 0xf1, 0x03, 0x41, 0xfd, 0x32, 0x15, 0xe9, 0x66, - 0xfb, 0x9b, 0x1f, 0x36, 0x95, 
0x6f, 0x7f, 0xd8, 0x54, 0xbe, 0xff, 0x61, 0x53, 0xf9, 0xea, 0xd1, - 0x66, 0xe2, 0xdb, 0x47, 0x9b, 0x89, 0x3f, 0x3f, 0xda, 0x4c, 0xfc, 0xe2, 0xa5, 0x53, 0x93, 0x0e, - 0xc6, 0xbd, 0xed, 0xbe, 0x3d, 0xda, 0x09, 0xff, 0x99, 0x34, 0xef, 0x6f, 0xa9, 0x5e, 0x96, 0x67, - 0xa3, 0xdb, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x1d, 0xb3, 0x11, 0x48, 0x4d, 0x25, 0x00, 0x00, +func (m *ResponseProcessProposal) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// ABCIApplicationClient is the client API for ABCIApplication service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type ABCIApplicationClient interface { - Echo(ctx context.Context, in *RequestEcho, opts ...grpc.CallOption) (*ResponseEcho, error) - Flush(ctx context.Context, in *RequestFlush, opts ...grpc.CallOption) (*ResponseFlush, error) - Info(ctx context.Context, in *RequestInfo, opts ...grpc.CallOption) (*ResponseInfo, error) - DeliverTx(ctx context.Context, in *RequestDeliverTx, opts ...grpc.CallOption) (*ResponseDeliverTx, error) - CheckTx(ctx context.Context, in *RequestCheckTx, opts ...grpc.CallOption) (*ResponseCheckTx, error) - Query(ctx context.Context, in *RequestQuery, opts ...grpc.CallOption) (*ResponseQuery, error) - Commit(ctx context.Context, in *RequestCommit, opts ...grpc.CallOption) (*ResponseCommit, error) - InitChain(ctx context.Context, in *RequestInitChain, opts ...grpc.CallOption) (*ResponseInitChain, error) - BeginBlock(ctx context.Context, in *RequestBeginBlock, opts ...grpc.CallOption) (*ResponseBeginBlock, error) - EndBlock(ctx context.Context, in *RequestEndBlock, opts ...grpc.CallOption) (*ResponseEndBlock, error) - ListSnapshots(ctx context.Context, in *RequestListSnapshots, opts ...grpc.CallOption) (*ResponseListSnapshots, error) - OfferSnapshot(ctx context.Context, in *RequestOfferSnapshot, opts ...grpc.CallOption) (*ResponseOfferSnapshot, error) - LoadSnapshotChunk(ctx context.Context, in *RequestLoadSnapshotChunk, opts ...grpc.CallOption) (*ResponseLoadSnapshotChunk, error) - ApplySnapshotChunk(ctx context.Context, in *RequestApplySnapshotChunk, opts ...grpc.CallOption) (*ResponseApplySnapshotChunk, error) +func (m *ResponseProcessProposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseProcessProposal.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } - -type aBCIApplicationClient struct { - cc *grpc.ClientConn +func (m *ResponseProcessProposal) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseProcessProposal.Merge(m, src) } - -func NewABCIApplicationClient(cc *grpc.ClientConn) ABCIApplicationClient { - return &aBCIApplicationClient{cc} +func (m *ResponseProcessProposal) XXX_Size() int { + return m.Size() } - -func (c *aBCIApplicationClient) Echo(ctx context.Context, in *RequestEcho, opts ...grpc.CallOption) (*ResponseEcho, error) { - out := new(ResponseEcho) - err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/Echo", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil +func (m *ResponseProcessProposal) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseProcessProposal.DiscardUnknown(m) } -func (c *aBCIApplicationClient) Flush(ctx context.Context, in *RequestFlush, opts ...grpc.CallOption) (*ResponseFlush, error) { - out := new(ResponseFlush) - err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/Flush", in, out, opts...) - if err != nil { - return nil, err +var xxx_messageInfo_ResponseProcessProposal proto.InternalMessageInfo + +func (m *ResponseProcessProposal) GetStatus() ResponseProcessProposal_ProposalStatus { + if m != nil { + return m.Status } - return out, nil + return ResponseProcessProposal_UNKNOWN } -func (c *aBCIApplicationClient) Info(ctx context.Context, in *RequestInfo, opts ...grpc.CallOption) (*ResponseInfo, error) { - out := new(ResponseInfo) - err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/Info", in, out, opts...) - if err != nil { - return nil, err +func (m *ResponseProcessProposal) GetAppHash() []byte { + if m != nil { + return m.AppHash } - return out, nil + return nil } -func (c *aBCIApplicationClient) DeliverTx(ctx context.Context, in *RequestDeliverTx, opts ...grpc.CallOption) (*ResponseDeliverTx, error) { - out := new(ResponseDeliverTx) - err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/DeliverTx", in, out, opts...) - if err != nil { - return nil, err +func (m *ResponseProcessProposal) GetTxResults() []*ExecTxResult { + if m != nil { + return m.TxResults } - return out, nil + return nil } -func (c *aBCIApplicationClient) CheckTx(ctx context.Context, in *RequestCheckTx, opts ...grpc.CallOption) (*ResponseCheckTx, error) { - out := new(ResponseCheckTx) - err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/CheckTx", in, out, opts...) - if err != nil { - return nil, err +func (m *ResponseProcessProposal) GetValidatorUpdates() []*ValidatorUpdate { + if m != nil { + return m.ValidatorUpdates } - return out, nil + return nil } -func (c *aBCIApplicationClient) Query(ctx context.Context, in *RequestQuery, opts ...grpc.CallOption) (*ResponseQuery, error) { - out := new(ResponseQuery) - err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/Query", in, out, opts...) - if err != nil { - return nil, err +func (m *ResponseProcessProposal) GetConsensusParamUpdates() *types1.ConsensusParams { + if m != nil { + return m.ConsensusParamUpdates } - return out, nil + return nil } -func (c *aBCIApplicationClient) Commit(ctx context.Context, in *RequestCommit, opts ...grpc.CallOption) (*ResponseCommit, error) { - out := new(ResponseCommit) - err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/Commit", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil +type ResponseExtendVote struct { + VoteExtension []byte `protobuf:"bytes,1,opt,name=vote_extension,json=voteExtension,proto3" json:"vote_extension,omitempty"` } -func (c *aBCIApplicationClient) InitChain(ctx context.Context, in *RequestInitChain, opts ...grpc.CallOption) (*ResponseInitChain, error) { - out := new(ResponseInitChain) - err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/InitChain", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil +func (m *ResponseExtendVote) Reset() { *m = ResponseExtendVote{} } +func (m *ResponseExtendVote) String() string { return proto.CompactTextString(m) } +func (*ResponseExtendVote) ProtoMessage() {} +func (*ResponseExtendVote) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{38} } - -func (c *aBCIApplicationClient) BeginBlock(ctx context.Context, in *RequestBeginBlock, opts ...grpc.CallOption) (*ResponseBeginBlock, error) { - out := new(ResponseBeginBlock) - err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/BeginBlock", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil +func (m *ResponseExtendVote) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } - -func (c *aBCIApplicationClient) EndBlock(ctx context.Context, in *RequestEndBlock, opts ...grpc.CallOption) (*ResponseEndBlock, error) { - out := new(ResponseEndBlock) - err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/EndBlock", in, out, opts...) - if err != nil { - return nil, err +func (m *ResponseExtendVote) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseExtendVote.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - return out, nil } - -func (c *aBCIApplicationClient) ListSnapshots(ctx context.Context, in *RequestListSnapshots, opts ...grpc.CallOption) (*ResponseListSnapshots, error) { - out := new(ResponseListSnapshots) - err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/ListSnapshots", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil +func (m *ResponseExtendVote) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseExtendVote.Merge(m, src) } - -func (c *aBCIApplicationClient) OfferSnapshot(ctx context.Context, in *RequestOfferSnapshot, opts ...grpc.CallOption) (*ResponseOfferSnapshot, error) { - out := new(ResponseOfferSnapshot) - err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/OfferSnapshot", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil +func (m *ResponseExtendVote) XXX_Size() int { + return m.Size() } - -func (c *aBCIApplicationClient) LoadSnapshotChunk(ctx context.Context, in *RequestLoadSnapshotChunk, opts ...grpc.CallOption) (*ResponseLoadSnapshotChunk, error) { - out := new(ResponseLoadSnapshotChunk) - err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/LoadSnapshotChunk", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil +func (m *ResponseExtendVote) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseExtendVote.DiscardUnknown(m) } -func (c *aBCIApplicationClient) ApplySnapshotChunk(ctx context.Context, in *RequestApplySnapshotChunk, opts ...grpc.CallOption) (*ResponseApplySnapshotChunk, error) { - out := new(ResponseApplySnapshotChunk) - err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/ApplySnapshotChunk", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} +var xxx_messageInfo_ResponseExtendVote proto.InternalMessageInfo -// ABCIApplicationServer is the server API for ABCIApplication service. 
-type ABCIApplicationServer interface { - Echo(context.Context, *RequestEcho) (*ResponseEcho, error) - Flush(context.Context, *RequestFlush) (*ResponseFlush, error) - Info(context.Context, *RequestInfo) (*ResponseInfo, error) - DeliverTx(context.Context, *RequestDeliverTx) (*ResponseDeliverTx, error) - CheckTx(context.Context, *RequestCheckTx) (*ResponseCheckTx, error) - Query(context.Context, *RequestQuery) (*ResponseQuery, error) - Commit(context.Context, *RequestCommit) (*ResponseCommit, error) - InitChain(context.Context, *RequestInitChain) (*ResponseInitChain, error) - BeginBlock(context.Context, *RequestBeginBlock) (*ResponseBeginBlock, error) - EndBlock(context.Context, *RequestEndBlock) (*ResponseEndBlock, error) - ListSnapshots(context.Context, *RequestListSnapshots) (*ResponseListSnapshots, error) - OfferSnapshot(context.Context, *RequestOfferSnapshot) (*ResponseOfferSnapshot, error) - LoadSnapshotChunk(context.Context, *RequestLoadSnapshotChunk) (*ResponseLoadSnapshotChunk, error) - ApplySnapshotChunk(context.Context, *RequestApplySnapshotChunk) (*ResponseApplySnapshotChunk, error) +func (m *ResponseExtendVote) GetVoteExtension() []byte { + if m != nil { + return m.VoteExtension + } + return nil } -// UnimplementedABCIApplicationServer can be embedded to have forward compatible implementations. -type UnimplementedABCIApplicationServer struct { +type ResponseVerifyVoteExtension struct { + Status ResponseVerifyVoteExtension_VerifyStatus `protobuf:"varint,1,opt,name=status,proto3,enum=tendermint.abci.ResponseVerifyVoteExtension_VerifyStatus" json:"status,omitempty"` } -func (*UnimplementedABCIApplicationServer) Echo(ctx context.Context, req *RequestEcho) (*ResponseEcho, error) { - return nil, status.Errorf(codes.Unimplemented, "method Echo not implemented") +func (m *ResponseVerifyVoteExtension) Reset() { *m = ResponseVerifyVoteExtension{} } +func (m *ResponseVerifyVoteExtension) String() string { return proto.CompactTextString(m) } +func (*ResponseVerifyVoteExtension) ProtoMessage() {} +func (*ResponseVerifyVoteExtension) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{39} } -func (*UnimplementedABCIApplicationServer) Flush(ctx context.Context, req *RequestFlush) (*ResponseFlush, error) { - return nil, status.Errorf(codes.Unimplemented, "method Flush not implemented") +func (m *ResponseVerifyVoteExtension) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } -func (*UnimplementedABCIApplicationServer) Info(ctx context.Context, req *RequestInfo) (*ResponseInfo, error) { - return nil, status.Errorf(codes.Unimplemented, "method Info not implemented") +func (m *ResponseVerifyVoteExtension) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseVerifyVoteExtension.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } -func (*UnimplementedABCIApplicationServer) DeliverTx(ctx context.Context, req *RequestDeliverTx) (*ResponseDeliverTx, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeliverTx not implemented") +func (m *ResponseVerifyVoteExtension) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseVerifyVoteExtension.Merge(m, src) } -func (*UnimplementedABCIApplicationServer) CheckTx(ctx context.Context, req *RequestCheckTx) (*ResponseCheckTx, error) { - return nil, status.Errorf(codes.Unimplemented, "method CheckTx not implemented") +func (m 
*ResponseVerifyVoteExtension) XXX_Size() int { + return m.Size() } -func (*UnimplementedABCIApplicationServer) Query(ctx context.Context, req *RequestQuery) (*ResponseQuery, error) { - return nil, status.Errorf(codes.Unimplemented, "method Query not implemented") +func (m *ResponseVerifyVoteExtension) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseVerifyVoteExtension.DiscardUnknown(m) } -func (*UnimplementedABCIApplicationServer) Commit(ctx context.Context, req *RequestCommit) (*ResponseCommit, error) { - return nil, status.Errorf(codes.Unimplemented, "method Commit not implemented") + +var xxx_messageInfo_ResponseVerifyVoteExtension proto.InternalMessageInfo + +func (m *ResponseVerifyVoteExtension) GetStatus() ResponseVerifyVoteExtension_VerifyStatus { + if m != nil { + return m.Status + } + return ResponseVerifyVoteExtension_UNKNOWN } -func (*UnimplementedABCIApplicationServer) InitChain(ctx context.Context, req *RequestInitChain) (*ResponseInitChain, error) { - return nil, status.Errorf(codes.Unimplemented, "method InitChain not implemented") + +type ResponseFinalizeBlock struct { + Events []Event `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` + TxResults []*ExecTxResult `protobuf:"bytes,2,rep,name=tx_results,json=txResults,proto3" json:"tx_results,omitempty"` + ConsensusParamUpdates *types1.ConsensusParams `protobuf:"bytes,4,opt,name=consensus_param_updates,json=consensusParamUpdates,proto3" json:"consensus_param_updates,omitempty"` + AppHash []byte `protobuf:"bytes,5,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` + RetainHeight int64 `protobuf:"varint,6,opt,name=retain_height,json=retainHeight,proto3" json:"retain_height,omitempty"` + NextCoreChainLockUpdate *types1.CoreChainLock `protobuf:"bytes,100,opt,name=next_core_chain_lock_update,json=nextCoreChainLockUpdate,proto3" json:"next_core_chain_lock_update,omitempty"` + ValidatorSetUpdate *ValidatorSetUpdate `protobuf:"bytes,101,opt,name=validator_set_update,json=validatorSetUpdate,proto3" json:"validator_set_update,omitempty"` } -func (*UnimplementedABCIApplicationServer) BeginBlock(ctx context.Context, req *RequestBeginBlock) (*ResponseBeginBlock, error) { - return nil, status.Errorf(codes.Unimplemented, "method BeginBlock not implemented") + +func (m *ResponseFinalizeBlock) Reset() { *m = ResponseFinalizeBlock{} } +func (m *ResponseFinalizeBlock) String() string { return proto.CompactTextString(m) } +func (*ResponseFinalizeBlock) ProtoMessage() {} +func (*ResponseFinalizeBlock) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{40} } -func (*UnimplementedABCIApplicationServer) EndBlock(ctx context.Context, req *RequestEndBlock) (*ResponseEndBlock, error) { - return nil, status.Errorf(codes.Unimplemented, "method EndBlock not implemented") +func (m *ResponseFinalizeBlock) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } -func (*UnimplementedABCIApplicationServer) ListSnapshots(ctx context.Context, req *RequestListSnapshots) (*ResponseListSnapshots, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListSnapshots not implemented") +func (m *ResponseFinalizeBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseFinalizeBlock.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } } -func (*UnimplementedABCIApplicationServer) OfferSnapshot(ctx context.Context, req 
*RequestOfferSnapshot) (*ResponseOfferSnapshot, error) { - return nil, status.Errorf(codes.Unimplemented, "method OfferSnapshot not implemented") +func (m *ResponseFinalizeBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseFinalizeBlock.Merge(m, src) } -func (*UnimplementedABCIApplicationServer) LoadSnapshotChunk(ctx context.Context, req *RequestLoadSnapshotChunk) (*ResponseLoadSnapshotChunk, error) { - return nil, status.Errorf(codes.Unimplemented, "method LoadSnapshotChunk not implemented") +func (m *ResponseFinalizeBlock) XXX_Size() int { + return m.Size() } -func (*UnimplementedABCIApplicationServer) ApplySnapshotChunk(ctx context.Context, req *RequestApplySnapshotChunk) (*ResponseApplySnapshotChunk, error) { - return nil, status.Errorf(codes.Unimplemented, "method ApplySnapshotChunk not implemented") +func (m *ResponseFinalizeBlock) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseFinalizeBlock.DiscardUnknown(m) } -func RegisterABCIApplicationServer(s *grpc.Server, srv ABCIApplicationServer) { - s.RegisterService(&_ABCIApplication_serviceDesc, srv) -} +var xxx_messageInfo_ResponseFinalizeBlock proto.InternalMessageInfo -func _ABCIApplication_Echo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestEcho) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ABCIApplicationServer).Echo(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.abci.ABCIApplication/Echo", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIApplicationServer).Echo(ctx, req.(*RequestEcho)) +func (m *ResponseFinalizeBlock) GetEvents() []Event { + if m != nil { + return m.Events } - return interceptor(ctx, in, info, handler) + return nil } -func _ABCIApplication_Flush_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestFlush) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ABCIApplicationServer).Flush(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.abci.ABCIApplication/Flush", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIApplicationServer).Flush(ctx, req.(*RequestFlush)) +func (m *ResponseFinalizeBlock) GetTxResults() []*ExecTxResult { + if m != nil { + return m.TxResults } - return interceptor(ctx, in, info, handler) + return nil } -func _ABCIApplication_Info_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestInfo) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ABCIApplicationServer).Info(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.abci.ABCIApplication/Info", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIApplicationServer).Info(ctx, req.(*RequestInfo)) +func (m *ResponseFinalizeBlock) GetConsensusParamUpdates() *types1.ConsensusParams { + if m != nil { + return m.ConsensusParamUpdates } - return interceptor(ctx, in, info, handler) + return nil } -func _ABCIApplication_DeliverTx_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestDeliverTx) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ABCIApplicationServer).DeliverTx(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.abci.ABCIApplication/DeliverTx", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIApplicationServer).DeliverTx(ctx, req.(*RequestDeliverTx)) +func (m *ResponseFinalizeBlock) GetAppHash() []byte { + if m != nil { + return m.AppHash } - return interceptor(ctx, in, info, handler) + return nil } -func _ABCIApplication_CheckTx_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestCheckTx) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ABCIApplicationServer).CheckTx(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.abci.ABCIApplication/CheckTx", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIApplicationServer).CheckTx(ctx, req.(*RequestCheckTx)) +func (m *ResponseFinalizeBlock) GetRetainHeight() int64 { + if m != nil { + return m.RetainHeight } - return interceptor(ctx, in, info, handler) + return 0 } -func _ABCIApplication_Query_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestQuery) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ABCIApplicationServer).Query(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.abci.ABCIApplication/Query", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIApplicationServer).Query(ctx, req.(*RequestQuery)) +func (m *ResponseFinalizeBlock) GetNextCoreChainLockUpdate() *types1.CoreChainLock { + if m != nil { + return m.NextCoreChainLockUpdate } - return interceptor(ctx, in, info, handler) + return nil } -func _ABCIApplication_Commit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestCommit) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ABCIApplicationServer).Commit(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.abci.ABCIApplication/Commit", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIApplicationServer).Commit(ctx, req.(*RequestCommit)) +func (m *ResponseFinalizeBlock) GetValidatorSetUpdate() *ValidatorSetUpdate { + if m != nil { + return m.ValidatorSetUpdate } - return interceptor(ctx, in, info, handler) + return nil } -func _ABCIApplication_InitChain_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestInitChain) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ABCIApplicationServer).InitChain(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.abci.ABCIApplication/InitChain", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return 
srv.(ABCIApplicationServer).InitChain(ctx, req.(*RequestInitChain)) - } - return interceptor(ctx, in, info, handler) +type CommitInfo struct { + Round int32 `protobuf:"varint,1,opt,name=round,proto3" json:"round,omitempty"` + QuorumHash []byte `protobuf:"bytes,3,opt,name=quorum_hash,json=quorumHash,proto3" json:"quorum_hash,omitempty"` + BlockSignature []byte `protobuf:"bytes,4,opt,name=block_signature,json=blockSignature,proto3" json:"block_signature,omitempty"` + StateSignature []byte `protobuf:"bytes,5,opt,name=state_signature,json=stateSignature,proto3" json:"state_signature,omitempty"` } -func _ABCIApplication_BeginBlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestBeginBlock) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ABCIApplicationServer).BeginBlock(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.abci.ABCIApplication/BeginBlock", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIApplicationServer).BeginBlock(ctx, req.(*RequestBeginBlock)) - } - return interceptor(ctx, in, info, handler) +func (m *CommitInfo) Reset() { *m = CommitInfo{} } +func (m *CommitInfo) String() string { return proto.CompactTextString(m) } +func (*CommitInfo) ProtoMessage() {} +func (*CommitInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{41} } - -func _ABCIApplication_EndBlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestEndBlock) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ABCIApplicationServer).EndBlock(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.abci.ABCIApplication/EndBlock", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIApplicationServer).EndBlock(ctx, req.(*RequestEndBlock)) +func (m *CommitInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CommitInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CommitInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - return interceptor(ctx, in, info, handler) +} +func (m *CommitInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_CommitInfo.Merge(m, src) +} +func (m *CommitInfo) XXX_Size() int { + return m.Size() +} +func (m *CommitInfo) XXX_DiscardUnknown() { + xxx_messageInfo_CommitInfo.DiscardUnknown(m) } -func _ABCIApplication_ListSnapshots_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestListSnapshots) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ABCIApplicationServer).ListSnapshots(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.abci.ABCIApplication/ListSnapshots", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIApplicationServer).ListSnapshots(ctx, req.(*RequestListSnapshots)) +var xxx_messageInfo_CommitInfo proto.InternalMessageInfo + +func (m *CommitInfo) GetRound() 
int32 { + if m != nil { + return m.Round } - return interceptor(ctx, in, info, handler) + return 0 } -func _ABCIApplication_OfferSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestOfferSnapshot) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ABCIApplicationServer).OfferSnapshot(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.abci.ABCIApplication/OfferSnapshot", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIApplicationServer).OfferSnapshot(ctx, req.(*RequestOfferSnapshot)) +func (m *CommitInfo) GetQuorumHash() []byte { + if m != nil { + return m.QuorumHash } - return interceptor(ctx, in, info, handler) + return nil } -func _ABCIApplication_LoadSnapshotChunk_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestLoadSnapshotChunk) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ABCIApplicationServer).LoadSnapshotChunk(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.abci.ABCIApplication/LoadSnapshotChunk", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIApplicationServer).LoadSnapshotChunk(ctx, req.(*RequestLoadSnapshotChunk)) +func (m *CommitInfo) GetBlockSignature() []byte { + if m != nil { + return m.BlockSignature } - return interceptor(ctx, in, info, handler) + return nil } -func _ABCIApplication_ApplySnapshotChunk_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestApplySnapshotChunk) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ABCIApplicationServer).ApplySnapshotChunk(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.abci.ABCIApplication/ApplySnapshotChunk", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ABCIApplicationServer).ApplySnapshotChunk(ctx, req.(*RequestApplySnapshotChunk)) +func (m *CommitInfo) GetStateSignature() []byte { + if m != nil { + return m.StateSignature } - return interceptor(ctx, in, info, handler) + return nil } -var _ABCIApplication_serviceDesc = grpc.ServiceDesc{ - ServiceName: "tendermint.abci.ABCIApplication", - HandlerType: (*ABCIApplicationServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Echo", - Handler: _ABCIApplication_Echo_Handler, - }, - { - MethodName: "Flush", - Handler: _ABCIApplication_Flush_Handler, - }, - { - MethodName: "Info", - Handler: _ABCIApplication_Info_Handler, - }, - { - MethodName: "DeliverTx", - Handler: _ABCIApplication_DeliverTx_Handler, - }, - { - MethodName: "CheckTx", - Handler: _ABCIApplication_CheckTx_Handler, - }, - { - MethodName: "Query", - Handler: _ABCIApplication_Query_Handler, - }, - { - MethodName: "Commit", - Handler: _ABCIApplication_Commit_Handler, - }, - { - MethodName: "InitChain", - Handler: _ABCIApplication_InitChain_Handler, - }, - { - MethodName: "BeginBlock", - Handler: _ABCIApplication_BeginBlock_Handler, - }, - { - MethodName: "EndBlock", - Handler: _ABCIApplication_EndBlock_Handler, - }, - { - MethodName: "ListSnapshots", - Handler: 
_ABCIApplication_ListSnapshots_Handler, - }, - { - MethodName: "OfferSnapshot", - Handler: _ABCIApplication_OfferSnapshot_Handler, - }, - { - MethodName: "LoadSnapshotChunk", - Handler: _ABCIApplication_LoadSnapshotChunk_Handler, - }, - { - MethodName: "ApplySnapshotChunk", - Handler: _ABCIApplication_ApplySnapshotChunk_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "tendermint/abci/types.proto", +// ExtendedCommitInfo is similar to CommitInfo except that it is only used in +// the PrepareProposal request such that Tendermint can provide vote extensions +// to the application. +type ExtendedCommitInfo struct { + // The round at which the block proposer decided in the previous height. + Round int32 `protobuf:"varint,1,opt,name=round,proto3" json:"round,omitempty"` + // List of validators' addresses in the last validator set with their voting + // information, including vote extensions. + Votes []ExtendedVoteInfo `protobuf:"bytes,2,rep,name=votes,proto3" json:"votes"` } -func (m *Request) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil +func (m *ExtendedCommitInfo) Reset() { *m = ExtendedCommitInfo{} } +func (m *ExtendedCommitInfo) String() string { return proto.CompactTextString(m) } +func (*ExtendedCommitInfo) ProtoMessage() {} +func (*ExtendedCommitInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{42} } - -func (m *Request) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *ExtendedCommitInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } - -func (m *Request) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Value != nil { - { - size := m.Value.Size() - i -= size - if _, err := m.Value.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } +func (m *ExtendedCommitInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ExtendedCommitInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } + return b[:n], nil } - return len(dAtA) - i, nil +} +func (m *ExtendedCommitInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtendedCommitInfo.Merge(m, src) +} +func (m *ExtendedCommitInfo) XXX_Size() int { + return m.Size() +} +func (m *ExtendedCommitInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ExtendedCommitInfo.DiscardUnknown(m) } -func (m *Request_Echo) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +var xxx_messageInfo_ExtendedCommitInfo proto.InternalMessageInfo + +func (m *ExtendedCommitInfo) GetRound() int32 { + if m != nil { + return m.Round + } + return 0 } -func (m *Request_Echo) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Echo != nil { - { - size, err := m.Echo.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa +func (m *ExtendedCommitInfo) GetVotes() []ExtendedVoteInfo { + if m != nil { + return m.Votes } - return len(dAtA) - i, nil + return nil } -func (m *Request_Flush) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + +// Event allows application developers to attach additional information to 
+// ResponseBeginBlock, ResponseEndBlock, ResponseCheckTx and ResponseDeliverTx. +// Later, transactions may be queried using these events. +type Event struct { + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Attributes []EventAttribute `protobuf:"bytes,2,rep,name=attributes,proto3" json:"attributes,omitempty"` } -func (m *Request_Flush) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Flush != nil { - { - size, err := m.Flush.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) +func (m *Event) Reset() { *m = Event{} } +func (m *Event) String() string { return proto.CompactTextString(m) } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{43} +} +func (m *Event) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Event.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - i-- - dAtA[i] = 0x12 + return b[:n], nil } - return len(dAtA) - i, nil } -func (m *Request_Info) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *Event) XXX_Merge(src proto.Message) { + xxx_messageInfo_Event.Merge(m, src) +} +func (m *Event) XXX_Size() int { + return m.Size() +} +func (m *Event) XXX_DiscardUnknown() { + xxx_messageInfo_Event.DiscardUnknown(m) } -func (m *Request_Info) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Info != nil { - { - size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a +var xxx_messageInfo_Event proto.InternalMessageInfo + +func (m *Event) GetType() string { + if m != nil { + return m.Type } - return len(dAtA) - i, nil + return "" } -func (m *Request_InitChain) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + +func (m *Event) GetAttributes() []EventAttribute { + if m != nil { + return m.Attributes + } + return nil } -func (m *Request_InitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.InitChain != nil { - { - size, err := m.InitChain.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) +// EventAttribute is a single key-value pair, associated with an event. 
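// Usage sketch: constructing an Event with indexed attributes, as an ABCI
// application might do when reporting a transaction result so it can later
// be queried through the indexer. The event type, attribute names, and the
// helper function name here are hypothetical illustrations, not values
// defined by this package.
func exampleTransferEvent(sender, amount string) Event {
	return Event{
		Type: "transfer",
		Attributes: []EventAttribute{
			// Index: true marks the key-value pair as indexable by the node.
			{Key: "sender", Value: sender, Index: true},
			{Key: "amount", Value: amount, Index: true},
		},
	}
}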
+type EventAttribute struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Index bool `protobuf:"varint,3,opt,name=index,proto3" json:"index,omitempty"` +} + +func (m *EventAttribute) Reset() { *m = EventAttribute{} } +func (m *EventAttribute) String() string { return proto.CompactTextString(m) } +func (*EventAttribute) ProtoMessage() {} +func (*EventAttribute) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{44} +} +func (m *EventAttribute) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventAttribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventAttribute.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - i-- - dAtA[i] = 0x22 + return b[:n], nil } - return len(dAtA) - i, nil } -func (m *Request_Query) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *EventAttribute) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventAttribute.Merge(m, src) +} +func (m *EventAttribute) XXX_Size() int { + return m.Size() +} +func (m *EventAttribute) XXX_DiscardUnknown() { + xxx_messageInfo_EventAttribute.DiscardUnknown(m) } -func (m *Request_Query) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Query != nil { - { - size, err := m.Query.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a +var xxx_messageInfo_EventAttribute proto.InternalMessageInfo + +func (m *EventAttribute) GetKey() string { + if m != nil { + return m.Key } - return len(dAtA) - i, nil + return "" } -func (m *Request_BeginBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + +func (m *EventAttribute) GetValue() string { + if m != nil { + return m.Value + } + return "" } -func (m *Request_BeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.BeginBlock != nil { - { - size, err := m.BeginBlock.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 +func (m *EventAttribute) GetIndex() bool { + if m != nil { + return m.Index } - return len(dAtA) - i, nil + return false } -func (m *Request_CheckTx) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + +// ExecTxResult contains results of executing one individual transaction. 
+// +// * Its structure is equivalent to #ResponseDeliverTx which will be deprecated/deleted +type ExecTxResult struct { + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` + Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` + GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,json=gasWanted,proto3" json:"gas_wanted,omitempty"` + GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,json=gasUsed,proto3" json:"gas_used,omitempty"` + Events []Event `protobuf:"bytes,7,rep,name=events,proto3" json:"events,omitempty"` + Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` } -func (m *Request_CheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.CheckTx != nil { - { - size, err := m.CheckTx.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) +func (m *ExecTxResult) Reset() { *m = ExecTxResult{} } +func (m *ExecTxResult) String() string { return proto.CompactTextString(m) } +func (*ExecTxResult) ProtoMessage() {} +func (*ExecTxResult) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{45} +} +func (m *ExecTxResult) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExecTxResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ExecTxResult.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - i-- - dAtA[i] = 0x3a + return b[:n], nil } - return len(dAtA) - i, nil } -func (m *Request_DeliverTx) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *ExecTxResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecTxResult.Merge(m, src) +} +func (m *ExecTxResult) XXX_Size() int { + return m.Size() +} +func (m *ExecTxResult) XXX_DiscardUnknown() { + xxx_messageInfo_ExecTxResult.DiscardUnknown(m) } -func (m *Request_DeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.DeliverTx != nil { - { - size, err := m.DeliverTx.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 +var xxx_messageInfo_ExecTxResult proto.InternalMessageInfo + +func (m *ExecTxResult) GetCode() uint32 { + if m != nil { + return m.Code } - return len(dAtA) - i, nil + return 0 } -func (m *Request_EndBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + +func (m *ExecTxResult) GetData() []byte { + if m != nil { + return m.Data + } + return nil } -func (m *Request_EndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.EndBlock != nil { - { - size, err := m.EndBlock.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a +func (m *ExecTxResult) GetLog() string { + if m != nil { + return m.Log } - return len(dAtA) - i, nil + return "" } -func (m *Request_Commit) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + +func (m *ExecTxResult) GetInfo() string { + if m != nil { + return m.Info + } + 
return "" } -func (m *Request_Commit) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Commit != nil { - { - size, err := m.Commit.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x52 +func (m *ExecTxResult) GetGasWanted() int64 { + if m != nil { + return m.GasWanted } - return len(dAtA) - i, nil -} -func (m *Request_ListSnapshots) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + return 0 } -func (m *Request_ListSnapshots) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.ListSnapshots != nil { - { - size, err := m.ListSnapshots.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x5a +func (m *ExecTxResult) GetGasUsed() int64 { + if m != nil { + return m.GasUsed } - return len(dAtA) - i, nil -} -func (m *Request_OfferSnapshot) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + return 0 } -func (m *Request_OfferSnapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.OfferSnapshot != nil { - { - size, err := m.OfferSnapshot.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x62 +func (m *ExecTxResult) GetEvents() []Event { + if m != nil { + return m.Events } - return len(dAtA) - i, nil -} -func (m *Request_LoadSnapshotChunk) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + return nil } -func (m *Request_LoadSnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.LoadSnapshotChunk != nil { - { - size, err := m.LoadSnapshotChunk.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x6a +func (m *ExecTxResult) GetCodespace() string { + if m != nil { + return m.Codespace } - return len(dAtA) - i, nil + return "" } -func (m *Request_ApplySnapshotChunk) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + +// TxResult contains results of executing the transaction. +// +// One usage is indexing transaction results. 
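// Usage sketch: inspecting an ExecTxResult, for example when indexing
// transaction results as the comment above suggests. By ABCI convention a
// response code of 0 indicates success; the helper name is a hypothetical
// illustration, not part of the generated API.
func exampleTxSucceeded(res *ExecTxResult) bool {
	// Guard against a nil result before reading the code.
	return res != nil && res.GetCode() == 0
}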
+type TxResult struct { + Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Index uint32 `protobuf:"varint,2,opt,name=index,proto3" json:"index,omitempty"` + Tx []byte `protobuf:"bytes,3,opt,name=tx,proto3" json:"tx,omitempty"` + Result ExecTxResult `protobuf:"bytes,4,opt,name=result,proto3" json:"result"` } -func (m *Request_ApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.ApplySnapshotChunk != nil { - { - size, err := m.ApplySnapshotChunk.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) +func (m *TxResult) Reset() { *m = TxResult{} } +func (m *TxResult) String() string { return proto.CompactTextString(m) } +func (*TxResult) ProtoMessage() {} +func (*TxResult) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{46} +} +func (m *TxResult) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TxResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TxResult.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - i-- - dAtA[i] = 0x72 + return b[:n], nil } - return len(dAtA) - i, nil } -func (m *RequestEcho) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil +func (m *TxResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_TxResult.Merge(m, src) } - -func (m *RequestEcho) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *TxResult) XXX_Size() int { + return m.Size() } - -func (m *RequestEcho) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Message) > 0 { - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil +func (m *TxResult) XXX_DiscardUnknown() { + xxx_messageInfo_TxResult.DiscardUnknown(m) } -func (m *RequestFlush) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +var xxx_messageInfo_TxResult proto.InternalMessageInfo + +func (m *TxResult) GetHeight() int64 { + if m != nil { + return m.Height } - return dAtA[:n], nil + return 0 } -func (m *RequestFlush) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *TxResult) GetIndex() uint32 { + if m != nil { + return m.Index + } + return 0 } -func (m *RequestFlush) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil +func (m *TxResult) GetTx() []byte { + if m != nil { + return m.Tx + } + return nil } -func (m *RequestInfo) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *TxResult) GetResult() ExecTxResult { + if m != nil { + return m.Result } - return dAtA[:n], nil + return ExecTxResult{} } -func (m *RequestInfo) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +type TxRecord struct { + Action TxRecord_TxAction 
`protobuf:"varint,1,opt,name=action,proto3,enum=tendermint.abci.TxRecord_TxAction" json:"action,omitempty"` + Tx []byte `protobuf:"bytes,2,opt,name=tx,proto3" json:"tx,omitempty"` } -func (m *RequestInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.AbciVersion) > 0 { - i -= len(m.AbciVersion) - copy(dAtA[i:], m.AbciVersion) - i = encodeVarintTypes(dAtA, i, uint64(len(m.AbciVersion))) - i-- - dAtA[i] = 0x22 - } - if m.P2PVersion != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.P2PVersion)) - i-- - dAtA[i] = 0x18 - } - if m.BlockVersion != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.BlockVersion)) - i-- - dAtA[i] = 0x10 - } - if len(m.Version) > 0 { - i -= len(m.Version) - copy(dAtA[i:], m.Version) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Version))) - i-- - dAtA[i] = 0xa +func (m *TxRecord) Reset() { *m = TxRecord{} } +func (m *TxRecord) String() string { return proto.CompactTextString(m) } +func (*TxRecord) ProtoMessage() {} +func (*TxRecord) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{47} +} +func (m *TxRecord) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TxRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TxRecord.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - return len(dAtA) - i, nil +} +func (m *TxRecord) XXX_Merge(src proto.Message) { + xxx_messageInfo_TxRecord.Merge(m, src) +} +func (m *TxRecord) XXX_Size() int { + return m.Size() +} +func (m *TxRecord) XXX_DiscardUnknown() { + xxx_messageInfo_TxRecord.DiscardUnknown(m) } -func (m *RequestInitChain) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +var xxx_messageInfo_TxRecord proto.InternalMessageInfo + +func (m *TxRecord) GetAction() TxRecord_TxAction { + if m != nil { + return m.Action } - return dAtA[:n], nil + return TxRecord_UNKNOWN } -func (m *RequestInitChain) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *TxRecord) GetTx() []byte { + if m != nil { + return m.Tx + } + return nil } -func (m *RequestInitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.InitialCoreHeight != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.InitialCoreHeight)) - i-- - dAtA[i] = 0x38 - } - if m.InitialHeight != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.InitialHeight)) - i-- - dAtA[i] = 0x30 - } - if len(m.AppStateBytes) > 0 { - i -= len(m.AppStateBytes) - copy(dAtA[i:], m.AppStateBytes) - i = encodeVarintTypes(dAtA, i, uint64(len(m.AppStateBytes))) - i-- - dAtA[i] = 0x2a - } - if m.ValidatorSet != nil { - { - size, err := m.ValidatorSet.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.ConsensusParams != nil { - { - size, err := m.ConsensusParams.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if len(m.ChainId) > 0 { - i -= len(m.ChainId) - copy(dAtA[i:], m.ChainId) - i = encodeVarintTypes(dAtA, i, uint64(len(m.ChainId))) - i-- - dAtA[i] = 0x12 - } - n17, err17 := 
github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) - if err17 != nil { - return 0, err17 - } - i -= n17 - i = encodeVarintTypes(dAtA, i, uint64(n17)) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil +// Validator +type Validator struct { + // bytes address = 1; // The first 20 bytes of SHA256(public key) + // PubKey pub_key = 2 [(gogoproto.nullable)=false]; + Power int64 `protobuf:"varint,3,opt,name=power,proto3" json:"power,omitempty"` + ProTxHash []byte `protobuf:"bytes,4,opt,name=pro_tx_hash,json=proTxHash,proto3" json:"pro_tx_hash,omitempty"` } -func (m *RequestQuery) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil +func (m *Validator) Reset() { *m = Validator{} } +func (m *Validator) String() string { return proto.CompactTextString(m) } +func (*Validator) ProtoMessage() {} +func (*Validator) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{48} } - -func (m *RequestQuery) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *Validator) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } - -func (m *RequestQuery) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Prove { - i-- - if m.Prove { - dAtA[i] = 1 - } else { - dAtA[i] = 0 +func (m *Validator) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Validator.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - i-- - dAtA[i] = 0x20 - } - if m.Height != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Height)) - i-- - dAtA[i] = 0x18 - } - if len(m.Path) > 0 { - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Path))) - i-- - dAtA[i] = 0x12 + return b[:n], nil } - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0xa +} +func (m *Validator) XXX_Merge(src proto.Message) { + xxx_messageInfo_Validator.Merge(m, src) +} +func (m *Validator) XXX_Size() int { + return m.Size() +} +func (m *Validator) XXX_DiscardUnknown() { + xxx_messageInfo_Validator.DiscardUnknown(m) +} + +var xxx_messageInfo_Validator proto.InternalMessageInfo + +func (m *Validator) GetPower() int64 { + if m != nil { + return m.Power } - return len(dAtA) - i, nil + return 0 } -func (m *RequestBeginBlock) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *Validator) GetProTxHash() []byte { + if m != nil { + return m.ProTxHash } - return dAtA[:n], nil + return nil } -func (m *RequestBeginBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// ValidatorUpdate +type ValidatorUpdate struct { + PubKey *crypto.PublicKey `protobuf:"bytes,1,opt,name=pub_key,json=pubKey,proto3" json:"pub_key,omitempty"` + Power int64 `protobuf:"varint,2,opt,name=power,proto3" json:"power,omitempty"` + ProTxHash []byte `protobuf:"bytes,3,opt,name=pro_tx_hash,json=proTxHash,proto3" json:"pro_tx_hash,omitempty"` + NodeAddress string `protobuf:"bytes,4,opt,name=node_address,json=nodeAddress,proto3" 
json:"node_address,omitempty"` } -func (m *RequestBeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ByzantineValidators) > 0 { - for iNdEx := len(m.ByzantineValidators) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ByzantineValidators[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - { - size, err := m.LastCommitInfo.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) +func (m *ValidatorUpdate) Reset() { *m = ValidatorUpdate{} } +func (m *ValidatorUpdate) String() string { return proto.CompactTextString(m) } +func (*ValidatorUpdate) ProtoMessage() {} +func (*ValidatorUpdate) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{49} +} +func (m *ValidatorUpdate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatorUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ValidatorUpdate.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) if err != nil { - return 0, err + return nil, err } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) + return b[:n], nil } - i-- - dAtA[i] = 0x12 - if len(m.Hash) > 0 { - i -= len(m.Hash) - copy(dAtA[i:], m.Hash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) - i-- - dAtA[i] = 0xa +} +func (m *ValidatorUpdate) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatorUpdate.Merge(m, src) +} +func (m *ValidatorUpdate) XXX_Size() int { + return m.Size() +} +func (m *ValidatorUpdate) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatorUpdate.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatorUpdate proto.InternalMessageInfo + +func (m *ValidatorUpdate) GetPubKey() *crypto.PublicKey { + if m != nil { + return m.PubKey } - return len(dAtA) - i, nil + return nil } -func (m *RequestCheckTx) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *ValidatorUpdate) GetPower() int64 { + if m != nil { + return m.Power } - return dAtA[:n], nil + return 0 } -func (m *RequestCheckTx) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *ValidatorUpdate) GetProTxHash() []byte { + if m != nil { + return m.ProTxHash + } + return nil } -func (m *RequestCheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Type != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Type)) - i-- - dAtA[i] = 0x10 - } - if len(m.Tx) > 0 { - i -= len(m.Tx) - copy(dAtA[i:], m.Tx) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) - i-- - dAtA[i] = 0xa +func (m *ValidatorUpdate) GetNodeAddress() string { + if m != nil { + return m.NodeAddress } - return len(dAtA) - i, nil + return "" } -func (m *RequestDeliverTx) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil +type ValidatorSetUpdate struct { + ValidatorUpdates []ValidatorUpdate `protobuf:"bytes,1,rep,name=validator_updates,json=validatorUpdates,proto3" 
json:"validator_updates"` + ThresholdPublicKey crypto.PublicKey `protobuf:"bytes,2,opt,name=threshold_public_key,json=thresholdPublicKey,proto3" json:"threshold_public_key"` + QuorumHash []byte `protobuf:"bytes,3,opt,name=quorum_hash,json=quorumHash,proto3" json:"quorum_hash,omitempty"` } -func (m *RequestDeliverTx) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *ValidatorSetUpdate) Reset() { *m = ValidatorSetUpdate{} } +func (m *ValidatorSetUpdate) String() string { return proto.CompactTextString(m) } +func (*ValidatorSetUpdate) ProtoMessage() {} +func (*ValidatorSetUpdate) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{50} } - -func (m *RequestDeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Tx) > 0 { - i -= len(m.Tx) - copy(dAtA[i:], m.Tx) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil +func (m *ValidatorSetUpdate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } - -func (m *RequestEndBlock) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *ValidatorSetUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ValidatorSetUpdate.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - return dAtA[:n], nil } - -func (m *RequestEndBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *ValidatorSetUpdate) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatorSetUpdate.Merge(m, src) +} +func (m *ValidatorSetUpdate) XXX_Size() int { + return m.Size() +} +func (m *ValidatorSetUpdate) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatorSetUpdate.DiscardUnknown(m) } -func (m *RequestEndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Height != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Height)) - i-- - dAtA[i] = 0x8 +var xxx_messageInfo_ValidatorSetUpdate proto.InternalMessageInfo + +func (m *ValidatorSetUpdate) GetValidatorUpdates() []ValidatorUpdate { + if m != nil { + return m.ValidatorUpdates } - return len(dAtA) - i, nil + return nil } -func (m *RequestCommit) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *ValidatorSetUpdate) GetThresholdPublicKey() crypto.PublicKey { + if m != nil { + return m.ThresholdPublicKey } - return dAtA[:n], nil + return crypto.PublicKey{} } -func (m *RequestCommit) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *ValidatorSetUpdate) GetQuorumHash() []byte { + if m != nil { + return m.QuorumHash + } + return nil } -func (m *RequestCommit) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil +type ThresholdPublicKeyUpdate struct { + ThresholdPublicKey crypto.PublicKey `protobuf:"bytes,1,opt,name=threshold_public_key,json=thresholdPublicKey,proto3" json:"threshold_public_key"` } -func (m *RequestListSnapshots) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = 
make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *ThresholdPublicKeyUpdate) Reset() { *m = ThresholdPublicKeyUpdate{} } +func (m *ThresholdPublicKeyUpdate) String() string { return proto.CompactTextString(m) } +func (*ThresholdPublicKeyUpdate) ProtoMessage() {} +func (*ThresholdPublicKeyUpdate) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{51} +} +func (m *ThresholdPublicKeyUpdate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ThresholdPublicKeyUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ThresholdPublicKeyUpdate.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - return dAtA[:n], nil } - -func (m *RequestListSnapshots) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *ThresholdPublicKeyUpdate) XXX_Merge(src proto.Message) { + xxx_messageInfo_ThresholdPublicKeyUpdate.Merge(m, src) } - -func (m *RequestListSnapshots) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil +func (m *ThresholdPublicKeyUpdate) XXX_Size() int { + return m.Size() +} +func (m *ThresholdPublicKeyUpdate) XXX_DiscardUnknown() { + xxx_messageInfo_ThresholdPublicKeyUpdate.DiscardUnknown(m) } -func (m *RequestOfferSnapshot) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +var xxx_messageInfo_ThresholdPublicKeyUpdate proto.InternalMessageInfo + +func (m *ThresholdPublicKeyUpdate) GetThresholdPublicKey() crypto.PublicKey { + if m != nil { + return m.ThresholdPublicKey } - return dAtA[:n], nil + return crypto.PublicKey{} } -func (m *RequestOfferSnapshot) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +type QuorumHashUpdate struct { + QuorumHash []byte `protobuf:"bytes,1,opt,name=quorum_hash,json=quorumHash,proto3" json:"quorum_hash,omitempty"` } -func (m *RequestOfferSnapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.AppHash) > 0 { - i -= len(m.AppHash) - copy(dAtA[i:], m.AppHash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.AppHash))) - i-- - dAtA[i] = 0x12 - } - if m.Snapshot != nil { - { - size, err := m.Snapshot.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) +func (m *QuorumHashUpdate) Reset() { *m = QuorumHashUpdate{} } +func (m *QuorumHashUpdate) String() string { return proto.CompactTextString(m) } +func (*QuorumHashUpdate) ProtoMessage() {} +func (*QuorumHashUpdate) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{52} +} +func (m *QuorumHashUpdate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QuorumHashUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QuorumHashUpdate.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - i-- - dAtA[i] = 0xa + return b[:n], nil } - return len(dAtA) - i, nil +} +func (m *QuorumHashUpdate) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_QuorumHashUpdate.Merge(m, src) +} +func (m *QuorumHashUpdate) XXX_Size() int { + return m.Size() +} +func (m *QuorumHashUpdate) XXX_DiscardUnknown() { + xxx_messageInfo_QuorumHashUpdate.DiscardUnknown(m) } -func (m *RequestLoadSnapshotChunk) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +var xxx_messageInfo_QuorumHashUpdate proto.InternalMessageInfo + +func (m *QuorumHashUpdate) GetQuorumHash() []byte { + if m != nil { + return m.QuorumHash } - return dAtA[:n], nil + return nil } -func (m *RequestLoadSnapshotChunk) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// VoteInfo +type VoteInfo struct { + Validator Validator `protobuf:"bytes,1,opt,name=validator,proto3" json:"validator"` + SignedLastBlock bool `protobuf:"varint,2,opt,name=signed_last_block,json=signedLastBlock,proto3" json:"signed_last_block,omitempty"` } -func (m *RequestLoadSnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Chunk != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Chunk)) - i-- - dAtA[i] = 0x18 - } - if m.Format != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Format)) - i-- - dAtA[i] = 0x10 +func (m *VoteInfo) Reset() { *m = VoteInfo{} } +func (m *VoteInfo) String() string { return proto.CompactTextString(m) } +func (*VoteInfo) ProtoMessage() {} +func (*VoteInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{53} +} +func (m *VoteInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VoteInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_VoteInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - if m.Height != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Height)) - i-- - dAtA[i] = 0x8 +} +func (m *VoteInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_VoteInfo.Merge(m, src) +} +func (m *VoteInfo) XXX_Size() int { + return m.Size() +} +func (m *VoteInfo) XXX_DiscardUnknown() { + xxx_messageInfo_VoteInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_VoteInfo proto.InternalMessageInfo + +func (m *VoteInfo) GetValidator() Validator { + if m != nil { + return m.Validator } - return len(dAtA) - i, nil + return Validator{} } -func (m *RequestApplySnapshotChunk) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *VoteInfo) GetSignedLastBlock() bool { + if m != nil { + return m.SignedLastBlock } - return dAtA[:n], nil + return false } -func (m *RequestApplySnapshotChunk) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// ExtendedVoteInfo +type ExtendedVoteInfo struct { + // The validator that sent the vote. + Validator Validator `protobuf:"bytes,1,opt,name=validator,proto3" json:"validator"` + // Indicates whether the validator signed the last block, allowing for rewards based on validator availability. + SignedLastBlock bool `protobuf:"varint,2,opt,name=signed_last_block,json=signedLastBlock,proto3" json:"signed_last_block,omitempty"` + // Non-deterministic extension provided by the sending validator's application. 
+ VoteExtension []byte `protobuf:"bytes,3,opt,name=vote_extension,json=voteExtension,proto3" json:"vote_extension,omitempty"` } -func (m *RequestApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Sender) > 0 { - i -= len(m.Sender) - copy(dAtA[i:], m.Sender) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Sender))) - i-- - dAtA[i] = 0x1a - } - if len(m.Chunk) > 0 { - i -= len(m.Chunk) - copy(dAtA[i:], m.Chunk) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Chunk))) - i-- - dAtA[i] = 0x12 - } - if m.Index != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Index)) - i-- - dAtA[i] = 0x8 +func (m *ExtendedVoteInfo) Reset() { *m = ExtendedVoteInfo{} } +func (m *ExtendedVoteInfo) String() string { return proto.CompactTextString(m) } +func (*ExtendedVoteInfo) ProtoMessage() {} +func (*ExtendedVoteInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{54} +} +func (m *ExtendedVoteInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExtendedVoteInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ExtendedVoteInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil } - return len(dAtA) - i, nil +} +func (m *ExtendedVoteInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtendedVoteInfo.Merge(m, src) +} +func (m *ExtendedVoteInfo) XXX_Size() int { + return m.Size() +} +func (m *ExtendedVoteInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ExtendedVoteInfo.DiscardUnknown(m) } -func (m *Response) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +var xxx_messageInfo_ExtendedVoteInfo proto.InternalMessageInfo + +func (m *ExtendedVoteInfo) GetValidator() Validator { + if m != nil { + return m.Validator } - return dAtA[:n], nil + return Validator{} } -func (m *Response) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *ExtendedVoteInfo) GetSignedLastBlock() bool { + if m != nil { + return m.SignedLastBlock + } + return false } -func (m *Response) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Value != nil { - { - size := m.Value.Size() - i -= size - if _, err := m.Value.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } +func (m *ExtendedVoteInfo) GetVoteExtension() []byte { + if m != nil { + return m.VoteExtension } - return len(dAtA) - i, nil + return nil } -func (m *Response_Exception) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +type Misbehavior struct { + Type MisbehaviorType `protobuf:"varint,1,opt,name=type,proto3,enum=tendermint.abci.MisbehaviorType" json:"type,omitempty"` + // The offending validator + Validator Validator `protobuf:"bytes,2,opt,name=validator,proto3" json:"validator"` + // The height when the offense occurred + Height int64 `protobuf:"varint,3,opt,name=height,proto3" json:"height,omitempty"` + // The corresponding time where the offense occurred + Time time.Time `protobuf:"bytes,4,opt,name=time,proto3,stdtime" json:"time"` + // Total voting power of the validator set in case the ABCI application does + // not store historical validators. 
+ // https://github.com/tendermint/tendermint/issues/4581 + TotalVotingPower int64 `protobuf:"varint,5,opt,name=total_voting_power,json=totalVotingPower,proto3" json:"total_voting_power,omitempty"` } -func (m *Response_Exception) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Exception != nil { - { - size, err := m.Exception.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) +func (m *Misbehavior) Reset() { *m = Misbehavior{} } +func (m *Misbehavior) String() string { return proto.CompactTextString(m) } +func (*Misbehavior) ProtoMessage() {} +func (*Misbehavior) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{55} +} +func (m *Misbehavior) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Misbehavior) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Misbehavior.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - i-- - dAtA[i] = 0xa + return b[:n], nil } - return len(dAtA) - i, nil } -func (m *Response_Echo) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *Misbehavior) XXX_Merge(src proto.Message) { + xxx_messageInfo_Misbehavior.Merge(m, src) +} +func (m *Misbehavior) XXX_Size() int { + return m.Size() +} +func (m *Misbehavior) XXX_DiscardUnknown() { + xxx_messageInfo_Misbehavior.DiscardUnknown(m) } -func (m *Response_Echo) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Echo != nil { - { - size, err := m.Echo.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 +var xxx_messageInfo_Misbehavior proto.InternalMessageInfo + +func (m *Misbehavior) GetType() MisbehaviorType { + if m != nil { + return m.Type } - return len(dAtA) - i, nil -} -func (m *Response_Flush) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + return MisbehaviorType_UNKNOWN } -func (m *Response_Flush) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Flush != nil { - { - size, err := m.Flush.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a +func (m *Misbehavior) GetValidator() Validator { + if m != nil { + return m.Validator } - return len(dAtA) - i, nil + return Validator{} } -func (m *Response_Info) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + +func (m *Misbehavior) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 } -func (m *Response_Info) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Info != nil { - { - size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 +func (m *Misbehavior) GetTime() time.Time { + if m != nil { + return m.Time } - return len(dAtA) - i, nil + return time.Time{} } -func (m *Response_InitChain) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + +func (m *Misbehavior) GetTotalVotingPower() int64 { + if m != nil { + return m.TotalVotingPower + } + return 0 } -func (m 
*Response_InitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.InitChain != nil { - { - size, err := m.InitChain.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - return len(dAtA) - i, nil -} -func (m *Response_Query) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +type Snapshot struct { + Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Format uint32 `protobuf:"varint,2,opt,name=format,proto3" json:"format,omitempty"` + Chunks uint32 `protobuf:"varint,3,opt,name=chunks,proto3" json:"chunks,omitempty"` + Hash []byte `protobuf:"bytes,4,opt,name=hash,proto3" json:"hash,omitempty"` + Metadata []byte `protobuf:"bytes,5,opt,name=metadata,proto3" json:"metadata,omitempty"` + CoreChainLockedHeight uint32 `protobuf:"varint,100,opt,name=core_chain_locked_height,json=coreChainLockedHeight,proto3" json:"core_chain_locked_height,omitempty"` } -func (m *Response_Query) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Query != nil { - { - size, err := m.Query.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - return len(dAtA) - i, nil +func (m *Snapshot) Reset() { *m = Snapshot{} } +func (m *Snapshot) String() string { return proto.CompactTextString(m) } +func (*Snapshot) ProtoMessage() {} +func (*Snapshot) Descriptor() ([]byte, []int) { + return fileDescriptor_252557cfdd89a31a, []int{56} } -func (m *Response_BeginBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *Snapshot) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) } - -func (m *Response_BeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.BeginBlock != nil { - { - size, err := m.BeginBlock.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) +func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err } - i-- - dAtA[i] = 0x3a + return b[:n], nil } - return len(dAtA) - i, nil } -func (m *Response_CheckTx) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *Snapshot) XXX_Merge(src proto.Message) { + xxx_messageInfo_Snapshot.Merge(m, src) } - -func (m *Response_CheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.CheckTx != nil { - { - size, err := m.CheckTx.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - return len(dAtA) - i, nil +func (m *Snapshot) XXX_Size() int { + return m.Size() } -func (m *Response_DeliverTx) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *Snapshot) XXX_DiscardUnknown() { + xxx_messageInfo_Snapshot.DiscardUnknown(m) } -func (m *Response_DeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.DeliverTx != nil { - { - size, err := m.DeliverTx.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 
0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - } - return len(dAtA) - i, nil -} -func (m *Response_EndBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} +var xxx_messageInfo_Snapshot proto.InternalMessageInfo -func (m *Response_EndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.EndBlock != nil { - { - size, err := m.EndBlock.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x52 +func (m *Snapshot) GetHeight() uint64 { + if m != nil { + return m.Height } - return len(dAtA) - i, nil -} -func (m *Response_Commit) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + return 0 } -func (m *Response_Commit) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Commit != nil { - { - size, err := m.Commit.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x5a +func (m *Snapshot) GetFormat() uint32 { + if m != nil { + return m.Format } - return len(dAtA) - i, nil -} -func (m *Response_ListSnapshots) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + return 0 } -func (m *Response_ListSnapshots) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.ListSnapshots != nil { - { - size, err := m.ListSnapshots.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x62 +func (m *Snapshot) GetChunks() uint32 { + if m != nil { + return m.Chunks } - return len(dAtA) - i, nil -} -func (m *Response_OfferSnapshot) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + return 0 } -func (m *Response_OfferSnapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.OfferSnapshot != nil { - { - size, err := m.OfferSnapshot.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x6a +func (m *Snapshot) GetHash() []byte { + if m != nil { + return m.Hash } - return len(dAtA) - i, nil -} -func (m *Response_LoadSnapshotChunk) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + return nil } -func (m *Response_LoadSnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.LoadSnapshotChunk != nil { - { - size, err := m.LoadSnapshotChunk.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x72 +func (m *Snapshot) GetMetadata() []byte { + if m != nil { + return m.Metadata } - return len(dAtA) - i, nil -} -func (m *Response_ApplySnapshotChunk) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + return nil } -func (m *Response_ApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.ApplySnapshotChunk != nil { - { - size, err := m.ApplySnapshotChunk.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x7a +func (m *Snapshot) 
GetCoreChainLockedHeight() uint32 { + if m != nil { + return m.CoreChainLockedHeight } - return len(dAtA) - i, nil + return 0 } -func (m *ResponseException) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil + +func init() { + proto.RegisterEnum("tendermint.abci.CheckTxType", CheckTxType_name, CheckTxType_value) + proto.RegisterEnum("tendermint.abci.MisbehaviorType", MisbehaviorType_name, MisbehaviorType_value) + proto.RegisterEnum("tendermint.abci.ResponseOfferSnapshot_Result", ResponseOfferSnapshot_Result_name, ResponseOfferSnapshot_Result_value) + proto.RegisterEnum("tendermint.abci.ResponseApplySnapshotChunk_Result", ResponseApplySnapshotChunk_Result_name, ResponseApplySnapshotChunk_Result_value) + proto.RegisterEnum("tendermint.abci.ResponseProcessProposal_ProposalStatus", ResponseProcessProposal_ProposalStatus_name, ResponseProcessProposal_ProposalStatus_value) + proto.RegisterEnum("tendermint.abci.ResponseVerifyVoteExtension_VerifyStatus", ResponseVerifyVoteExtension_VerifyStatus_name, ResponseVerifyVoteExtension_VerifyStatus_value) + proto.RegisterEnum("tendermint.abci.TxRecord_TxAction", TxRecord_TxAction_name, TxRecord_TxAction_value) + proto.RegisterType((*Request)(nil), "tendermint.abci.Request") + proto.RegisterType((*RequestEcho)(nil), "tendermint.abci.RequestEcho") + proto.RegisterType((*RequestFlush)(nil), "tendermint.abci.RequestFlush") + proto.RegisterType((*RequestInfo)(nil), "tendermint.abci.RequestInfo") + proto.RegisterType((*RequestInitChain)(nil), "tendermint.abci.RequestInitChain") + proto.RegisterType((*RequestQuery)(nil), "tendermint.abci.RequestQuery") + proto.RegisterType((*RequestBeginBlock)(nil), "tendermint.abci.RequestBeginBlock") + proto.RegisterType((*RequestCheckTx)(nil), "tendermint.abci.RequestCheckTx") + proto.RegisterType((*RequestDeliverTx)(nil), "tendermint.abci.RequestDeliverTx") + proto.RegisterType((*RequestEndBlock)(nil), "tendermint.abci.RequestEndBlock") + proto.RegisterType((*RequestCommit)(nil), "tendermint.abci.RequestCommit") + proto.RegisterType((*RequestListSnapshots)(nil), "tendermint.abci.RequestListSnapshots") + proto.RegisterType((*RequestOfferSnapshot)(nil), "tendermint.abci.RequestOfferSnapshot") + proto.RegisterType((*RequestLoadSnapshotChunk)(nil), "tendermint.abci.RequestLoadSnapshotChunk") + proto.RegisterType((*RequestApplySnapshotChunk)(nil), "tendermint.abci.RequestApplySnapshotChunk") + proto.RegisterType((*RequestPrepareProposal)(nil), "tendermint.abci.RequestPrepareProposal") + proto.RegisterType((*RequestProcessProposal)(nil), "tendermint.abci.RequestProcessProposal") + proto.RegisterType((*RequestExtendVote)(nil), "tendermint.abci.RequestExtendVote") + proto.RegisterType((*RequestVerifyVoteExtension)(nil), "tendermint.abci.RequestVerifyVoteExtension") + proto.RegisterType((*RequestFinalizeBlock)(nil), "tendermint.abci.RequestFinalizeBlock") + proto.RegisterType((*Response)(nil), "tendermint.abci.Response") + proto.RegisterType((*ResponseException)(nil), "tendermint.abci.ResponseException") + proto.RegisterType((*ResponseEcho)(nil), "tendermint.abci.ResponseEcho") + proto.RegisterType((*ResponseFlush)(nil), "tendermint.abci.ResponseFlush") + proto.RegisterType((*ResponseInfo)(nil), "tendermint.abci.ResponseInfo") + proto.RegisterType((*ResponseInitChain)(nil), "tendermint.abci.ResponseInitChain") + proto.RegisterType((*ResponseQuery)(nil), "tendermint.abci.ResponseQuery") + 
proto.RegisterType((*ResponseBeginBlock)(nil), "tendermint.abci.ResponseBeginBlock") + proto.RegisterType((*ResponseCheckTx)(nil), "tendermint.abci.ResponseCheckTx") + proto.RegisterType((*ResponseDeliverTx)(nil), "tendermint.abci.ResponseDeliverTx") + proto.RegisterType((*ResponseEndBlock)(nil), "tendermint.abci.ResponseEndBlock") + proto.RegisterType((*ResponseCommit)(nil), "tendermint.abci.ResponseCommit") + proto.RegisterType((*ResponseListSnapshots)(nil), "tendermint.abci.ResponseListSnapshots") + proto.RegisterType((*ResponseOfferSnapshot)(nil), "tendermint.abci.ResponseOfferSnapshot") + proto.RegisterType((*ResponseLoadSnapshotChunk)(nil), "tendermint.abci.ResponseLoadSnapshotChunk") + proto.RegisterType((*ResponseApplySnapshotChunk)(nil), "tendermint.abci.ResponseApplySnapshotChunk") + proto.RegisterType((*ResponsePrepareProposal)(nil), "tendermint.abci.ResponsePrepareProposal") + proto.RegisterType((*ResponseProcessProposal)(nil), "tendermint.abci.ResponseProcessProposal") + proto.RegisterType((*ResponseExtendVote)(nil), "tendermint.abci.ResponseExtendVote") + proto.RegisterType((*ResponseVerifyVoteExtension)(nil), "tendermint.abci.ResponseVerifyVoteExtension") + proto.RegisterType((*ResponseFinalizeBlock)(nil), "tendermint.abci.ResponseFinalizeBlock") + proto.RegisterType((*CommitInfo)(nil), "tendermint.abci.CommitInfo") + proto.RegisterType((*ExtendedCommitInfo)(nil), "tendermint.abci.ExtendedCommitInfo") + proto.RegisterType((*Event)(nil), "tendermint.abci.Event") + proto.RegisterType((*EventAttribute)(nil), "tendermint.abci.EventAttribute") + proto.RegisterType((*ExecTxResult)(nil), "tendermint.abci.ExecTxResult") + proto.RegisterType((*TxResult)(nil), "tendermint.abci.TxResult") + proto.RegisterType((*TxRecord)(nil), "tendermint.abci.TxRecord") + proto.RegisterType((*Validator)(nil), "tendermint.abci.Validator") + proto.RegisterType((*ValidatorUpdate)(nil), "tendermint.abci.ValidatorUpdate") + proto.RegisterType((*ValidatorSetUpdate)(nil), "tendermint.abci.ValidatorSetUpdate") + proto.RegisterType((*ThresholdPublicKeyUpdate)(nil), "tendermint.abci.ThresholdPublicKeyUpdate") + proto.RegisterType((*QuorumHashUpdate)(nil), "tendermint.abci.QuorumHashUpdate") + proto.RegisterType((*VoteInfo)(nil), "tendermint.abci.VoteInfo") + proto.RegisterType((*ExtendedVoteInfo)(nil), "tendermint.abci.ExtendedVoteInfo") + proto.RegisterType((*Misbehavior)(nil), "tendermint.abci.Misbehavior") + proto.RegisterType((*Snapshot)(nil), "tendermint.abci.Snapshot") } -func (m *ResponseException) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func init() { proto.RegisterFile("tendermint/abci/types.proto", fileDescriptor_252557cfdd89a31a) } + +var fileDescriptor_252557cfdd89a31a = []byte{ + // 3742 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0xcb, 0x6f, 0x1b, 0xd7, + 0xd5, 0xe7, 0xf0, 0x25, 0xf2, 0x50, 0x7c, 0xe8, 0x4a, 0xb6, 0x69, 0xda, 0x96, 0x94, 0x31, 0x1c, + 0x3b, 0x4e, 0x22, 0x25, 0xf2, 0x97, 0xc4, 0xf9, 0x92, 0x7c, 0x81, 0x44, 0xd1, 0xa1, 0x6c, 0x59, + 0x92, 0x47, 0x94, 0x83, 0x7c, 0xf9, 0xe2, 0xc9, 0x88, 0x73, 0x25, 0x4e, 0x4c, 0x72, 0x26, 0x33, + 0x43, 0x85, 0xca, 0x36, 0x5f, 0x36, 0x59, 0x65, 0x53, 0xb4, 0x40, 0x11, 0x14, 0x28, 0x5a, 0xa0, + 0x9b, 0xa2, 0x7f, 0x40, 0x81, 0xae, 0xb3, 0xcc, 0xaa, 0x2d, 0x0a, 0x34, 0x0d, 0x92, 0x4d, 0xd1, + 0x6d, 0x81, 0x76, 0xd7, 0x16, 0xf7, 0x31, 0x4f, 0x72, 0xf8, 0x88, 0xd3, 0x00, 0x41, 0xb3, 0x9b, + 0x7b, 0xee, 0x39, 0x67, 0xee, 0xe3, 
0xdc, 0x73, 0xce, 0xfd, 0xdd, 0x7b, 0xe1, 0x82, 0x8d, 0xbb, + 0x2a, 0x36, 0x3b, 0x5a, 0xd7, 0x5e, 0x55, 0x0e, 0x9b, 0xda, 0xaa, 0x7d, 0x6a, 0x60, 0x6b, 0xc5, + 0x30, 0x75, 0x5b, 0x47, 0x45, 0xaf, 0x72, 0x85, 0x54, 0x56, 0x2e, 0xf9, 0xb8, 0x9b, 0xe6, 0xa9, + 0x61, 0xeb, 0xab, 0x86, 0xa9, 0xeb, 0x47, 0x8c, 0xbf, 0x72, 0xd1, 0x57, 0x4d, 0xf5, 0xf8, 0xb5, + 0x05, 0x6a, 0xb9, 0xf0, 0x43, 0x7c, 0xea, 0xd4, 0x5e, 0x1a, 0x90, 0x35, 0x14, 0x53, 0xe9, 0x38, + 0xd5, 0x4b, 0xc7, 0xba, 0x7e, 0xdc, 0xc6, 0xab, 0xb4, 0x74, 0xd8, 0x3b, 0x5a, 0xb5, 0xb5, 0x0e, + 0xb6, 0x6c, 0xa5, 0x63, 0x70, 0x86, 0x85, 0x63, 0xfd, 0x58, 0xa7, 0x9f, 0xab, 0xe4, 0x8b, 0x51, + 0xc5, 0x7f, 0x02, 0xcc, 0x48, 0xf8, 0xdd, 0x1e, 0xb6, 0x6c, 0xb4, 0x06, 0x49, 0xdc, 0x6c, 0xe9, + 0x65, 0x61, 0x59, 0xb8, 0x96, 0x5b, 0xbb, 0xb8, 0x12, 0xea, 0xdc, 0x0a, 0xe7, 0xab, 0x35, 0x5b, + 0x7a, 0x3d, 0x26, 0x51, 0x5e, 0xf4, 0x1c, 0xa4, 0x8e, 0xda, 0x3d, 0xab, 0x55, 0x8e, 0x53, 0xa1, + 0x4b, 0x51, 0x42, 0xb7, 0x08, 0x53, 0x3d, 0x26, 0x31, 0x6e, 0xf2, 0x2b, 0xad, 0x7b, 0xa4, 0x97, + 0x13, 0xa3, 0x7f, 0xb5, 0xd5, 0x3d, 0xa2, 0xbf, 0x22, 0xbc, 0x68, 0x03, 0x40, 0xeb, 0x6a, 0xb6, + 0xdc, 0x6c, 0x29, 0x5a, 0xb7, 0x9c, 0xa4, 0x92, 0x8f, 0x45, 0x4b, 0x6a, 0x76, 0x95, 0x30, 0xd6, + 0x63, 0x52, 0x56, 0x73, 0x0a, 0xa4, 0xb9, 0xef, 0xf6, 0xb0, 0x79, 0x5a, 0x4e, 0x8d, 0x6e, 0xee, + 0x3d, 0xc2, 0x44, 0x9a, 0x4b, 0xb9, 0xd1, 0x16, 0xe4, 0x0e, 0xf1, 0xb1, 0xd6, 0x95, 0x0f, 0xdb, + 0x7a, 0xf3, 0x61, 0x39, 0x4d, 0x85, 0xc5, 0x28, 0xe1, 0x0d, 0xc2, 0xba, 0x41, 0x38, 0x37, 0xe2, + 0x65, 0xa1, 0x1e, 0x93, 0xe0, 0xd0, 0xa5, 0xa0, 0x97, 0x21, 0xd3, 0x6c, 0xe1, 0xe6, 0x43, 0xd9, + 0xee, 0x97, 0x67, 0xa8, 0x9e, 0xa5, 0x28, 0x3d, 0x55, 0xc2, 0xd7, 0xe8, 0xd7, 0x63, 0xd2, 0x4c, + 0x93, 0x7d, 0xa2, 0x5b, 0x00, 0x2a, 0x6e, 0x6b, 0x27, 0xd8, 0x24, 0xf2, 0x99, 0xd1, 0x63, 0xb0, + 0xc9, 0x38, 0x1b, 0x7d, 0xde, 0x8c, 0xac, 0xea, 0x10, 0x50, 0x15, 0xb2, 0xb8, 0xab, 0xf2, 0xee, + 0x64, 0xa9, 0x9a, 0xe5, 0xc8, 0xf9, 0xee, 0xaa, 0xfe, 0xce, 0x64, 0x30, 0x2f, 0xa3, 0x9b, 0x90, + 0x6e, 0xea, 0x9d, 0x8e, 0x66, 0x97, 0x81, 0x6a, 0x58, 0x8c, 0xec, 0x08, 0xe5, 0xaa, 0xc7, 0x24, + 0xce, 0x8f, 0x76, 0xa0, 0xd0, 0xd6, 0x2c, 0x5b, 0xb6, 0xba, 0x8a, 0x61, 0xb5, 0x74, 0xdb, 0x2a, + 0xe7, 0xa8, 0x86, 0x2b, 0x51, 0x1a, 0xb6, 0x35, 0xcb, 0xde, 0x77, 0x98, 0xeb, 0x31, 0x29, 0xdf, + 0xf6, 0x13, 0x88, 0x3e, 0xfd, 0xe8, 0x08, 0x9b, 0xae, 0xc2, 0xf2, 0xec, 0x68, 0x7d, 0xbb, 0x84, + 0xdb, 0x91, 0x27, 0xfa, 0x74, 0x3f, 0x01, 0xbd, 0x09, 0xf3, 0x6d, 0x5d, 0x51, 0x5d, 0x75, 0x72, + 0xb3, 0xd5, 0xeb, 0x3e, 0x2c, 0xe7, 0xa9, 0xd2, 0x27, 0x22, 0x1b, 0xa9, 0x2b, 0xaa, 0xa3, 0xa2, + 0x4a, 0x04, 0xea, 0x31, 0x69, 0xae, 0x1d, 0x26, 0xa2, 0x07, 0xb0, 0xa0, 0x18, 0x46, 0xfb, 0x34, + 0xac, 0xbd, 0x40, 0xb5, 0x5f, 0x8f, 0xd2, 0xbe, 0x4e, 0x64, 0xc2, 0xea, 0x91, 0x32, 0x40, 0x45, + 0x0d, 0x28, 0x19, 0x26, 0x36, 0x14, 0x13, 0xcb, 0x86, 0xa9, 0x1b, 0xba, 0xa5, 0xb4, 0xcb, 0x45, + 0xaa, 0xfb, 0x6a, 0x94, 0xee, 0x3d, 0xc6, 0xbf, 0xc7, 0xd9, 0xeb, 0x31, 0xa9, 0x68, 0x04, 0x49, + 0x4c, 0xab, 0xde, 0xc4, 0x96, 0xe5, 0x69, 0x2d, 0x8d, 0xd3, 0x4a, 0xf9, 0x83, 0x5a, 0x03, 0x24, + 0x54, 0x83, 0x1c, 0xee, 0x13, 0x71, 0xf9, 0x44, 0xb7, 0x71, 0x79, 0x6e, 0xf4, 0xc2, 0xaa, 0x51, + 0xd6, 0xfb, 0xba, 0x8d, 0xc9, 0xa2, 0xc2, 0x6e, 0x09, 0x29, 0x70, 0xe6, 0x04, 0x9b, 0xda, 0xd1, + 0x29, 0x55, 0x23, 0xd3, 0x1a, 0x4b, 0xd3, 0xbb, 0x65, 0x44, 0x15, 0x3e, 0x19, 0xa5, 0xf0, 0x3e, + 0x15, 0x22, 0x2a, 0x6a, 0x8e, 0x48, 0x3d, 0x26, 0xcd, 0x9f, 0x0c, 0x92, 0x89, 0x89, 0x1d, 0x69, + 0x5d, 0xa5, 0xad, 0xbd, 0x8f, 0xf9, 0xb2, 0x99, 0x1f, 0x6d, 
0x62, 0xb7, 0x38, 0x37, 0x5d, 0x2b, + 0xc4, 0xc4, 0x8e, 0xfc, 0x84, 0x8d, 0x19, 0x48, 0x9d, 0x28, 0xed, 0x1e, 0x16, 0xaf, 0x42, 0xce, + 0xe7, 0x58, 0x51, 0x19, 0x66, 0x3a, 0xd8, 0xb2, 0x94, 0x63, 0x4c, 0xfd, 0x70, 0x56, 0x72, 0x8a, + 0x62, 0x01, 0x66, 0xfd, 0xce, 0x54, 0xfc, 0x58, 0x70, 0x25, 0x89, 0x9f, 0x24, 0x92, 0x27, 0xd8, + 0xa4, 0xdd, 0xe6, 0x92, 0xbc, 0x88, 0x2e, 0x43, 0x9e, 0x36, 0x59, 0x76, 0xea, 0x89, 0xb3, 0x4e, + 0x4a, 0xb3, 0x94, 0x78, 0x9f, 0x33, 0x2d, 0x41, 0xce, 0x58, 0x33, 0x5c, 0x96, 0x04, 0x65, 0x01, + 0x63, 0xcd, 0x70, 0x18, 0x1e, 0x83, 0x59, 0xd2, 0x3f, 0x97, 0x23, 0x49, 0x7f, 0x92, 0x23, 0x34, + 0xce, 0x22, 0xfe, 0x7f, 0x02, 0x4a, 0x61, 0x07, 0x8c, 0x6e, 0x42, 0x92, 0xc4, 0x22, 0x1e, 0x56, + 0x2a, 0x2b, 0x2c, 0x50, 0xad, 0x38, 0x81, 0x6a, 0xa5, 0xe1, 0x04, 0xaa, 0x8d, 0xcc, 0xa7, 0x9f, + 0x2f, 0xc5, 0x3e, 0xfe, 0xd3, 0x92, 0x20, 0x51, 0x09, 0x74, 0x9e, 0xf8, 0x4a, 0x45, 0xeb, 0xca, + 0x9a, 0x4a, 0x9b, 0x9c, 0x25, 0x8e, 0x50, 0xd1, 0xba, 0x5b, 0x2a, 0xda, 0x86, 0x52, 0x53, 0xef, + 0x5a, 0xb8, 0x6b, 0xf5, 0x2c, 0x99, 0x05, 0x42, 0x1e, 0x4c, 0x02, 0xee, 0x90, 0x85, 0xd7, 0xaa, + 0xc3, 0xb9, 0x47, 0x19, 0xa5, 0x62, 0x33, 0x48, 0x40, 0x3b, 0x90, 0x3f, 0x51, 0xda, 0x9a, 0xaa, + 0xd8, 0xba, 0x29, 0x5b, 0xd8, 0xe6, 0xd1, 0xe5, 0xf2, 0xc0, 0xdc, 0xde, 0x77, 0xb8, 0xf6, 0xb1, + 0x7d, 0x60, 0xa8, 0x8a, 0x8d, 0x37, 0x92, 0x9f, 0x7e, 0xbe, 0x24, 0x48, 0xb3, 0x27, 0xbe, 0x1a, + 0xf4, 0x38, 0x14, 0x15, 0xc3, 0x90, 0x2d, 0x5b, 0xb1, 0xb1, 0x7c, 0x78, 0x6a, 0x63, 0x8b, 0x06, + 0x9c, 0x59, 0x29, 0xaf, 0x18, 0xc6, 0x3e, 0xa1, 0x6e, 0x10, 0x22, 0xba, 0x02, 0x05, 0x12, 0x9b, + 0x34, 0xa5, 0x2d, 0xb7, 0xb0, 0x76, 0xdc, 0xb2, 0x69, 0x68, 0x49, 0x48, 0x79, 0x4e, 0xad, 0x53, + 0x22, 0x5a, 0x81, 0x79, 0x87, 0xad, 0xa9, 0x9b, 0xd8, 0xe1, 0x25, 0xe1, 0x23, 0x2f, 0xcd, 0xf1, + 0xaa, 0xaa, 0x6e, 0x62, 0xc6, 0x2f, 0xaa, 0xae, 0xa5, 0xd0, 0x38, 0x86, 0x10, 0x24, 0x55, 0xc5, + 0x56, 0xe8, 0x0c, 0xcc, 0x4a, 0xf4, 0x9b, 0xd0, 0x0c, 0xc5, 0x6e, 0xf1, 0x71, 0xa5, 0xdf, 0xe8, + 0x2c, 0xa4, 0xb9, 0xea, 0x04, 0x6d, 0x06, 0x2f, 0xa1, 0x05, 0x48, 0x19, 0xa6, 0x7e, 0x82, 0xe9, + 0xb0, 0x64, 0x24, 0x56, 0x10, 0x3f, 0x88, 0xc3, 0xdc, 0x40, 0xc4, 0x23, 0x7a, 0x5b, 0x8a, 0xd5, + 0x72, 0xfe, 0x45, 0xbe, 0xd1, 0xf3, 0x44, 0xaf, 0xa2, 0x62, 0x93, 0x67, 0x09, 0xe5, 0xc1, 0x29, + 0xaa, 0xd3, 0x7a, 0x3a, 0x98, 0x31, 0x89, 0x73, 0xa3, 0x3b, 0x50, 0x6a, 0x2b, 0x96, 0x2d, 0xb3, + 0xa8, 0x21, 0xfb, 0x32, 0x86, 0x0b, 0x03, 0x33, 0xc3, 0x62, 0x0c, 0x59, 0x08, 0x5c, 0x49, 0x81, + 0x88, 0x7a, 0x54, 0x74, 0x00, 0x0b, 0x87, 0xa7, 0xef, 0x2b, 0x5d, 0x5b, 0xeb, 0x62, 0xd9, 0x9d, + 0x2d, 0xab, 0x9c, 0x5c, 0x4e, 0x0c, 0x4d, 0x41, 0xee, 0x6a, 0xd6, 0x21, 0x6e, 0x29, 0x27, 0x9a, + 0xee, 0x34, 0x6b, 0xde, 0x95, 0x77, 0xcd, 0xc0, 0x12, 0x25, 0x28, 0x04, 0xc3, 0x35, 0x2a, 0x40, + 0xdc, 0xee, 0xf3, 0xfe, 0xc7, 0xed, 0x3e, 0x7a, 0x06, 0x92, 0xa4, 0x8f, 0xb4, 0xef, 0x85, 0x21, + 0x3f, 0xe2, 0x72, 0x8d, 0x53, 0x03, 0x4b, 0x94, 0x53, 0x14, 0xdd, 0x55, 0xe4, 0x86, 0xf0, 0xb0, + 0x56, 0xf1, 0x09, 0x28, 0x86, 0xe2, 0xb3, 0x6f, 0xfa, 0x04, 0xff, 0xf4, 0x89, 0x45, 0xc8, 0x07, + 0x02, 0xb1, 0x78, 0x16, 0x16, 0x86, 0xc5, 0x55, 0xb1, 0xe5, 0xd2, 0x03, 0xf1, 0x11, 0x3d, 0x07, + 0x19, 0x37, 0xb0, 0xb2, 0x55, 0x7c, 0x7e, 0xa0, 0x17, 0x0e, 0xb3, 0xe4, 0xb2, 0x92, 0xe5, 0x4b, + 0x56, 0x01, 0x35, 0x87, 0x38, 0x6d, 0xf8, 0x8c, 0x62, 0x18, 0x75, 0xc5, 0x6a, 0x89, 0x6f, 0x43, + 0x39, 0x2a, 0x68, 0x86, 0xba, 0x91, 0x74, 0xad, 0xf0, 0x2c, 0xa4, 0x8f, 0x74, 0xb3, 0xa3, 0xd8, + 0x54, 0x59, 0x5e, 0xe2, 0x25, 0x62, 0x9d, 0x2c, 0x80, 0x26, 0x28, 0x99, 0x15, 0x44, 
0x19, 0xce, + 0x47, 0x06, 0x4e, 0x22, 0xa2, 0x75, 0x55, 0xcc, 0xc6, 0x33, 0x2f, 0xb1, 0x82, 0xa7, 0x88, 0x35, + 0x96, 0x15, 0xc8, 0x6f, 0x2d, 0xda, 0x57, 0xaa, 0x3f, 0x2b, 0xf1, 0x92, 0xf8, 0xab, 0x04, 0x9c, + 0x1d, 0x1e, 0x3e, 0xd1, 0x32, 0xcc, 0x76, 0x94, 0xbe, 0x6c, 0xf7, 0xf9, 0xda, 0x67, 0xd3, 0x01, + 0x1d, 0xa5, 0xdf, 0xe8, 0xb3, 0x85, 0x5f, 0x82, 0x84, 0xdd, 0xb7, 0xca, 0xf1, 0xe5, 0xc4, 0xb5, + 0x59, 0x89, 0x7c, 0xa2, 0x03, 0x98, 0x6b, 0xeb, 0x4d, 0xa5, 0x2d, 0xfb, 0x2c, 0x9e, 0x1b, 0xfb, + 0xa0, 0x1b, 0x62, 0x81, 0x10, 0xab, 0x03, 0x46, 0x5f, 0xa4, 0x3a, 0xb6, 0x5d, 0xcb, 0xff, 0x37, + 0x59, 0xbd, 0x6f, 0x8e, 0x52, 0x01, 0x4f, 0xe1, 0xf8, 0xfa, 0xf4, 0xd4, 0xbe, 0xfe, 0x19, 0x58, + 0xe8, 0xe2, 0xbe, 0xed, 0x6b, 0x23, 0x33, 0x9c, 0x19, 0x3a, 0x17, 0x88, 0xd4, 0x79, 0xff, 0x27, + 0x36, 0x84, 0x56, 0x61, 0x81, 0x65, 0x22, 0xd8, 0x24, 0x29, 0x09, 0x19, 0x6e, 0x2a, 0x91, 0xa1, + 0x12, 0x73, 0x4e, 0xdd, 0x9e, 0xa9, 0x37, 0xfa, 0xd4, 0xe8, 0x7e, 0xe2, 0x9f, 0xb1, 0x60, 0x1e, + 0xc2, 0xe7, 0x43, 0xf0, 0xe6, 0x63, 0xdf, 0xd5, 0xae, 0x06, 0xa6, 0x24, 0x3e, 0xa9, 0xff, 0x41, + 0x8e, 0xf8, 0x04, 0xb3, 0x91, 0x78, 0xb4, 0xd9, 0x70, 0x7c, 0x6e, 0xd2, 0xe7, 0x73, 0xbf, 0x93, + 0x33, 0xf4, 0xaa, 0x1b, 0x51, 0xbc, 0x54, 0x6f, 0x68, 0x44, 0xf1, 0x7a, 0x17, 0x0f, 0xb8, 0xba, + 0x9f, 0x0a, 0x50, 0x89, 0xce, 0xed, 0x86, 0xaa, 0x7a, 0x16, 0xce, 0x78, 0xb1, 0xdf, 0xdf, 0x4a, + 0xe6, 0x05, 0x90, 0x5b, 0xe9, 0x36, 0x33, 0x32, 0x4e, 0x5e, 0x81, 0x42, 0x28, 0xff, 0x64, 0x33, + 0x92, 0x3f, 0xf1, 0xb7, 0x42, 0xfc, 0x71, 0xc2, 0xf5, 0xb3, 0x81, 0x24, 0x71, 0x88, 0x15, 0xde, + 0x83, 0x79, 0x15, 0x37, 0x35, 0xf5, 0xeb, 0x1a, 0xe1, 0x1c, 0x97, 0xfe, 0xde, 0x06, 0x27, 0xb6, + 0xc1, 0xdf, 0xe5, 0x20, 0x23, 0x61, 0xcb, 0x20, 0x29, 0x22, 0xda, 0x80, 0x2c, 0xee, 0x37, 0xb1, + 0x61, 0x3b, 0x59, 0xf5, 0xf0, 0xdd, 0x09, 0xe3, 0xae, 0x39, 0x9c, 0x64, 0xaf, 0xed, 0x8a, 0xa1, + 0x1b, 0x1c, 0x56, 0x89, 0x46, 0x48, 0xb8, 0xb8, 0x1f, 0x57, 0x79, 0xde, 0xc1, 0x55, 0x12, 0x91, + 0x5b, 0x6b, 0x26, 0x15, 0x02, 0x56, 0x6e, 0x70, 0x60, 0x25, 0x39, 0xe6, 0x67, 0x01, 0x64, 0xa5, + 0x1a, 0x40, 0x56, 0x52, 0x63, 0xba, 0x19, 0x01, 0xad, 0x3c, 0xef, 0x40, 0x2b, 0xe9, 0x31, 0x2d, + 0x0e, 0x61, 0x2b, 0xb7, 0x83, 0xd8, 0xca, 0x4c, 0x44, 0xc8, 0x73, 0xa4, 0x47, 0x82, 0x2b, 0xaf, + 0xf8, 0xc0, 0x95, 0x4c, 0x24, 0xaa, 0xc1, 0x14, 0x0d, 0x41, 0x57, 0x5e, 0x0b, 0xa0, 0x2b, 0xd9, + 0x31, 0xe3, 0x30, 0x02, 0x5e, 0xd9, 0xf4, 0xc3, 0x2b, 0x10, 0x89, 0xd2, 0xf0, 0x79, 0x8f, 0xc2, + 0x57, 0x5e, 0x74, 0xf1, 0x95, 0x5c, 0x24, 0x50, 0xc4, 0xfb, 0x12, 0x06, 0x58, 0x76, 0x07, 0x00, + 0x16, 0x06, 0x88, 0x3c, 0x1e, 0xa9, 0x62, 0x0c, 0xc2, 0xb2, 0x3b, 0x80, 0xb0, 0xe4, 0xc7, 0x28, + 0x1c, 0x03, 0xb1, 0xfc, 0xdf, 0x70, 0x88, 0x25, 0x1a, 0x04, 0xe1, 0xcd, 0x9c, 0x0c, 0x63, 0x91, + 0x23, 0x30, 0x96, 0x62, 0x24, 0x1e, 0xc0, 0xd4, 0x4f, 0x0c, 0xb2, 0x1c, 0x0c, 0x01, 0x59, 0x18, + 0x1c, 0x72, 0x2d, 0x52, 0xf9, 0x04, 0x28, 0xcb, 0xc1, 0x10, 0x94, 0x65, 0x6e, 0xac, 0xda, 0xb1, + 0x30, 0xcb, 0xad, 0x20, 0xcc, 0x82, 0xc6, 0xac, 0xb1, 0x48, 0x9c, 0xe5, 0x30, 0x0a, 0x67, 0x61, + 0x58, 0xc8, 0x53, 0x91, 0x1a, 0xa7, 0x00, 0x5a, 0x76, 0x07, 0x80, 0x96, 0x85, 0x31, 0x96, 0x36, + 0x29, 0xd2, 0xf2, 0x04, 0xc9, 0x2e, 0x42, 0xae, 0x9a, 0x24, 0xfd, 0xd8, 0x34, 0x75, 0x93, 0x63, + 0x26, 0xac, 0x20, 0x5e, 0x23, 0x3b, 0x68, 0xcf, 0x2d, 0x8f, 0x40, 0x65, 0xe8, 0xe6, 0xca, 0xe7, + 0x8a, 0xc5, 0x3f, 0x0a, 0x9e, 0x2c, 0xdd, 0x78, 0xfa, 0x77, 0xdf, 0x59, 0xbe, 0xfb, 0xf6, 0x61, + 0x35, 0xf1, 0x20, 0x56, 0xb3, 0x04, 0x39, 0xb2, 0x69, 0x0a, 0xc1, 0x30, 0x8a, 0xe1, 0xc2, 0x30, + 0xd7, 0x61, 
0x8e, 0xa6, 0x02, 0x0c, 0xd1, 0xe1, 0xf1, 0x35, 0x49, 0xe3, 0x6b, 0x91, 0x54, 0xb0, + 0x51, 0x60, 0x81, 0xf6, 0x69, 0x98, 0xf7, 0xf1, 0xba, 0x9b, 0x31, 0x86, 0x45, 0x94, 0x5c, 0xee, + 0x75, 0xb6, 0x2b, 0xbb, 0x9d, 0xcc, 0xa8, 0x25, 0x2c, 0x5d, 0xe2, 0x99, 0x86, 0x89, 0x59, 0x40, + 0x90, 0x09, 0x0b, 0x56, 0xf9, 0xaf, 0xc4, 0x3f, 0xc7, 0xbd, 0x61, 0xf4, 0x40, 0x9e, 0x61, 0x78, + 0x8c, 0xf0, 0xb5, 0xf1, 0x18, 0xff, 0xce, 0x31, 0x11, 0xd8, 0x39, 0xa2, 0x37, 0x61, 0x21, 0x00, + 0xd5, 0xc8, 0x3d, 0x0a, 0xc3, 0x94, 0xd5, 0xe9, 0x10, 0x9b, 0x98, 0x2f, 0xb1, 0x73, 0x6b, 0xd0, + 0x5b, 0x70, 0x81, 0xa6, 0x17, 0xa1, 0xce, 0x3b, 0xff, 0xc0, 0x83, 0x6e, 0xd8, 0xe9, 0x90, 0x89, + 0xe9, 0x38, 0x6c, 0xeb, 0xcd, 0x87, 0xd2, 0x39, 0xa2, 0x23, 0x40, 0xe2, 0xea, 0x23, 0x70, 0x9c, + 0xa3, 0x28, 0x1c, 0xe7, 0xef, 0x82, 0x67, 0x5c, 0x2e, 0x92, 0xd3, 0xd4, 0x55, 0xcc, 0xf7, 0xad, + 0xf4, 0x9b, 0x64, 0x8d, 0x6d, 0xfd, 0x98, 0xef, 0x4e, 0xc9, 0x27, 0xe1, 0x72, 0x93, 0x80, 0x2c, + 0x8f, 0xf1, 0xee, 0x96, 0x97, 0xa5, 0x62, 0x7c, 0xcb, 0x5b, 0x82, 0xc4, 0x43, 0xcc, 0x42, 0xf6, + 0xac, 0x44, 0x3e, 0x09, 0x1f, 0x5d, 0x2d, 0x3c, 0xa5, 0x62, 0x05, 0x74, 0x13, 0xb2, 0xf4, 0x1c, + 0x4b, 0xd6, 0x0d, 0x8b, 0x47, 0xd6, 0x40, 0xf6, 0xc9, 0x8e, 0xab, 0x56, 0xf6, 0x08, 0xcf, 0xae, + 0x61, 0x49, 0x19, 0x83, 0x7f, 0xf9, 0x72, 0xc0, 0x6c, 0x20, 0x07, 0xbc, 0x08, 0x59, 0xd2, 0x7a, + 0xcb, 0x50, 0x9a, 0x98, 0x86, 0xc8, 0xac, 0xe4, 0x11, 0xc4, 0x07, 0x80, 0x06, 0x03, 0x3e, 0xaa, + 0x43, 0x1a, 0x9f, 0xe0, 0xae, 0xcd, 0x52, 0xe4, 0xdc, 0xda, 0xd9, 0xc1, 0x8d, 0x31, 0xa9, 0xde, + 0x28, 0x93, 0x09, 0xfe, 0xcb, 0xe7, 0x4b, 0x25, 0xc6, 0xfd, 0x94, 0xde, 0xd1, 0x6c, 0xdc, 0x31, + 0xec, 0x53, 0x89, 0xcb, 0x8b, 0x7f, 0x88, 0x43, 0x31, 0x94, 0x08, 0x0c, 0x1d, 0x5b, 0x67, 0xed, + 0xc6, 0x7d, 0xc8, 0xd9, 0x64, 0xe3, 0x7d, 0x09, 0xe0, 0x58, 0xb1, 0xe4, 0xf7, 0x94, 0xae, 0x8d, + 0x55, 0x3e, 0xe8, 0xd9, 0x63, 0xc5, 0x7a, 0x9d, 0x12, 0x88, 0x85, 0x93, 0xea, 0x9e, 0x85, 0x55, + 0x8e, 0xf9, 0xcd, 0x1c, 0x2b, 0xd6, 0x81, 0x85, 0x55, 0x5f, 0x2f, 0x67, 0x1e, 0xad, 0x97, 0xc1, + 0x31, 0xce, 0x84, 0xc6, 0xd8, 0x07, 0x6c, 0x64, 0xfd, 0xc0, 0x06, 0xaa, 0x40, 0xc6, 0x30, 0x35, + 0xdd, 0xd4, 0xec, 0x53, 0x3a, 0x31, 0x09, 0xc9, 0x2d, 0xa3, 0xcb, 0x90, 0xef, 0xe0, 0x8e, 0xa1, + 0xeb, 0x6d, 0x99, 0x79, 0xcd, 0x1c, 0x15, 0x9d, 0xe5, 0xc4, 0x1a, 0x75, 0x9e, 0x1f, 0xfa, 0x3c, + 0x84, 0x07, 0x60, 0x7d, 0xb3, 0xc3, 0xbb, 0x38, 0x64, 0x78, 0x7d, 0x14, 0xd2, 0x89, 0xd0, 0xf8, + 0xba, 0xe5, 0x6f, 0x6b, 0x80, 0xc5, 0xbf, 0xc6, 0xa1, 0x14, 0x4e, 0xf2, 0xd0, 0x1b, 0x70, 0x2e, + 0xe4, 0x28, 0xb9, 0x77, 0xb1, 0xf8, 0x06, 0x61, 0x02, 0x7f, 0x79, 0x26, 0xe8, 0x2f, 0x99, 0x77, + 0xb1, 0x7c, 0xfd, 0x4a, 0x3c, 0x62, 0xbf, 0xc6, 0xf8, 0x41, 0xf5, 0x11, 0xfd, 0x60, 0x94, 0x0f, + 0xc7, 0xd3, 0xa2, 0xee, 0x43, 0x7c, 0xb8, 0xb8, 0x05, 0x85, 0x60, 0x5a, 0x3c, 0xd4, 0xca, 0x2e, + 0x43, 0xde, 0xc4, 0x36, 0xe9, 0x58, 0x60, 0x27, 0x3f, 0xcb, 0x88, 0xdc, 0xff, 0xee, 0xc1, 0x99, + 0xa1, 0xe9, 0x31, 0x7a, 0x01, 0xb2, 0x5e, 0x66, 0xcd, 0x7c, 0xd1, 0x08, 0x44, 0xd4, 0xe3, 0x15, + 0x7f, 0x23, 0x78, 0x2a, 0x83, 0x18, 0x6b, 0x0d, 0xd2, 0x26, 0xb6, 0x7a, 0x6d, 0x86, 0x7a, 0x16, + 0xd6, 0x9e, 0x9e, 0x2c, 0xb1, 0x26, 0xd4, 0x5e, 0xdb, 0x96, 0xb8, 0xb0, 0xf8, 0x00, 0xd2, 0x8c, + 0x82, 0x72, 0x30, 0x73, 0xb0, 0x73, 0x67, 0x67, 0xf7, 0xf5, 0x9d, 0x52, 0x0c, 0x01, 0xa4, 0xd7, + 0xab, 0xd5, 0xda, 0x5e, 0xa3, 0x24, 0xa0, 0x2c, 0xa4, 0xd6, 0x37, 0x76, 0xa5, 0x46, 0x29, 0x4e, + 0xc8, 0x52, 0xed, 0x76, 0xad, 0xda, 0x28, 0x25, 0xd0, 0x1c, 0xe4, 0xd9, 0xb7, 0x7c, 0x6b, 0x57, + 0xba, 0xbb, 0xde, 0x28, 0x25, 0x7d, 
0xa4, 0xfd, 0xda, 0xce, 0x66, 0x4d, 0x2a, 0xa5, 0xc4, 0x67, + 0xe1, 0x7c, 0x64, 0x2a, 0xee, 0x01, 0xa8, 0x82, 0x0f, 0x40, 0x15, 0x7f, 0x14, 0x87, 0x4a, 0x74, + 0x7e, 0x8d, 0x6e, 0x87, 0x3a, 0xbe, 0x36, 0x45, 0x72, 0x1e, 0xea, 0x3d, 0xba, 0x02, 0x05, 0x13, + 0x1f, 0x61, 0xbb, 0xd9, 0x62, 0xf9, 0x3e, 0x43, 0x58, 0xf3, 0x52, 0x9e, 0x53, 0xa9, 0x90, 0xc5, + 0xd8, 0xde, 0xc1, 0x4d, 0x5b, 0x66, 0x2e, 0x8f, 0x2d, 0x98, 0x2c, 0x61, 0x23, 0xd4, 0x7d, 0x46, + 0x14, 0xdf, 0x9e, 0x6a, 0x2c, 0xb3, 0x90, 0x92, 0x6a, 0x0d, 0xe9, 0x8d, 0x52, 0x02, 0x21, 0x28, + 0xd0, 0x4f, 0x79, 0x7f, 0x67, 0x7d, 0x6f, 0xbf, 0xbe, 0x4b, 0xc6, 0x72, 0x1e, 0x8a, 0xce, 0x58, + 0x3a, 0xc4, 0x94, 0xf8, 0xdb, 0x38, 0x9c, 0x8b, 0xd8, 0x1d, 0xa0, 0x9b, 0x00, 0x76, 0x5f, 0x36, + 0x71, 0x53, 0x37, 0xd5, 0x68, 0x23, 0x6b, 0xf4, 0x25, 0xca, 0x21, 0x65, 0x6d, 0xfe, 0x65, 0x8d, + 0xc0, 0xdd, 0xd1, 0xcb, 0x5c, 0x29, 0xe9, 0x95, 0xe3, 0x26, 0x2e, 0x0d, 0x81, 0x97, 0x71, 0x93, + 0x28, 0xa6, 0x63, 0x4b, 0x15, 0x53, 0x7e, 0x74, 0x17, 0xe6, 0xbc, 0x75, 0xeb, 0x78, 0x2d, 0x86, + 0x24, 0x2f, 0x47, 0x2f, 0x5a, 0xb6, 0x2e, 0xa5, 0xd2, 0x49, 0x90, 0x60, 0x8d, 0x72, 0x85, 0xa9, + 0x47, 0x73, 0x85, 0xe2, 0xcf, 0x12, 0xfe, 0x81, 0x0d, 0x6e, 0x86, 0x76, 0x21, 0x6d, 0xd9, 0x8a, + 0xdd, 0xb3, 0xb8, 0xc1, 0xbd, 0x30, 0xe9, 0xce, 0x6a, 0xc5, 0xf9, 0xd8, 0xa7, 0xe2, 0x12, 0x57, + 0xf3, 0xfd, 0x78, 0x5b, 0xe2, 0x73, 0x50, 0x08, 0x0e, 0x4e, 0xf4, 0x92, 0xf1, 0x7c, 0x4e, 0x5c, + 0x7c, 0xc9, 0x4b, 0xf3, 0x7c, 0x80, 0xef, 0x20, 0x8c, 0x2a, 0x0c, 0x83, 0x51, 0x7f, 0x2e, 0xc0, + 0x85, 0x11, 0xfb, 0x4b, 0x74, 0x2f, 0x34, 0xcf, 0x2f, 0x4e, 0xb3, 0x3b, 0x5d, 0x61, 0xb4, 0xe0, + 0x4c, 0x8b, 0x37, 0x60, 0xd6, 0x4f, 0x9f, 0xac, 0x93, 0x7f, 0x4b, 0x78, 0x3e, 0x3f, 0x88, 0xf7, + 0x7e, 0x63, 0xf9, 0x6c, 0xc8, 0xce, 0xe2, 0x53, 0xda, 0xd9, 0x08, 0xc3, 0x48, 0x3e, 0x62, 0x4e, + 0xe2, 0x5f, 0x1b, 0xa9, 0xe0, 0xda, 0x18, 0x08, 0xc1, 0xe9, 0xc1, 0x10, 0xfc, 0x9d, 0xce, 0x44, + 0x7e, 0x20, 0x00, 0xf8, 0x0e, 0xa0, 0x17, 0x20, 0x65, 0xea, 0xbd, 0xae, 0x4a, 0xcd, 0x31, 0x25, + 0xb1, 0x02, 0xd9, 0xef, 0xbf, 0xdb, 0xd3, 0xcd, 0x5e, 0xc7, 0xbf, 0xdb, 0x05, 0x46, 0xa2, 0xc3, + 0x74, 0x15, 0x8a, 0x6c, 0xfb, 0x6e, 0x69, 0xc7, 0x5d, 0xc5, 0xee, 0x99, 0x98, 0x63, 0xec, 0x05, + 0x4a, 0xde, 0x77, 0xa8, 0x84, 0x91, 0x5d, 0x38, 0xf0, 0x18, 0xd9, 0x88, 0x17, 0x28, 0xd9, 0x65, + 0x14, 0x35, 0x40, 0x83, 0x07, 0x88, 0x11, 0xcd, 0x7b, 0x05, 0x52, 0x64, 0xd5, 0x39, 0x36, 0xf5, + 0x58, 0xe4, 0x51, 0x24, 0x59, 0x3d, 0xbe, 0x83, 0x07, 0x26, 0x25, 0xbe, 0x0f, 0x29, 0x6a, 0xc2, + 0x24, 0x07, 0xa3, 0x87, 0xe0, 0x1c, 0x04, 0x21, 0xdf, 0xe8, 0x2d, 0x00, 0xc5, 0xb6, 0x4d, 0xed, + 0xb0, 0xe7, 0xfd, 0x60, 0x69, 0xf8, 0x12, 0x58, 0x77, 0xf8, 0x36, 0x2e, 0xf2, 0xb5, 0xb0, 0xe0, + 0x89, 0xfa, 0xd6, 0x83, 0x4f, 0xa1, 0xb8, 0x03, 0x85, 0xa0, 0xac, 0xb3, 0xdb, 0x65, 0x6d, 0x08, + 0xee, 0x76, 0x19, 0x0a, 0xc3, 0x77, 0xbb, 0xee, 0x5e, 0x39, 0xc1, 0xee, 0x3b, 0xd0, 0x82, 0xf8, + 0x0f, 0x01, 0x66, 0xfd, 0x2b, 0xe8, 0x3f, 0x6d, 0xc3, 0x28, 0x7e, 0x28, 0x40, 0xc6, 0xed, 0x7c, + 0xc4, 0x65, 0x03, 0x6f, 0xec, 0xe2, 0xfe, 0xa3, 0x75, 0x76, 0x7b, 0x21, 0xe1, 0xde, 0x89, 0x78, + 0xc9, 0x4d, 0xfa, 0xa2, 0x0e, 0x2a, 0xfc, 0x23, 0xed, 0x5c, 0x0b, 0xe1, 0x39, 0xee, 0x0f, 0x79, + 0x3b, 0x48, 0xb6, 0x83, 0xfe, 0x1b, 0xd2, 0x4a, 0xd3, 0x3d, 0x9e, 0x29, 0x0c, 0xc1, 0xeb, 0x1d, + 0xd6, 0x95, 0x46, 0x7f, 0x9d, 0x72, 0x4a, 0x5c, 0x82, 0xb7, 0x2a, 0xee, 0xde, 0xa9, 0x78, 0x95, + 0xe8, 0x65, 0x3c, 0x41, 0xd7, 0x5e, 0x00, 0x38, 0xd8, 0xb9, 0xbb, 0xbb, 0xb9, 0x75, 0x6b, 0xab, + 0xb6, 0xc9, 0xd3, 0xbe, 0xcd, 0xcd, 0xda, 0x66, 0x29, 0x4e, 
0xf8, 0xa4, 0xda, 0xdd, 0xdd, 0xfb, + 0xb5, 0xcd, 0x52, 0x42, 0x5c, 0x87, 0xac, 0xeb, 0x21, 0xe8, 0xad, 0x19, 0xfd, 0x3d, 0x7e, 0x6f, + 0x20, 0x21, 0xb1, 0x02, 0x5a, 0x84, 0x9c, 0xff, 0x18, 0x8a, 0x2d, 0xe5, 0xac, 0xe1, 0x1e, 0x3f, + 0xfd, 0x52, 0x80, 0x62, 0x28, 0x94, 0xa3, 0x97, 0x60, 0xc6, 0xe8, 0x1d, 0xca, 0x8e, 0xed, 0x86, + 0x4e, 0xe9, 0x1c, 0xec, 0xa5, 0x77, 0xd8, 0xd6, 0x9a, 0x77, 0xf0, 0x29, 0xf7, 0x48, 0x69, 0xa3, + 0x77, 0x78, 0x87, 0x99, 0x38, 0x6b, 0x46, 0x7c, 0x44, 0x33, 0x12, 0xa1, 0x66, 0xa0, 0xab, 0x30, + 0xdb, 0xd5, 0x55, 0x2c, 0x2b, 0xaa, 0x6a, 0x62, 0x8b, 0xc5, 0x81, 0x2c, 0xd7, 0x9c, 0x23, 0x35, + 0xeb, 0xac, 0x42, 0xfc, 0x42, 0x00, 0x34, 0xe8, 0x15, 0xd1, 0xfe, 0xb0, 0xd4, 0x45, 0x98, 0x2c, + 0x75, 0xe1, 0xd3, 0x3d, 0x98, 0xc0, 0x34, 0x60, 0xc1, 0x6e, 0x99, 0xd8, 0x6a, 0xe9, 0x6d, 0x55, + 0x36, 0x68, 0x7f, 0xe9, 0xa0, 0xc4, 0x27, 0x1c, 0x94, 0x98, 0x84, 0x5c, 0x79, 0xb7, 0x66, 0xac, + 0x07, 0x16, 0x0d, 0x28, 0x37, 0x06, 0xc4, 0x78, 0x3f, 0xa3, 0x9a, 0x24, 0x3c, 0x4a, 0x93, 0xc4, + 0x1b, 0x50, 0xba, 0xe7, 0xfe, 0x9f, 0xff, 0x29, 0xd4, 0x4c, 0x61, 0xa0, 0x99, 0x27, 0x90, 0x71, + 0x9c, 0x30, 0xfa, 0x1f, 0xc8, 0xba, 0xa3, 0xe7, 0x5e, 0xbc, 0x8b, 0x1c, 0x76, 0xde, 0x12, 0x4f, + 0x04, 0x5d, 0x87, 0x39, 0x12, 0x45, 0x9c, 0x63, 0x67, 0x86, 0xc3, 0xc7, 0xa9, 0x37, 0x2c, 0xb2, + 0x8a, 0x6d, 0x07, 0x3c, 0x26, 0x79, 0x58, 0x29, 0x1c, 0x05, 0xbe, 0xcd, 0x06, 0x0c, 0xc9, 0x17, + 0x13, 0xc3, 0xf2, 0xc5, 0x0f, 0xe2, 0x90, 0xf3, 0x1d, 0x66, 0xa3, 0xff, 0xf2, 0x85, 0xa4, 0xc2, + 0x10, 0xab, 0xf4, 0xf1, 0x7a, 0x77, 0xb3, 0x82, 0x1d, 0x8b, 0x4f, 0xdf, 0xb1, 0xa8, 0xbb, 0x03, + 0xce, 0x99, 0x78, 0x72, 0xea, 0x33, 0xf1, 0xa7, 0x00, 0xd9, 0xba, 0xad, 0xb4, 0xe5, 0x13, 0xdd, + 0xd6, 0xba, 0xc7, 0x32, 0x5b, 0xed, 0x2c, 0x80, 0x94, 0x68, 0xcd, 0x7d, 0x5a, 0xb1, 0x47, 0xe8, + 0xe2, 0xaf, 0x05, 0xc8, 0xb8, 0xa0, 0xc3, 0xb4, 0x57, 0xad, 0xce, 0x42, 0x9a, 0xef, 0xab, 0xd9, + 0x5d, 0x2b, 0x5e, 0x1a, 0x7a, 0xf8, 0x5f, 0x81, 0x4c, 0x07, 0xdb, 0x0a, 0x8d, 0x86, 0x2c, 0x0f, + 0x71, 0xcb, 0xe8, 0x05, 0x28, 0x47, 0x9d, 0x2f, 0xd0, 0x94, 0x2e, 0x4f, 0xd2, 0x49, 0x5f, 0xb6, + 0x86, 0x55, 0x96, 0x0e, 0x5e, 0x7f, 0x11, 0x72, 0xbe, 0xeb, 0x72, 0x24, 0xb2, 0xee, 0xd4, 0x5e, + 0x2f, 0xc5, 0x2a, 0x33, 0x1f, 0x7d, 0xb2, 0x9c, 0xd8, 0xc1, 0xef, 0xa1, 0x32, 0x71, 0xc7, 0xd5, + 0x7a, 0xad, 0x7a, 0xa7, 0x24, 0x54, 0x72, 0x1f, 0x7d, 0xb2, 0x3c, 0x23, 0x61, 0x7a, 0xbc, 0x7b, + 0xfd, 0x0e, 0x14, 0x43, 0x33, 0x1a, 0xf4, 0xf1, 0x08, 0x0a, 0x9b, 0x07, 0x7b, 0xdb, 0x5b, 0xd5, + 0xf5, 0x46, 0x4d, 0xbe, 0xbf, 0xdb, 0xa8, 0x95, 0x04, 0x74, 0x0e, 0xe6, 0xb7, 0xb7, 0x5e, 0xab, + 0x37, 0xe4, 0xea, 0xf6, 0x56, 0x6d, 0xa7, 0x21, 0xaf, 0x37, 0x1a, 0xeb, 0xd5, 0x3b, 0xa5, 0xf8, + 0xda, 0x2f, 0x72, 0x50, 0x5c, 0xdf, 0xa8, 0x6e, 0xad, 0x1b, 0x46, 0x5b, 0x6b, 0x2a, 0x34, 0x62, + 0x54, 0x21, 0x49, 0xcf, 0x8a, 0x46, 0x3e, 0x9c, 0xa8, 0x8c, 0x3e, 0xff, 0x47, 0xb7, 0x20, 0x45, + 0x8f, 0x91, 0xd0, 0xe8, 0x97, 0x14, 0x95, 0x31, 0x17, 0x02, 0x48, 0x63, 0xe8, 0x3a, 0x1c, 0xf9, + 0xb4, 0xa2, 0x32, 0xfa, 0x7e, 0x00, 0xda, 0x86, 0x19, 0x07, 0x1c, 0x1f, 0xf7, 0x48, 0xa1, 0x32, + 0xf6, 0xa0, 0x9d, 0x74, 0x8d, 0x1d, 0x62, 0x8c, 0x7e, 0x75, 0x51, 0x19, 0x73, 0x73, 0x00, 0x6d, + 0x41, 0x9a, 0x03, 0x7b, 0x63, 0x1e, 0x1c, 0x54, 0xc6, 0x1d, 0x98, 0x23, 0x09, 0xb2, 0xde, 0x11, + 0xd6, 0xf8, 0xb7, 0x24, 0x95, 0x09, 0x2e, 0x45, 0xa0, 0x07, 0x90, 0x0f, 0x82, 0x85, 0x93, 0x3d, + 0x6a, 0xa8, 0x4c, 0x78, 0x34, 0x4f, 0xf4, 0x07, 0x91, 0xc3, 0xc9, 0x1e, 0x39, 0x54, 0x26, 0x3c, + 0xa9, 0x47, 0xef, 0xc0, 0xdc, 0x20, 0xb2, 0x37, 0xf9, 0x9b, 0x87, 0xca, 0x14, 0x67, 
0xf7, 0xa8, + 0x03, 0x68, 0x08, 0x22, 0x38, 0xc5, 0x13, 0x88, 0xca, 0x34, 0x47, 0xf9, 0x48, 0x85, 0x62, 0x18, + 0x65, 0x9b, 0xf4, 0x49, 0x44, 0x65, 0xe2, 0x63, 0x7d, 0xf6, 0x97, 0x20, 0xe4, 0x34, 0xe9, 0x13, + 0x89, 0xca, 0xc4, 0xa7, 0xfc, 0xe8, 0x00, 0xc0, 0x07, 0x99, 0x4c, 0xf0, 0x64, 0xa2, 0x32, 0xc9, + 0x79, 0x3f, 0x32, 0x60, 0x7e, 0x18, 0x96, 0x32, 0xcd, 0x0b, 0x8a, 0xca, 0x54, 0xd7, 0x00, 0x88, + 0x3d, 0x07, 0x51, 0x91, 0xc9, 0x5e, 0x54, 0x54, 0x26, 0xbc, 0x0f, 0xb0, 0x51, 0xfb, 0xf4, 0xcb, + 0x45, 0xe1, 0xb3, 0x2f, 0x17, 0x85, 0x2f, 0xbe, 0x5c, 0x14, 0x3e, 0xfe, 0x6a, 0x31, 0xf6, 0xd9, + 0x57, 0x8b, 0xb1, 0xdf, 0x7f, 0xb5, 0x18, 0xfb, 0xdf, 0x27, 0x8f, 0x35, 0xbb, 0xd5, 0x3b, 0x5c, + 0x69, 0xea, 0x9d, 0x55, 0xff, 0xe3, 0xba, 0x61, 0x0f, 0xfe, 0x0e, 0xd3, 0x34, 0x12, 0xdf, 0xf8, + 0x57, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x2a, 0x96, 0xd0, 0x10, 0x38, 0x00, 0x00, } -func (m *ResponseException) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Error) > 0 { - i -= len(m.Error) - copy(dAtA[i:], m.Error) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Error))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// ABCIApplicationClient is the client API for ABCIApplication service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ABCIApplicationClient interface { + Echo(ctx context.Context, in *RequestEcho, opts ...grpc.CallOption) (*ResponseEcho, error) + Flush(ctx context.Context, in *RequestFlush, opts ...grpc.CallOption) (*ResponseFlush, error) + Info(ctx context.Context, in *RequestInfo, opts ...grpc.CallOption) (*ResponseInfo, error) + CheckTx(ctx context.Context, in *RequestCheckTx, opts ...grpc.CallOption) (*ResponseCheckTx, error) + Query(ctx context.Context, in *RequestQuery, opts ...grpc.CallOption) (*ResponseQuery, error) + Commit(ctx context.Context, in *RequestCommit, opts ...grpc.CallOption) (*ResponseCommit, error) + InitChain(ctx context.Context, in *RequestInitChain, opts ...grpc.CallOption) (*ResponseInitChain, error) + ListSnapshots(ctx context.Context, in *RequestListSnapshots, opts ...grpc.CallOption) (*ResponseListSnapshots, error) + OfferSnapshot(ctx context.Context, in *RequestOfferSnapshot, opts ...grpc.CallOption) (*ResponseOfferSnapshot, error) + LoadSnapshotChunk(ctx context.Context, in *RequestLoadSnapshotChunk, opts ...grpc.CallOption) (*ResponseLoadSnapshotChunk, error) + ApplySnapshotChunk(ctx context.Context, in *RequestApplySnapshotChunk, opts ...grpc.CallOption) (*ResponseApplySnapshotChunk, error) + PrepareProposal(ctx context.Context, in *RequestPrepareProposal, opts ...grpc.CallOption) (*ResponsePrepareProposal, error) + ProcessProposal(ctx context.Context, in *RequestProcessProposal, opts ...grpc.CallOption) (*ResponseProcessProposal, error) + ExtendVote(ctx context.Context, in *RequestExtendVote, opts ...grpc.CallOption) (*ResponseExtendVote, error) + VerifyVoteExtension(ctx context.Context, in *RequestVerifyVoteExtension, opts ...grpc.CallOption) (*ResponseVerifyVoteExtension, error) + FinalizeBlock(ctx context.Context, in *RequestFinalizeBlock, opts ...grpc.CallOption) 
(*ResponseFinalizeBlock, error) } -func (m *ResponseEcho) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) +type aBCIApplicationClient struct { + cc *grpc.ClientConn +} + +func NewABCIApplicationClient(cc *grpc.ClientConn) ABCIApplicationClient { + return &aBCIApplicationClient{cc} +} + +func (c *aBCIApplicationClient) Echo(ctx context.Context, in *RequestEcho, opts ...grpc.CallOption) (*ResponseEcho, error) { + out := new(ResponseEcho) + err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/Echo", in, out, opts...) if err != nil { return nil, err } - return dAtA[:n], nil + return out, nil } -func (m *ResponseEcho) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (c *aBCIApplicationClient) Flush(ctx context.Context, in *RequestFlush, opts ...grpc.CallOption) (*ResponseFlush, error) { + out := new(ResponseFlush) + err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/Flush", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil } -func (m *ResponseEcho) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Message) > 0 { - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0xa +func (c *aBCIApplicationClient) Info(ctx context.Context, in *RequestInfo, opts ...grpc.CallOption) (*ResponseInfo, error) { + out := new(ResponseInfo) + err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/Info", in, out, opts...) + if err != nil { + return nil, err } - return len(dAtA) - i, nil + return out, nil } -func (m *ResponseFlush) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) +func (c *aBCIApplicationClient) CheckTx(ctx context.Context, in *RequestCheckTx, opts ...grpc.CallOption) (*ResponseCheckTx, error) { + out := new(ResponseCheckTx) + err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/CheckTx", in, out, opts...) if err != nil { return nil, err } - return dAtA[:n], nil + return out, nil } -func (m *ResponseFlush) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (c *aBCIApplicationClient) Query(ctx context.Context, in *RequestQuery, opts ...grpc.CallOption) (*ResponseQuery, error) { + out := new(ResponseQuery) + err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/Query", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil } -func (m *ResponseFlush) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil +func (c *aBCIApplicationClient) Commit(ctx context.Context, in *RequestCommit, opts ...grpc.CallOption) (*ResponseCommit, error) { + out := new(ResponseCommit) + err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/Commit", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil } -func (m *ResponseInfo) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) +func (c *aBCIApplicationClient) InitChain(ctx context.Context, in *RequestInitChain, opts ...grpc.CallOption) (*ResponseInitChain, error) { + out := new(ResponseInitChain) + err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/InitChain", in, out, opts...) 
if err != nil { return nil, err } - return dAtA[:n], nil + return out, nil } -func (m *ResponseInfo) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (c *aBCIApplicationClient) ListSnapshots(ctx context.Context, in *RequestListSnapshots, opts ...grpc.CallOption) (*ResponseListSnapshots, error) { + out := new(ResponseListSnapshots) + err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/ListSnapshots", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil } -func (m *ResponseInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.LastBlockAppHash) > 0 { - i -= len(m.LastBlockAppHash) - copy(dAtA[i:], m.LastBlockAppHash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.LastBlockAppHash))) - i-- - dAtA[i] = 0x2a - } - if m.LastBlockHeight != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.LastBlockHeight)) - i-- - dAtA[i] = 0x20 - } - if m.AppVersion != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.AppVersion)) - i-- - dAtA[i] = 0x18 +func (c *aBCIApplicationClient) OfferSnapshot(ctx context.Context, in *RequestOfferSnapshot, opts ...grpc.CallOption) (*ResponseOfferSnapshot, error) { + out := new(ResponseOfferSnapshot) + err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/OfferSnapshot", in, out, opts...) + if err != nil { + return nil, err } - if len(m.Version) > 0 { - i -= len(m.Version) - copy(dAtA[i:], m.Version) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Version))) - i-- - dAtA[i] = 0x12 + return out, nil +} + +func (c *aBCIApplicationClient) LoadSnapshotChunk(ctx context.Context, in *RequestLoadSnapshotChunk, opts ...grpc.CallOption) (*ResponseLoadSnapshotChunk, error) { + out := new(ResponseLoadSnapshotChunk) + err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/LoadSnapshotChunk", in, out, opts...) + if err != nil { + return nil, err } - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0xa + return out, nil +} + +func (c *aBCIApplicationClient) ApplySnapshotChunk(ctx context.Context, in *RequestApplySnapshotChunk, opts ...grpc.CallOption) (*ResponseApplySnapshotChunk, error) { + out := new(ResponseApplySnapshotChunk) + err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/ApplySnapshotChunk", in, out, opts...) + if err != nil { + return nil, err } - return len(dAtA) - i, nil + return out, nil } -func (m *ResponseInitChain) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) +func (c *aBCIApplicationClient) PrepareProposal(ctx context.Context, in *RequestPrepareProposal, opts ...grpc.CallOption) (*ResponsePrepareProposal, error) { + out := new(ResponsePrepareProposal) + err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/PrepareProposal", in, out, opts...) if err != nil { return nil, err } - return dAtA[:n], nil + return out, nil } -func (m *ResponseInitChain) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (c *aBCIApplicationClient) ProcessProposal(ctx context.Context, in *RequestProcessProposal, opts ...grpc.CallOption) (*ResponseProcessProposal, error) { + out := new(ResponseProcessProposal) + err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/ProcessProposal", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil } -func (m *ResponseInitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.InitialCoreHeight != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.InitialCoreHeight)) - i-- - dAtA[i] = 0x6 - i-- - dAtA[i] = 0xb0 - } - if m.NextCoreChainLockUpdate != nil { - { - size, err := m.NextCoreChainLockUpdate.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x6 - i-- - dAtA[i] = 0xaa - } - { - size, err := m.ValidatorSetUpdate.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x6 - i-- - dAtA[i] = 0xa2 - if len(m.AppHash) > 0 { - i -= len(m.AppHash) - copy(dAtA[i:], m.AppHash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.AppHash))) - i-- - dAtA[i] = 0x1a +func (c *aBCIApplicationClient) ExtendVote(ctx context.Context, in *RequestExtendVote, opts ...grpc.CallOption) (*ResponseExtendVote, error) { + out := new(ResponseExtendVote) + err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/ExtendVote", in, out, opts...) + if err != nil { + return nil, err } - if m.ConsensusParams != nil { - { - size, err := m.ConsensusParams.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa + return out, nil +} + +func (c *aBCIApplicationClient) VerifyVoteExtension(ctx context.Context, in *RequestVerifyVoteExtension, opts ...grpc.CallOption) (*ResponseVerifyVoteExtension, error) { + out := new(ResponseVerifyVoteExtension) + err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/VerifyVoteExtension", in, out, opts...) + if err != nil { + return nil, err } - return len(dAtA) - i, nil + return out, nil } -func (m *ResponseQuery) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) +func (c *aBCIApplicationClient) FinalizeBlock(ctx context.Context, in *RequestFinalizeBlock, opts ...grpc.CallOption) (*ResponseFinalizeBlock, error) { + out := new(ResponseFinalizeBlock) + err := c.cc.Invoke(ctx, "/tendermint.abci.ABCIApplication/FinalizeBlock", in, out, opts...) if err != nil { return nil, err } - return dAtA[:n], nil + return out, nil } -func (m *ResponseQuery) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// ABCIApplicationServer is the server API for ABCIApplication service. 
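
The generated ABCIApplicationClient above wraps every ABCI method in a unary gRPC call against the tendermint.abci.ABCIApplication service. A minimal client-side sketch, assuming the generated package is importable as github.com/tendermint/tendermint/abci/types and that an application server is already listening on localhost:26658 (both the import path and the address are illustrative assumptions, not taken from this diff):

package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/grpc"

	types "github.com/tendermint/tendermint/abci/types" // assumed import path for this generated package
)

func main() {
	// Dial the application's gRPC endpoint; plaintext is typical for a local ABCI socket.
	conn, err := grpc.Dial("localhost:26658", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := types.NewABCIApplicationClient(conn)

	// Echo is the simplest round-trip: the application echoes the message back.
	res, err := client.Echo(context.Background(), &types.RequestEcho{Message: "ping"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(res.Message) // prints "ping"
}

Each of the other client methods (Info, CheckTx, Query, FinalizeBlock, and so on) follows the same Invoke-based pattern, so context deadlines and error handling compose identically across all sixteen calls.
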
+type ABCIApplicationServer interface { + Echo(context.Context, *RequestEcho) (*ResponseEcho, error) + Flush(context.Context, *RequestFlush) (*ResponseFlush, error) + Info(context.Context, *RequestInfo) (*ResponseInfo, error) + CheckTx(context.Context, *RequestCheckTx) (*ResponseCheckTx, error) + Query(context.Context, *RequestQuery) (*ResponseQuery, error) + Commit(context.Context, *RequestCommit) (*ResponseCommit, error) + InitChain(context.Context, *RequestInitChain) (*ResponseInitChain, error) + ListSnapshots(context.Context, *RequestListSnapshots) (*ResponseListSnapshots, error) + OfferSnapshot(context.Context, *RequestOfferSnapshot) (*ResponseOfferSnapshot, error) + LoadSnapshotChunk(context.Context, *RequestLoadSnapshotChunk) (*ResponseLoadSnapshotChunk, error) + ApplySnapshotChunk(context.Context, *RequestApplySnapshotChunk) (*ResponseApplySnapshotChunk, error) + PrepareProposal(context.Context, *RequestPrepareProposal) (*ResponsePrepareProposal, error) + ProcessProposal(context.Context, *RequestProcessProposal) (*ResponseProcessProposal, error) + ExtendVote(context.Context, *RequestExtendVote) (*ResponseExtendVote, error) + VerifyVoteExtension(context.Context, *RequestVerifyVoteExtension) (*ResponseVerifyVoteExtension, error) + FinalizeBlock(context.Context, *RequestFinalizeBlock) (*ResponseFinalizeBlock, error) } -func (m *ResponseQuery) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Codespace) > 0 { - i -= len(m.Codespace) - copy(dAtA[i:], m.Codespace) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Codespace))) - i-- - dAtA[i] = 0x52 - } - if m.Height != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Height)) - i-- - dAtA[i] = 0x48 +// UnimplementedABCIApplicationServer can be embedded to have forward compatible implementations. 
+type UnimplementedABCIApplicationServer struct { +} + +func (*UnimplementedABCIApplicationServer) Echo(ctx context.Context, req *RequestEcho) (*ResponseEcho, error) { + return nil, status.Errorf(codes.Unimplemented, "method Echo not implemented") +} +func (*UnimplementedABCIApplicationServer) Flush(ctx context.Context, req *RequestFlush) (*ResponseFlush, error) { + return nil, status.Errorf(codes.Unimplemented, "method Flush not implemented") +} +func (*UnimplementedABCIApplicationServer) Info(ctx context.Context, req *RequestInfo) (*ResponseInfo, error) { + return nil, status.Errorf(codes.Unimplemented, "method Info not implemented") +} +func (*UnimplementedABCIApplicationServer) CheckTx(ctx context.Context, req *RequestCheckTx) (*ResponseCheckTx, error) { + return nil, status.Errorf(codes.Unimplemented, "method CheckTx not implemented") +} +func (*UnimplementedABCIApplicationServer) Query(ctx context.Context, req *RequestQuery) (*ResponseQuery, error) { + return nil, status.Errorf(codes.Unimplemented, "method Query not implemented") +} +func (*UnimplementedABCIApplicationServer) Commit(ctx context.Context, req *RequestCommit) (*ResponseCommit, error) { + return nil, status.Errorf(codes.Unimplemented, "method Commit not implemented") +} +func (*UnimplementedABCIApplicationServer) InitChain(ctx context.Context, req *RequestInitChain) (*ResponseInitChain, error) { + return nil, status.Errorf(codes.Unimplemented, "method InitChain not implemented") +} +func (*UnimplementedABCIApplicationServer) ListSnapshots(ctx context.Context, req *RequestListSnapshots) (*ResponseListSnapshots, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListSnapshots not implemented") +} +func (*UnimplementedABCIApplicationServer) OfferSnapshot(ctx context.Context, req *RequestOfferSnapshot) (*ResponseOfferSnapshot, error) { + return nil, status.Errorf(codes.Unimplemented, "method OfferSnapshot not implemented") +} +func (*UnimplementedABCIApplicationServer) LoadSnapshotChunk(ctx context.Context, req *RequestLoadSnapshotChunk) (*ResponseLoadSnapshotChunk, error) { + return nil, status.Errorf(codes.Unimplemented, "method LoadSnapshotChunk not implemented") +} +func (*UnimplementedABCIApplicationServer) ApplySnapshotChunk(ctx context.Context, req *RequestApplySnapshotChunk) (*ResponseApplySnapshotChunk, error) { + return nil, status.Errorf(codes.Unimplemented, "method ApplySnapshotChunk not implemented") +} +func (*UnimplementedABCIApplicationServer) PrepareProposal(ctx context.Context, req *RequestPrepareProposal) (*ResponsePrepareProposal, error) { + return nil, status.Errorf(codes.Unimplemented, "method PrepareProposal not implemented") +} +func (*UnimplementedABCIApplicationServer) ProcessProposal(ctx context.Context, req *RequestProcessProposal) (*ResponseProcessProposal, error) { + return nil, status.Errorf(codes.Unimplemented, "method ProcessProposal not implemented") +} +func (*UnimplementedABCIApplicationServer) ExtendVote(ctx context.Context, req *RequestExtendVote) (*ResponseExtendVote, error) { + return nil, status.Errorf(codes.Unimplemented, "method ExtendVote not implemented") +} +func (*UnimplementedABCIApplicationServer) VerifyVoteExtension(ctx context.Context, req *RequestVerifyVoteExtension) (*ResponseVerifyVoteExtension, error) { + return nil, status.Errorf(codes.Unimplemented, "method VerifyVoteExtension not implemented") +} +func (*UnimplementedABCIApplicationServer) FinalizeBlock(ctx context.Context, req *RequestFinalizeBlock) (*ResponseFinalizeBlock, error) { + return nil, 
status.Errorf(codes.Unimplemented, "method FinalizeBlock not implemented") +} + +func RegisterABCIApplicationServer(s *grpc.Server, srv ABCIApplicationServer) { + s.RegisterService(&_ABCIApplication_serviceDesc, srv) +} + +func _ABCIApplication_Echo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestEcho) + if err := dec(in); err != nil { + return nil, err } - if m.ProofOps != nil { - { - size, err := m.ProofOps.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 + if interceptor == nil { + return srv.(ABCIApplicationServer).Echo(ctx, in) } - if len(m.Value) > 0 { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x3a + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tendermint.abci.ABCIApplication/Echo", } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0x32 + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).Echo(ctx, req.(*RequestEcho)) } - if m.Index != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Index)) - i-- - dAtA[i] = 0x28 + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_Flush_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestFlush) + if err := dec(in); err != nil { + return nil, err } - if len(m.Info) > 0 { - i -= len(m.Info) - copy(dAtA[i:], m.Info) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Info))) - i-- - dAtA[i] = 0x22 + if interceptor == nil { + return srv.(ABCIApplicationServer).Flush(ctx, in) } - if len(m.Log) > 0 { - i -= len(m.Log) - copy(dAtA[i:], m.Log) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Log))) - i-- - dAtA[i] = 0x1a + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tendermint.abci.ABCIApplication/Flush", } - if m.Code != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Code)) - i-- - dAtA[i] = 0x8 + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).Flush(ctx, req.(*RequestFlush)) } - return len(dAtA) - i, nil + return interceptor(ctx, in, info, handler) } -func (m *ResponseBeginBlock) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { +func _ABCIApplication_Info_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestInfo) + if err := dec(in); err != nil { return nil, err } - return dAtA[:n], nil + if interceptor == nil { + return srv.(ABCIApplicationServer).Info(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tendermint.abci.ABCIApplication/Info", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).Info(ctx, req.(*RequestInfo)) + } + return interceptor(ctx, in, info, handler) } -func (m *ResponseBeginBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func _ABCIApplication_CheckTx_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, 
interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestCheckTx) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).CheckTx(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tendermint.abci.ABCIApplication/CheckTx", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).CheckTx(ctx, req.(*RequestCheckTx)) + } + return interceptor(ctx, in, info, handler) } -func (m *ResponseBeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Events) > 0 { - for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } +func _ABCIApplication_Query_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestQuery) + if err := dec(in); err != nil { + return nil, err } - return len(dAtA) - i, nil + if interceptor == nil { + return srv.(ABCIApplicationServer).Query(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tendermint.abci.ABCIApplication/Query", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).Query(ctx, req.(*RequestQuery)) + } + return interceptor(ctx, in, info, handler) } -func (m *ResponseCheckTx) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { +func _ABCIApplication_Commit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestCommit) + if err := dec(in); err != nil { return nil, err } - return dAtA[:n], nil + if interceptor == nil { + return srv.(ABCIApplicationServer).Commit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tendermint.abci.ABCIApplication/Commit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).Commit(ctx, req.(*RequestCommit)) + } + return interceptor(ctx, in, info, handler) } -func (m *ResponseCheckTx) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func _ABCIApplication_InitChain_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestInitChain) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).InitChain(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tendermint.abci.ABCIApplication/InitChain", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).InitChain(ctx, req.(*RequestInitChain)) + } + return interceptor(ctx, in, info, handler) } -func (m *ResponseCheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.MempoolError) > 0 { - i -= len(m.MempoolError) - copy(dAtA[i:], m.MempoolError) - i = encodeVarintTypes(dAtA, i, uint64(len(m.MempoolError))) - i-- - dAtA[i] = 0x5a +func 
_ABCIApplication_ListSnapshots_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestListSnapshots) + if err := dec(in); err != nil { + return nil, err } - if m.Priority != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Priority)) - i-- - dAtA[i] = 0x50 + if interceptor == nil { + return srv.(ABCIApplicationServer).ListSnapshots(ctx, in) } - if len(m.Sender) > 0 { - i -= len(m.Sender) - copy(dAtA[i:], m.Sender) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Sender))) - i-- - dAtA[i] = 0x4a + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tendermint.abci.ABCIApplication/ListSnapshots", } - if len(m.Codespace) > 0 { - i -= len(m.Codespace) - copy(dAtA[i:], m.Codespace) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Codespace))) - i-- - dAtA[i] = 0x42 + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).ListSnapshots(ctx, req.(*RequestListSnapshots)) } - if len(m.Events) > 0 { - for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_OfferSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestOfferSnapshot) + if err := dec(in); err != nil { + return nil, err } - if m.GasUsed != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.GasUsed)) - i-- - dAtA[i] = 0x30 + if interceptor == nil { + return srv.(ABCIApplicationServer).OfferSnapshot(ctx, in) } - if m.GasWanted != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.GasWanted)) - i-- - dAtA[i] = 0x28 + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tendermint.abci.ABCIApplication/OfferSnapshot", } - if len(m.Info) > 0 { - i -= len(m.Info) - copy(dAtA[i:], m.Info) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Info))) - i-- - dAtA[i] = 0x22 + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).OfferSnapshot(ctx, req.(*RequestOfferSnapshot)) } - if len(m.Log) > 0 { - i -= len(m.Log) - copy(dAtA[i:], m.Log) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Log))) - i-- - dAtA[i] = 0x1a + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_LoadSnapshotChunk_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestLoadSnapshotChunk) + if err := dec(in); err != nil { + return nil, err } - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0x12 + if interceptor == nil { + return srv.(ABCIApplicationServer).LoadSnapshotChunk(ctx, in) } - if m.Code != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Code)) - i-- - dAtA[i] = 0x8 + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tendermint.abci.ABCIApplication/LoadSnapshotChunk", } - return len(dAtA) - i, nil + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).LoadSnapshotChunk(ctx, req.(*RequestLoadSnapshotChunk)) + } + return interceptor(ctx, in, info, handler) } -func (m *ResponseDeliverTx) Marshal() (dAtA []byte, err 
error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { +func _ABCIApplication_ApplySnapshotChunk_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestApplySnapshotChunk) + if err := dec(in); err != nil { return nil, err } - return dAtA[:n], nil + if interceptor == nil { + return srv.(ABCIApplicationServer).ApplySnapshotChunk(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tendermint.abci.ABCIApplication/ApplySnapshotChunk", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).ApplySnapshotChunk(ctx, req.(*RequestApplySnapshotChunk)) + } + return interceptor(ctx, in, info, handler) } -func (m *ResponseDeliverTx) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func _ABCIApplication_PrepareProposal_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestPrepareProposal) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).PrepareProposal(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tendermint.abci.ABCIApplication/PrepareProposal", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).PrepareProposal(ctx, req.(*RequestPrepareProposal)) + } + return interceptor(ctx, in, info, handler) } -func (m *ResponseDeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Codespace) > 0 { - i -= len(m.Codespace) - copy(dAtA[i:], m.Codespace) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Codespace))) - i-- - dAtA[i] = 0x42 +func _ABCIApplication_ProcessProposal_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestProcessProposal) + if err := dec(in); err != nil { + return nil, err } - if len(m.Events) > 0 { - for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } + if interceptor == nil { + return srv.(ABCIApplicationServer).ProcessProposal(ctx, in) } - if m.GasUsed != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.GasUsed)) - i-- - dAtA[i] = 0x30 + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tendermint.abci.ABCIApplication/ProcessProposal", } - if m.GasWanted != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.GasWanted)) - i-- - dAtA[i] = 0x28 + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).ProcessProposal(ctx, req.(*RequestProcessProposal)) } - if len(m.Info) > 0 { - i -= len(m.Info) - copy(dAtA[i:], m.Info) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Info))) - i-- - dAtA[i] = 0x22 + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_ExtendVote_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestExtendVote) + if err := dec(in); err != nil { + return nil, err } - if 
len(m.Log) > 0 { - i -= len(m.Log) - copy(dAtA[i:], m.Log) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Log))) - i-- - dAtA[i] = 0x1a + if interceptor == nil { + return srv.(ABCIApplicationServer).ExtendVote(ctx, in) } - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0x12 + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tendermint.abci.ABCIApplication/ExtendVote", } - if m.Code != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Code)) - i-- - dAtA[i] = 0x8 + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).ExtendVote(ctx, req.(*RequestExtendVote)) } - return len(dAtA) - i, nil + return interceptor(ctx, in, info, handler) } -func (m *ResponseEndBlock) Marshal() (dAtA []byte, err error) { +func _ABCIApplication_VerifyVoteExtension_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestVerifyVoteExtension) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).VerifyVoteExtension(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tendermint.abci.ABCIApplication/VerifyVoteExtension", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).VerifyVoteExtension(ctx, req.(*RequestVerifyVoteExtension)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_FinalizeBlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestFinalizeBlock) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).FinalizeBlock(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tendermint.abci.ABCIApplication/FinalizeBlock", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).FinalizeBlock(ctx, req.(*RequestFinalizeBlock)) + } + return interceptor(ctx, in, info, handler) +} + +var _ABCIApplication_serviceDesc = grpc.ServiceDesc{ + ServiceName: "tendermint.abci.ABCIApplication", + HandlerType: (*ABCIApplicationServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Echo", + Handler: _ABCIApplication_Echo_Handler, + }, + { + MethodName: "Flush", + Handler: _ABCIApplication_Flush_Handler, + }, + { + MethodName: "Info", + Handler: _ABCIApplication_Info_Handler, + }, + { + MethodName: "CheckTx", + Handler: _ABCIApplication_CheckTx_Handler, + }, + { + MethodName: "Query", + Handler: _ABCIApplication_Query_Handler, + }, + { + MethodName: "Commit", + Handler: _ABCIApplication_Commit_Handler, + }, + { + MethodName: "InitChain", + Handler: _ABCIApplication_InitChain_Handler, + }, + { + MethodName: "ListSnapshots", + Handler: _ABCIApplication_ListSnapshots_Handler, + }, + { + MethodName: "OfferSnapshot", + Handler: _ABCIApplication_OfferSnapshot_Handler, + }, + { + MethodName: "LoadSnapshotChunk", + Handler: _ABCIApplication_LoadSnapshotChunk_Handler, + }, + { + MethodName: "ApplySnapshotChunk", + Handler: _ABCIApplication_ApplySnapshotChunk_Handler, + }, + { + MethodName: "PrepareProposal", + Handler: _ABCIApplication_PrepareProposal_Handler, + }, + { + MethodName: "ProcessProposal", + Handler: 
_ABCIApplication_ProcessProposal_Handler, + }, + { + MethodName: "ExtendVote", + Handler: _ABCIApplication_ExtendVote_Handler, + }, + { + MethodName: "VerifyVoteExtension", + Handler: _ABCIApplication_VerifyVoteExtension_Handler, + }, + { + MethodName: "FinalizeBlock", + Handler: _ABCIApplication_FinalizeBlock_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "tendermint/abci/types.proto", +} + +func (m *Request) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5673,33 +5331,38 @@ func (m *ResponseEndBlock) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResponseEndBlock) MarshalTo(dAtA []byte) (int, error) { +func (m *Request) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResponseEndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Request) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.ValidatorSetUpdate != nil { + if m.Value != nil { { - size, err := m.ValidatorSetUpdate.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { + size := m.Value.Size() + i -= size + if _, err := m.Value.MarshalTo(dAtA[i:]); err != nil { return 0, err } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0x6 - i-- - dAtA[i] = 0xaa } - if m.NextCoreChainLockUpdate != nil { + return len(dAtA) - i, nil +} + +func (m *Request_Echo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_Echo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Echo != nil { { - size, err := m.NextCoreChainLockUpdate.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Echo.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5707,27 +5370,20 @@ func (m *ResponseEndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x6 - i-- - dAtA[i] = 0xa2 - } - if len(m.Events) > 0 { - for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } + dAtA[i] = 0xa } - if m.ConsensusParamUpdates != nil { + return len(dAtA) - i, nil +} +func (m *Request_Flush) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_Flush) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Flush != nil { { - size, err := m.ConsensusParamUpdates.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Flush.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5739,333 +5395,372 @@ func (m *ResponseEndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { } return len(dAtA) - i, nil } - -func (m *ResponseCommit) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResponseCommit) MarshalTo(dAtA []byte) (int, error) { +func (m *Request_Info) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResponseCommit) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Request_Info) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) - _ = i - var 
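(Editor's note: the generated handlers registered in _ABCIApplication_serviceDesc above all share one dispatch shape — decode into the typed request, call the server method directly when no interceptor is installed, and otherwise wrap the typed call in a grpc.UnaryServerInfo/grpc.UnaryHandler pair so middleware can observe the full method name. A reduced, self-contained sketch of that pattern follows; `dispatch`, `logging`, and `typedCall` are illustrative names for this note, not part of the generated file.)

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc"
)

// dispatch mirrors the generated _ABCIApplication_*_Handler functions:
// without an interceptor the typed call runs directly; with one, the
// call is wrapped so the interceptor can observe info.FullMethod.
func dispatch(
	ctx context.Context,
	in interface{},
	fullMethod string,
	interceptor grpc.UnaryServerInterceptor,
	call grpc.UnaryHandler,
) (interface{}, error) {
	if interceptor == nil {
		return call(ctx, in)
	}
	info := &grpc.UnaryServerInfo{FullMethod: fullMethod}
	return interceptor(ctx, in, info, call)
}

func main() {
	// A trivial logging interceptor, standing in for whatever middleware
	// an operator might install via grpc.UnaryInterceptor(...).
	logging := func(ctx context.Context, req interface{},
		info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
		fmt.Println("ABCI call:", info.FullMethod)
		return handler(ctx, req)
	}

	// Stand-in for srv.(ABCIApplicationServer).ListSnapshots(ctx, req).
	typedCall := func(ctx context.Context, req interface{}) (interface{}, error) {
		return "snapshots", nil
	}

	resp, err := dispatch(context.Background(), struct{}{},
		"/tendermint.abci.ABCIApplication/ListSnapshots", logging, typedCall)
	fmt.Println(resp, err)
}

(The interceptor is threaded in per call rather than stored on the server, which is why every generated handler repeats the nil check — grpc-go passes the server's chained interceptor into each handler invocation.)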
l int - _ = l - if m.RetainHeight != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.RetainHeight)) - i-- - dAtA[i] = 0x18 - } - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + if m.Info != nil { + { + size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0x12 + dAtA[i] = 0x1a } return len(dAtA) - i, nil } - -func (m *ResponseListSnapshots) Marshal() (dAtA []byte, err error) { +func (m *Request_InitChain) MarshalTo(dAtA []byte) (int, error) { size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResponseListSnapshots) MarshalTo(dAtA []byte) (int, error) { +func (m *Request_InitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.InitChain != nil { + { + size, err := m.InitChain.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} +func (m *Request_Query) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResponseListSnapshots) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Request_Query) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Snapshots) > 0 { - for iNdEx := len(m.Snapshots) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Snapshots[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) + if m.Query != nil { + { + size, err := m.Query.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x2a } return len(dAtA) - i, nil } - -func (m *ResponseOfferSnapshot) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResponseOfferSnapshot) MarshalTo(dAtA []byte) (int, error) { +func (m *Request_BeginBlock) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResponseOfferSnapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Request_BeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) - _ = i - var l int - _ = l - if m.Result != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Result)) + if m.BeginBlock != nil { + { + size, err := m.BeginBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0x8 + dAtA[i] = 0x32 } return len(dAtA) - i, nil } - -func (m *ResponseLoadSnapshotChunk) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResponseLoadSnapshotChunk) MarshalTo(dAtA []byte) (int, error) { +func (m *Request_CheckTx) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResponseLoadSnapshotChunk) MarshalToSizedBuffer(dAtA 
[]byte) (int, error) { +func (m *Request_CheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Chunk) > 0 { - i -= len(m.Chunk) - copy(dAtA[i:], m.Chunk) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Chunk))) + if m.CheckTx != nil { + { + size, err := m.CheckTx.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0xa + dAtA[i] = 0x3a } return len(dAtA) - i, nil } - -func (m *ResponseApplySnapshotChunk) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResponseApplySnapshotChunk) MarshalTo(dAtA []byte) (int, error) { +func (m *Request_DeliverTx) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResponseApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Request_DeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) - _ = i - var l int - _ = l - if len(m.RejectSenders) > 0 { - for iNdEx := len(m.RejectSenders) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.RejectSenders[iNdEx]) - copy(dAtA[i:], m.RejectSenders[iNdEx]) - i = encodeVarintTypes(dAtA, i, uint64(len(m.RejectSenders[iNdEx]))) - i-- - dAtA[i] = 0x1a - } - } - if len(m.RefetchChunks) > 0 { - dAtA44 := make([]byte, len(m.RefetchChunks)*10) - var j43 int - for _, num := range m.RefetchChunks { - for num >= 1<<7 { - dAtA44[j43] = uint8(uint64(num)&0x7f | 0x80) - num >>= 7 - j43++ + if m.DeliverTx != nil { + { + size, err := m.DeliverTx.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - dAtA44[j43] = uint8(num) - j43++ + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) } - i -= j43 - copy(dAtA[i:], dAtA44[:j43]) - i = encodeVarintTypes(dAtA, i, uint64(j43)) - i-- - dAtA[i] = 0x12 - } - if m.Result != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Result)) i-- - dAtA[i] = 0x8 + dAtA[i] = 0x42 } return len(dAtA) - i, nil } - -func (m *LastCommitInfo) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LastCommitInfo) MarshalTo(dAtA []byte) (int, error) { +func (m *Request_EndBlock) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *LastCommitInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Request_EndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) - _ = i - var l int - _ = l - if len(m.StateSignature) > 0 { - i -= len(m.StateSignature) - copy(dAtA[i:], m.StateSignature) - i = encodeVarintTypes(dAtA, i, uint64(len(m.StateSignature))) - i-- - dAtA[i] = 0x2a - } - if len(m.BlockSignature) > 0 { - i -= len(m.BlockSignature) - copy(dAtA[i:], m.BlockSignature) - i = encodeVarintTypes(dAtA, i, uint64(len(m.BlockSignature))) - i-- - dAtA[i] = 0x22 - } - if len(m.QuorumHash) > 0 { - i -= len(m.QuorumHash) - copy(dAtA[i:], m.QuorumHash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.QuorumHash))) - i-- - dAtA[i] = 0x1a - } - if m.Round != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Round)) + if m.EndBlock != nil { + { + size, err := m.EndBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, 
uint64(size)) + } i-- - dAtA[i] = 0x8 + dAtA[i] = 0x4a } return len(dAtA) - i, nil } - -func (m *Event) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Event) MarshalTo(dAtA []byte) (int, error) { +func (m *Request_Commit) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Request_Commit) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) + if m.Commit != nil { + { + size, err := m.Commit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) } - } - if len(m.Type) > 0 { - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Type))) i-- - dAtA[i] = 0xa + dAtA[i] = 0x52 } return len(dAtA) - i, nil } - -func (m *EventAttribute) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *EventAttribute) MarshalTo(dAtA []byte) (int, error) { +func (m *Request_ListSnapshots) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *EventAttribute) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *Request_ListSnapshots) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) - _ = i - var l int - _ = l - if m.Index { - i-- - if m.Index { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.ListSnapshots != nil { + { + size, err := m.ListSnapshots.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x18 + dAtA[i] = 0x5a } - if len(m.Value) > 0 { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Value))) + return len(dAtA) - i, nil +} +func (m *Request_OfferSnapshot) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_OfferSnapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.OfferSnapshot != nil { + { + size, err := m.OfferSnapshot.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0x12 + dAtA[i] = 0x62 } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Key))) + return len(dAtA) - i, nil +} +func (m *Request_LoadSnapshotChunk) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_LoadSnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.LoadSnapshotChunk != nil { + { + size, err := m.LoadSnapshotChunk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0xa + dAtA[i] = 0x6a } return len(dAtA) - i, nil } 
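(Editor's note on the one-byte constants written after each embedded message above: they are protobuf field tags, tag = field_number<<3 | wire_type, with wire type 2 for length-delimited fields; the tag is written after the payload because MarshalToSizedBuffer fills dAtA back to front. Fields 16 and up no longer fit in a single byte, which is why the ProcessProposal/ExtendVote/VerifyVoteExtension/FinalizeBlock cases below emit two bytes, written last-byte-first so they land in forward order. A small sketch reproducing the constants — illustrative code, not from the generated file:)

package main

import "fmt"

// varint encodes v the same way encodeVarintTypes does: low 7 bits
// first with a continuation bit, so multi-byte tags such as 0x82 0x01
// fall out of the same rule.
func varint(v uint64) []byte {
	var b []byte
	for v >= 1<<7 {
		b = append(b, uint8(v&0x7f|0x80))
		v >>= 7
	}
	return append(b, uint8(v))
}

func main() {
	const lengthDelimited = 2 // wire type for embedded messages and bytes
	for _, c := range []struct {
		oneofCase string
		field     uint64
	}{
		{"Request_Query", 5},              // expect 2a
		{"Request_LoadSnapshotChunk", 13}, // expect 6a
		{"Request_ProcessProposal", 16},   // expect 82 01
	} {
		tag := c.field<<3 | lengthDelimited
		fmt.Printf("%-26s % x\n", c.oneofCase, varint(tag))
	}
}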
+func (m *Request_ApplySnapshotChunk) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} -func (m *TxResult) Marshal() (dAtA []byte, err error) { +func (m *Request_ApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ApplySnapshotChunk != nil { + { + size, err := m.ApplySnapshotChunk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + return len(dAtA) - i, nil +} +func (m *Request_PrepareProposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_PrepareProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.PrepareProposal != nil { + { + size, err := m.PrepareProposal.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } + return len(dAtA) - i, nil +} +func (m *Request_ProcessProposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_ProcessProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ProcessProposal != nil { + { + size, err := m.ProcessProposal.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + return len(dAtA) - i, nil +} +func (m *Request_ExtendVote) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_ExtendVote) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ExtendVote != nil { + { + size, err := m.ExtendVote.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + return len(dAtA) - i, nil +} +func (m *Request_VerifyVoteExtension) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_VerifyVoteExtension) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.VerifyVoteExtension != nil { + { + size, err := m.VerifyVoteExtension.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + return len(dAtA) - i, nil +} +func (m *Request_FinalizeBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_FinalizeBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.FinalizeBlock != nil { + { + size, err := m.FinalizeBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + return len(dAtA) - i, nil +} +func (m *RequestEcho) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6075,47 +5770,27 @@ func (m *TxResult) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *TxResult) MarshalTo(dAtA []byte) (int, error) { +func (m *RequestEcho) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return 
m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *TxResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RequestEcho) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - { - size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - if len(m.Tx) > 0 { - i -= len(m.Tx) - copy(dAtA[i:], m.Tx) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) - i-- - dAtA[i] = 0x1a - } - if m.Index != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Index)) - i-- - dAtA[i] = 0x10 - } - if m.Height != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + if len(m.Message) > 0 { + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Message))) i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *Validator) Marshal() (dAtA []byte, err error) { +func (m *RequestFlush) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6125,32 +5800,20 @@ func (m *Validator) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Validator) MarshalTo(dAtA []byte) (int, error) { +func (m *RequestFlush) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Validator) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RequestFlush) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.ProTxHash) > 0 { - i -= len(m.ProTxHash) - copy(dAtA[i:], m.ProTxHash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.ProTxHash))) - i-- - dAtA[i] = 0x22 - } - if m.Power != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Power)) - i-- - dAtA[i] = 0x18 - } return len(dAtA) - i, nil } -func (m *ValidatorUpdate) Marshal() (dAtA []byte, err error) { +func (m *RequestInfo) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6160,51 +5823,44 @@ func (m *ValidatorUpdate) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidatorUpdate) MarshalTo(dAtA []byte) (int, error) { +func (m *RequestInfo) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatorUpdate) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RequestInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.NodeAddress) > 0 { - i -= len(m.NodeAddress) - copy(dAtA[i:], m.NodeAddress) - i = encodeVarintTypes(dAtA, i, uint64(len(m.NodeAddress))) + if len(m.AbciVersion) > 0 { + i -= len(m.AbciVersion) + copy(dAtA[i:], m.AbciVersion) + i = encodeVarintTypes(dAtA, i, uint64(len(m.AbciVersion))) i-- dAtA[i] = 0x22 } - if len(m.ProTxHash) > 0 { - i -= len(m.ProTxHash) - copy(dAtA[i:], m.ProTxHash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.ProTxHash))) + if m.P2PVersion != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.P2PVersion)) i-- - dAtA[i] = 0x1a + dAtA[i] = 0x18 } - if m.Power != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Power)) + if m.BlockVersion != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.BlockVersion)) i-- dAtA[i] = 0x10 } - if m.PubKey != nil { - { - size, err := m.PubKey.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } + if len(m.Version) > 0 { + i -= 
len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Version))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *ValidatorSetUpdate) Marshal() (dAtA []byte, err error) { +func (m *RequestInitChain) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6214,51 +5870,76 @@ func (m *ValidatorSetUpdate) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ValidatorSetUpdate) MarshalTo(dAtA []byte) (int, error) { +func (m *RequestInitChain) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatorSetUpdate) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RequestInitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.QuorumHash) > 0 { - i -= len(m.QuorumHash) - copy(dAtA[i:], m.QuorumHash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.QuorumHash))) + if m.InitialCoreHeight != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.InitialCoreHeight)) i-- - dAtA[i] = 0x1a + dAtA[i] = 0x38 } - { - size, err := m.ThresholdPublicKey.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if m.InitialHeight != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.InitialHeight)) + i-- + dAtA[i] = 0x30 + } + if len(m.AppStateBytes) > 0 { + i -= len(m.AppStateBytes) + copy(dAtA[i:], m.AppStateBytes) + i = encodeVarintTypes(dAtA, i, uint64(len(m.AppStateBytes))) + i-- + dAtA[i] = 0x2a + } + if m.ValidatorSet != nil { + { + size, err := m.ValidatorSet.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 } - i-- - dAtA[i] = 0x12 - if len(m.ValidatorUpdates) > 0 { - for iNdEx := len(m.ValidatorUpdates) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ValidatorUpdates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) + if m.ConsensusParams != nil { + { + size, err := m.ConsensusParams.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a + } + if len(m.ChainId) > 0 { + i -= len(m.ChainId) + copy(dAtA[i:], m.ChainId) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ChainId))) + i-- + dAtA[i] = 0x12 + } + n22, err22 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) + if err22 != nil { + return 0, err22 } + i -= n22 + i = encodeVarintTypes(dAtA, i, uint64(n22)) + i-- + dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *ThresholdPublicKeyUpdate) Marshal() (dAtA []byte, err error) { +func (m *RequestQuery) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6268,30 +5949,49 @@ func (m *ThresholdPublicKeyUpdate) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ThresholdPublicKeyUpdate) MarshalTo(dAtA []byte) (int, error) { +func (m *RequestQuery) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ThresholdPublicKeyUpdate) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RequestQuery) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - { - 
size, err := m.ThresholdPublicKey.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if m.Prove { + i-- + if m.Prove { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x20 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x18 + } + if len(m.Path) > 0 { + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 + } + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0xa } - i-- - dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *QuorumHashUpdate) Marshal() (dAtA []byte, err error) { +func (m *RequestBeginBlock) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6301,27 +6001,61 @@ func (m *QuorumHashUpdate) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *QuorumHashUpdate) MarshalTo(dAtA []byte) (int, error) { +func (m *RequestBeginBlock) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *QuorumHashUpdate) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RequestBeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.QuorumHash) > 0 { - i -= len(m.QuorumHash) - copy(dAtA[i:], m.QuorumHash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.QuorumHash))) + if len(m.ByzantineValidators) > 0 { + for iNdEx := len(m.ByzantineValidators) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ByzantineValidators[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + { + size, err := m.LastCommitInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Header.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) i-- dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *VoteInfo) Marshal() (dAtA []byte, err error) { +func (m *RequestCheckTx) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6331,40 +6065,32 @@ func (m *VoteInfo) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *VoteInfo) MarshalTo(dAtA []byte) (int, error) { +func (m *RequestCheckTx) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *VoteInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RequestCheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.SignedLastBlock { - i-- - if m.SignedLastBlock { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } + if m.Type != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Type)) i-- dAtA[i] = 0x10 } - { - size, err := m.Validator.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) + if len(m.Tx) > 0 { + i -= len(m.Tx) + copy(dAtA[i:], m.Tx) + i = encodeVarintTypes(dAtA, i, 
uint64(len(m.Tx))) + i-- + dAtA[i] = 0xa } - i-- - dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *Evidence) Marshal() (dAtA []byte, err error) { +func (m *RequestDeliverTx) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6374,53 +6100,27 @@ func (m *Evidence) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Evidence) MarshalTo(dAtA []byte) (int, error) { +func (m *RequestDeliverTx) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Evidence) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RequestDeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.TotalVotingPower != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.TotalVotingPower)) - i-- - dAtA[i] = 0x28 - } - n50, err50 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) - if err50 != nil { - return 0, err50 - } - i -= n50 - i = encodeVarintTypes(dAtA, i, uint64(n50)) - i-- - dAtA[i] = 0x22 - if m.Height != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Height)) - i-- - dAtA[i] = 0x18 - } - { - size, err := m.Validator.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - if m.Type != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Type)) + if len(m.Tx) > 0 { + i -= len(m.Tx) + copy(dAtA[i:], m.Tx) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa } return len(dAtA) - i, nil } -func (m *Snapshot) Marshal() (dAtA []byte, err error) { +func (m *RequestEndBlock) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -6430,47 +6130,16 @@ func (m *Snapshot) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) { +func (m *RequestEndBlock) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Snapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RequestEndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.CoreChainLockedHeight != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.CoreChainLockedHeight)) - i-- - dAtA[i] = 0x6 - i-- - dAtA[i] = 0xa0 - } - if len(m.Metadata) > 0 { - i -= len(m.Metadata) - copy(dAtA[i:], m.Metadata) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Metadata))) - i-- - dAtA[i] = 0x2a - } - if len(m.Hash) > 0 { - i -= len(m.Hash) - copy(dAtA[i:], m.Hash) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) - i-- - dAtA[i] = 0x22 - } - if m.Chunks != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Chunks)) - i-- - dAtA[i] = 0x18 - } - if m.Format != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.Format)) - i-- - dAtA[i] = 0x10 - } if m.Height != 0 { i = encodeVarintTypes(dAtA, i, uint64(m.Height)) i-- @@ -6479,1212 +6148,7156 @@ func (m *Snapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { - offset -= sovTypes(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *RequestCommit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - dAtA[offset] = uint8(v) - return base + return dAtA[:n], nil } -func (m *Request) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Value != nil { - n += m.Value.Size() - } - return n + +func (m *RequestCommit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Request_Echo) Size() (n int) { - if m == nil { - return 0 - } +func (m *RequestCommit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Echo != nil { - l = m.Echo.Size() - n += 1 + l + sovTypes(uint64(l)) - } - return n + return len(dAtA) - i, nil } -func (m *Request_Flush) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Flush != nil { - l = m.Flush.Size() - n += 1 + l + sovTypes(uint64(l)) + +func (m *RequestListSnapshots) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *Request_Info) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Info != nil { - l = m.Info.Size() - n += 1 + l + sovTypes(uint64(l)) - } - return n + +func (m *RequestListSnapshots) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Request_InitChain) Size() (n int) { - if m == nil { - return 0 - } + +func (m *RequestListSnapshots) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.InitChain != nil { - l = m.InitChain.Size() - n += 1 + l + sovTypes(uint64(l)) - } - return n + return len(dAtA) - i, nil } -func (m *Request_Query) Size() (n int) { - if m == nil { - return 0 + +func (m *RequestOfferSnapshot) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *RequestOfferSnapshot) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestOfferSnapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Query != nil { - l = m.Query.Size() - n += 1 + l + sovTypes(uint64(l)) + if len(m.AppHash) > 0 { + i -= len(m.AppHash) + copy(dAtA[i:], m.AppHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.AppHash))) + i-- + dAtA[i] = 0x12 } - return n + if m.Snapshot != nil { + { + size, err := m.Snapshot.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *Request_BeginBlock) Size() (n int) { - if m == nil { - return 0 + +func (m *RequestLoadSnapshotChunk) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *RequestLoadSnapshotChunk) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestLoadSnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.BeginBlock != nil { - l = m.BeginBlock.Size() - n += 1 + l + sovTypes(uint64(l)) + if m.Chunk != 0 { + i = 
encodeVarintTypes(dAtA, i, uint64(m.Chunk)) + i-- + dAtA[i] = 0x18 } - return n -} -func (m *Request_CheckTx) Size() (n int) { - if m == nil { - return 0 + if m.Format != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Format)) + i-- + dAtA[i] = 0x10 } - var l int - _ = l - if m.CheckTx != nil { - l = m.CheckTx.Size() - n += 1 + l + sovTypes(uint64(l)) + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 } - return n + return len(dAtA) - i, nil } -func (m *Request_DeliverTx) Size() (n int) { - if m == nil { - return 0 + +func (m *RequestApplySnapshotChunk) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *RequestApplySnapshotChunk) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.DeliverTx != nil { - l = m.DeliverTx.Size() - n += 1 + l + sovTypes(uint64(l)) + if len(m.Sender) > 0 { + i -= len(m.Sender) + copy(dAtA[i:], m.Sender) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Sender))) + i-- + dAtA[i] = 0x1a } - return n -} -func (m *Request_EndBlock) Size() (n int) { - if m == nil { - return 0 + if len(m.Chunk) > 0 { + i -= len(m.Chunk) + copy(dAtA[i:], m.Chunk) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Chunk))) + i-- + dAtA[i] = 0x12 } - var l int - _ = l - if m.EndBlock != nil { - l = m.EndBlock.Size() - n += 1 + l + sovTypes(uint64(l)) + if m.Index != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x8 } - return n + return len(dAtA) - i, nil } -func (m *Request_Commit) Size() (n int) { - if m == nil { - return 0 + +func (m *RequestPrepareProposal) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *RequestPrepareProposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestPrepareProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Commit != nil { - l = m.Commit.Size() - n += 1 + l + sovTypes(uint64(l)) + if len(m.ProposerProTxHash) > 0 { + i -= len(m.ProposerProTxHash) + copy(dAtA[i:], m.ProposerProTxHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ProposerProTxHash))) + i-- + dAtA[i] = 0x42 } - return n -} -func (m *Request_ListSnapshots) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ListSnapshots != nil { - l = m.ListSnapshots.Size() - n += 1 + l + sovTypes(uint64(l)) + if len(m.NextValidatorsHash) > 0 { + i -= len(m.NextValidatorsHash) + copy(dAtA[i:], m.NextValidatorsHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.NextValidatorsHash))) + i-- + dAtA[i] = 0x3a } - return n -} -func (m *Request_OfferSnapshot) Size() (n int) { - if m == nil { - return 0 + n26, err26 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) + if err26 != nil { + return 0, err26 } - var l int - _ = l - if m.OfferSnapshot != nil { - l = m.OfferSnapshot.Size() - n += 1 + l + sovTypes(uint64(l)) + i -= n26 + i = encodeVarintTypes(dAtA, i, uint64(n26)) + i-- + dAtA[i] = 0x32 + if m.Height != 0 { + i = encodeVarintTypes(dAtA, 
i, uint64(m.Height)) + i-- + dAtA[i] = 0x28 } - return n -} -func (m *Request_LoadSnapshotChunk) Size() (n int) { - if m == nil { - return 0 + if len(m.ByzantineValidators) > 0 { + for iNdEx := len(m.ByzantineValidators) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ByzantineValidators[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } } - var l int - _ = l - if m.LoadSnapshotChunk != nil { - l = m.LoadSnapshotChunk.Size() - n += 1 + l + sovTypes(uint64(l)) + { + size, err := m.LocalLastCommit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) } - return n -} -func (m *Request_ApplySnapshotChunk) Size() (n int) { - if m == nil { - return 0 + i-- + dAtA[i] = 0x1a + if len(m.Txs) > 0 { + for iNdEx := len(m.Txs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Txs[iNdEx]) + copy(dAtA[i:], m.Txs[iNdEx]) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Txs[iNdEx]))) + i-- + dAtA[i] = 0x12 + } } - var l int - _ = l - if m.ApplySnapshotChunk != nil { - l = m.ApplySnapshotChunk.Size() - n += 1 + l + sovTypes(uint64(l)) + if m.MaxTxBytes != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.MaxTxBytes)) + i-- + dAtA[i] = 0x8 } - return n + return len(dAtA) - i, nil } -func (m *RequestEcho) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Message) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + +func (m *RequestProcessProposal) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *RequestFlush) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n +func (m *RequestProcessProposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RequestInfo) Size() (n int) { - if m == nil { - return 0 - } +func (m *RequestProcessProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Version) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - if m.BlockVersion != 0 { - n += 1 + sovTypes(uint64(m.BlockVersion)) - } - if m.P2PVersion != 0 { - n += 1 + sovTypes(uint64(m.P2PVersion)) - } - l = len(m.AbciVersion) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + if len(m.ProposerProTxHash) > 0 { + i -= len(m.ProposerProTxHash) + copy(dAtA[i:], m.ProposerProTxHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ProposerProTxHash))) + i-- + dAtA[i] = 0x42 } - return n -} - -func (m *RequestInitChain) Size() (n int) { - if m == nil { - return 0 + if len(m.NextValidatorsHash) > 0 { + i -= len(m.NextValidatorsHash) + copy(dAtA[i:], m.NextValidatorsHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.NextValidatorsHash))) + i-- + dAtA[i] = 0x3a } - var l int - _ = l - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Time) - n += 1 + l + sovTypes(uint64(l)) - l = len(m.ChainId) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + n28, err28 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) + if err28 != nil { + return 0, err28 } - if m.ConsensusParams != nil { - l = m.ConsensusParams.Size() - n += 1 + l + sovTypes(uint64(l)) + i -= n28 + i = encodeVarintTypes(dAtA, i, uint64(n28)) + i-- + dAtA[i] = 0x32 + if m.Height != 0 { + i = 
encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x28 } - if m.ValidatorSet != nil { - l = m.ValidatorSet.Size() - n += 1 + l + sovTypes(uint64(l)) + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0x22 } - l = len(m.AppStateBytes) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + if len(m.ByzantineValidators) > 0 { + for iNdEx := len(m.ByzantineValidators) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ByzantineValidators[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } } - if m.InitialHeight != 0 { - n += 1 + sovTypes(uint64(m.InitialHeight)) + { + size, err := m.ProposedLastCommit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) } - if m.InitialCoreHeight != 0 { - n += 1 + sovTypes(uint64(m.InitialCoreHeight)) + i-- + dAtA[i] = 0x12 + if len(m.Txs) > 0 { + for iNdEx := len(m.Txs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Txs[iNdEx]) + copy(dAtA[i:], m.Txs[iNdEx]) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Txs[iNdEx]))) + i-- + dAtA[i] = 0xa + } } - return n + return len(dAtA) - i, nil } -func (m *RequestQuery) Size() (n int) { - if m == nil { - return 0 +func (m *RequestExtendVote) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *RequestExtendVote) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestExtendVote) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Data) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = len(m.Path) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } if m.Height != 0 { - n += 1 + sovTypes(uint64(m.Height)) + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x10 } - if m.Prove { - n += 2 + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *RequestBeginBlock) Size() (n int) { - if m == nil { - return 0 +func (m *RequestVerifyVoteExtension) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *RequestVerifyVoteExtension) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestVerifyVoteExtension) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Hash) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + if len(m.VoteExtension) > 0 { + i -= len(m.VoteExtension) + copy(dAtA[i:], m.VoteExtension) + i = encodeVarintTypes(dAtA, i, uint64(len(m.VoteExtension))) + i-- + dAtA[i] = 0x22 } - l = m.Header.Size() - n += 1 + l + sovTypes(uint64(l)) - l = m.LastCommitInfo.Size() - n += 1 + l + sovTypes(uint64(l)) - if len(m.ByzantineValidators) > 0 { - for _, e := range m.ByzantineValidators { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) - } - } - return n -} - -func (m *RequestCheckTx) Size() (n int) { - if m == nil { - return 0 + if m.Height != 0 { + i = 
encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x18 } - var l int - _ = l - l = len(m.Tx) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + if len(m.ValidatorProTxHash) > 0 { + i -= len(m.ValidatorProTxHash) + copy(dAtA[i:], m.ValidatorProTxHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ValidatorProTxHash))) + i-- + dAtA[i] = 0x12 } - if m.Type != 0 { - n += 1 + sovTypes(uint64(m.Type)) + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *RequestDeliverTx) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Tx) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) +func (m *RequestFinalizeBlock) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *RequestEndBlock) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Height != 0 { - n += 1 + sovTypes(uint64(m.Height)) - } - return n +func (m *RequestFinalizeBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RequestCommit) Size() (n int) { - if m == nil { - return 0 - } +func (m *RequestFinalizeBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - return n -} - -func (m *RequestListSnapshots) Size() (n int) { - if m == nil { - return 0 + if len(m.ProposerProTxHash) > 0 { + i -= len(m.ProposerProTxHash) + copy(dAtA[i:], m.ProposerProTxHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ProposerProTxHash))) + i-- + dAtA[i] = 0x42 } - var l int - _ = l - return n -} - -func (m *RequestOfferSnapshot) Size() (n int) { - if m == nil { - return 0 + if len(m.NextValidatorsHash) > 0 { + i -= len(m.NextValidatorsHash) + copy(dAtA[i:], m.NextValidatorsHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.NextValidatorsHash))) + i-- + dAtA[i] = 0x3a } - var l int - _ = l - if m.Snapshot != nil { - l = m.Snapshot.Size() - n += 1 + l + sovTypes(uint64(l)) + n30, err30 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) + if err30 != nil { + return 0, err30 } - l = len(m.AppHash) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + i -= n30 + i = encodeVarintTypes(dAtA, i, uint64(n30)) + i-- + dAtA[i] = 0x32 + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x28 } - return n -} - -func (m *RequestLoadSnapshotChunk) Size() (n int) { - if m == nil { - return 0 + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0x22 } - var l int - _ = l - if m.Height != 0 { - n += 1 + sovTypes(uint64(m.Height)) + if len(m.ByzantineValidators) > 0 { + for iNdEx := len(m.ByzantineValidators) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ByzantineValidators[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } } - if m.Format != 0 { - n += 1 + sovTypes(uint64(m.Format)) + { + size, err := m.DecidedLastCommit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) } - if m.Chunk != 0 { - n += 1 + sovTypes(uint64(m.Chunk)) + i-- + 
dAtA[i] = 0x12 + if len(m.Txs) > 0 { + for iNdEx := len(m.Txs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Txs[iNdEx]) + copy(dAtA[i:], m.Txs[iNdEx]) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Txs[iNdEx]))) + i-- + dAtA[i] = 0xa + } } - return n + return len(dAtA) - i, nil } -func (m *RequestApplySnapshotChunk) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Index != 0 { - n += 1 + sovTypes(uint64(m.Index)) - } - l = len(m.Chunk) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = len(m.Sender) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) +func (m *Response) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *Response) Size() (n int) { - if m == nil { - return 0 - } +func (m *Response) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l if m.Value != nil { - n += m.Value.Size() + { + size := m.Value.Size() + i -= size + if _, err := m.Value.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } } - return n + return len(dAtA) - i, nil } -func (m *Response_Exception) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l +func (m *Response_Exception) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_Exception) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) if m.Exception != nil { - l = m.Exception.Size() - n += 1 + l + sovTypes(uint64(l)) + { + size, err := m.Exception.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *Response_Echo) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l +func (m *Response_Echo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_Echo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) if m.Echo != nil { - l = m.Echo.Size() - n += 1 + l + sovTypes(uint64(l)) + { + size, err := m.Echo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - return n + return len(dAtA) - i, nil } -func (m *Response_Flush) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Flush != nil { - l = m.Flush.Size() - n += 1 + l + sovTypes(uint64(l)) - } - return n +func (m *Response_Flush) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Response_Info) Size() (n int) { - if m == nil { - return 0 + +func (m *Response_Flush) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Flush != nil { + { + size, err := m.Flush.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - var l int - _ = l + return len(dAtA) - i, nil +} +func (m *Response_Info) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_Info) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) if m.Info != nil { 
- l = m.Info.Size() - n += 1 + l + sovTypes(uint64(l)) + { + size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 } - return n + return len(dAtA) - i, nil } -func (m *Response_InitChain) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l +func (m *Response_InitChain) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_InitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) if m.InitChain != nil { - l = m.InitChain.Size() - n += 1 + l + sovTypes(uint64(l)) + { + size, err := m.InitChain.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a } - return n + return len(dAtA) - i, nil } -func (m *Response_Query) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l +func (m *Response_Query) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_Query) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) if m.Query != nil { - l = m.Query.Size() - n += 1 + l + sovTypes(uint64(l)) + { + size, err := m.Query.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 } - return n + return len(dAtA) - i, nil } -func (m *Response_BeginBlock) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l +func (m *Response_BeginBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_BeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) if m.BeginBlock != nil { - l = m.BeginBlock.Size() - n += 1 + l + sovTypes(uint64(l)) + { + size, err := m.BeginBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a } - return n + return len(dAtA) - i, nil } -func (m *Response_CheckTx) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l +func (m *Response_CheckTx) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_CheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) if m.CheckTx != nil { - l = m.CheckTx.Size() - n += 1 + l + sovTypes(uint64(l)) + { + size, err := m.CheckTx.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 } - return n + return len(dAtA) - i, nil } -func (m *Response_DeliverTx) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l +func (m *Response_DeliverTx) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_DeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) if m.DeliverTx != nil { - l = m.DeliverTx.Size() - n += 1 + l + sovTypes(uint64(l)) + { + size, err := m.DeliverTx.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a } - return n + return len(dAtA) - i, nil } -func (m *Response_EndBlock) Size() (n int) { - if m == nil { - return 0 - } - var l 
int - _ = l +func (m *Response_EndBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_EndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) if m.EndBlock != nil { - l = m.EndBlock.Size() - n += 1 + l + sovTypes(uint64(l)) + { + size, err := m.EndBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 } - return n + return len(dAtA) - i, nil } -func (m *Response_Commit) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l +func (m *Response_Commit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_Commit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) if m.Commit != nil { - l = m.Commit.Size() - n += 1 + l + sovTypes(uint64(l)) + { + size, err := m.Commit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a } - return n + return len(dAtA) - i, nil } -func (m *Response_ListSnapshots) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l +func (m *Response_ListSnapshots) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_ListSnapshots) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) if m.ListSnapshots != nil { - l = m.ListSnapshots.Size() - n += 1 + l + sovTypes(uint64(l)) + { + size, err := m.ListSnapshots.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 } - return n + return len(dAtA) - i, nil } -func (m *Response_OfferSnapshot) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l +func (m *Response_OfferSnapshot) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_OfferSnapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) if m.OfferSnapshot != nil { - l = m.OfferSnapshot.Size() - n += 1 + l + sovTypes(uint64(l)) + { + size, err := m.OfferSnapshot.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a } - return n + return len(dAtA) - i, nil } -func (m *Response_LoadSnapshotChunk) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l +func (m *Response_LoadSnapshotChunk) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_LoadSnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) if m.LoadSnapshotChunk != nil { - l = m.LoadSnapshotChunk.Size() - n += 1 + l + sovTypes(uint64(l)) + { + size, err := m.LoadSnapshotChunk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 } - return n + return len(dAtA) - i, nil } -func (m *Response_ApplySnapshotChunk) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l +func (m *Response_ApplySnapshotChunk) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_ApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := 
len(dAtA) if m.ApplySnapshotChunk != nil { - l = m.ApplySnapshotChunk.Size() - n += 1 + l + sovTypes(uint64(l)) + { + size, err := m.ApplySnapshotChunk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a } - return n + return len(dAtA) - i, nil } -func (m *ResponseException) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Error) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - return n +func (m *Response_PrepareProposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResponseEcho) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Message) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) +func (m *Response_PrepareProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.PrepareProposal != nil { + { + size, err := m.PrepareProposal.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 } - return n + return len(dAtA) - i, nil +} +func (m *Response_ProcessProposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResponseFlush) Size() (n int) { - if m == nil { - return 0 +func (m *Response_ProcessProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ProcessProposal != nil { + { + size, err := m.ProcessProposal.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a } - var l int - _ = l - return n + return len(dAtA) - i, nil +} +func (m *Response_ExtendVote) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResponseInfo) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Data) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = len(m.Version) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) +func (m *Response_ExtendVote) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ExtendVote != nil { + { + size, err := m.ExtendVote.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 } - if m.AppVersion != 0 { - n += 1 + sovTypes(uint64(m.AppVersion)) + return len(dAtA) - i, nil +} +func (m *Response_VerifyVoteExtension) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_VerifyVoteExtension) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.VerifyVoteExtension != nil { + { + size, err := m.VerifyVoteExtension.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a } - if m.LastBlockHeight != 0 { - n += 1 + sovTypes(uint64(m.LastBlockHeight)) + return len(dAtA) - i, nil +} +func (m *Response_FinalizeBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_FinalizeBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.FinalizeBlock != nil { + { + size, err := 
m.FinalizeBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 } - l = len(m.LastBlockAppHash) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + return len(dAtA) - i, nil +} +func (m *ResponseException) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *ResponseInitChain) Size() (n int) { - if m == nil { - return 0 - } +func (m *ResponseException) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseException) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.ConsensusParams != nil { - l = m.ConsensusParams.Size() - n += 1 + l + sovTypes(uint64(l)) - } - l = len(m.AppHash) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = m.ValidatorSetUpdate.Size() - n += 2 + l + sovTypes(uint64(l)) - if m.NextCoreChainLockUpdate != nil { - l = m.NextCoreChainLockUpdate.Size() - n += 2 + l + sovTypes(uint64(l)) - } - if m.InitialCoreHeight != 0 { - n += 2 + sovTypes(uint64(m.InitialCoreHeight)) + if len(m.Error) > 0 { + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *ResponseQuery) Size() (n int) { - if m == nil { - return 0 +func (m *ResponseEcho) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *ResponseEcho) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseEcho) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Code != 0 { - n += 1 + sovTypes(uint64(m.Code)) - } - l = len(m.Log) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = len(m.Info) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - if m.Index != 0 { - n += 1 + sovTypes(uint64(m.Index)) - } - l = len(m.Key) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = len(m.Value) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - if m.ProofOps != nil { - l = m.ProofOps.Size() - n += 1 + l + sovTypes(uint64(l)) - } - if m.Height != 0 { - n += 1 + sovTypes(uint64(m.Height)) - } - l = len(m.Codespace) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + if len(m.Message) > 0 { + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *ResponseBeginBlock) Size() (n int) { - if m == nil { - return 0 +func (m *ResponseFlush) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *ResponseFlush) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseFlush) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Events) > 0 { - for _, e := range m.Events { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) - } - } - return n + return len(dAtA) - i, 
nil } -func (m *ResponseCheckTx) Size() (n int) { - if m == nil { - return 0 +func (m *ResponseInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *ResponseInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Code != 0 { - n += 1 + sovTypes(uint64(m.Code)) + if len(m.LastBlockAppHash) > 0 { + i -= len(m.LastBlockAppHash) + copy(dAtA[i:], m.LastBlockAppHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.LastBlockAppHash))) + i-- + dAtA[i] = 0x2a } - l = len(m.Data) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + if m.LastBlockHeight != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.LastBlockHeight)) + i-- + dAtA[i] = 0x20 } - l = len(m.Log) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + if m.AppVersion != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.AppVersion)) + i-- + dAtA[i] = 0x18 } - l = len(m.Info) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x12 } - if m.GasWanted != 0 { - n += 1 + sovTypes(uint64(m.GasWanted)) + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0xa } - if m.GasUsed != 0 { - n += 1 + sovTypes(uint64(m.GasUsed)) + return len(dAtA) - i, nil +} + +func (m *ResponseInitChain) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if len(m.Events) > 0 { - for _, e := range m.Events { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) - } + return dAtA[:n], nil +} + +func (m *ResponseInitChain) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseInitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.InitialCoreHeight != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.InitialCoreHeight)) + i-- + dAtA[i] = 0x6 + i-- + dAtA[i] = 0xb0 } - l = len(m.Codespace) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + if m.NextCoreChainLockUpdate != nil { + { + size, err := m.NextCoreChainLockUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6 + i-- + dAtA[i] = 0xaa } - l = len(m.Sender) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + { + size, err := m.ValidatorSetUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) } - if m.Priority != 0 { - n += 1 + sovTypes(uint64(m.Priority)) + i-- + dAtA[i] = 0x6 + i-- + dAtA[i] = 0xa2 + if len(m.AppHash) > 0 { + i -= len(m.AppHash) + copy(dAtA[i:], m.AppHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.AppHash))) + i-- + dAtA[i] = 0x1a } - l = len(m.MempoolError) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + if m.ConsensusParams != nil { + { + size, err := m.ConsensusParams.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - 
return n + return len(dAtA) - i, nil } -func (m *ResponseDeliverTx) Size() (n int) { - if m == nil { - return 0 +func (m *ResponseQuery) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *ResponseQuery) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseQuery) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Code != 0 { - n += 1 + sovTypes(uint64(m.Code)) + if len(m.Codespace) > 0 { + i -= len(m.Codespace) + copy(dAtA[i:], m.Codespace) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Codespace))) + i-- + dAtA[i] = 0x52 } - l = len(m.Data) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = len(m.Log) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = len(m.Info) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - if m.GasWanted != 0 { - n += 1 + sovTypes(uint64(m.GasWanted)) - } - if m.GasUsed != 0 { - n += 1 + sovTypes(uint64(m.GasUsed)) + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x48 } - if len(m.Events) > 0 { - for _, e := range m.Events { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) + if m.ProofOps != nil { + { + size, err := m.ProofOps.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x42 } - l = len(m.Codespace) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x3a } - return n -} - -func (m *ResponseEndBlock) Size() (n int) { - if m == nil { - return 0 + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0x32 } - var l int - _ = l - if m.ConsensusParamUpdates != nil { - l = m.ConsensusParamUpdates.Size() - n += 1 + l + sovTypes(uint64(l)) + if m.Index != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x28 } - if len(m.Events) > 0 { - for _, e := range m.Events { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) - } + if len(m.Info) > 0 { + i -= len(m.Info) + copy(dAtA[i:], m.Info) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Info))) + i-- + dAtA[i] = 0x22 } - if m.NextCoreChainLockUpdate != nil { - l = m.NextCoreChainLockUpdate.Size() - n += 2 + l + sovTypes(uint64(l)) + if len(m.Log) > 0 { + i -= len(m.Log) + copy(dAtA[i:], m.Log) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Log))) + i-- + dAtA[i] = 0x1a } - if m.ValidatorSetUpdate != nil { - l = m.ValidatorSetUpdate.Size() - n += 2 + l + sovTypes(uint64(l)) + if m.Code != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Code)) + i-- + dAtA[i] = 0x8 } - return n + return len(dAtA) - i, nil } -func (m *ResponseCommit) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Data) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - if m.RetainHeight != 0 { - n += 1 + sovTypes(uint64(m.RetainHeight)) +func (m *ResponseBeginBlock) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *ResponseListSnapshots) Size() (n int) { - if m == nil { - return 0 - } +func (m 
*ResponseBeginBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseBeginBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Snapshots) > 0 { - for _, e := range m.Snapshots { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } } - return n + return len(dAtA) - i, nil } -func (m *ResponseOfferSnapshot) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Result != 0 { - n += 1 + sovTypes(uint64(m.Result)) +func (m *ResponseCheckTx) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *ResponseLoadSnapshotChunk) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Chunk) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - return n +func (m *ResponseCheckTx) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResponseApplySnapshotChunk) Size() (n int) { - if m == nil { - return 0 - } +func (m *ResponseCheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Result != 0 { - n += 1 + sovTypes(uint64(m.Result)) - } - if len(m.RefetchChunks) > 0 { - l = 0 - for _, e := range m.RefetchChunks { - l += sovTypes(uint64(e)) - } - n += 1 + sovTypes(uint64(l)) + l + if len(m.MempoolError) > 0 { + i -= len(m.MempoolError) + copy(dAtA[i:], m.MempoolError) + i = encodeVarintTypes(dAtA, i, uint64(len(m.MempoolError))) + i-- + dAtA[i] = 0x5a } - if len(m.RejectSenders) > 0 { - for _, s := range m.RejectSenders { - l = len(s) - n += 1 + l + sovTypes(uint64(l)) - } + if m.Priority != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Priority)) + i-- + dAtA[i] = 0x50 } - return n -} - -func (m *LastCommitInfo) Size() (n int) { - if m == nil { - return 0 + if len(m.Sender) > 0 { + i -= len(m.Sender) + copy(dAtA[i:], m.Sender) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Sender))) + i-- + dAtA[i] = 0x4a } - var l int - _ = l - if m.Round != 0 { - n += 1 + sovTypes(uint64(m.Round)) + if len(m.Codespace) > 0 { + i -= len(m.Codespace) + copy(dAtA[i:], m.Codespace) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Codespace))) + i-- + dAtA[i] = 0x42 } - l = len(m.QuorumHash) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } } - l = len(m.BlockSignature) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + if m.GasUsed != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.GasUsed)) + i-- + dAtA[i] = 0x30 } - l = len(m.StateSignature) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + if m.GasWanted != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.GasWanted)) + i-- + dAtA[i] = 0x28 } - return n -} - -func (m *Event) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Type) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } 
- if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) - } - } - return n -} - -func (m *EventAttribute) Size() (n int) { - if m == nil { - return 0 + if len(m.Info) > 0 { + i -= len(m.Info) + copy(dAtA[i:], m.Info) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Info))) + i-- + dAtA[i] = 0x22 } - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + if len(m.Log) > 0 { + i -= len(m.Log) + copy(dAtA[i:], m.Log) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Log))) + i-- + dAtA[i] = 0x1a } - l = len(m.Value) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x12 } - if m.Index { - n += 2 + if m.Code != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Code)) + i-- + dAtA[i] = 0x8 } - return n + return len(dAtA) - i, nil } -func (m *TxResult) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Height != 0 { - n += 1 + sovTypes(uint64(m.Height)) - } - if m.Index != 0 { - n += 1 + sovTypes(uint64(m.Index)) - } - l = len(m.Tx) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) +func (m *ResponseDeliverTx) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - l = m.Result.Size() - n += 1 + l + sovTypes(uint64(l)) - return n + return dAtA[:n], nil } -func (m *Validator) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Power != 0 { - n += 1 + sovTypes(uint64(m.Power)) - } - l = len(m.ProTxHash) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - return n +func (m *ResponseDeliverTx) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ValidatorUpdate) Size() (n int) { - if m == nil { - return 0 - } +func (m *ResponseDeliverTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.PubKey != nil { - l = m.PubKey.Size() - n += 1 + l + sovTypes(uint64(l)) + if len(m.Codespace) > 0 { + i -= len(m.Codespace) + copy(dAtA[i:], m.Codespace) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Codespace))) + i-- + dAtA[i] = 0x42 } - if m.Power != 0 { - n += 1 + sovTypes(uint64(m.Power)) + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } } - l = len(m.ProTxHash) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + if m.GasUsed != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.GasUsed)) + i-- + dAtA[i] = 0x30 } - l = len(m.NodeAddress) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + if m.GasWanted != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.GasWanted)) + i-- + dAtA[i] = 0x28 } - return n -} - -func (m *ValidatorSetUpdate) Size() (n int) { - if m == nil { - return 0 + if len(m.Info) > 0 { + i -= len(m.Info) + copy(dAtA[i:], m.Info) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Info))) + i-- + dAtA[i] = 0x22 } - var l int - _ = l - if len(m.ValidatorUpdates) > 0 { - for _, e := range m.ValidatorUpdates { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) - } + if len(m.Log) > 0 { + i -= len(m.Log) + copy(dAtA[i:], m.Log) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Log))) + i-- + dAtA[i] = 0x1a } - l = 
m.ThresholdPublicKey.Size() - n += 1 + l + sovTypes(uint64(l)) - l = len(m.QuorumHash) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x12 } - return n -} - -func (m *ThresholdPublicKeyUpdate) Size() (n int) { - if m == nil { - return 0 + if m.Code != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Code)) + i-- + dAtA[i] = 0x8 } - var l int - _ = l - l = m.ThresholdPublicKey.Size() - n += 1 + l + sovTypes(uint64(l)) - return n + return len(dAtA) - i, nil } -func (m *QuorumHashUpdate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.QuorumHash) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) +func (m *ResponseEndBlock) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *VoteInfo) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Validator.Size() - n += 1 + l + sovTypes(uint64(l)) - if m.SignedLastBlock { - n += 2 - } - return n +func (m *ResponseEndBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Evidence) Size() (n int) { - if m == nil { - return 0 - } +func (m *ResponseEndBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Type != 0 { - n += 1 + sovTypes(uint64(m.Type)) - } - l = m.Validator.Size() - n += 1 + l + sovTypes(uint64(l)) - if m.Height != 0 { - n += 1 + sovTypes(uint64(m.Height)) + if m.ValidatorSetUpdate != nil { + { + size, err := m.ValidatorSetUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6 + i-- + dAtA[i] = 0xaa } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Time) + if m.NextCoreChainLockUpdate != nil { + { + size, err := m.NextCoreChainLockUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6 + i-- + dAtA[i] = 0xa2 + } + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.ConsensusParamUpdates != nil { + { + size, err := m.ConsensusParamUpdates.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} + +func (m *ResponseCommit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseCommit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseCommit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RetainHeight != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.RetainHeight)) + i-- + dAtA[i] = 0x18 + } + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 
0x12 + } + return len(dAtA) - i, nil +} + +func (m *ResponseListSnapshots) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseListSnapshots) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseListSnapshots) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Snapshots) > 0 { + for iNdEx := len(m.Snapshots) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Snapshots[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ResponseOfferSnapshot) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseOfferSnapshot) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseOfferSnapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Result != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Result)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ResponseLoadSnapshotChunk) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseLoadSnapshotChunk) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseLoadSnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Chunk) > 0 { + i -= len(m.Chunk) + copy(dAtA[i:], m.Chunk) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Chunk))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResponseApplySnapshotChunk) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseApplySnapshotChunk) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.RejectSenders) > 0 { + for iNdEx := len(m.RejectSenders) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RejectSenders[iNdEx]) + copy(dAtA[i:], m.RejectSenders[iNdEx]) + i = encodeVarintTypes(dAtA, i, uint64(len(m.RejectSenders[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.RefetchChunks) > 0 { + dAtA60 := make([]byte, len(m.RefetchChunks)*10) + var j59 int + for _, num := range m.RefetchChunks { + for num >= 1<<7 { + dAtA60[j59] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j59++ + } + dAtA60[j59] = uint8(num) + j59++ + } + i -= j59 + copy(dAtA[i:], dAtA60[:j59]) + i = encodeVarintTypes(dAtA, i, uint64(j59)) + i-- + dAtA[i] = 0x12 + } + if m.Result != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Result)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ResponsePrepareProposal) Marshal() (dAtA []byte, err 
error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponsePrepareProposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponsePrepareProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ConsensusParamUpdates != nil { + { + size, err := m.ConsensusParamUpdates.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.ValidatorUpdates) > 0 { + for iNdEx := len(m.ValidatorUpdates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ValidatorUpdates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.TxResults) > 0 { + for iNdEx := len(m.TxResults) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TxResults[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.AppHash) > 0 { + i -= len(m.AppHash) + copy(dAtA[i:], m.AppHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.AppHash))) + i-- + dAtA[i] = 0x12 + } + if len(m.TxRecords) > 0 { + for iNdEx := len(m.TxRecords) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TxRecords[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ResponseProcessProposal) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseProcessProposal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseProcessProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ConsensusParamUpdates != nil { + { + size, err := m.ConsensusParamUpdates.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.ValidatorUpdates) > 0 { + for iNdEx := len(m.ValidatorUpdates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ValidatorUpdates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.TxResults) > 0 { + for iNdEx := len(m.TxResults) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TxResults[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.AppHash) > 0 { + i -= len(m.AppHash) + copy(dAtA[i:], m.AppHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.AppHash))) + i-- + dAtA[i] = 0x12 + } + if m.Status != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Status)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ResponseExtendVote) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseExtendVote) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseExtendVote) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.VoteExtension) > 0 { + i -= len(m.VoteExtension) + copy(dAtA[i:], m.VoteExtension) + i = encodeVarintTypes(dAtA, i, uint64(len(m.VoteExtension))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResponseVerifyVoteExtension) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseVerifyVoteExtension) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseVerifyVoteExtension) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Status != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Status)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ResponseFinalizeBlock) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseFinalizeBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseFinalizeBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ValidatorSetUpdate != nil { + { + size, err := m.ValidatorSetUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6 + i-- + dAtA[i] = 0xaa + } + if m.NextCoreChainLockUpdate != nil { + { + size, err := m.NextCoreChainLockUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6 + i-- + dAtA[i] = 0xa2 + } + if m.RetainHeight != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.RetainHeight)) + i-- + dAtA[i] = 0x30 + } + if len(m.AppHash) > 0 { + i -= len(m.AppHash) + copy(dAtA[i:], m.AppHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.AppHash))) + i-- + dAtA[i] = 0x2a + } + if m.ConsensusParamUpdates != nil { + { + size, err := m.ConsensusParamUpdates.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.TxResults) > 0 { + for iNdEx := len(m.TxResults) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TxResults[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *CommitInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + 
} + return dAtA[:n], nil +} + +func (m *CommitInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CommitInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.StateSignature) > 0 { + i -= len(m.StateSignature) + copy(dAtA[i:], m.StateSignature) + i = encodeVarintTypes(dAtA, i, uint64(len(m.StateSignature))) + i-- + dAtA[i] = 0x2a + } + if len(m.BlockSignature) > 0 { + i -= len(m.BlockSignature) + copy(dAtA[i:], m.BlockSignature) + i = encodeVarintTypes(dAtA, i, uint64(len(m.BlockSignature))) + i-- + dAtA[i] = 0x22 + } + if len(m.QuorumHash) > 0 { + i -= len(m.QuorumHash) + copy(dAtA[i:], m.QuorumHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.QuorumHash))) + i-- + dAtA[i] = 0x1a + } + if m.Round != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Round)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ExtendedCommitInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExtendedCommitInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExtendedCommitInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Votes) > 0 { + for iNdEx := len(m.Votes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Votes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Round != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Round)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Event) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Event) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Type) > 0 { + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EventAttribute) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventAttribute) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventAttribute) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Index { + i-- + if m.Index { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + 
copy(dAtA[i:], m.Key) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ExecTxResult) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExecTxResult) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExecTxResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Codespace) > 0 { + i -= len(m.Codespace) + copy(dAtA[i:], m.Codespace) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Codespace))) + i-- + dAtA[i] = 0x42 + } + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if m.GasUsed != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.GasUsed)) + i-- + dAtA[i] = 0x30 + } + if m.GasWanted != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.GasWanted)) + i-- + dAtA[i] = 0x28 + } + if len(m.Info) > 0 { + i -= len(m.Info) + copy(dAtA[i:], m.Info) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Info))) + i-- + dAtA[i] = 0x22 + } + if len(m.Log) > 0 { + i -= len(m.Log) + copy(dAtA[i:], m.Log) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Log))) + i-- + dAtA[i] = 0x1a + } + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x12 + } + if m.Code != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Code)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *TxResult) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TxResult) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TxResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if len(m.Tx) > 0 { + i -= len(m.Tx) + copy(dAtA[i:], m.Tx) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) + i-- + dAtA[i] = 0x1a + } + if m.Index != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x10 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *TxRecord) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TxRecord) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TxRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Tx) > 0 { + i -= len(m.Tx) + copy(dAtA[i:], m.Tx) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) + i-- + dAtA[i] = 0x12 + } + if m.Action != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Action)) + i-- + dAtA[i] = 0x8 + } + return 
len(dAtA) - i, nil +} + +func (m *Validator) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Validator) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Validator) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ProTxHash) > 0 { + i -= len(m.ProTxHash) + copy(dAtA[i:], m.ProTxHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ProTxHash))) + i-- + dAtA[i] = 0x22 + } + if m.Power != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Power)) + i-- + dAtA[i] = 0x18 + } + return len(dAtA) - i, nil +} + +func (m *ValidatorUpdate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatorUpdate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatorUpdate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.NodeAddress) > 0 { + i -= len(m.NodeAddress) + copy(dAtA[i:], m.NodeAddress) + i = encodeVarintTypes(dAtA, i, uint64(len(m.NodeAddress))) + i-- + dAtA[i] = 0x22 + } + if len(m.ProTxHash) > 0 { + i -= len(m.ProTxHash) + copy(dAtA[i:], m.ProTxHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ProTxHash))) + i-- + dAtA[i] = 0x1a + } + if m.Power != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Power)) + i-- + dAtA[i] = 0x10 + } + if m.PubKey != nil { + { + size, err := m.PubKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ValidatorSetUpdate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatorSetUpdate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatorSetUpdate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.QuorumHash) > 0 { + i -= len(m.QuorumHash) + copy(dAtA[i:], m.QuorumHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.QuorumHash))) + i-- + dAtA[i] = 0x1a + } + { + size, err := m.ThresholdPublicKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.ValidatorUpdates) > 0 { + for iNdEx := len(m.ValidatorUpdates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ValidatorUpdates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ThresholdPublicKeyUpdate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ThresholdPublicKeyUpdate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*ThresholdPublicKeyUpdate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ThresholdPublicKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QuorumHashUpdate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QuorumHashUpdate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QuorumHashUpdate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.QuorumHash) > 0 { + i -= len(m.QuorumHash) + copy(dAtA[i:], m.QuorumHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.QuorumHash))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VoteInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VoteInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VoteInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SignedLastBlock { + i-- + if m.SignedLastBlock { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + { + size, err := m.Validator.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ExtendedVoteInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExtendedVoteInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExtendedVoteInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.VoteExtension) > 0 { + i -= len(m.VoteExtension) + copy(dAtA[i:], m.VoteExtension) + i = encodeVarintTypes(dAtA, i, uint64(len(m.VoteExtension))) + i-- + dAtA[i] = 0x1a + } + if m.SignedLastBlock { + i-- + if m.SignedLastBlock { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + { + size, err := m.Validator.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Misbehavior) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Misbehavior) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Misbehavior) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.TotalVotingPower != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.TotalVotingPower)) + i-- + dAtA[i] = 0x28 + } + n72, err72 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, 
dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) + if err72 != nil { + return 0, err72 + } + i -= n72 + i = encodeVarintTypes(dAtA, i, uint64(n72)) + i-- + dAtA[i] = 0x22 + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x18 + } + { + size, err := m.Validator.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if m.Type != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Snapshot) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Snapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CoreChainLockedHeight != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.CoreChainLockedHeight)) + i-- + dAtA[i] = 0x6 + i-- + dAtA[i] = 0xa0 + } + if len(m.Metadata) > 0 { + i -= len(m.Metadata) + copy(dAtA[i:], m.Metadata) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Metadata))) + i-- + dAtA[i] = 0x2a + } + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0x22 + } + if m.Chunks != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Chunks)) + i-- + dAtA[i] = 0x18 + } + if m.Format != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Format)) + i-- + dAtA[i] = 0x10 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + offset -= sovTypes(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Request) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != nil { + n += m.Value.Size() + } + return n +} + +func (m *Request_Echo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Echo != nil { + l = m.Echo.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_Flush) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Flush != nil { + l = m.Flush.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_Info) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Info != nil { + l = m.Info.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_InitChain) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.InitChain != nil { + l = m.InitChain.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_Query) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Query != nil { + l = m.Query.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_BeginBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BeginBlock != nil { + l = m.BeginBlock.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_CheckTx) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CheckTx != nil { + l = m.CheckTx.Size() + n += 1 + l + 
sovTypes(uint64(l)) + } + return n +} +func (m *Request_DeliverTx) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DeliverTx != nil { + l = m.DeliverTx.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_EndBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EndBlock != nil { + l = m.EndBlock.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_Commit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Commit != nil { + l = m.Commit.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_ListSnapshots) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ListSnapshots != nil { + l = m.ListSnapshots.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_OfferSnapshot) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OfferSnapshot != nil { + l = m.OfferSnapshot.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_LoadSnapshotChunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LoadSnapshotChunk != nil { + l = m.LoadSnapshotChunk.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_ApplySnapshotChunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ApplySnapshotChunk != nil { + l = m.ApplySnapshotChunk.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_PrepareProposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PrepareProposal != nil { + l = m.PrepareProposal.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_ProcessProposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ProcessProposal != nil { + l = m.ProcessProposal.Size() + n += 2 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_ExtendVote) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ExtendVote != nil { + l = m.ExtendVote.Size() + n += 2 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_VerifyVoteExtension) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.VerifyVoteExtension != nil { + l = m.VerifyVoteExtension.Size() + n += 2 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_FinalizeBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FinalizeBlock != nil { + l = m.FinalizeBlock.Size() + n += 2 + l + sovTypes(uint64(l)) + } + return n +} +func (m *RequestEcho) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Message) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *RequestFlush) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *RequestInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Version) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.BlockVersion != 0 { + n += 1 + sovTypes(uint64(m.BlockVersion)) + } + if m.P2PVersion != 0 { + n += 1 + sovTypes(uint64(m.P2PVersion)) + } + l = len(m.AbciVersion) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *RequestInitChain) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Time) + n += 1 + l + sovTypes(uint64(l)) + l = len(m.ChainId) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if 
m.ConsensusParams != nil { + l = m.ConsensusParams.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.ValidatorSet != nil { + l = m.ValidatorSet.Size() + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.AppStateBytes) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.InitialHeight != 0 { + n += 1 + sovTypes(uint64(m.InitialHeight)) + } + if m.InitialCoreHeight != 0 { + n += 1 + sovTypes(uint64(m.InitialCoreHeight)) + } + return n +} + +func (m *RequestQuery) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Data) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Path) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Prove { + n += 2 + } + return n +} + +func (m *RequestBeginBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = m.Header.Size() + n += 1 + l + sovTypes(uint64(l)) + l = m.LastCommitInfo.Size() + n += 1 + l + sovTypes(uint64(l)) + if len(m.ByzantineValidators) > 0 { + for _, e := range m.ByzantineValidators { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *RequestCheckTx) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Tx) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Type != 0 { + n += 1 + sovTypes(uint64(m.Type)) + } + return n +} + +func (m *RequestDeliverTx) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Tx) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *RequestEndBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + return n +} + +func (m *RequestCommit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *RequestListSnapshots) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *RequestOfferSnapshot) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Snapshot != nil { + l = m.Snapshot.Size() + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.AppHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *RequestLoadSnapshotChunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Format != 0 { + n += 1 + sovTypes(uint64(m.Format)) + } + if m.Chunk != 0 { + n += 1 + sovTypes(uint64(m.Chunk)) + } + return n +} + +func (m *RequestApplySnapshotChunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Index != 0 { + n += 1 + sovTypes(uint64(m.Index)) + } + l = len(m.Chunk) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Sender) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *RequestPrepareProposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxTxBytes != 0 { + n += 1 + sovTypes(uint64(m.MaxTxBytes)) + } + if len(m.Txs) > 0 { + for _, b := range m.Txs { + l = len(b) + n += 1 + l + sovTypes(uint64(l)) + } + } + l = m.LocalLastCommit.Size() + n += 1 + l + sovTypes(uint64(l)) + if len(m.ByzantineValidators) > 0 { + for _, e := range m.ByzantineValidators { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + l = 
github_com_gogo_protobuf_types.SizeOfStdTime(m.Time) + n += 1 + l + sovTypes(uint64(l)) + l = len(m.NextValidatorsHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.ProposerProTxHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *RequestProcessProposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Txs) > 0 { + for _, b := range m.Txs { + l = len(b) + n += 1 + l + sovTypes(uint64(l)) + } + } + l = m.ProposedLastCommit.Size() + n += 1 + l + sovTypes(uint64(l)) + if len(m.ByzantineValidators) > 0 { + for _, e := range m.ByzantineValidators { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Time) + n += 1 + l + sovTypes(uint64(l)) + l = len(m.NextValidatorsHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.ProposerProTxHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *RequestExtendVote) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + return n +} + +func (m *RequestVerifyVoteExtension) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.ValidatorProTxHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + l = len(m.VoteExtension) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *RequestFinalizeBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Txs) > 0 { + for _, b := range m.Txs { + l = len(b) + n += 1 + l + sovTypes(uint64(l)) + } + } + l = m.DecidedLastCommit.Size() + n += 1 + l + sovTypes(uint64(l)) + if len(m.ByzantineValidators) > 0 { + for _, e := range m.ByzantineValidators { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Time) + n += 1 + l + sovTypes(uint64(l)) + l = len(m.NextValidatorsHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.ProposerProTxHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Response) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != nil { + n += m.Value.Size() + } + return n +} + +func (m *Response_Exception) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Exception != nil { + l = m.Exception.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_Echo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Echo != nil { + l = m.Echo.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_Flush) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Flush != nil { + l = m.Flush.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_Info) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Info != nil { + l = m.Info.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_InitChain) Size() (n int) { + 
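A note on the constants in these Size methods: the 1 or 2 in n += 1 + l + sovTypes(uint64(l)) is the varint-encoded width of the field key (field_number << 3) | wire_type. Keys for fields 1-15 fit in one byte, which is why the oneof wrappers up through Request_PrepareProposal (field 15 in the Unmarshal switch further down) add 1 while Request_ProcessProposal (field 16) and later add 2; Snapshot.CoreChainLockedHeight (field 100) likewise needs the two key bytes 0xa0 0x06 that its MarshalToSizedBuffer above emits high byte first, since that routine fills the buffer back to front. A standalone sketch of the arithmetic, assuming nothing beyond the standard library (varintLen and tagLen are names of my own, not part of the generated file):

package main

import (
    "fmt"
    "math/bits"
)

// varintLen mirrors the generated sovTypes helper: bytes needed to
// varint-encode x, at 7 payload bits per byte.
func varintLen(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

// tagLen is the size of the key that precedes every encoded field.
func tagLen(fieldNum, wireType uint64) int { return varintLen(fieldNum<<3 | wireType) }

func main() {
    fmt.Println(tagLen(15, 2))  // 1 -- PrepareProposal fits a one-byte key
    fmt.Println(tagLen(16, 2))  // 2 -- ProcessProposal needs two bytes
    fmt.Println(tagLen(100, 0)) // 2 -- Snapshot.CoreChainLockedHeight

    // Key bytes for field 100, wire type 0: 800 -> 0xa0 0x06; written
    // high byte first by the back-to-front marshaler.
    key := uint64(100 << 3)
    fmt.Printf("%#x %#x\n", byte(key&0x7f|0x80), byte(key>>7)) // 0xa0 0x6
}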
if m == nil { + return 0 + } + var l int + _ = l + if m.InitChain != nil { + l = m.InitChain.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_Query) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Query != nil { + l = m.Query.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_BeginBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.BeginBlock != nil { + l = m.BeginBlock.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_CheckTx) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.CheckTx != nil { + l = m.CheckTx.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_DeliverTx) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DeliverTx != nil { + l = m.DeliverTx.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_EndBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.EndBlock != nil { + l = m.EndBlock.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_Commit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Commit != nil { + l = m.Commit.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_ListSnapshots) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ListSnapshots != nil { + l = m.ListSnapshots.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_OfferSnapshot) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OfferSnapshot != nil { + l = m.OfferSnapshot.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_LoadSnapshotChunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LoadSnapshotChunk != nil { + l = m.LoadSnapshotChunk.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_ApplySnapshotChunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ApplySnapshotChunk != nil { + l = m.ApplySnapshotChunk.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_PrepareProposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PrepareProposal != nil { + l = m.PrepareProposal.Size() + n += 2 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_ProcessProposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ProcessProposal != nil { + l = m.ProcessProposal.Size() + n += 2 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_ExtendVote) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ExtendVote != nil { + l = m.ExtendVote.Size() + n += 2 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_VerifyVoteExtension) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.VerifyVoteExtension != nil { + l = m.VerifyVoteExtension.Size() + n += 2 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_FinalizeBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FinalizeBlock != nil { + l = m.FinalizeBlock.Size() + n += 2 + l + sovTypes(uint64(l)) + } + return n +} +func (m *ResponseException) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Error) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ResponseEcho) Size() (n int) { + if m == nil { + 
return 0 + } + var l int + _ = l + l = len(m.Message) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ResponseFlush) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *ResponseInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Data) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Version) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.AppVersion != 0 { + n += 1 + sovTypes(uint64(m.AppVersion)) + } + if m.LastBlockHeight != 0 { + n += 1 + sovTypes(uint64(m.LastBlockHeight)) + } + l = len(m.LastBlockAppHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ResponseInitChain) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ConsensusParams != nil { + l = m.ConsensusParams.Size() + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.AppHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = m.ValidatorSetUpdate.Size() + n += 2 + l + sovTypes(uint64(l)) + if m.NextCoreChainLockUpdate != nil { + l = m.NextCoreChainLockUpdate.Size() + n += 2 + l + sovTypes(uint64(l)) + } + if m.InitialCoreHeight != 0 { + n += 2 + sovTypes(uint64(m.InitialCoreHeight)) + } + return n +} + +func (m *ResponseQuery) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Code != 0 { + n += 1 + sovTypes(uint64(m.Code)) + } + l = len(m.Log) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Info) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Index != 0 { + n += 1 + sovTypes(uint64(m.Index)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.ProofOps != nil { + l = m.ProofOps.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + l = len(m.Codespace) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ResponseBeginBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Events) > 0 { + for _, e := range m.Events { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *ResponseCheckTx) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Code != 0 { + n += 1 + sovTypes(uint64(m.Code)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Log) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Info) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.GasWanted != 0 { + n += 1 + sovTypes(uint64(m.GasWanted)) + } + if m.GasUsed != 0 { + n += 1 + sovTypes(uint64(m.GasUsed)) + } + if len(m.Events) > 0 { + for _, e := range m.Events { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + l = len(m.Codespace) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Sender) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Priority != 0 { + n += 1 + sovTypes(uint64(m.Priority)) + } + l = len(m.MempoolError) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ResponseDeliverTx) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Code != 0 { + n += 1 + sovTypes(uint64(m.Code)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Log) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Info) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.GasWanted != 0 { + n += 1 
+ sovTypes(uint64(m.GasWanted)) + } + if m.GasUsed != 0 { + n += 1 + sovTypes(uint64(m.GasUsed)) + } + if len(m.Events) > 0 { + for _, e := range m.Events { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + l = len(m.Codespace) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ResponseEndBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ConsensusParamUpdates != nil { + l = m.ConsensusParamUpdates.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Events) > 0 { + for _, e := range m.Events { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.NextCoreChainLockUpdate != nil { + l = m.NextCoreChainLockUpdate.Size() + n += 2 + l + sovTypes(uint64(l)) + } + if m.ValidatorSetUpdate != nil { + l = m.ValidatorSetUpdate.Size() + n += 2 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ResponseCommit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Data) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.RetainHeight != 0 { + n += 1 + sovTypes(uint64(m.RetainHeight)) + } + return n +} + +func (m *ResponseListSnapshots) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Snapshots) > 0 { + for _, e := range m.Snapshots { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *ResponseOfferSnapshot) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != 0 { + n += 1 + sovTypes(uint64(m.Result)) + } + return n +} + +func (m *ResponseLoadSnapshotChunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Chunk) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ResponseApplySnapshotChunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != 0 { + n += 1 + sovTypes(uint64(m.Result)) + } + if len(m.RefetchChunks) > 0 { + l = 0 + for _, e := range m.RefetchChunks { + l += sovTypes(uint64(e)) + } + n += 1 + sovTypes(uint64(l)) + l + } + if len(m.RejectSenders) > 0 { + for _, s := range m.RejectSenders { + l = len(s) + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *ResponsePrepareProposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.TxRecords) > 0 { + for _, e := range m.TxRecords { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + l = len(m.AppHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.TxResults) > 0 { + for _, e := range m.TxResults { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.ValidatorUpdates) > 0 { + for _, e := range m.ValidatorUpdates { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.ConsensusParamUpdates != nil { + l = m.ConsensusParamUpdates.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ResponseProcessProposal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Status != 0 { + n += 1 + sovTypes(uint64(m.Status)) + } + l = len(m.AppHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.TxResults) > 0 { + for _, e := range m.TxResults { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.ValidatorUpdates) > 0 { + for _, e := range m.ValidatorUpdates { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.ConsensusParamUpdates != nil { + l = m.ConsensusParamUpdates.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ResponseExtendVote) Size() (n int) { + if m == 
nil { + return 0 + } + var l int + _ = l + l = len(m.VoteExtension) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ResponseVerifyVoteExtension) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Status != 0 { + n += 1 + sovTypes(uint64(m.Status)) + } + return n +} + +func (m *ResponseFinalizeBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Events) > 0 { + for _, e := range m.Events { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.TxResults) > 0 { + for _, e := range m.TxResults { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.ConsensusParamUpdates != nil { + l = m.ConsensusParamUpdates.Size() + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.AppHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.RetainHeight != 0 { + n += 1 + sovTypes(uint64(m.RetainHeight)) + } + if m.NextCoreChainLockUpdate != nil { + l = m.NextCoreChainLockUpdate.Size() + n += 2 + l + sovTypes(uint64(l)) + } + if m.ValidatorSetUpdate != nil { + l = m.ValidatorSetUpdate.Size() + n += 2 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *CommitInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Round != 0 { + n += 1 + sovTypes(uint64(m.Round)) + } + l = len(m.QuorumHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.BlockSignature) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.StateSignature) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ExtendedCommitInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Round != 0 { + n += 1 + sovTypes(uint64(m.Round)) + } + if len(m.Votes) > 0 { + for _, e := range m.Votes { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *Event) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *EventAttribute) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Index { + n += 2 + } + return n +} + +func (m *ExecTxResult) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Code != 0 { + n += 1 + sovTypes(uint64(m.Code)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Log) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Info) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.GasWanted != 0 { + n += 1 + sovTypes(uint64(m.GasWanted)) + } + if m.GasUsed != 0 { + n += 1 + sovTypes(uint64(m.GasUsed)) + } + if len(m.Events) > 0 { + for _, e := range m.Events { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + l = len(m.Codespace) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *TxResult) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Index != 0 { + n += 1 + sovTypes(uint64(m.Index)) + } + l = len(m.Tx) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = m.Result.Size() + n += 1 + l + sovTypes(uint64(l)) + return n +} + +func (m *TxRecord) Size() (n int) { + if m == nil { + return 0 + } + var l int 
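The sovTypes and sozTypes helpers that follow the remaining Size methods compute those varint widths: sovTypes derives the byte count from the bit length of x|1 (the |1 makes zero still cost one byte), and sozTypes first ZigZag-folds the sign bit so that any sint-encoded field keeps small negative values short. A minimal cross-check against a reference encoder, with sov, soz, and encode as names assumed for this sketch only:

package main

import (
    "fmt"
    "math/bits"
)

// sov mirrors sovTypes: bit length rounded up to whole 7-bit groups.
func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

// soz mirrors sozTypes: ZigZag-map signed values (-1 -> 1, 1 -> 2,
// -2 -> 3, ...) so small magnitudes encode short either sign.
func soz(x int64) int { return sov(uint64(x<<1) ^ uint64(x>>63)) }

// encode is a reference varint encoder used only to cross-check sov.
func encode(x uint64) []byte {
    var out []byte
    for x >= 0x80 {
        out = append(out, byte(x)|0x80)
        x >>= 7
    }
    return append(out, byte(x))
}

func main() {
    for _, v := range []uint64{0, 1, 127, 128, 300, 1 << 62} {
        fmt.Println(v, sov(v), len(encode(v))) // the two sizes agree
    }
    fmt.Println(soz(-1), soz(63), soz(-64), soz(64)) // 1 1 1 2
}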
+ _ = l + if m.Action != 0 { + n += 1 + sovTypes(uint64(m.Action)) + } + l = len(m.Tx) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Validator) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Power != 0 { + n += 1 + sovTypes(uint64(m.Power)) + } + l = len(m.ProTxHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ValidatorUpdate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PubKey != nil { + l = m.PubKey.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Power != 0 { + n += 1 + sovTypes(uint64(m.Power)) + } + l = len(m.ProTxHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.NodeAddress) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ValidatorSetUpdate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ValidatorUpdates) > 0 { + for _, e := range m.ValidatorUpdates { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + l = m.ThresholdPublicKey.Size() + n += 1 + l + sovTypes(uint64(l)) + l = len(m.QuorumHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ThresholdPublicKeyUpdate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ThresholdPublicKey.Size() + n += 1 + l + sovTypes(uint64(l)) + return n +} + +func (m *QuorumHashUpdate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.QuorumHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *VoteInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Validator.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.SignedLastBlock { + n += 2 + } + return n +} + +func (m *ExtendedVoteInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Validator.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.SignedLastBlock { + n += 2 + } + l = len(m.VoteExtension) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Misbehavior) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovTypes(uint64(m.Type)) + } + l = m.Validator.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Time) n += 1 + l + sovTypes(uint64(l)) if m.TotalVotingPower != 0 { n += 1 + sovTypes(uint64(m.TotalVotingPower)) } - return n -} + return n +} + +func (m *Snapshot) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Format != 0 { + n += 1 + sovTypes(uint64(m.Format)) + } + if m.Chunks != 0 { + n += 1 + sovTypes(uint64(m.Chunks)) + } + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Metadata) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.CoreChainLockedHeight != 0 { + n += 2 + sovTypes(uint64(m.CoreChainLockedHeight)) + } + return n +} + +func sovTypes(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTypes(x uint64) (n int) { + return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Request) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Request: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Echo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestEcho{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_Echo{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Flush", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestFlush{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_Flush{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestInfo{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_Info{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InitChain", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestInitChain{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_InitChain{v} + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } 
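Each case in Request.Unmarshal opens with the same inline loop seen above: accumulate 7 payload bits per byte into wire, stop on a byte below 0x80, then split the key into fieldNum (wire >> 3) and wireType (wire & 0x7); wire type 4 (end-group) is rejected up front, and unknown fields fall through to skipTypes so messages from newer senders still parse. A condensed, standalone version of that read-key-then-dispatch shape (readVarint and walk are names I have chosen for the sketch):

package main

import (
    "errors"
    "fmt"
)

// readVarint is the accumulate-7-bits-per-byte loop the generated
// Unmarshal repeats inline for every key and length.
func readVarint(b []byte, i int) (uint64, int, error) {
    var v uint64
    for shift := uint(0); ; shift += 7 {
        if shift >= 64 {
            return 0, 0, errors.New("varint overflow") // cf. ErrIntOverflowTypes
        }
        if i >= len(b) {
            return 0, 0, errors.New("unexpected EOF")
        }
        c := b[i]
        i++
        v |= uint64(c&0x7F) << shift
        if c < 0x80 {
            return v, i, nil
        }
    }
}

// walk lists (fieldNum, wireType) pairs and skips the payloads --
// the shape of the generated switch plus its default arm.
func walk(b []byte) error {
    for i := 0; i < len(b); {
        wire, j, err := readVarint(b, i)
        if err != nil {
            return err
        }
        i = j
        fieldNum, wireType := wire>>3, wire&0x7
        fmt.Println("field", fieldNum, "wire type", wireType)
        switch wireType {
        case 0: // varint payload
            if _, i, err = readVarint(b, i); err != nil {
                return err
            }
        case 2: // length-delimited payload
            n, j, err := readVarint(b, i)
            if err != nil {
                return err
            }
            i = j + int(n)
            if i > len(b) {
                return errors.New("unexpected EOF")
            }
        default:
            return fmt.Errorf("wire type %d not handled here", wireType)
        }
    }
    return nil
}

func main() {
    // field 1: string "hi"; field 3: varint 150
    if err := walk([]byte{0x0a, 0x02, 'h', 'i', 0x18, 0x96, 0x01}); err != nil {
        panic(err)
    }
}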
+ if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestQuery{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_Query{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BeginBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestBeginBlock{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_BeginBlock{v} + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CheckTx", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestCheckTx{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_CheckTx{v} + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeliverTx", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestDeliverTx{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_DeliverTx{v} + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EndBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestEndBlock{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_EndBlock{v} + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + 
if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestCommit{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_Commit{v} + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListSnapshots", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestListSnapshots{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_ListSnapshots{v} + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OfferSnapshot", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestOfferSnapshot{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_OfferSnapshot{v} + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadSnapshotChunk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestLoadSnapshotChunk{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_LoadSnapshotChunk{v} + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ApplySnapshotChunk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestApplySnapshotChunk{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_ApplySnapshotChunk{v} + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrepareProposal", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestPrepareProposal{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_PrepareProposal{v} + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProcessProposal", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestProcessProposal{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_ProcessProposal{v} + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExtendVote", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestExtendVote{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_ExtendVote{v} + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VerifyVoteExtension", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestVerifyVoteExtension{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_VerifyVoteExtension{v} + iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FinalizeBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestFinalizeBlock{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_FinalizeBlock{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + 
if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestEcho) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestEcho: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestEcho: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestFlush) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestFlush: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestFlush: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: RequestInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockVersion", wireType) + } + m.BlockVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BlockVersion |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field P2PVersion", wireType) + } + m.P2PVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.P2PVersion |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AbciVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AbciVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestInitChain) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestInitChain: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestInitChain: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Time, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConsensusParams == nil { + m.ConsensusParams = &types1.ConsensusParams{} + } + if err := m.ConsensusParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorSet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ValidatorSet == nil { + m.ValidatorSet = &ValidatorSetUpdate{} + } + if err := m.ValidatorSet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppStateBytes", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppStateBytes = append(m.AppStateBytes[:0], dAtA[iNdEx:postIndex]...) 
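The AppStateBytes case just above is the canonical length-delimited arm: decode the length varint, guard against a negative length, an overflowed postIndex, and a payload running past the buffer, then copy the bytes out — the generated append(m.AppStateBytes[:0], ...) copies rather than aliasing dAtA, which the caller may reuse once Unmarshal returns. The same shape in isolation, assuming only the standard library (readBytes is a name of my own):

package main

import (
    "errors"
    "fmt"
)

// readBytes mirrors one length-delimited arm of the generated code.
func readBytes(dAtA []byte, iNdEx int) ([]byte, int, error) {
    var byteLen int
    for shift := uint(0); ; shift += 7 {
        if shift >= 64 {
            return nil, 0, errors.New("varint overflow")
        }
        if iNdEx >= len(dAtA) {
            return nil, 0, errors.New("unexpected EOF")
        }
        b := dAtA[iNdEx]
        iNdEx++
        byteLen |= int(b&0x7F) << shift
        if b < 0x80 {
            break
        }
    }
    postIndex := iNdEx + byteLen
    // The checks below match the generated ones: a negative length,
    // an int overflow in iNdEx+byteLen, or a length running past the
    // buffer all reject the input.
    if byteLen < 0 || postIndex < 0 {
        return nil, 0, errors.New("invalid length")
    }
    if postIndex > len(dAtA) {
        return nil, 0, errors.New("unexpected EOF")
    }
    // Copy the payload so the result never aliases dAtA.
    out := append([]byte(nil), dAtA[iNdEx:postIndex]...)
    return out, postIndex, nil
}

func main() {
    v, next, err := readBytes([]byte{0x03, 'a', 'b', 'c'}, 0)
    fmt.Println(string(v), next, err) // abc 4 <nil>
}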
+ if m.AppStateBytes == nil { + m.AppStateBytes = []byte{} + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InitialHeight", wireType) + } + m.InitialHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.InitialHeight |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InitialCoreHeight", wireType) + } + m.InitialCoreHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.InitialCoreHeight |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestQuery) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestQuery: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestQuery: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
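The nil check here, and the matching one on m.Data just below, pins a decoded-but-empty bytes field to []byte{}: appending zero bytes into a nil slice leaves it nil, so without the check a field that appeared on the wire with empty contents would be indistinguishable from one that was absent. A short demonstration of why the normalization is needed:

package main

import "fmt"

func main() {
    // Appending no bytes into a nil slice leaves it nil...
    var dst []byte
    dst = append(dst[:0], []byte{}...)
    fmt.Println(dst == nil) // true
    // ...so the generated code pins it to a non-nil empty slice.
    if dst == nil {
        dst = []byte{}
    }
    fmt.Println(dst == nil, len(dst)) // false 0
}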
+ if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Prove", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Prove = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestBeginBlock: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestBeginBlock: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
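For repeated message fields such as ByzantineValidators below, the generated code grows the slice by one zero value and unmarshals straight into that last element, rather than decoding into a temporary that would be copied on append. A toy version of the pattern (Item and appendDecoded are stand-ins I have invented, with Item playing the role of a type like Misbehavior):

package main

import "fmt"

// Item stands in for a generated message type; its Unmarshal here
// just records the payload length.
type Item struct{ n int }

func (it *Item) Unmarshal(b []byte) error { it.n = len(b); return nil }

// appendDecoded shows the repeated-field idiom: extend the slice by
// a zero value, then decode in place into the new element.
func appendDecoded(items []Item, payload []byte) ([]Item, error) {
    items = append(items, Item{})
    if err := items[len(items)-1].Unmarshal(payload); err != nil {
        return nil, err
    }
    return items, nil
}

func main() {
    items, err := appendDecoded(nil, []byte{1, 2, 3})
    fmt.Println(items, err) // [{3}] <nil>
}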
+ if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastCommitInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastCommitInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ByzantineValidators", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ByzantineValidators = append(m.ByzantineValidators, Misbehavior{}) + if err := m.ByzantineValidators[len(m.ByzantineValidators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestCheckTx: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestCheckTx: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tx", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b 
< 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tx = append(m.Tx[:0], dAtA[iNdEx:postIndex]...) + if m.Tx == nil { + m.Tx = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= CheckTxType(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestDeliverTx) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestDeliverTx: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestDeliverTx: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tx", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tx = append(m.Tx[:0], dAtA[iNdEx:postIndex]...) 
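For every length-delimited field the decoder computes `postIndex := iNdEx + byteLen` and applies three guards: `byteLen < 0` (the length varint overflowed `int`), `postIndex < 0` (the offset arithmetic overflowed), and `postIndex > l` (truncated input). The same checks condensed into a hypothetical helper built on `decodeVarint` from the earlier sketch:

    // readBytes returns the payload of one length-delimited field starting at off,
    // plus the offset just past it.
    func readBytes(buf []byte, off int) (payload []byte, post int, err error) {
        ln, n, err := decodeVarint(buf[off:])
        if err != nil {
            return nil, 0, err
        }
        byteLen := int(ln)
        post = off + n + byteLen
        if byteLen < 0 || post < 0 { // length or offset overflowed int
            return nil, 0, errors.New("invalid length")
        }
        if post > len(buf) { // truncated input
            return nil, 0, io.ErrUnexpectedEOF
        }
        return buf[off+n : post], post, nil
    }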
+ if m.Tx == nil { + m.Tx = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestEndBlock) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestEndBlock: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestEndBlock: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestCommit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestCommit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestCommit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestListSnapshots) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestListSnapshots: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestListSnapshots: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := 
skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestOfferSnapshot) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestOfferSnapshot: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestOfferSnapshot: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Snapshot", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Snapshot == nil { + m.Snapshot = &Snapshot{} + } + if err := m.Snapshot.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppHash = append(m.AppHash[:0], dAtA[iNdEx:postIndex]...) 
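The bytes fields here are copied with `append(m.AppHash[:0], ...)`, which reuses any existing backing array, and the `if m.AppHash == nil` check that follows exists because appending zero bytes to a nil slice leaves it nil: a field that was present on the wire but empty should decode to an empty, non-nil slice. The distinction in isolation:

    var dst []byte
    src := []byte{}               // field present on the wire, zero-length payload
    dst = append(dst[:0], src...) // dst is still nil: nothing was allocated
    if dst == nil {
        dst = []byte{}            // normalize to present-and-empty
    }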
+ if m.AppHash == nil { + m.AppHash = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestLoadSnapshotChunk) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestLoadSnapshotChunk: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestLoadSnapshotChunk: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) + } + m.Format = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Format |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Chunk", wireType) + } + m.Chunk = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Chunk |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestApplySnapshotChunk) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestApplySnapshotChunk: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestApplySnapshotChunk: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + m.Index |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Chunk", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Chunk = append(m.Chunk[:0], dAtA[iNdEx:postIndex]...) + if m.Chunk == nil { + m.Chunk = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sender", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sender = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestPrepareProposal) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestPrepareProposal: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestPrepareProposal: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxTxBytes", wireType) + } + m.MaxTxBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxTxBytes |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Txs", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Txs = append(m.Txs, make([]byte, postIndex-iNdEx)) + copy(m.Txs[len(m.Txs)-1], dAtA[iNdEx:postIndex]) + iNdEx = 
postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocalLastCommit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LocalLastCommit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ByzantineValidators", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ByzantineValidators = append(m.ByzantineValidators, Misbehavior{}) + if err := m.ByzantineValidators[len(m.ByzantineValidators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Time, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NextValidatorsHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NextValidatorsHash = append(m.NextValidatorsHash[:0], dAtA[iNdEx:postIndex]...) 
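Each field switch in these functions ends in a `default` branch that delegates to `skipTypes`, so tags the decoder does not recognize are skipped rather than rejected; this is what lets older binaries read messages that later gained new fields. A simplified sketch of skipping by wire type, reusing the helpers from the earlier sketches (the generated `skipTypes` additionally handles and rejects the legacy group wire types):

    // skipField advances past one unknown field and returns the new offset.
    func skipField(buf []byte, off int, wireType int) (int, error) {
        switch wireType {
        case 0: // varint: consume until a byte with the high bit clear
            _, n, err := decodeVarint(buf[off:])
            return off + n, err
        case 1: // fixed64
            if off+8 > len(buf) {
                return 0, io.ErrUnexpectedEOF
            }
            return off + 8, nil
        case 2: // length-delimited: length varint, then that many bytes
            _, post, err := readBytes(buf, off)
            return post, err
        case 5: // fixed32
            if off+4 > len(buf) {
                return 0, io.ErrUnexpectedEOF
            }
            return off + 4, nil
        default:
            return 0, fmt.Errorf("unsupported wire type %d", wireType)
        }
    }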
+ if m.NextValidatorsHash == nil { + m.NextValidatorsHash = []byte{} + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProposerProTxHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProposerProTxHash = append(m.ProposerProTxHash[:0], dAtA[iNdEx:postIndex]...) + if m.ProposerProTxHash == nil { + m.ProposerProTxHash = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } -func (m *Snapshot) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Height != 0 { - n += 1 + sovTypes(uint64(m.Height)) - } - if m.Format != 0 { - n += 1 + sovTypes(uint64(m.Format)) - } - if m.Chunks != 0 { - n += 1 + sovTypes(uint64(m.Chunks)) - } - l = len(m.Hash) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = len(m.Metadata) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - if m.CoreChainLockedHeight != 0 { - n += 2 + sovTypes(uint64(m.CoreChainLockedHeight)) + if iNdEx > l { + return io.ErrUnexpectedEOF } - return n -} - -func sovTypes(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozTypes(x uint64) (n int) { - return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) + return nil } -func (m *Request) Unmarshal(dAtA []byte) error { +func (m *RequestProcessProposal) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7707,17 +13320,17 @@ func (m *Request) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Request: wiretype end group for non-group") + return fmt.Errorf("proto: RequestProcessProposal: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RequestProcessProposal: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Echo", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Txs", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -7727,30 +13340,27 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestEcho{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Value = &Request_Echo{v} + m.Txs = append(m.Txs, make([]byte, postIndex-iNdEx)) + copy(m.Txs[len(m.Txs)-1], dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 
2 { - return fmt.Errorf("proto: wrong wireType = %d for field Flush", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ProposedLastCommit", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -7777,15 +13387,13 @@ func (m *Request) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestFlush{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ProposedLastCommit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Request_Flush{v} iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ByzantineValidators", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -7812,17 +13420,16 @@ func (m *Request) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestInfo{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.ByzantineValidators = append(m.ByzantineValidators, Misbehavior{}) + if err := m.ByzantineValidators[len(m.ByzantineValidators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Request_Info{v} iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InitChain", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -7832,32 +13439,31 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestInitChain{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Hash == nil { + m.Hash = []byte{} } - m.Value = &Request_InitChain{v} iNdEx = postIndex case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) } - var msglen int + m.Height = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -7867,30 +13473,14 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.Height |= int64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &RequestQuery{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Value = &Request_Query{v} - iNdEx = postIndex case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BeginBlock", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -7917,17 +13507,15 @@ func (m *Request) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestBeginBlock{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Time, dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Request_BeginBlock{v} iNdEx = postIndex case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CheckTx", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NextValidatorsHash", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -7937,32 +13525,31 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestCheckTx{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.NextValidatorsHash = append(m.NextValidatorsHash[:0], dAtA[iNdEx:postIndex]...) + if m.NextValidatorsHash == nil { + m.NextValidatorsHash = []byte{} } - m.Value = &Request_CheckTx{v} iNdEx = postIndex case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeliverTx", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ProposerProTxHash", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -7972,32 +13559,81 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestDeliverTx{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.ProposerProTxHash = append(m.ProposerProTxHash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.ProposerProTxHash == nil { + m.ProposerProTxHash = []byte{} } - m.Value = &Request_DeliverTx{v} iNdEx = postIndex - case 9: + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestExtendVote) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestExtendVote: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestExtendVote: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EndBlock", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -8007,32 +13643,31 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestEndBlock{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Hash == nil { + m.Hash = []byte{} } - m.Value = &Request_EndBlock{v} iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) } - var msglen int + m.Height = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -8042,32 +13677,66 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.Height |= int64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthTypes + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + msglen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } - if postIndex > l { + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestVerifyVoteExtension) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - v := &RequestCommit{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - m.Value = &Request_Commit{v} - iNdEx = postIndex - case 11: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestVerifyVoteExtension: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestVerifyVoteExtension: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListSnapshots", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -8077,32 +13746,31 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestListSnapshots{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Hash == nil { + m.Hash = []byte{} } - m.Value = &Request_ListSnapshots{v} iNdEx = postIndex - case 12: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OfferSnapshot", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorProTxHash", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -8112,32 +13780,31 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestOfferSnapshot{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.ValidatorProTxHash = append(m.ValidatorProTxHash[:0], dAtA[iNdEx:postIndex]...) + if m.ValidatorProTxHash == nil { + m.ValidatorProTxHash = []byte{} } - m.Value = &Request_OfferSnapshot{v} iNdEx = postIndex - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LoadSnapshotChunk", wireType) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) } - var msglen int + m.Height = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -8147,32 +13814,16 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.Height |= int64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &RequestLoadSnapshotChunk{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Value = &Request_LoadSnapshotChunk{v} - iNdEx = postIndex - case 14: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ApplySnapshotChunk", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VoteExtension", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -8182,26 +13833,25 @@ func (m *Request) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - v := &RequestApplySnapshotChunk{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.VoteExtension = append(m.VoteExtension[:0], dAtA[iNdEx:postIndex]...) 
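Every `Unmarshal` in this file follows the same shape: read a key varint, split it, reject wire type 4 (end-group) and non-positive field numbers, dispatch on the field number, and skip anything unknown. Condensed into a sketch using the helpers above (hypothetical; the per-field cases are elided):

    func unmarshalSketch(buf []byte) error {
        for i := 0; i < len(buf); {
            key, n, err := decodeVarint(buf[i:]) // field key
            if err != nil {
                return err
            }
            i += n
            fieldNum, wireType := splitKey(key)
            if wireType == 4 || fieldNum <= 0 {
                return fmt.Errorf("bad key: field %d, wire type %d", fieldNum, wireType)
            }
            switch fieldNum {
            // case 1, 2, ...: decode each known field according to its wire type
            default: // unknown field: skip for forward compatibility
                if i, err = skipField(buf, i, wireType); err != nil {
                    return err
                }
            }
        }
        return nil
    }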
+ if m.VoteExtension == nil { + m.VoteExtension = []byte{} } - m.Value = &Request_ApplySnapshotChunk{v} iNdEx = postIndex default: iNdEx = preIndex @@ -8224,7 +13874,7 @@ func (m *Request) Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestEcho) Unmarshal(dAtA []byte) error { +func (m *RequestFinalizeBlock) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8247,17 +13897,17 @@ func (m *RequestEcho) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestEcho: wiretype end group for non-group") + return fmt.Errorf("proto: RequestFinalizeBlock: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestEcho: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RequestFinalizeBlock: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Txs", wireType) } - var stringLen uint64 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -8267,129 +13917,96 @@ func (m *RequestEcho) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if byteLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(dAtA[iNdEx:postIndex]) + m.Txs = append(m.Txs, make([]byte, postIndex-iNdEx)) + copy(m.Txs[len(m.Txs)-1], dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DecidedLastCommit", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + if msglen < 0 { + return ErrInvalidLengthTypes } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RequestFlush) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RequestFlush: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RequestFlush: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { + if err := 
m.DecidedLastCommit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ByzantineValidators", wireType) } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RequestInfo) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes + if msglen < 0 { + return ErrInvalidLengthTypes } - if iNdEx >= l { + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.ByzantineValidators = append(m.ByzantineValidators, Misbehavior{}) + if err := m.ByzantineValidators[len(m.ByzantineValidators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RequestInfo: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RequestInfo: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) } - var stringLen uint64 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -8399,29 +14016,31 @@ func (m *RequestInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if byteLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.Version = string(dAtA[iNdEx:postIndex]) + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Hash == nil { + m.Hash = []byte{} + } iNdEx = postIndex - case 2: + case 5: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field BlockVersion", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) } - m.BlockVersion = 0 + m.Height = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -8431,16 +14050,16 @@ func (m *RequestInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.BlockVersion |= uint64(b&0x7F) << shift + m.Height |= int64(b&0x7F) << shift if b < 0x80 { break } } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field P2PVersion", wireType) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) } - m.P2PVersion = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -8450,16 +14069,30 @@ func (m *RequestInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.P2PVersion |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 4: + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Time, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AbciVersion", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NextValidatorsHash", wireType) } - var stringLen uint64 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -8469,23 +14102,59 @@ func (m *RequestInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if byteLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.AbciVersion = string(dAtA[iNdEx:postIndex]) + m.NextValidatorsHash = append(m.NextValidatorsHash[:0], dAtA[iNdEx:postIndex]...) + if m.NextValidatorsHash == nil { + m.NextValidatorsHash = []byte{} + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProposerProTxHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProposerProTxHash = append(m.ProposerProTxHash[:0], dAtA[iNdEx:postIndex]...) 
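The `Time` fields in these messages are decoded with `github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Time, ...)`: with gogoproto's stdtime option the Go field is a native `time.Time`, and the wire payload is an embedded `google.protobuf.Timestamp` sub-message. An illustrative wrapper, assuming the usual alias `types "github.com/gogo/protobuf/types"` and a `"time"` import:

    // decodeStdTime parses the sub-message body isolated by the surrounding
    // bounds checks, exactly as the generated code does.
    func decodeStdTime(payload []byte) (time.Time, error) {
        var t time.Time
        err := types.StdTimeUnmarshal(&t, payload) // parses a google.protobuf.Timestamp
        return t, err
    }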
+ if m.ProposerProTxHash == nil { + m.ProposerProTxHash = []byte{} + } iNdEx = postIndex default: iNdEx = preIndex @@ -8508,7 +14177,7 @@ func (m *RequestInfo) Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestInitChain) Unmarshal(dAtA []byte) error { +func (m *Response) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8531,15 +14200,15 @@ func (m *RequestInitChain) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestInitChain: wiretype end group for non-group") + return fmt.Errorf("proto: Response: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestInitChain: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Response: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Exception", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -8566,15 +14235,17 @@ func (m *RequestInitChain) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Time, dAtA[iNdEx:postIndex]); err != nil { + v := &ResponseException{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } + m.Value = &Response_Exception{v} iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Echo", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -8584,27 +14255,30 @@ func (m *RequestInitChain) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.ChainId = string(dAtA[iNdEx:postIndex]) + v := &ResponseEcho{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_Echo{v} iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParams", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Flush", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -8631,16 +14305,15 @@ func (m *RequestInitChain) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ConsensusParams == nil { - m.ConsensusParams = &types1.ConsensusParams{} - } - if err := m.ConsensusParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + v := &ResponseFlush{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } + m.Value = &Response_Flush{v} iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValidatorSet", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -8667,18 +14340,17 @@ func (m *RequestInitChain) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if 
m.ValidatorSet == nil { - m.ValidatorSet = &ValidatorSetUpdate{} - } - if err := m.ValidatorSet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + v := &ResponseInfo{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } + m.Value = &Response_Info{v} iNdEx = postIndex case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AppStateBytes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field InitChain", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -8688,31 +14360,32 @@ func (m *RequestInitChain) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.AppStateBytes = append(m.AppStateBytes[:0], dAtA[iNdEx:postIndex]...) - if m.AppStateBytes == nil { - m.AppStateBytes = []byte{} + v := &ResponseInitChain{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } + m.Value = &Response_InitChain{v} iNdEx = postIndex case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field InitialHeight", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) } - m.InitialHeight = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -8722,16 +14395,32 @@ func (m *RequestInitChain) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.InitialHeight |= int64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ResponseQuery{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_Query{v} + iNdEx = postIndex case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field InitialCoreHeight", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BeginBlock", wireType) } - m.InitialCoreHeight = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -8741,66 +14430,67 @@ func (m *RequestInitChain) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.InitialCoreHeight |= uint32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err + if msglen < 0 { + return ErrInvalidLengthTypes } - if (skippy < 0) || (iNdEx+skippy) < 0 { + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) > l { + if postIndex > l { return io.ErrUnexpectedEOF } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RequestQuery) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes + v := &ResponseBeginBlock{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - if 
iNdEx >= l { - return io.ErrUnexpectedEOF + m.Value = &Response_BeginBlock{v} + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CheckTx", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RequestQuery: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RequestQuery: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + v := &ResponseCheckTx{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_CheckTx{v} + iNdEx = postIndex + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DeliverTx", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -8810,31 +14500,32 @@ func (m *RequestQuery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
- if m.Data == nil { - m.Data = []byte{} + v := &ResponseDeliverTx{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } + m.Value = &Response_DeliverTx{v} iNdEx = postIndex - case 2: + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EndBlock", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -8844,29 +14535,32 @@ func (m *RequestQuery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + v := &ResponseEndBlock{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_EndBlock{v} iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) } - m.Height = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -8876,16 +14570,32 @@ func (m *RequestQuery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Height |= int64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Prove", wireType) + if msglen < 0 { + return ErrInvalidLengthTypes } - var v int + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ResponseCommit{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_Commit{v} + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListSnapshots", wireType) + } + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -8895,67 +14605,32 @@ func (m *RequestQuery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.Prove = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + v := &ResponseListSnapshots{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - 
fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RequestBeginBlock: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RequestBeginBlock: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Value = &Response_ListSnapshots{v} + iNdEx = postIndex + case 13: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field OfferSnapshot", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -8965,29 +14640,30 @@ func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) - if m.Hash == nil { - m.Hash = []byte{} + v := &ResponseOfferSnapshot{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } + m.Value = &Response_OfferSnapshot{v} iNdEx = postIndex - case 2: + case 14: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LoadSnapshotChunk", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9014,13 +14690,15 @@ func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + v := &ResponseLoadSnapshotChunk{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } + m.Value = &Response_LoadSnapshotChunk{v} iNdEx = postIndex - case 3: + case 15: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastCommitInfo", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ApplySnapshotChunk", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9047,13 +14725,15 @@ func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LastCommitInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + v := &ResponseApplySnapshotChunk{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } + m.Value = &Response_ApplySnapshotChunk{v} iNdEx = postIndex - case 4: + case 16: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ByzantineValidators", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PrepareProposal", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9080,66 +14760,52 @@ func (m *RequestBeginBlock) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ByzantineValidators = append(m.ByzantineValidators, Evidence{}) - if err := m.ByzantineValidators[len(m.ByzantineValidators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + v := &ResponsePrepareProposal{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } + m.Value = &Response_PrepareProposal{v} iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err + case 17: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field ProcessProposal", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + if msglen < 0 { + return ErrInvalidLengthTypes } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + v := &ResponseProcessProposal{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RequestCheckTx: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RequestCheckTx: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Value = &Response_ProcessProposal{v} + iNdEx = postIndex + case 18: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tx", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ExtendVote", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -9149,31 +14815,32 @@ func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.Tx = append(m.Tx[:0], dAtA[iNdEx:postIndex]...) 
- if m.Tx == nil { - m.Tx = []byte{} + v := &ResponseExtendVote{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } + m.Value = &Response_ExtendVote{v} iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VerifyVoteExtension", wireType) } - m.Type = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -9183,66 +14850,32 @@ func (m *RequestCheckTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Type |= CheckTxType(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RequestDeliverTx) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + v := &ResponseVerifyVoteExtension{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RequestDeliverTx: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RequestDeliverTx: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Value = &Response_VerifyVoteExtension{v} + iNdEx = postIndex + case 20: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tx", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FinalizeBlock", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -9252,25 +14885,26 @@ func (m *RequestDeliverTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.Tx = append(m.Tx[:0], dAtA[iNdEx:postIndex]...) 
- if m.Tx == nil { - m.Tx = []byte{} + v := &ResponseFinalizeBlock{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } + m.Value = &Response_FinalizeBlock{v} iNdEx = postIndex default: iNdEx = preIndex @@ -9293,7 +14927,7 @@ func (m *RequestDeliverTx) Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestEndBlock) Unmarshal(dAtA []byte) error { +func (m *ResponseException) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9316,17 +14950,17 @@ func (m *RequestEndBlock) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestEndBlock: wiretype end group for non-group") + return fmt.Errorf("proto: ResponseException: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestEndBlock: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResponseException: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) } - m.Height = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -9336,111 +14970,24 @@ func (m *RequestEndBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Height |= int64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RequestCommit) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RequestCommit: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RequestCommit: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { + postIndex := iNdEx + intStringLen + if postIndex < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RequestListSnapshots) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: 
RequestListSnapshots: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RequestListSnapshots: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -9462,76 +15009,40 @@ func (m *RequestListSnapshots) Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestOfferSnapshot) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RequestOfferSnapshot: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RequestOfferSnapshot: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Snapshot", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes +func (m *ResponseEcho) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes } - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - if m.Snapshot == nil { - m.Snapshot = &Snapshot{} - } - if err := m.Snapshot.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - iNdEx = postIndex - case 2: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseEcho: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseEcho: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AppHash", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } - var byteLen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -9541,25 +15052,23 @@ func (m *RequestOfferSnapshot) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + byteLen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.AppHash = append(m.AppHash[:0], dAtA[iNdEx:postIndex]...) 
- if m.AppHash == nil { - m.AppHash = []byte{} - } + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -9582,7 +15091,7 @@ func (m *RequestOfferSnapshot) Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestLoadSnapshotChunk) Unmarshal(dAtA []byte) error { +func (m *ResponseFlush) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9605,69 +15114,12 @@ func (m *RequestLoadSnapshotChunk) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestLoadSnapshotChunk: wiretype end group for non-group") + return fmt.Errorf("proto: ResponseFlush: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestLoadSnapshotChunk: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResponseFlush: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) - } - m.Height = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Height |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) - } - m.Format = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Format |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Chunk", wireType) - } - m.Chunk = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Chunk |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -9689,7 +15141,7 @@ func (m *RequestLoadSnapshotChunk) Unmarshal(dAtA []byte) error { } return nil } -func (m *RequestApplySnapshotChunk) Unmarshal(dAtA []byte) error { +func (m *ResponseInfo) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9712,17 +15164,17 @@ func (m *RequestApplySnapshotChunk) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RequestApplySnapshotChunk: wiretype end group for non-group") + return fmt.Errorf("proto: ResponseInfo: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RequestApplySnapshotChunk: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResponseInfo: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) } - m.Index = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -9732,16 +15184,29 @@ func (m *RequestApplySnapshotChunk) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Index |= uint32(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Chunk", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) } - var byteLen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -9751,31 +15216,67 @@ func (m *RequestApplySnapshotChunk) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + byteLen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.Chunk = append(m.Chunk[:0], dAtA[iNdEx:postIndex]...) - if m.Chunk == nil { - m.Chunk = []byte{} - } + m.Version = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AppVersion", wireType) + } + m.AppVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AppVersion |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LastBlockHeight", wireType) + } + m.LastBlockHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LastBlockHeight |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Sender", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastBlockAppHash", wireType) } - var stringLen uint64 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -9785,23 +15286,25 @@ func (m *RequestApplySnapshotChunk) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if byteLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.Sender = string(dAtA[iNdEx:postIndex]) + m.LastBlockAppHash = append(m.LastBlockAppHash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.LastBlockAppHash == nil { + m.LastBlockAppHash = []byte{} + } iNdEx = postIndex default: iNdEx = preIndex @@ -9824,7 +15327,7 @@ func (m *RequestApplySnapshotChunk) Unmarshal(dAtA []byte) error { } return nil } -func (m *Response) Unmarshal(dAtA []byte) error { +func (m *ResponseInitChain) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9837,60 +15340,25 @@ func (m *Response) Unmarshal(dAtA []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Response: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Response: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Exception", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &ResponseException{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - m.Value = &Response_Exception{v} - iNdEx = postIndex - case 2: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseInitChain: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseInitChain: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Echo", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParams", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9917,17 +15385,18 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseEcho{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.ConsensusParams == nil { + m.ConsensusParams = &types1.ConsensusParams{} + } + if err := m.ConsensusParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_Echo{v} iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Flush", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AppHash", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -9937,30 +15406,29 @@ func (m *Response) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseFlush{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.AppHash = append(m.AppHash[:0], 
dAtA[iNdEx:postIndex]...) + if m.AppHash == nil { + m.AppHash = []byte{} } - m.Value = &Response_Flush{v} iNdEx = postIndex - case 4: + case 100: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorSetUpdate", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9987,15 +15455,13 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseInfo{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ValidatorSetUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_Info{v} iNdEx = postIndex - case 5: + case 101: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InitChain", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NextCoreChainLockUpdate", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10022,17 +15488,18 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseInitChain{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.NextCoreChainLockUpdate == nil { + m.NextCoreChainLockUpdate = &types1.CoreChainLock{} + } + if err := m.NextCoreChainLockUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_InitChain{v} iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Query", wireType) + case 102: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InitialCoreHeight", wireType) } - var msglen int + m.InitialCoreHeight = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -10042,32 +15509,66 @@ func (m *Response) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.InitialCoreHeight |= uint32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthTypes + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + msglen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - v := &ResponseQuery{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseQuery) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes } - m.Value = &Response_Query{v} - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BeginBlock", wireType) + if iNdEx >= l { + return io.ErrUnexpectedEOF } - var msglen int + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseQuery: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseQuery: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) + } + m.Code = 0 for 
shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -10077,32 +15578,16 @@ func (m *Response) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.Code |= uint32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &ResponseBeginBlock{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Value = &Response_BeginBlock{v} - iNdEx = postIndex - case 8: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CheckTx", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Log", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -10112,32 +15597,29 @@ func (m *Response) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseCheckTx{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Value = &Response_CheckTx{v} + m.Log = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 9: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeliverTx", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -10147,32 +15629,29 @@ func (m *Response) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseDeliverTx{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Value = &Response_DeliverTx{v} + m.Info = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EndBlock", wireType) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) } - var msglen int + m.Index = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -10182,32 +15661,16 @@ func (m *Response) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.Index |= int64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &ResponseEndBlock{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Value = &Response_EndBlock{v} - iNdEx = postIndex - case 11: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong 
wireType = %d for field Commit", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -10217,32 +15680,31 @@ func (m *Response) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseCommit{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} } - m.Value = &Response_Commit{v} iNdEx = postIndex - case 12: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListSnapshots", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -10252,30 +15714,29 @@ func (m *Response) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseListSnapshots{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Value == nil { + m.Value = []byte{} } - m.Value = &Response_ListSnapshots{v} iNdEx = postIndex - case 13: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OfferSnapshot", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ProofOps", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10302,17 +15763,18 @@ func (m *Response) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseOfferSnapshot{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.ProofOps == nil { + m.ProofOps = &crypto.ProofOps{} + } + if err := m.ProofOps.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Value = &Response_OfferSnapshot{v} iNdEx = postIndex - case 14: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LoadSnapshotChunk", wireType) + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) } - var msglen int + m.Height = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -10322,32 +15784,16 @@ func (m *Response) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.Height |= int64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &ResponseLoadSnapshotChunk{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Value = &Response_LoadSnapshotChunk{v} - iNdEx = postIndex - case 15: + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ApplySnapshotChunk", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Codespace", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -10357,26 +15803,23 @@ func (m *Response) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - v := &ResponseApplySnapshotChunk{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Value = &Response_ApplySnapshotChunk{v} + m.Codespace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -10399,7 +15842,7 @@ func (m *Response) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResponseException) Unmarshal(dAtA []byte) error { +func (m *ResponseBeginBlock) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10422,17 +15865,17 @@ func (m *ResponseException) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResponseException: wiretype end group for non-group") + return fmt.Errorf("proto: ResponseBeginBlock: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseException: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResponseBeginBlock: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 
1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -10442,23 +15885,25 @@ func (m *ResponseException) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.Error = string(dAtA[iNdEx:postIndex]) + m.Events = append(m.Events, Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -10481,7 +15926,7 @@ func (m *ResponseException) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResponseEcho) Unmarshal(dAtA []byte) error { +func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10504,17 +15949,36 @@ func (m *ResponseEcho) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResponseEcho: wiretype end group for non-group") + return fmt.Errorf("proto: ResponseCheckTx: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseEcho: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResponseCheckTx: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) + } + m.Code = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Code |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) } - var stringLen uint64 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -10524,129 +15988,133 @@ func (m *ResponseEcho) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if byteLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(dAtA[iNdEx:postIndex]) + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Data == nil { + m.Data = []byte{} + } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Log", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResponseFlush) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.Log = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResponseFlush: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseFlush: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - if (skippy < 0) || (iNdEx+skippy) < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthTypes } - if (iNdEx + skippy) > l { + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { return io.ErrUnexpectedEOF } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResponseInfo) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes + m.Info = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GasWanted", wireType) + } + m.GasWanted = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GasWanted |= int64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { - return io.ErrUnexpectedEOF + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GasUsed", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.GasUsed = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GasUsed |= int64(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResponseInfo: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseInfo: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -10656,27 +16124,29 @@ func (m *ResponseInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.Data = string(dAtA[iNdEx:postIndex]) + m.Events = append(m.Events, Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Codespace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -10704,13 +16174,13 @@ func (m *ResponseInfo) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Version = string(dAtA[iNdEx:postIndex]) + m.Codespace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AppVersion", wireType) + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sender", wireType) } - m.AppVersion = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -10720,16 +16190,29 @@ func (m *ResponseInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AppVersion |= uint64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - case 4: + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sender = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field LastBlockHeight", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) } - m.LastBlockHeight = 0 + m.Priority = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -10739,16 +16222,16 @@ func (m *ResponseInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.LastBlockHeight |= int64(b&0x7F) << shift + m.Priority |= int64(b&0x7F) << shift if b < 0x80 { break } } - case 5: + case 11: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastBlockAppHash", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field 
MempoolError", wireType) } - var byteLen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -10758,25 +16241,23 @@ func (m *ResponseInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + byteLen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.LastBlockAppHash = append(m.LastBlockAppHash[:0], dAtA[iNdEx:postIndex]...) - if m.LastBlockAppHash == nil { - m.LastBlockAppHash = []byte{} - } + m.MempoolError = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -10799,7 +16280,7 @@ func (m *ResponseInfo) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResponseInitChain) Unmarshal(dAtA []byte) error { +func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10822,17 +16303,36 @@ func (m *ResponseInitChain) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResponseInitChain: wiretype end group for non-group") + return fmt.Errorf("proto: ResponseDeliverTx: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseInitChain: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResponseDeliverTx: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) + } + m.Code = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Code |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParams", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -10842,33 +16342,31 @@ func (m *ResponseInitChain) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - if m.ConsensusParams == nil { - m.ConsensusParams = &types1.ConsensusParams{} - } - if err := m.ConsensusParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Data == nil { + m.Data = []byte{} } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AppHash", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Log", wireType) } - var byteLen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -10878,31 +16376,29 @@ func (m *ResponseInitChain) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + byteLen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.AppHash = append(m.AppHash[:0], dAtA[iNdEx:postIndex]...) - if m.AppHash == nil { - m.AppHash = []byte{} - } + m.Log = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 100: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValidatorSetUpdate", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -10912,28 +16408,65 @@ func (m *ResponseInitChain) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ValidatorSetUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Info = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 101: + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GasWanted", wireType) + } + m.GasWanted = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GasWanted |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GasUsed", wireType) + } + m.GasUsed = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GasUsed |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NextCoreChainLockUpdate", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10960,18 +16493,16 @@ func (m *ResponseInitChain) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.NextCoreChainLockUpdate == nil { - m.NextCoreChainLockUpdate = &types1.CoreChainLock{} - } - if err := m.NextCoreChainLockUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Events = append(m.Events, Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 102: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field 
InitialCoreHeight", wireType) + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Codespace", wireType) } - m.InitialCoreHeight = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -10981,11 +16512,24 @@ func (m *ResponseInitChain) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.InitialCoreHeight |= uint32(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Codespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -11007,7 +16551,7 @@ func (m *ResponseInitChain) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResponseQuery) Unmarshal(dAtA []byte) error { +func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11030,17 +16574,17 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResponseQuery: wiretype end group for non-group") + return fmt.Errorf("proto: ResponseEndBlock: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseQuery: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResponseEndBlock: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParamUpdates", wireType) } - m.Code = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -11050,16 +16594,33 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Code |= uint32(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConsensusParamUpdates == nil { + m.ConsensusParamUpdates = &types1.ConsensusParams{} + } + if err := m.ConsensusParamUpdates.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Log", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -11069,29 +16630,31 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.Log = string(dAtA[iNdEx:postIndex]) + m.Events = append(m.Events, Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { 
+ return err + } iNdEx = postIndex - case 4: + case 100: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NextCoreChainLockUpdate", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -11101,48 +16664,33 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.Info = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + if m.NextCoreChainLockUpdate == nil { + m.NextCoreChainLockUpdate = &types1.CoreChainLock{} } - m.Index = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Index |= int64(b&0x7F) << shift - if b < 0x80 { - break - } + if err := m.NextCoreChainLockUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - case 6: + iNdEx = postIndex + case 101: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorSetUpdate", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -11152,29 +16700,81 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
- if m.Key == nil { - m.Key = []byte{} + if m.ValidatorSetUpdate == nil { + m.ValidatorSetUpdate = &ValidatorSetUpdate{} + } + if err := m.ValidatorSetUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex - case 7: + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseCommit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseCommit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseCommit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -11201,16 +16801,16 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) - if m.Value == nil { - m.Value = []byte{} + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Data == nil { + m.Data = []byte{} } iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProofOps", wireType) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RetainHeight", wireType) } - var msglen int + m.RetainHeight = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -11220,52 +16820,66 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.RetainHeight |= int64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthTypes + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + msglen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - if m.ProofOps == nil { - m.ProofOps = &crypto.ProofOps{} - } - if err := m.ProofOps.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseListSnapshots) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes } - iNdEx = postIndex - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + if iNdEx >= l { + return io.ErrUnexpectedEOF } - m.Height = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Height |= int64(b&0x7F) << shift - if b < 0x80 { - break - } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - case 10: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseListSnapshots: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseListSnapshots: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Codespace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Snapshots", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -11275,23 +16889,25 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.Codespace = string(dAtA[iNdEx:postIndex]) + m.Snapshots = append(m.Snapshots, &Snapshot{}) + if err := m.Snapshots[len(m.Snapshots)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -11314,7 +16930,7 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResponseBeginBlock) Unmarshal(dAtA []byte) error { +func (m *ResponseOfferSnapshot) Unmarshal(dAtA []byte) error { l := 
len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11337,17 +16953,17 @@ func (m *ResponseBeginBlock) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResponseBeginBlock: wiretype end group for non-group") + return fmt.Errorf("proto: ResponseOfferSnapshot: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseBeginBlock: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResponseOfferSnapshot: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } - var msglen int + m.Result = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -11357,26 +16973,11 @@ func (m *ResponseBeginBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.Result |= ResponseOfferSnapshot_Result(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Events = append(m.Events, Event{}) - if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -11398,7 +16999,7 @@ func (m *ResponseBeginBlock) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { +func (m *ResponseLoadSnapshotChunk) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11421,34 +17022,15 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResponseCheckTx: wiretype end group for non-group") + return fmt.Errorf("proto: ResponseLoadSnapshotChunk: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseCheckTx: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResponseLoadSnapshotChunk: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) - } - m.Code = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Code |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Chunk", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -11475,48 +17057,66 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} + m.Chunk = append(m.Chunk[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Chunk == nil { + m.Chunk = []byte{} } iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Log", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err } - intStringLen := int(stringLen) - if intStringLen < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - if postIndex > l { + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseApplySnapshotChunk) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.Log = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - var stringLen uint64 + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseApplySnapshotChunk: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseApplySnapshotChunk: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + m.Result = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -11526,48 +17126,92 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.Result |= ResponseApplySnapshotChunk_Result(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Info = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GasWanted", wireType) - } - m.GasWanted = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes + case 2: + if wireType == 0 { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + m.RefetchChunks = append(m.RefetchChunks, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return 
ErrInvalidLengthTypes + } + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - m.GasWanted |= int64(b&0x7F) << shift - if b < 0x80 { - break + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.RefetchChunks) == 0 { + m.RefetchChunks = make([]uint32, 0, elementCount) + } + for iNdEx < postIndex { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RefetchChunks = append(m.RefetchChunks, v) } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field RefetchChunks", wireType) } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GasUsed", wireType) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RejectSenders", wireType) } - m.GasUsed = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -11577,14 +17221,77 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.GasUsed |= int64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - case 7: + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RejectSenders = append(m.RejectSenders, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponsePrepareProposal) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponsePrepareProposal: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponsePrepareProposal: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TxRecords", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11611,16 +17318,16 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Events = append(m.Events, Event{}) - if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.TxRecords = append(m.TxRecords, &TxRecord{}) + if err := m.TxRecords[len(m.TxRecords)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 8: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for 
field Codespace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AppHash", wireType) } - var stringLen uint64 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -11630,29 +17337,31 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if byteLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.Codespace = string(dAtA[iNdEx:postIndex]) + m.AppHash = append(m.AppHash[:0], dAtA[iNdEx:postIndex]...) + if m.AppHash == nil { + m.AppHash = []byte{} + } iNdEx = postIndex - case 9: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Sender", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TxResults", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -11662,29 +17371,31 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.Sender = string(dAtA[iNdEx:postIndex]) + m.TxResults = append(m.TxResults, &ExecTxResult{}) + if err := m.TxResults[len(m.TxResults)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorUpdates", wireType) } - m.Priority = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -11694,16 +17405,31 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Priority |= int64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 11: + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValidatorUpdates = append(m.ValidatorUpdates, &ValidatorUpdate{}) + if err := m.ValidatorUpdates[len(m.ValidatorUpdates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MempoolError", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParamUpdates", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -11713,23 +17439,27 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + intStringLen + 
postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.MempoolError = string(dAtA[iNdEx:postIndex]) + if m.ConsensusParamUpdates == nil { + m.ConsensusParamUpdates = &types1.ConsensusParams{} + } + if err := m.ConsensusParamUpdates.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -11752,7 +17482,7 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { +func (m *ResponseProcessProposal) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11775,17 +17505,17 @@ func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResponseDeliverTx: wiretype end group for non-group") + return fmt.Errorf("proto: ResponseProcessProposal: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseDeliverTx: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResponseProcessProposal: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - m.Code = 0 + m.Status = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -11795,14 +17525,14 @@ func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Code |= uint32(b&0x7F) << shift + m.Status |= ResponseProcessProposal_ProposalStatus(b&0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AppHash", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -11829,16 +17559,16 @@ func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} + m.AppHash = append(m.AppHash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.AppHash == nil { + m.AppHash = []byte{} } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Log", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TxResults", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -11848,29 +17578,31 @@ func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.Log = string(dAtA[iNdEx:postIndex]) + m.TxResults = append(m.TxResults, &ExecTxResult{}) + if err := m.TxResults[len(m.TxResults)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorUpdates", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -11880,29 +17612,31 @@ func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.Info = string(dAtA[iNdEx:postIndex]) + m.ValidatorUpdates = append(m.ValidatorUpdates, &ValidatorUpdate{}) + if err := m.ValidatorUpdates[len(m.ValidatorUpdates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GasWanted", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParamUpdates", wireType) } - m.GasWanted = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -11912,35 +17646,83 @@ func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.GasWanted |= int64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GasUsed", wireType) + if msglen < 0 { + return ErrInvalidLengthTypes } - m.GasUsed = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.GasUsed |= int64(b&0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConsensusParamUpdates == nil { + m.ConsensusParamUpdates = &types1.ConsensusParams{} + } + if err := m.ConsensusParamUpdates.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || 
(iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseExtendVote) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes } - case 7: + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseExtendVote: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseExtendVote: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VoteExtension", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -11950,31 +17732,81 @@ func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.Events = append(m.Events, Event{}) - if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.VoteExtension = append(m.VoteExtension[:0], dAtA[iNdEx:postIndex]...) 
+ if m.VoteExtension == nil { + m.VoteExtension = []byte{} } iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Codespace", wireType) + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err } - var stringLen uint64 + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseVerifyVoteExtension) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseVerifyVoteExtension: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseVerifyVoteExtension: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + m.Status = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -11984,24 +17816,11 @@ func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.Status |= ResponseVerifyVoteExtension_VerifyStatus(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Codespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -12023,7 +17842,7 @@ func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { +func (m *ResponseFinalizeBlock) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12046,15 +17865,15 @@ func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResponseEndBlock: wiretype end group for non-group") + return fmt.Errorf("proto: ResponseFinalizeBlock: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseEndBlock: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ResponseFinalizeBlock: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 2: + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParamUpdates", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12081,16 +17900,48 @@ func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ConsensusParamUpdates == nil { - m.ConsensusParamUpdates = &types1.ConsensusParams{} + m.Events = append(m.Events, Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - if err := 
m.ConsensusParamUpdates.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TxResults", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TxResults = append(m.TxResults, &ExecTxResult{}) + if err := m.TxResults[len(m.TxResults)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParamUpdates", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12117,11 +17968,66 @@ func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Events = append(m.Events, Event{}) - if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.ConsensusParamUpdates == nil { + m.ConsensusParamUpdates = &types1.ConsensusParams{} + } + if err := m.ConsensusParamUpdates.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppHash = append(m.AppHash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.AppHash == nil { + m.AppHash = []byte{} + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RetainHeight", wireType) + } + m.RetainHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RetainHeight |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } case 100: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field NextCoreChainLockUpdate", wireType) @@ -12215,7 +18121,7 @@ func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResponseCommit) Unmarshal(dAtA []byte) error { +func (m *CommitInfo) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12238,15 +18144,34 @@ func (m *ResponseCommit) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResponseCommit: wiretype end group for non-group") + return fmt.Errorf("proto: CommitInfo: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseCommit: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CommitInfo: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 2: + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) + } + m.Round = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Round |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field QuorumHash", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -12273,16 +18198,16 @@ func (m *ResponseCommit) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} + m.QuorumHash = append(m.QuorumHash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.QuorumHash == nil { + m.QuorumHash = []byte{} } iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RetainHeight", wireType) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockSignature", wireType) } - m.RetainHeight = 0 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -12292,66 +18217,31 @@ func (m *ResponseCommit) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.RetainHeight |= int64(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + if byteLen < 0 { + return ErrInvalidLengthTypes } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResponseListSnapshots) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.BlockSignature = append(m.BlockSignature[:0], dAtA[iNdEx:postIndex]...) + if m.BlockSignature == nil { + m.BlockSignature = []byte{} } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResponseListSnapshots: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseListSnapshots: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Snapshots", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StateSignature", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -12361,24 +18251,24 @@ func (m *ResponseListSnapshots) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.Snapshots = append(m.Snapshots, &Snapshot{}) - if err := m.Snapshots[len(m.Snapshots)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.StateSignature = append(m.StateSignature[:0], dAtA[iNdEx:postIndex]...) 
+ if m.StateSignature == nil { + m.StateSignature = []byte{} } iNdEx = postIndex default: @@ -12402,7 +18292,7 @@ func (m *ResponseListSnapshots) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResponseOfferSnapshot) Unmarshal(dAtA []byte) error { +func (m *ExtendedCommitInfo) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12425,17 +18315,17 @@ func (m *ResponseOfferSnapshot) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResponseOfferSnapshot: wiretype end group for non-group") + return fmt.Errorf("proto: ExtendedCommitInfo: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseOfferSnapshot: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExtendedCommitInfo: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) } - m.Result = 0 + m.Round = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -12445,66 +18335,16 @@ func (m *ResponseOfferSnapshot) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Result |= ResponseOfferSnapshot_Result(b&0x7F) << shift + m.Round |= int32(b&0x7F) << shift if b < 0x80 { break } } - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResponseLoadSnapshotChunk) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResponseLoadSnapshotChunk: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseLoadSnapshotChunk: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Chunk", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Votes", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -12514,24 +18354,24 @@ func (m *ResponseLoadSnapshotChunk) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.Chunk = append(m.Chunk[:0], dAtA[iNdEx:postIndex]...) 
- if m.Chunk == nil { - m.Chunk = []byte{} + m.Votes = append(m.Votes, ExtendedVoteInfo{}) + if err := m.Votes[len(m.Votes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex default: @@ -12555,7 +18395,7 @@ func (m *ResponseLoadSnapshotChunk) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResponseApplySnapshotChunk) Unmarshal(dAtA []byte) error { +func (m *Event) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12578,17 +18418,17 @@ func (m *ResponseApplySnapshotChunk) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResponseApplySnapshotChunk: wiretype end group for non-group") + return fmt.Errorf("proto: Event: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseApplySnapshotChunk: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - m.Result = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -12598,92 +18438,29 @@ func (m *ResponseApplySnapshotChunk) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Result |= ResponseApplySnapshotChunk_Result(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - case 2: - if wireType == 0 { - var v uint32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.RefetchChunks = append(m.RefetchChunks, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - var count int - for _, integer := range dAtA[iNdEx:postIndex] { - if integer < 128 { - count++ - } - } - elementCount = count - if elementCount != 0 && len(m.RefetchChunks) == 0 { - m.RefetchChunks = make([]uint32, 0, elementCount) - } - for iNdEx < postIndex { - var v uint32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.RefetchChunks = append(m.RefetchChunks, v) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field RefetchChunks", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes } - case 3: + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RejectSenders", wireType) + return fmt.Errorf("proto: 
wrong wireType = %d for field Attributes", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -12693,23 +18470,25 @@ func (m *ResponseApplySnapshotChunk) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.RejectSenders = append(m.RejectSenders, string(dAtA[iNdEx:postIndex])) + m.Attributes = append(m.Attributes, EventAttribute{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -12732,7 +18511,7 @@ func (m *ResponseApplySnapshotChunk) Unmarshal(dAtA []byte) error { } return nil } -func (m *LastCommitInfo) Unmarshal(dAtA []byte) error { +func (m *EventAttribute) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12755,36 +18534,17 @@ func (m *LastCommitInfo) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: LastCommitInfo: wiretype end group for non-group") + return fmt.Errorf("proto: EventAttribute: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: LastCommitInfo: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EventAttribute: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) - } - m.Round = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Round |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field QuorumHash", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) } - var byteLen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -12794,31 +18554,29 @@ func (m *LastCommitInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + byteLen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.QuorumHash = append(m.QuorumHash[:0], dAtA[iNdEx:postIndex]...) 
- if m.QuorumHash == nil { - m.QuorumHash = []byte{} - } + m.Key = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BlockSignature", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } - var byteLen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -12828,31 +18586,29 @@ func (m *LastCommitInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + byteLen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.BlockSignature = append(m.BlockSignature[:0], dAtA[iNdEx:postIndex]...) - if m.BlockSignature == nil { - m.BlockSignature = []byte{} - } + m.Value = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StateSignature", wireType) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) } - var byteLen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -12862,26 +18618,12 @@ func (m *LastCommitInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.StateSignature = append(m.StateSignature[:0], dAtA[iNdEx:postIndex]...) 
- if m.StateSignature == nil { - m.StateSignature = []byte{} - } - iNdEx = postIndex + m.Index = bool(v != 0) default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -12903,7 +18645,7 @@ func (m *LastCommitInfo) Unmarshal(dAtA []byte) error { } return nil } -func (m *Event) Unmarshal(dAtA []byte) error { +func (m *ExecTxResult) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12926,15 +18668,68 @@ func (m *Event) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Event: wiretype end group for non-group") + return fmt.Errorf("proto: ExecTxResult: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExecTxResult: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) + } + m.Code = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Code |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Log", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -12962,13 +18757,13 @@ func (m *Event) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = string(dAtA[iNdEx:postIndex]) + m.Log = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -12978,81 +18773,67 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.Attributes = append(m.Attributes, EventAttribute{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Info = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GasWanted", wireType) } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EventAttribute) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes + m.GasWanted = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GasWanted |= int64(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { - return io.ErrUnexpectedEOF + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GasUsed", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.GasUsed = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GasUsed |= int64(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EventAttribute: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EventAttribute: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -13062,27 +18843,29 @@ func (m 
*EventAttribute) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTypes } if postIndex > l { return io.ErrUnexpectedEOF } - m.Key = string(dAtA[iNdEx:postIndex]) + m.Events = append(m.Events, Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Codespace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -13110,28 +18893,8 @@ func (m *EventAttribute) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Value = string(dAtA[iNdEx:postIndex]) + m.Codespace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Index = bool(v != 0) default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -13308,6 +19071,109 @@ func (m *TxResult) Unmarshal(dAtA []byte) error { } return nil } +func (m *TxRecord) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TxRecord: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TxRecord: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + m.Action = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Action |= TxRecord_TxAction(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tx", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tx = append(m.Tx[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Tx == nil { + m.Tx = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *Validator) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -14003,7 +19869,144 @@ func (m *VoteInfo) Unmarshal(dAtA []byte) error { } return nil } -func (m *Evidence) Unmarshal(dAtA []byte) error { +func (m *ExtendedVoteInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExtendedVoteInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExtendedVoteInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Validator", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Validator.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SignedLastBlock", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SignedLastBlock = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VoteExtension", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VoteExtension = append(m.VoteExtension[:0], dAtA[iNdEx:postIndex]...) 
+ if m.VoteExtension == nil { + m.VoteExtension = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Misbehavior) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14026,10 +20029,10 @@ func (m *Evidence) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Evidence: wiretype end group for non-group") + return fmt.Errorf("proto: Misbehavior: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Evidence: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Misbehavior: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -14046,7 +20049,7 @@ func (m *Evidence) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Type |= EvidenceType(b&0x7F) << shift + m.Type |= MisbehaviorType(b&0x7F) << shift if b < 0x80 { break } diff --git a/abci/types/types_test.go b/abci/types/types_test.go new file mode 100644 index 0000000000..f79a244544 --- /dev/null +++ b/abci/types/types_test.go @@ -0,0 +1,74 @@ +package types_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/crypto/merkle" +) + +func TestHashAndProveResults(t *testing.T) { + trs := []*abci.ExecTxResult{ + // Note, these tests rely on the first two entries being in this order. + {Code: 0, Data: nil}, + {Code: 0, Data: []byte{}}, + + {Code: 0, Data: []byte("one")}, + {Code: 14, Data: nil}, + {Code: 14, Data: []byte("foo")}, + {Code: 14, Data: []byte("bar")}, + } + + // Nil and []byte{} should produce the same bytes + bz0, err := trs[0].Marshal() + require.NoError(t, err) + bz1, err := trs[1].Marshal() + require.NoError(t, err) + require.Equal(t, bz0, bz1) + + // Make sure that we can get a root hash from results and verify proofs. 
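The hand-written Unmarshal methods above (and throughout types.pb.go) all decode the wire format the same way: every field tag, length, and integer value is a base-128 varint, accumulated seven bits per byte until a byte with the high bit clear terminates it, with ErrIntOverflowTypes guarding the 64-bit shift limit. A minimal standalone sketch of that loop, assuming nothing beyond the standard library:

package main

import (
	"errors"
	"fmt"
	"io"
)

// decodeVarint mirrors the shift/accumulate loop used by the generated code.
func decodeVarint(buf []byte) (value uint64, n int, err error) {
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(buf) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		b := buf[n]
		n++
		value |= uint64(b&0x7F) << shift
		if b < 0x80 { // high bit clear: final byte of the varint
			return value, n, nil
		}
	}
	return 0, 0, errors.New("varint overflows a 64-bit integer")
}

func main() {
	v, n, err := decodeVarint([]byte{0xAC, 0x02})
	fmt.Println(v, n, err) // 300 2 <nil>: 0x2C | 0x02<<7
}

Length-delimited fields (wire type 2) then slice the next n bytes, which is why each string and bytes case above first decodes a varint length and bounds-checks postIndex.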
+ rs, err := abci.MarshalTxResults(trs) + require.NoError(t, err) + root := merkle.HashFromByteSlices(rs) + assert.NotEmpty(t, root) + + _, proofs := merkle.ProofsFromByteSlices(rs) + for i, tr := range trs { + bz, err := tr.Marshal() + require.NoError(t, err) + + valid := proofs[i].Verify(root, bz) + assert.NoError(t, valid, "%d", i) + } +} + +func TestHashDeterministicFieldsOnly(t *testing.T) { + tr1 := abci.ExecTxResult{ + Code: 1, + Data: []byte("transaction"), + Log: "nondeterministic data: abc", + Info: "nondeterministic data: abc", + GasWanted: 1000, + GasUsed: 1000, + Events: []abci.Event{}, + Codespace: "nondeterministic.data.abc", + } + tr2 := abci.ExecTxResult{ + Code: 1, + Data: []byte("transaction"), + Log: "nondeterministic data: def", + Info: "nondeterministic data: def", + GasWanted: 1000, + GasUsed: 1000, + Events: []abci.Event{}, + Codespace: "nondeterministic.data.def", + } + r1, err := abci.MarshalTxResults([]*abci.ExecTxResult{&tr1}) + require.NoError(t, err) + r2, err := abci.MarshalTxResults([]*abci.ExecTxResult{&tr2}) + require.NoError(t, err) + require.Equal(t, merkle.HashFromByteSlices(r1), merkle.HashFromByteSlices(r2)) +} diff --git a/abci/version/version.go b/abci/version/version.go deleted file mode 100644 index f4dc4d2358..0000000000 --- a/abci/version/version.go +++ /dev/null @@ -1,9 +0,0 @@ -package version - -import ( - "github.com/tendermint/tendermint/version" -) - -// TODO: eliminate this after some version refactor - -const Version = version.ABCIVersion diff --git a/buf.gen.yaml b/buf.gen.yaml index dc56781dd4..d972360bbd 100644 --- a/buf.gen.yaml +++ b/buf.gen.yaml @@ -1,13 +1,9 @@ -# The version of the generation template. -# Required. -# The only currently-valid value is v1beta1. -version: v1beta1 - -# The plugins to run. +version: v1 plugins: - # The name of the plugin. - name: gogofaster - # The the relative output directory. - out: proto - # Any options to provide to the plugin. 
- opt: Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types,Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration,plugins=grpc,paths=source_relative + out: ./proto/ + opt: + - Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types + - Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration + - plugins=grpc + - paths=source_relative diff --git a/buf.work.yaml b/buf.work.yaml new file mode 100644 index 0000000000..1878b341be --- /dev/null +++ b/buf.work.yaml @@ -0,0 +1,3 @@ +version: v1 +directories: + - proto diff --git a/cmd/priv_val_server/main.go b/cmd/priv_val_server/main.go index 203b3df0dd..9014221450 100644 --- a/cmd/priv_val_server/main.go +++ b/cmd/priv_val_server/main.go @@ -6,10 +6,11 @@ import ( "crypto/x509" "flag" "fmt" - "io/ioutil" "net" "net/http" "os" + "os/signal" + "syscall" "time" grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" @@ -20,7 +21,6 @@ import ( "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" - tmos "github.com/tendermint/tendermint/libs/os" "github.com/tendermint/tendermint/privval" grpcprivval "github.com/tendermint/tendermint/privval/grpc" privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval" @@ -45,12 +45,19 @@ func main() { keyFile = flag.String("keyfile", "", "absolute path to server key") rootCA = flag.String("rootcafile", "", "absolute path to root CA") prometheusAddr = flag.String("prometheus-addr", "", "address for prometheus endpoint (host:port)") - - logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false). - With("module", "priv_val") ) flag.Parse() + logger, err := log.NewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to construct logger: %v", err) + os.Exit(1) + } + logger = logger.With("module", "priv_val") + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + logger.Info( "Starting private validator", "addr", *addr, @@ -78,7 +85,7 @@ func main() { } certPool := x509.NewCertPool() - bs, err := ioutil.ReadFile(*rootCA) + bs, err := os.ReadFile(*rootCA) if err != nil { fmt.Fprintf(os.Stderr, "failed to read client ca cert: %s", err) os.Exit(1) @@ -106,7 +113,7 @@ func main() { // add prometheus metrics for unary RPC calls opts = append(opts, grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor)) - ss := grpcprivval.NewSignerServer(*chainID, pv, logger) + ss := grpcprivval.NewSignerServer(logger, *chainID, pv) protocol, address := tmnet.ProtocolAndAddress(*addr) @@ -131,9 +138,10 @@ func main() { os.Exit(1) } - // Stop upon receiving SIGTERM or CTRL-C. - tmos.TrapSignal(logger, func() { - logger.Debug("SignerServer: calling Close") + opctx, opcancel := signal.NotifyContext(ctx, os.Interrupt, syscall.SIGTERM) + defer opcancel() + go func() { + <-opctx.Done() if *prometheusAddr != "" { ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) defer cancel() @@ -143,7 +151,7 @@ func main() { } } s.GracefulStop() - }) + }() // Run forever. select {} diff --git a/cmd/tenderdash/commands/completion.go b/cmd/tenderdash/commands/completion.go new file mode 100644 index 0000000000..d2c81f0afc --- /dev/null +++ b/cmd/tenderdash/commands/completion.go @@ -0,0 +1,46 @@ +package commands + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +// NewCompletionCmd returns a cobra.Command that generates bash and zsh +// completion scripts for the given root command. 
If hidden is true, the +// command will not show up in the root command's list of available commands. +func NewCompletionCmd(rootCmd *cobra.Command, hidden bool) *cobra.Command { + flagZsh := "zsh" + cmd := &cobra.Command{ + Use: "completion", + Short: "Generate shell completion scripts", + Long: fmt.Sprintf(`Generate Bash and Zsh completion scripts and print them to STDOUT. + +Once saved to file, a completion script can be loaded in the shell's +current session as shown: + + $ . <(%s completion) + +To configure your bash shell to load completions for each session add to +your $HOME/.bashrc or $HOME/.profile the following instruction: + + . <(%s completion) +`, rootCmd.Use, rootCmd.Use), + RunE: func(cmd *cobra.Command, _ []string) error { + zsh, err := cmd.Flags().GetBool(flagZsh) + if err != nil { + return err + } + if zsh { + return rootCmd.GenZshCompletion(cmd.OutOrStdout()) + } + return rootCmd.GenBashCompletion(cmd.OutOrStdout()) + }, + Hidden: hidden, + Args: cobra.NoArgs, + } + + cmd.Flags().Bool(flagZsh, false, "Generate Zsh completion script") + + return cmd +} diff --git a/cmd/tenderdash/commands/debug/debug.go b/cmd/tenderdash/commands/debug/debug.go index e07f7978de..7fd5b030f7 100644 --- a/cmd/tenderdash/commands/debug/debug.go +++ b/cmd/tenderdash/commands/debug/debug.go @@ -6,34 +6,26 @@ import ( "github.com/tendermint/tendermint/libs/log" ) -var ( - nodeRPCAddr string - profAddr string - frequency uint - + const ( flagNodeRPCAddr = "rpc-laddr" flagProfAddr = "pprof-laddr" flagFrequency = "frequency" - - logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false) ) -// DebugCmd defines the root command containing subcommands that assist in -// debugging running Tendermint processes. -var DebugCmd = &cobra.Command{ - Use: "debug", - Short: "A utility to kill or watch a Tendermint process while aggregating debugging data", -} - -func init() { - DebugCmd.PersistentFlags().SortFlags = true - DebugCmd.PersistentFlags().StringVar( - &nodeRPCAddr, +func GetDebugCommand(logger log.Logger) *cobra.Command { + cmd := &cobra.Command{ + Use: "debug", + Short: "A utility to kill or watch a Tendermint process while aggregating debugging data", + } + cmd.PersistentFlags().SortFlags = true + cmd.PersistentFlags().String( flagNodeRPCAddr, "tcp://localhost:26657", - "the Tendermint node's RPC address (<host>:<port>)", + "the Tendermint node's RPC address (<host>:<port>)", ) - DebugCmd.AddCommand(killCmd) - DebugCmd.AddCommand(dumpCmd) + cmd.AddCommand(getKillCmd(logger)) + cmd.AddCommand(getDumpCmd(logger)) + return cmd + } diff --git a/cmd/tenderdash/commands/debug/dump.go b/cmd/tenderdash/commands/debug/dump.go index cb1cc942a8..d84f6e10aa 100644 --- a/cmd/tenderdash/commands/debug/dump.go +++ b/cmd/tenderdash/commands/debug/dump.go @@ -1,9 +1,9 @@ package debug import ( + "context" "errors" "fmt" - "io/ioutil" "os" "path/filepath" "time" @@ -13,76 +13,102 @@ import ( "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/cli" + "github.com/tendermint/tendermint/libs/log" rpchttp "github.com/tendermint/tendermint/rpc/client/http" ) -var dumpCmd = &cobra.Command{ - Use: "dump [output-directory]", - Short: "Continuously poll a Tendermint process and dump debugging data into a single location", - Long: `Continuously poll a Tendermint process and dump debugging data into a single +func getDumpCmd(logger log.Logger) *cobra.Command { + cmd := &cobra.Command{ + Use: "dump [output-directory]", + Short: "Continuously poll a Tendermint process and dump debugging data into a single 
location", + Long: `Continuously poll a Tendermint process and dump debugging data into a single location at a specified frequency. At each frequency interval, an archived and compressed file will contain node debugging information including the goroutine and heap profiles if enabled.`, - Args: cobra.ExactArgs(1), - RunE: dumpCmdHandler, -} - -func init() { - dumpCmd.Flags().UintVar( - &frequency, + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + outDir := args[0] + if outDir == "" { + return errors.New("invalid output directory") + } + frequency, err := cmd.Flags().GetUint(flagFrequency) + if err != nil { + return fmt.Errorf("flag %q not defined: %w", flagFrequency, err) + } + + if frequency == 0 { + return errors.New("frequency must be positive") + } + + nodeRPCAddr, err := cmd.Flags().GetString(flagNodeRPCAddr) + if err != nil { + return fmt.Errorf("flag %q not defined: %w", flagNodeRPCAddr, err) + } + + profAddr, err := cmd.Flags().GetString(flagProfAddr) + if err != nil { + return fmt.Errorf("flag %q not defined: %w", flagProfAddr, err) + } + + if _, err := os.Stat(outDir); os.IsNotExist(err) { + if err := os.Mkdir(outDir, os.ModePerm); err != nil { + return fmt.Errorf("failed to create output directory: %w", err) + } + } + + rpc, err := rpchttp.New(nodeRPCAddr) + if err != nil { + return fmt.Errorf("failed to create new http client: %w", err) + } + + ctx := cmd.Context() + + home := viper.GetString(cli.HomeFlag) + conf := config.DefaultConfig() + conf = conf.SetRoot(home) + config.EnsureRoot(conf.RootDir) + + dumpArgs := dumpDebugDataArgs{ + conf: conf, + outDir: outDir, + profAddr: profAddr, + } + dumpDebugData(ctx, logger, rpc, dumpArgs) + + ticker := time.NewTicker(time.Duration(frequency) * time.Second) + for range ticker.C { + dumpDebugData(ctx, logger, rpc, dumpArgs) + } + + return nil + }, + } + cmd.Flags().Uint( flagFrequency, 30, "the frequency (seconds) in which to poll, aggregate and dump Tendermint debug data", ) - dumpCmd.Flags().StringVar( - &profAddr, + cmd.Flags().String( flagProfAddr, "", "the profiling server address (:)", ) -} -func dumpCmdHandler(_ *cobra.Command, args []string) error { - outDir := args[0] - if outDir == "" { - return errors.New("invalid output directory") - } + return cmd - if frequency == 0 { - return errors.New("frequency must be positive") - } - - if _, err := os.Stat(outDir); os.IsNotExist(err) { - if err := os.Mkdir(outDir, os.ModePerm); err != nil { - return fmt.Errorf("failed to create output directory: %w", err) - } - } - - rpc, err := rpchttp.New(nodeRPCAddr) - if err != nil { - return fmt.Errorf("failed to create new http client: %w", err) - } - - home := viper.GetString(cli.HomeFlag) - conf := config.DefaultConfig() - conf = conf.SetRoot(home) - config.EnsureRoot(conf.RootDir) - - dumpDebugData(outDir, conf, rpc) - - ticker := time.NewTicker(time.Duration(frequency) * time.Second) - for range ticker.C { - dumpDebugData(outDir, conf, rpc) - } +} - return nil +type dumpDebugDataArgs struct { + conf *config.Config + outDir string + profAddr string } -func dumpDebugData(outDir string, conf *config.Config, rpc *rpchttp.HTTP) { +func dumpDebugData(ctx context.Context, logger log.Logger, rpc *rpchttp.HTTP, args dumpDebugDataArgs) { start := time.Now().UTC() - tmpDir, err := ioutil.TempDir(outDir, "tendermint_debug_tmp") + tmpDir, err := os.MkdirTemp(args.outDir, "tendermint_debug_tmp") if err != nil { logger.Error("failed to create temporary directory", "dir", tmpDir, "error", err) return @@ -90,44 +116,44 
@@ func dumpDebugData(outDir string, conf *config.Config, rpc *rpchttp.HTTP) { defer os.RemoveAll(tmpDir) logger.Info("getting node status...") - if err := dumpStatus(rpc, tmpDir, "status.json"); err != nil { + if err := dumpStatus(ctx, rpc, tmpDir, "status.json"); err != nil { logger.Error("failed to dump node status", "error", err) return } logger.Info("getting node network info...") - if err := dumpNetInfo(rpc, tmpDir, "net_info.json"); err != nil { + if err := dumpNetInfo(ctx, rpc, tmpDir, "net_info.json"); err != nil { logger.Error("failed to dump node network info", "error", err) return } logger.Info("getting node consensus state...") - if err := dumpConsensusState(rpc, tmpDir, "consensus_state.json"); err != nil { + if err := dumpConsensusState(ctx, rpc, tmpDir, "consensus_state.json"); err != nil { logger.Error("failed to dump node consensus state", "error", err) return } logger.Info("copying node WAL...") - if err := copyWAL(conf, tmpDir); err != nil { + if err := copyWAL(args.conf, tmpDir); err != nil { logger.Error("failed to copy node WAL", "error", err) return } - if profAddr != "" { + if args.profAddr != "" { logger.Info("getting node goroutine profile...") - if err := dumpProfile(tmpDir, profAddr, "goroutine", 2); err != nil { + if err := dumpProfile(tmpDir, args.profAddr, "goroutine", 2); err != nil { logger.Error("failed to dump goroutine profile", "error", err) return } logger.Info("getting node heap profile...") - if err := dumpProfile(tmpDir, profAddr, "heap", 2); err != nil { + if err := dumpProfile(tmpDir, args.profAddr, "heap", 2); err != nil { logger.Error("failed to dump heap profile", "error", err) return } } - outFile := filepath.Join(outDir, fmt.Sprintf("%s.zip", start.Format(time.RFC3339))) + outFile := filepath.Join(args.outDir, fmt.Sprintf("%s.zip", start.Format(time.RFC3339))) if err := zipDir(tmpDir, outFile); err != nil { logger.Error("failed to create and compress archive", "file", outFile, "error", err) } diff --git a/cmd/tenderdash/commands/debug/io.go b/cmd/tenderdash/commands/debug/io.go index dcfff50c89..bf904cf5c6 100644 --- a/cmd/tenderdash/commands/debug/io.go +++ b/cmd/tenderdash/commands/debug/io.go @@ -5,7 +5,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "os" "path" "path/filepath" @@ -111,5 +110,5 @@ func writeStateJSONToFile(state interface{}, dir, filename string) error { return fmt.Errorf("failed to encode state dump: %w", err) } - return ioutil.WriteFile(path.Join(dir, filename), stateJSON, os.ModePerm) + return os.WriteFile(path.Join(dir, filename), stateJSON, os.ModePerm) } diff --git a/cmd/tenderdash/commands/debug/kill.go b/cmd/tenderdash/commands/debug/kill.go index 3e749e5131..a6c1ac7d86 100644 --- a/cmd/tenderdash/commands/debug/kill.go +++ b/cmd/tenderdash/commands/debug/kill.go @@ -3,7 +3,6 @@ package debug import ( "errors" "fmt" - "io/ioutil" "os" "os/exec" "path/filepath" @@ -16,88 +15,96 @@ import ( "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/cli" + "github.com/tendermint/tendermint/libs/log" rpchttp "github.com/tendermint/tendermint/rpc/client/http" ) -var killCmd = &cobra.Command{ - Use: "kill [pid] [compressed-output-file]", - Short: "Kill a Tendermint process while aggregating and packaging debugging data", - Long: `Kill a Tendermint process while also aggregating Tendermint process data +func getKillCmd(logger log.Logger) *cobra.Command { + cmd := &cobra.Command{ + Use: "kill [pid] [compressed-output-file]", + Short: "Kill a Tendermint process while aggregating and packaging 
debugging data", + Long: `Kill a Tendermint process while also aggregating Tendermint process data such as the latest node state, including consensus and networking state, go-routine state, and the node's WAL and config information. This aggregated data is packaged into a compressed archive. Example: $ tendermint debug kill 34255 /path/to/tm-debug.zip`, - Args: cobra.ExactArgs(2), - RunE: killCmdHandler, -} - -func killCmdHandler(cmd *cobra.Command, args []string) error { - pid, err := strconv.ParseUint(args[0], 10, 64) - if err != nil { - return err - } - - outFile := args[1] - if outFile == "" { - return errors.New("invalid output file") - } - - rpc, err := rpchttp.New(nodeRPCAddr) - if err != nil { - return fmt.Errorf("failed to create new http client: %w", err) - } - - home := viper.GetString(cli.HomeFlag) - conf := config.DefaultConfig() - conf = conf.SetRoot(home) - config.EnsureRoot(conf.RootDir) - - // Create a temporary directory which will contain all the state dumps and - // relevant files and directories that will be compressed into a file. - tmpDir, err := ioutil.TempDir(os.TempDir(), "tendermint_debug_tmp") - if err != nil { - return fmt.Errorf("failed to create temporary directory: %w", err) - } - defer os.RemoveAll(tmpDir) - - logger.Info("getting node status...") - if err := dumpStatus(rpc, tmpDir, "status.json"); err != nil { - return err - } - - logger.Info("getting node network info...") - if err := dumpNetInfo(rpc, tmpDir, "net_info.json"); err != nil { - return err - } - - logger.Info("getting node consensus state...") - if err := dumpConsensusState(rpc, tmpDir, "consensus_state.json"); err != nil { - return err - } - - logger.Info("copying node WAL...") - if err := copyWAL(conf, tmpDir); err != nil { - if !os.IsNotExist(err) { - return err - } - - logger.Info("node WAL does not exist; continuing...") - } - - logger.Info("copying node configuration...") - if err := copyConfig(home, tmpDir); err != nil { - return err - } - - logger.Info("killing Tendermint process") - if err := killProc(pid, tmpDir); err != nil { - return err + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + pid, err := strconv.ParseInt(args[0], 10, 64) + if err != nil { + return err + } + + outFile := args[1] + if outFile == "" { + return errors.New("invalid output file") + } + nodeRPCAddr, err := cmd.Flags().GetString(flagNodeRPCAddr) + if err != nil { + return fmt.Errorf("flag %q not defined: %w", flagNodeRPCAddr, err) + } + + rpc, err := rpchttp.New(nodeRPCAddr) + if err != nil { + return fmt.Errorf("failed to create new http client: %w", err) + } + + home := viper.GetString(cli.HomeFlag) + conf := config.DefaultConfig() + conf = conf.SetRoot(home) + config.EnsureRoot(conf.RootDir) + + // Create a temporary directory which will contain all the state dumps and + // relevant files and directories that will be compressed into a file. 
+ tmpDir, err := os.MkdirTemp(os.TempDir(), "tendermint_debug_tmp") + if err != nil { + return fmt.Errorf("failed to create temporary directory: %w", err) + } + defer os.RemoveAll(tmpDir) + + logger.Info("getting node status...") + if err := dumpStatus(ctx, rpc, tmpDir, "status.json"); err != nil { + return err + } + + logger.Info("getting node network info...") + if err := dumpNetInfo(ctx, rpc, tmpDir, "net_info.json"); err != nil { + return err + } + + logger.Info("getting node consensus state...") + if err := dumpConsensusState(ctx, rpc, tmpDir, "consensus_state.json"); err != nil { + return err + } + + logger.Info("copying node WAL...") + if err := copyWAL(conf, tmpDir); err != nil { + if !os.IsNotExist(err) { + return err + } + + logger.Info("node WAL does not exist; continuing...") + } + + logger.Info("copying node configuration...") + if err := copyConfig(home, tmpDir); err != nil { + return err + } + + logger.Info("killing Tendermint process") + if err := killProc(int(pid), tmpDir); err != nil { + return err + } + + logger.Info("archiving and compressing debug directory...") + return zipDir(tmpDir, outFile) + }, } - logger.Info("archiving and compressing debug directory...") - return zipDir(tmpDir, outFile) + return cmd } // killProc attempts to kill the Tendermint process with a given PID with an @@ -105,7 +112,7 @@ func killCmdHandler(cmd *cobra.Command, args []string) error { // is tailed and piped to a file under the directory dir. An error is returned // if the output file cannot be created or the tail command cannot be started. // An error is not returned if any subsequent syscall fails. -func killProc(pid uint64, dir string) error { +func killProc(pid int, dir string) error { // pipe STDERR output from tailing the Tendermint process to a file // // NOTE: This will only work on UNIX systems. @@ -128,7 +135,7 @@ func killProc(pid uint64, dir string) error { go func() { // Killing the Tendermint process with the '-ABRT|-6' signal will result in // a goroutine stacktrace. - p, err := os.FindProcess(int(pid)) + p, err := os.FindProcess(pid) if err != nil { fmt.Fprintf(os.Stderr, "failed to find PID to kill Tendermint process: %s", err) } else if err = p.Signal(syscall.SIGABRT); err != nil { diff --git a/cmd/tenderdash/commands/debug/util.go b/cmd/tenderdash/commands/debug/util.go index fa356c4880..24626207f5 100644 --- a/cmd/tenderdash/commands/debug/util.go +++ b/cmd/tenderdash/commands/debug/util.go @@ -3,7 +3,7 @@ package debug import ( "context" "fmt" - "io/ioutil" + "io" "net/http" "os" "path" @@ -15,8 +15,8 @@ import ( // dumpStatus gets node status state dump from the Tendermint RPC and writes it // to file. It returns an error upon failure. -func dumpStatus(rpc *rpchttp.HTTP, dir, filename string) error { - status, err := rpc.Status(context.Background()) +func dumpStatus(ctx context.Context, rpc *rpchttp.HTTP, dir, filename string) error { + status, err := rpc.Status(ctx) if err != nil { return fmt.Errorf("failed to get node status: %w", err) } @@ -26,8 +26,8 @@ func dumpStatus(rpc *rpchttp.HTTP, dir, filename string) error { // dumpNetInfo gets network information state dump from the Tendermint RPC and // writes it to file. It returns an error upon failure. 
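The dump helpers below now take the caller's context.Context instead of minting context.Background() internally, so one cancellation or deadline set at the command level bounds every RPC round trip. A minimal sketch of the pattern under that assumption; fetchStatus is a hypothetical stand-in for the real rpchttp client call:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// fetchStatus stands in for rpc.Status(ctx); like the real client, it
// aborts as soon as the caller's context is done.
func fetchStatus(ctx context.Context) (string, error) {
	select {
	case <-time.After(50 * time.Millisecond): // simulated network latency
		return `{"node_info":{}}`, nil
	case <-ctx.Done():
		return "", ctx.Err()
	}
}

func dumpStatus(ctx context.Context) error {
	status, err := fetchStatus(ctx)
	if err != nil {
		return fmt.Errorf("failed to get node status: %w", err)
	}
	fmt.Println(status)
	return nil
}

func main() {
	// One deadline set here bounds every helper that receives this ctx.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	if err := dumpStatus(ctx); err != nil && !errors.Is(err, context.DeadlineExceeded) {
		panic(err)
	}
}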
-func dumpNetInfo(rpc *rpchttp.HTTP, dir, filename string) error { - netInfo, err := rpc.NetInfo(context.Background()) +func dumpNetInfo(ctx context.Context, rpc *rpchttp.HTTP, dir, filename string) error { + netInfo, err := rpc.NetInfo(ctx) if err != nil { return fmt.Errorf("failed to get node network information: %w", err) } @@ -37,8 +37,8 @@ func dumpNetInfo(rpc *rpchttp.HTTP, dir, filename string) error { // dumpConsensusState gets consensus state dump from the Tendermint RPC and // writes it to file. It returns an error upon failure. -func dumpConsensusState(rpc *rpchttp.HTTP, dir, filename string) error { - consDump, err := rpc.DumpConsensusState(context.Background()) +func dumpConsensusState(ctx context.Context, rpc *rpchttp.HTTP, dir, filename string) error { + consDump, err := rpc.DumpConsensusState(ctx) if err != nil { return fmt.Errorf("failed to get node consensus dump: %w", err) } @@ -73,10 +73,10 @@ func dumpProfile(dir, addr, profile string, debug int) error { } defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) + body, err := io.ReadAll(resp.Body) if err != nil { return fmt.Errorf("failed to read %s profile response body: %w", profile, err) } - return ioutil.WriteFile(path.Join(dir, fmt.Sprintf("%s.out", profile)), body, os.ModePerm) + return os.WriteFile(path.Join(dir, fmt.Sprintf("%s.out", profile)), body, os.ModePerm) } diff --git a/cmd/tenderdash/commands/gen_node_key.go b/cmd/tenderdash/commands/gen_node_key.go index 81ea2ae70a..2a0bb758eb 100644 --- a/cmd/tenderdash/commands/gen_node_key.go +++ b/cmd/tenderdash/commands/gen_node_key.go @@ -1,11 +1,11 @@ package commands import ( + "encoding/json" "fmt" "github.com/spf13/cobra" - tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/types" ) @@ -20,7 +20,7 @@ var GenNodeKeyCmd = &cobra.Command{ func genNodeKey(cmd *cobra.Command, args []string) error { nodeKey := types.GenNodeKey() - bz, err := tmjson.Marshal(nodeKey) + bz, err := json.Marshal(nodeKey) if err != nil { return fmt.Errorf("nodeKey -> json: %w", err) } diff --git a/cmd/tenderdash/commands/gen_validator.go b/cmd/tenderdash/commands/gen_validator.go index 0ab74af5b7..bbe09e9127 100644 --- a/cmd/tenderdash/commands/gen_validator.go +++ b/cmd/tenderdash/commands/gen_validator.go @@ -1,42 +1,33 @@ package commands import ( + "encoding/json" "fmt" "github.com/spf13/cobra" - tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/privval" - "github.com/tendermint/tendermint/types" ) -var ( - keyType string -) - -// GenValidatorCmd allows the generation of a keypair for a +// MakeGenValidatorCommand allows the generation of a keypair for a // validator. -var GenValidatorCmd = &cobra.Command{ - Use: "gen-validator", - Short: "Generate new validator keypair", - RunE: genValidator, -} - -func init() { - GenValidatorCmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519, - "Key type to generate privval file with. 
Options: ed25519, secp256k1") -} - -func genValidator(cmd *cobra.Command, args []string) error { - pv := privval.GenFilePV("", "") - - jsbz, err := tmjson.Marshal(pv) - if err != nil { - return fmt.Errorf("validator -> json: %w", err) +func MakeGenValidatorCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "gen-validator", + Short: "Generate new validator keypair", + RunE: func(cmd *cobra.Command, args []string) error { + pv := privval.GenFilePV("", "") + + jsbz, err := json.Marshal(pv) + if err != nil { + return fmt.Errorf("validator -> json: %w", err) + } + + fmt.Printf("%v\n", string(jsbz)) + + return nil + }, } - fmt.Printf(`%v -`, string(jsbz)) - - return nil + return cmd } diff --git a/cmd/tenderdash/commands/init.go b/cmd/tenderdash/commands/init.go index 1786dd70d0..7634bdf496 100644 --- a/cmd/tenderdash/commands/init.go +++ b/cmd/tenderdash/commands/init.go @@ -8,57 +8,62 @@ import ( "github.com/dashevo/dashd-go/btcjson" "github.com/spf13/cobra" - cfg "github.com/tendermint/tendermint/config" + + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/libs/log" tmos "github.com/tendermint/tendermint/libs/os" tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/privval" "github.com/tendermint/tendermint/types" ) -// InitFilesCmd initializes a fresh Tendermint Core instance. -var InitFilesCmd = &cobra.Command{ - Use: "init [full|validator|seed|single]", - Short: "Initializes a Tenderdash node", - ValidArgs: []string{"full", "validator", "seed", "single"}, - // We allow for zero args so we can throw a more informative error - Args: cobra.MaximumNArgs(1), - RunE: initFiles, -} - -var ( +type nodeConfig struct { + *config.Config quorumType int coreChainLockedHeight uint32 initChainInitialHeight int64 appHash []byte proTxHash []byte -) - -func AddInitFlags(cmd *cobra.Command) { - cmd.Flags().IntVar(&quorumType, "quorumType", 0, "Quorum Type") - cmd.Flags().Uint32Var(&coreChainLockedHeight, "coreChainLockedHeight", 1, "Initial Core Chain Locked Height") - cmd.Flags().Int64Var(&initChainInitialHeight, "initialHeight", 0, "Initial Height") - cmd.Flags().BytesHexVar(&proTxHash, "proTxHash", []byte(nil), "Node pro tx hash") - cmd.Flags().BytesHexVar(&appHash, "appHash", []byte(nil), "App hash") } -func initFiles(cmd *cobra.Command, args []string) error { - if len(args) == 0 { - return errors.New("must specify a node type: tendermint init [validator|full|seed|single]") +// MakeInitFilesCommand returns the command to initialize a fresh Tendermint Core instance. 
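Like gen-validator above and init in the constructor that follows, the commands in this patch move from package-level flag variables registered in init() to Make*Command constructors that close over their own state. A minimal sketch of that shape, with a hypothetical exampleConfig in place of the real node configuration:

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

// exampleConfig is a hypothetical stand-in for the configuration struct
// the real constructors receive.
type exampleConfig struct {
	initialHeight int64
	appHash       []byte
}

// makeExampleCommand mirrors the Make*Command shape: flags are bound to
// constructor-scoped state instead of package-level variables.
func makeExampleCommand(conf *exampleConfig) *cobra.Command {
	cmd := &cobra.Command{
		Use:   "example",
		Short: "Demonstrate the constructor-based command pattern",
		RunE: func(cmd *cobra.Command, args []string) error {
			fmt.Printf("initialHeight=%d appHash=%X\n", conf.initialHeight, conf.appHash)
			return nil
		},
	}
	cmd.Flags().Int64Var(&conf.initialHeight, "initialHeight", 0, "Initial Height")
	cmd.Flags().BytesHexVar(&conf.appHash, "appHash", nil, "App hash")
	return cmd
}

func main() {
	if err := makeExampleCommand(&exampleConfig{}).Execute(); err != nil {
		panic(err)
	}
}

Keeping flags scoped to the constructor makes each command independently testable and lets callers inject configuration and logging rather than rely on globals.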
+func MakeInitFilesCommand(conf *config.Config, logger log.Logger) *cobra.Command { + nodeConf := nodeConfig{Config: conf} + + cmd := &cobra.Command{ + Use: "init [full|validator|seed]", + Short: "Initializes a Tenderdash node", + ValidArgs: []string{"full", "validator", "seed"}, + // We allow for zero args so we can throw a more informative error + Args: cobra.MaximumNArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + return errors.New("must specify a node type: tendermint init [validator|full|seed]") + } + nodeConf.Mode = args[0] + return initFilesWithConfig(cmd.Context(), nodeConf, logger) + }, } - config.Mode = args[0] - return initFilesWithConfig(config) + + cmd.Flags().IntVar(&nodeConf.quorumType, "quorumType", 0, "Quorum Type") + cmd.Flags().Uint32Var(&nodeConf.coreChainLockedHeight, "coreChainLockedHeight", 1, "Initial Core Chain Locked Height") + cmd.Flags().Int64Var(&nodeConf.initChainInitialHeight, "initialHeight", 0, "Initial Height") + cmd.Flags().BytesHexVar(&nodeConf.proTxHash, "proTxHash", []byte(nil), "Node pro tx hash") + cmd.Flags().BytesHexVar(&nodeConf.appHash, "appHash", []byte(nil), "App hash") + + return cmd } -func initFilesWithConfig(config *cfg.Config) error { +func initFilesWithConfig(ctx context.Context, conf nodeConfig, logger log.Logger) error { var ( pv *privval.FilePV err error ) - if config.Mode == cfg.ModeValidator { + if conf.Mode == config.ModeValidator { // private validator - privValKeyFile := config.PrivValidator.KeyFile() - privValStateFile := config.PrivValidator.StateFile() + privValKeyFile := conf.PrivValidator.KeyFile() + privValStateFile := conf.PrivValidator.StateFile() if tmos.FileExists(privValKeyFile) { pv, err = privval.LoadFilePV(privValKeyFile, privValStateFile) if err != nil { @@ -72,13 +77,15 @@ func initFilesWithConfig(config *cfg.Config) error { if err != nil { return err } - pv.Save() + if err := pv.Save(); err != nil { + return err + } logger.Info("Generated private validator", "keyFile", privValKeyFile, "stateFile", privValStateFile) } } - nodeKeyFile := config.NodeKeyFile() + nodeKeyFile := conf.NodeKeyFile() if tmos.FileExists(nodeKeyFile) { logger.Info("Found node key", "path", nodeKeyFile) } else { @@ -89,7 +96,7 @@ func initFilesWithConfig(config *cfg.Config) error { } // genesis file - genFile := config.GenesisFile() + genFile := conf.GenesisFile() if tmos.FileExists(genFile) { logger.Info("Found genesis file", "path", genFile) } else { @@ -98,13 +105,13 @@ func initFilesWithConfig(config *cfg.Config) error { ChainID: fmt.Sprintf("test-chain-%v", tmrand.Str(6)), GenesisTime: time.Now(), ConsensusParams: types.DefaultConsensusParams(), - QuorumType: btcjson.LLMQType(quorumType), - InitialCoreChainLockedHeight: coreChainLockedHeight, - InitialHeight: initChainInitialHeight, - AppHash: appHash, + QuorumType: btcjson.LLMQType(conf.quorumType), + InitialCoreChainLockedHeight: conf.coreChainLockedHeight, + InitialHeight: conf.initChainInitialHeight, + AppHash: conf.appHash, } - ctx, cancel := context.WithTimeout(context.TODO(), ctxTimeout) + ctx, cancel := context.WithTimeout(ctx, ctxTimeout) defer cancel() // if this is a validator we add it to genesis @@ -139,10 +146,10 @@ func initFilesWithConfig(config *cfg.Config) error { } // write config file - if err := cfg.WriteConfigFile(config.RootDir, config); err != nil { + if err := config.WriteConfigFile(conf.RootDir, conf.Config); err != nil { return err } - logger.Info("Generated config", "mode", config.Mode) + logger.Info("Generated config", 
"mode", conf.Mode) return nil } diff --git a/cmd/tenderdash/commands/inspect.go b/cmd/tenderdash/commands/inspect.go index 4f5ce2eccf..9c12ef5cf6 100644 --- a/cmd/tenderdash/commands/inspect.go +++ b/cmd/tenderdash/commands/inspect.go @@ -1,21 +1,22 @@ package commands import ( - "context" - "os" "os/signal" "syscall" "github.com/spf13/cobra" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/inspect" + "github.com/tendermint/tendermint/libs/log" ) -// InspectCmd is the command for starting an inspect server. -var InspectCmd = &cobra.Command{ - Use: "inspect", - Short: "Run an inspect server for investigating Tendermint state", - Long: ` +// InspectCmd constructs the command to start an inspect server. +func MakeInspectCommand(conf *config.Config, logger log.Logger) *cobra.Command { + cmd := &cobra.Command{ + Use: "inspect", + Short: "Run an inspect server for investigating Tendermint state", + Long: ` inspect runs a subset of Tendermint's RPC endpoints that are useful for debugging issues with Tendermint. @@ -24,40 +25,27 @@ var InspectCmd = &cobra.Command{ The inspect command can be used to query the block and state store using Tendermint RPC calls to debug issues of inconsistent state. `, - - RunE: runInspect, -} - -func init() { - InspectCmd.Flags(). - String("rpc.laddr", - config.RPC.ListenAddress, "RPC listenener address. Port required") - InspectCmd.Flags(). - String("db-backend", - config.DBBackend, "database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb") - InspectCmd.Flags(). - String("db-dir", config.DBPath, "database directory") -} - -func runInspect(cmd *cobra.Command, args []string) error { - ctx, cancel := context.WithCancel(cmd.Context()) - defer cancel() - - c := make(chan os.Signal, 1) - signal.Notify(c, syscall.SIGTERM, syscall.SIGINT) - go func() { - <-c - cancel() - }() - - ins, err := inspect.NewFromConfig(logger, config) - if err != nil { - return err + RunE: func(cmd *cobra.Command, args []string) error { + ctx, cancel := signal.NotifyContext(cmd.Context(), syscall.SIGTERM, syscall.SIGINT) + defer cancel() + + ins, err := inspect.NewFromConfig(logger, conf) + if err != nil { + return err + } + + logger.Info("starting inspect server") + if err := ins.Run(ctx); err != nil { + return err + } + return nil + }, } + cmd.Flags().String("rpc.laddr", + conf.RPC.ListenAddress, "RPC listenener address. 
Port required") + cmd.Flags().String("db-backend", + conf.DBBackend, "database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb") + cmd.Flags().String("db-dir", conf.DBPath, "database directory") - logger.Info("starting inspect server") - if err := ins.Run(ctx); err != nil { - return err - } - return nil + return cmd } diff --git a/cmd/tenderdash/commands/key_migrate.go b/cmd/tenderdash/commands/key_migrate.go index 739af4a7d1..5866be341b 100644 --- a/cmd/tenderdash/commands/key_migrate.go +++ b/cmd/tenderdash/commands/key_migrate.go @@ -5,11 +5,14 @@ import ( "fmt" "github.com/spf13/cobra" + cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/scripts/keymigrate" + "github.com/tendermint/tendermint/scripts/scmigrate" ) -func MakeKeyMigrateCommand() *cobra.Command { +func MakeKeyMigrateCommand(conf *cfg.Config, logger log.Logger) *cobra.Command { cmd := &cobra.Command{ Use: "key-migrate", Short: "Run Database key migration", @@ -38,7 +41,7 @@ func MakeKeyMigrateCommand() *cobra.Command { db, err := cfg.DefaultDBProvider(&cfg.DBContext{ ID: dbctx, - Config: config, + Config: conf, }) if err != nil { @@ -49,6 +52,13 @@ func MakeKeyMigrateCommand() *cobra.Command { return fmt.Errorf("running migration for context %q: %w", dbctx, err) } + + if dbctx == "blockstore" { + if err := scmigrate.Migrate(ctx, db); err != nil { + return fmt.Errorf("running seen commit migration: %w", err) + + } + } } logger.Info("completed database migration successfully") @@ -58,7 +68,7 @@ func MakeKeyMigrateCommand() *cobra.Command { } // allow database info to be overridden via cli - addDBFlags(cmd) + addDBFlags(cmd, conf) return cmd } diff --git a/cmd/tenderdash/commands/light.go b/cmd/tenderdash/commands/light.go index 6b01e9c412..5b37c6bd32 100644 --- a/cmd/tenderdash/commands/light.go +++ b/cmd/tenderdash/commands/light.go @@ -1,22 +1,22 @@ package commands import ( - "context" "errors" "fmt" "net/http" "os" + "os/signal" "path/filepath" "strings" + "syscall" "time" - dashcore "github.com/tendermint/tendermint/dashcore/rpc" - "github.com/spf13/cobra" dbm "github.com/tendermint/tm-db" + "github.com/tendermint/tendermint/config" + dashcore "github.com/tendermint/tendermint/dash/core" "github.com/tendermint/tendermint/libs/log" - tmos "github.com/tendermint/tendermint/libs/os" "github.com/tendermint/tendermint/light" lproxy "github.com/tendermint/tendermint/light/proxy" lrpc "github.com/tendermint/tendermint/light/rpc" @@ -24,11 +24,56 @@ import ( rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server" ) -// LightCmd represents the base command when called without any subcommands -var LightCmd = &cobra.Command{ - Use: "light [chainID]", - Short: "Run a light client proxy server, verifying Tendermint rpc", - Long: `Run a light client proxy server, verifying Tendermint rpc. +// LightCmd constructs the base command called when invoked without any subcommands. 
+func MakeLightCommand(conf *config.Config, logger log.Logger) *cobra.Command { + var ( + listenAddr string + primaryAddr string + witnessAddrsJoined string + chainID string + dir string + maxOpenConnections int + + logLevel string + logFormat string + + primaryKey = []byte("primary") + witnessesKey = []byte("witnesses") + + dashCoreRPCHost string + dashCoreRPCUser string + dashCoreRPCPass string + ) + + checkForExistingProviders := func(db dbm.DB) (string, []string, error) { + primaryBytes, err := db.Get(primaryKey) + if err != nil { + return "", []string{""}, err + } + witnessesBytes, err := db.Get(witnessesKey) + if err != nil { + return "", []string{""}, err + } + witnessesAddrs := strings.Split(string(witnessesBytes), ",") + return string(primaryBytes), witnessesAddrs, nil + } + + saveProviders := func(db dbm.DB, primaryAddr, witnessesAddrs string) error { + err := db.Set(primaryKey, []byte(primaryAddr)) + if err != nil { + return fmt.Errorf("failed to save primary provider: %w", err) + } + err = db.Set(witnessesKey, []byte(witnessesAddrs)) + if err != nil { + return fmt.Errorf("failed to save witness providers: %w", err) + } + return nil + } + + cmd := &cobra.Command{ + Use: "light [chainID]", + Short: "Run a light client proxy server, verifying Tendermint rpc", + Long: `Run a light client proxy server, verifying Tendermint rpc. All calls that can be tracked back to a block header by a proof will be verified before passing them back to the caller. Other than @@ -38,6 +83,8 @@ Furthermore to the chainID, a fresh instance of a light client will need a primary RPC address and witness RPC addresses. To restart the node, thereafter only the chainID is required. When /abci_query is called, the Merkle key path format is: /{store name}/{key} @@ -45,167 +92,136 @@ When /abci_query is called: Please verify with your application that this Merkle key format is used (true for applications built w/ Cosmos SDK). `, - RunE: runProxy, - Args: cobra.ExactArgs(1), - Example: `light cosmoshub-3 -p http://52.57.29.196:26657 -w http://public-seed-node.cosmoshub.certus.one:26657 + RunE: func(cmd *cobra.Command, args []string) error { + chainID = args[0] + logger.Info("Creating client...", "chainID", chainID) + + var witnessesAddrs []string + if witnessAddrsJoined != "" { + witnessesAddrs = strings.Split(witnessAddrsJoined, ",") + } + + lightDB, err := dbm.NewGoLevelDB("light-client-db", dir) + if err != nil { + return fmt.Errorf("can't create a db: %w", err) + } + // create a prefixed db on the chainID + db := dbm.NewPrefixDB(lightDB, []byte(chainID)) + + if primaryAddr == "" { // check to see if we can start from an existing state + var err error + primaryAddr, witnessesAddrs, err = checkForExistingProviders(db) + if err != nil { + return fmt.Errorf("failed to retrieve primary or witness from db: %w", err) + } + if primaryAddr == "" { + return errors.New( + "no primary address was provided nor found. Please provide a primary (using -p)." + + " Run the command: tendermint light --help for more information", + ) + } + } else { + err := saveProviders(db, primaryAddr, witnessAddrsJoined) + if err != nil { + logger.Error("Unable to save primary and or witness addresses", "err", err) + } + } + + options := []light.Option{ + light.Logger(logger), + light.DashCoreVerification(), + } + + rpcLogger := logger.With("module", dashcore.ModuleName) + dashCoreRPCClient, _ := dashcore.NewRPCClient(dashCoreRPCHost, dashCoreRPCUser, dashCoreRPCPass, rpcLogger) + + c, err := light.NewHTTPClient( + cmd.Context(), + chainID, + primaryAddr, + witnessesAddrs, + dbs.New(db), + dashCoreRPCClient, + options..., + ) + if err != nil { + return err + } + + cfg := rpcserver.DefaultConfig() + cfg.MaxBodyBytes = conf.RPC.MaxBodyBytes + cfg.MaxHeaderBytes = conf.RPC.MaxHeaderBytes + cfg.MaxOpenConnections = maxOpenConnections + // If necessary adjust global WriteTimeout to ensure it's greater than + // TimeoutBroadcastTxCommit. + // See https://github.com/tendermint/tendermint/issues/3435 + if cfg.WriteTimeout <= conf.RPC.TimeoutBroadcastTxCommit { + cfg.WriteTimeout = conf.RPC.TimeoutBroadcastTxCommit + 1*time.Second + } + + p, err := lproxy.NewProxy(c, listenAddr, primaryAddr, cfg, logger, lrpc.KeyPathFn(lrpc.DefaultMerkleKeyPathFn())) + if err != nil { + return err + } + + ctx, cancel := signal.NotifyContext(cmd.Context(), os.Interrupt, syscall.SIGTERM) + defer cancel() + + go func() { + <-ctx.Done() + p.Listener.Close() + }() + + logger.Info("Starting proxy...", "laddr", listenAddr) + if err := p.ListenAndServe(ctx); err != http.ErrServerClosed { + // Error starting or closing listener: + logger.Error("proxy ListenAndServe", "err", err) + } + + return nil + }, + Args: cobra.ExactArgs(1), + Example: `light cosmoshub-3 -p http://52.57.29.196:26657 -w http://public-seed-node.cosmoshub.certus.one:26657 --height 962118 --hash 28B97BE9F6DE51AC69F70E0B7BFD7E5C9CD1A595B7DC31AFF27C50D4948020CD`, -} - -var ( - listenAddr string - primaryAddr string - witnessAddrsJoined string - chainID string - dir string - maxOpenConnections int - - logLevel string - logFormat string - - primaryKey = []byte("primary") - witnessesKey = []byte("witnesses") - - dashCoreRPCHost string - dashCoreRPCUser string - dashCoreRPCPass string -) + } -func init() { - LightCmd.Flags().StringVar(&listenAddr, "laddr", "tcp://localhost:8888", + cmd.Flags().StringVar(&listenAddr, "laddr", "tcp://localhost:8888", "serve the proxy on the given address") - LightCmd.Flags().StringVarP(&primaryAddr, "primary", "p", "", + cmd.Flags().StringVarP(&primaryAddr, "primary", "p", "", "connect to a Tendermint node at this address") - LightCmd.Flags().StringVarP(&witnessAddrsJoined, "witnesses", "w", "", + cmd.Flags().StringVarP(&witnessAddrsJoined, "witnesses", "w", "", "tendermint nodes to cross-check the primary node, comma-separated") - LightCmd.Flags().StringVarP(&dir, "dir", 
"d", os.ExpandEnv(filepath.Join("$HOME", ".tendermint-light")), + cmd.Flags().StringVarP(&dir, "dir", "d", os.ExpandEnv(filepath.Join("$HOME", ".tendermint-light")), "specify the directory") - LightCmd.Flags().IntVar( + cmd.Flags().IntVar( &maxOpenConnections, "max-open-connections", 900, "maximum number of simultaneous connections (including WebSocket).") - LightCmd.Flags().StringVar(&logLevel, "log-level", log.LogLevelInfo, "The logging level (debug|info|warn|error|fatal)") - LightCmd.Flags().StringVar(&logFormat, "log-format", log.LogFormatPlain, "The logging format (text|json)") - LightCmd.Flags().StringVar(&dashCoreRPCHost, "dchost", "", + cmd.Flags().StringVar(&logLevel, "log-level", log.LogLevelInfo, "The logging level (debug|info|warn|error|fatal)") + cmd.Flags().StringVar(&logFormat, "log-format", log.LogFormatPlain, "The logging format (text|json)") + cmd.Flags().StringVar(&dashCoreRPCHost, "dchost", "", "host address of the Dash Core RPC node") - LightCmd.Flags().StringVar(&dashCoreRPCHost, "dcuser", "", + cmd.Flags().StringVar(&dashCoreRPCHost, "dcuser", "", "Dash Core RPC node user") - LightCmd.Flags().StringVar(&dashCoreRPCHost, "dcpass", "", + cmd.Flags().StringVar(&dashCoreRPCHost, "dcpass", "", "Dash Core RPC node password") -} - -func runProxy(cmd *cobra.Command, args []string) error { - logger, err := log.NewDefaultLogger(logFormat, logLevel, false) - if err != nil { - return err - } - - chainID = args[0] - logger.Info("Creating client...", "chainID", chainID) - - witnessesAddrs := []string{} - if witnessAddrsJoined != "" { - witnessesAddrs = strings.Split(witnessAddrsJoined, ",") - } - lightDB, err := dbm.NewGoLevelDB("light-client-db", dir) - if err != nil { - return fmt.Errorf("can't create a db: %w", err) - } - // create a prefixed db on the chainID - db := dbm.NewPrefixDB(lightDB, []byte(chainID)) - - if primaryAddr == "" { // check to see if we can start from an existing state - var err error - primaryAddr, witnessesAddrs, err = checkForExistingProviders(db) - if err != nil { - return fmt.Errorf("failed to retrieve primary or witness from db: %w", err) - } - if primaryAddr == "" { - return errors.New( - "no primary address was provided nor found. Please provide a primary (using -p)." + - " Run the command: tendermint light --help for more information", - ) - } - } else { - err := saveProviders(db, primaryAddr, witnessAddrsJoined) - if err != nil { - logger.Error("Unable to save primary and or witness addresses", "err", err) - } - } - - options := []light.Option{ - light.Logger(logger), - light.DashCoreVerification(), - } - - rpcLogger := logger.With("module", dashcore.ModuleName) - dashCoreRPCClient, _ := dashcore.NewRPCClient(dashCoreRPCHost, dashCoreRPCUser, dashCoreRPCPass, rpcLogger) - - c, err := light.NewHTTPClient( - context.Background(), - chainID, - primaryAddr, - witnessesAddrs, - dbs.New(db), - dashCoreRPCClient, - options..., - ) - if err != nil { - return err - } - - cfg := rpcserver.DefaultConfig() - cfg.MaxBodyBytes = config.RPC.MaxBodyBytes - cfg.MaxHeaderBytes = config.RPC.MaxHeaderBytes - cfg.MaxOpenConnections = maxOpenConnections - // If necessary adjust global WriteTimeout to ensure it's greater than - // TimeoutBroadcastTxCommit. 
- // See https://github.com/tendermint/tendermint/issues/3435 - if cfg.WriteTimeout <= config.RPC.TimeoutBroadcastTxCommit { - cfg.WriteTimeout = config.RPC.TimeoutBroadcastTxCommit + 1*time.Second - } - - p, err := lproxy.NewProxy(c, listenAddr, primaryAddr, cfg, logger, lrpc.KeyPathFn(lrpc.DefaultMerkleKeyPathFn())) - if err != nil { - return err - } - - // Stop upon receiving SIGTERM or CTRL-C. - tmos.TrapSignal(logger, func() { - p.Listener.Close() - }) - - logger.Info("Starting proxy...", "laddr", listenAddr) - if err := p.ListenAndServe(); err != http.ErrServerClosed { - // Error starting or closing listener: - logger.Error("proxy ListenAndServe", "err", err) - } - - return nil -} - -func checkForExistingProviders(db dbm.DB) (string, []string, error) { - primaryBytes, err := db.Get(primaryKey) - if err != nil { - return "", []string{""}, err - } - witnessesBytes, err := db.Get(witnessesKey) - if err != nil { - return "", []string{""}, err - } - witnessesAddrs := strings.Split(string(witnessesBytes), ",") - return string(primaryBytes), witnessesAddrs, nil -} - -func saveProviders(db dbm.DB, primaryAddr, witnessesAddrs string) error { - err := db.Set(primaryKey, []byte(primaryAddr)) - if err != nil { - return fmt.Errorf("failed to save primary provider: %w", err) - } - err = db.Set(witnessesKey, []byte(witnessesAddrs)) - if err != nil { - return fmt.Errorf("failed to save witness providers: %w", err) - } - return nil + return cmd } diff --git a/cmd/tenderdash/commands/probe_upnp.go b/cmd/tenderdash/commands/probe_upnp.go deleted file mode 100644 index 4c71e099a4..0000000000 --- a/cmd/tenderdash/commands/probe_upnp.go +++ /dev/null @@ -1,32 +0,0 @@ -package commands - -import ( - "fmt" - - "github.com/spf13/cobra" - - "github.com/tendermint/tendermint/internal/p2p/upnp" - tmjson "github.com/tendermint/tendermint/libs/json" -) - -// ProbeUpnpCmd adds capabilities to test the UPnP functionality. -var ProbeUpnpCmd = &cobra.Command{ - Use: "probe-upnp", - Short: "Test UPnP functionality", - RunE: probeUpnp, -} - -func probeUpnp(cmd *cobra.Command, args []string) error { - capabilities, err := upnp.Probe(logger) - if err != nil { - fmt.Println("Probe failed: ", err) - } else { - fmt.Println("Probe success!") - jsonBytes, err := tmjson.Marshal(capabilities) - if err != nil { - return err - } - fmt.Println(string(jsonBytes)) - } - return nil -} diff --git a/cmd/tenderdash/commands/reindex_event.go b/cmd/tenderdash/commands/reindex_event.go index bd95779635..6cec32738a 100644 --- a/cmd/tenderdash/commands/reindex_event.go +++ b/cmd/tenderdash/commands/reindex_event.go @@ -17,6 +17,7 @@ import ( "github.com/tendermint/tendermint/internal/state/indexer/sink/kv" "github.com/tendermint/tendermint/internal/state/indexer/sink/psql" "github.com/tendermint/tendermint/internal/store" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/os" "github.com/tendermint/tendermint/rpc/coretypes" "github.com/tendermint/tendermint/types" @@ -26,59 +27,68 @@ const ( reindexFailed = "event re-index failed: " ) -// ReIndexEventCmd allows re-index the event by given block height interval -var ReIndexEventCmd = &cobra.Command{ - Use: "reindex-event", - Short: "reindex events to the event store backends", - Long: ` +// MakeReindexEventCommand constructs a command to re-index events in a block height interval. 
+func MakeReindexEventCommand(conf *tmcfg.Config, logger log.Logger) *cobra.Command { + var ( + startHeight int64 + endHeight int64 + ) + + cmd := &cobra.Command{ + Use: "reindex-event", + Short: "reindex events to the event store backends", + Long: ` reindex-event is an offline tooling to re-index block and tx events to the eventsinks, -you can run this command when the event store backend dropped/disconnected or you want to -replace the backend. The default start-height is 0, meaning the tooling will start -reindex from the base block height(inclusive); and the default end-height is 0, meaning +you can run this command when the event store backend dropped/disconnected or you want to +replace the backend. The default start-height is 0, meaning the tooling will start +reindex from the base block height(inclusive); and the default end-height is 0, meaning the tooling will reindex until the latest block height(inclusive). User can omit either or both arguments. `, - Example: ` + Example: ` tendermint reindex-event tendermint reindex-event --start-height 2 tendermint reindex-event --end-height 10 tendermint reindex-event --start-height 2 --end-height 10 `, - Run: func(cmd *cobra.Command, args []string) { - bs, ss, err := loadStateAndBlockStore(config) - if err != nil { - fmt.Println(reindexFailed, err) - return - } - - if err := checkValidHeight(bs); err != nil { - fmt.Println(reindexFailed, err) - return - } + RunE: func(cmd *cobra.Command, args []string) error { + bs, ss, err := loadStateAndBlockStore(conf) + if err != nil { + return fmt.Errorf("%s: %w", reindexFailed, err) + } - es, err := loadEventSinks(config) - if err != nil { - fmt.Println(reindexFailed, err) - return - } + cvhArgs := checkValidHeightArgs{ + startHeight: startHeight, + endHeight: endHeight, + } + if err := checkValidHeight(bs, cvhArgs); err != nil { + return fmt.Errorf("%s: %w", reindexFailed, err) + } - if err = eventReIndex(cmd, es, bs, ss); err != nil { - fmt.Println(reindexFailed, err) - return - } + es, err := loadEventSinks(conf) + if err != nil { + return fmt.Errorf("%s: %w", reindexFailed, err) + } - fmt.Println("event re-index finished") - }, -} + riArgs := eventReIndexArgs{ + startHeight: startHeight, + endHeight: endHeight, + sinks: es, + blockStore: bs, + stateStore: ss, + } + if err := eventReIndex(cmd, riArgs); err != nil { + return fmt.Errorf("%s: %w", reindexFailed, err) + } -var ( - startHeight int64 - endHeight int64 -) + logger.Info("event re-index finished") + return nil + }, + } -func init() { - ReIndexEventCmd.Flags().Int64Var(&startHeight, "start-height", 0, "the block height would like to start for re-index") - ReIndexEventCmd.Flags().Int64Var(&endHeight, "end-height", 0, "the block height would like to finish for re-index") + cmd.Flags().Int64Var(&startHeight, "start-height", 0, "the block height would like to start for re-index") + cmd.Flags().Int64Var(&endHeight, "end-height", 0, "the block height would like to finish for re-index") + return cmd } func loadEventSinks(cfg *tmcfg.Config) ([]indexer.EventSink, error) { @@ -109,7 +119,7 @@ func loadEventSinks(cfg *tmcfg.Config) ([]indexer.EventSink, error) { if conn == "" { return nil, errors.New("the psql connection settings cannot be empty") } - es, err := psql.NewEventSink(conn, chainID) + es, err := psql.NewEventSink(conn, cfg.ChainID()) if err != nil { return nil, err } @@ -159,52 +169,58 @@ func loadStateAndBlockStore(cfg *tmcfg.Config) (*store.BlockStore, state.Store, return blockStore, stateStore, nil } -func eventReIndex(cmd 
*cobra.Command, es []indexer.EventSink, bs state.BlockStore, ss state.Store) error { +type eventReIndexArgs struct { + startHeight int64 + endHeight int64 + sinks []indexer.EventSink + blockStore state.BlockStore + stateStore state.Store +} +func eventReIndex(cmd *cobra.Command, args eventReIndexArgs) error { var bar progressbar.Bar - bar.NewOption(startHeight-1, endHeight) + bar.NewOption(args.startHeight-1, args.endHeight) fmt.Println("start re-indexing events:") defer bar.Finish() - for i := startHeight; i <= endHeight; i++ { + for i := args.startHeight; i <= args.endHeight; i++ { select { case <-cmd.Context().Done(): return fmt.Errorf("event re-index terminated at height %d: %w", i, cmd.Context().Err()) default: - b := bs.LoadBlock(i) + b := args.blockStore.LoadBlock(i) if b == nil { return fmt.Errorf("not able to load block at height %d from the blockstore", i) } - r, err := ss.LoadABCIResponses(i) + r, err := args.stateStore.LoadABCIResponses(i) if err != nil { return fmt.Errorf("not able to load ABCI Response at height %d from the statestore", i) } e := types.EventDataNewBlockHeader{ - Header: b.Header, - NumTxs: int64(len(b.Txs)), - ResultBeginBlock: *r.BeginBlock, - ResultEndBlock: *r.EndBlock, + Header: b.Header, + NumTxs: int64(len(b.Txs)), + ResultFinalizeBlock: *r.FinalizeBlock, } var batch *indexer.Batch if e.NumTxs > 0 { batch = indexer.NewBatch(e.NumTxs) - for i, tx := range b.Data.Txs { + for i := range b.Data.Txs { tr := abcitypes.TxResult{ Height: b.Height, Index: uint32(i), - Tx: tx, - Result: *(r.DeliverTxs[i]), + Tx: b.Data.Txs[i], + Result: *(r.FinalizeBlock.TxResults[i]), } _ = batch.Add(&tr) } } - for _, sink := range es { + for _, sink := range args.sinks { if err := sink.IndexBlockEvents(e); err != nil { return fmt.Errorf("block event re-index at height %d failed: %w", i, err) } @@ -223,40 +239,45 @@ func eventReIndex(cmd *cobra.Command, es []indexer.EventSink, bs state.BlockStor return nil } -func checkValidHeight(bs state.BlockStore) error { +type checkValidHeightArgs struct { + startHeight int64 + endHeight int64 +} + +func checkValidHeight(bs state.BlockStore, args checkValidHeightArgs) error { base := bs.Base() - if startHeight == 0 { - startHeight = base + if args.startHeight == 0 { + args.startHeight = base fmt.Printf("set the start block height to the base height of the blockstore %d \n", base) } - if startHeight < base { + if args.startHeight < base { return fmt.Errorf("%s (requested start height: %d, base height: %d)", - coretypes.ErrHeightNotAvailable, startHeight, base) + coretypes.ErrHeightNotAvailable, args.startHeight, base) } height := bs.Height() - if startHeight > height { + if args.startHeight > height { return fmt.Errorf( - "%s (requested start height: %d, store height: %d)", coretypes.ErrHeightNotAvailable, startHeight, height) + "%s (requested start height: %d, store height: %d)", coretypes.ErrHeightNotAvailable, args.startHeight, height) } - if endHeight == 0 || endHeight > height { - endHeight = height + if args.endHeight == 0 || args.endHeight > height { + args.endHeight = height fmt.Printf("set the end block height to the latest height of the blockstore %d \n", height) } - if endHeight < base { + if args.endHeight < base { return fmt.Errorf( - "%s (requested end height: %d, base height: %d)", coretypes.ErrHeightNotAvailable, endHeight, base) + "%s (requested end height: %d, base height: %d)", coretypes.ErrHeightNotAvailable, args.endHeight, base) } - if endHeight < startHeight { + if args.endHeight < args.startHeight { return fmt.Errorf( 
"%s (requested the end height: %d is less than the start height: %d)", - coretypes.ErrInvalidRequest, startHeight, endHeight) + coretypes.ErrInvalidRequest, args.startHeight, args.endHeight) } return nil diff --git a/cmd/tenderdash/commands/reindex_event_test.go b/cmd/tenderdash/commands/reindex_event_test.go index 2008251bc1..f60fe1b04e 100644 --- a/cmd/tenderdash/commands/reindex_event_test.go +++ b/cmd/tenderdash/commands/reindex_event_test.go @@ -8,14 +8,15 @@ import ( "github.com/spf13/cobra" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + dbm "github.com/tendermint/tm-db" abcitypes "github.com/tendermint/tendermint/abci/types" - tmcfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/internal/state/mocks" + "github.com/tendermint/tendermint/libs/log" prototmstate "github.com/tendermint/tendermint/proto/tendermint/state" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" _ "github.com/lib/pq" // for the psql sink ) @@ -25,13 +26,15 @@ const ( base int64 = 2 ) -func setupReIndexEventCmd() *cobra.Command { +func setupReIndexEventCmd(ctx context.Context, conf *config.Config, logger log.Logger) *cobra.Command { + cmd := MakeReindexEventCommand(conf, logger) + reIndexEventCmd := &cobra.Command{ - Use: ReIndexEventCmd.Use, + Use: cmd.Use, Run: func(cmd *cobra.Command, args []string) {}, } - _ = reIndexEventCmd.ExecuteContext(context.Background()) + _ = reIndexEventCmd.ExecuteContext(ctx) return reIndexEventCmd } @@ -68,10 +71,7 @@ func TestReIndexEventCheckHeight(t *testing.T) { } for _, tc := range testCases { - startHeight = tc.startHeight - endHeight = tc.endHeight - - err := checkValidHeight(mockBlockStore) + err := checkValidHeight(mockBlockStore, checkValidHeightArgs{startHeight: tc.startHeight, endHeight: tc.endHeight}) if tc.validHeight { require.NoError(t, err) } else { @@ -97,7 +97,7 @@ func TestLoadEventSink(t *testing.T) { } for _, tc := range testCases { - cfg := tmcfg.TestConfig() + cfg := config.TestConfig() cfg.TxIndex.Indexer = tc.sinks cfg.TxIndex.PsqlConn = tc.connURL _, err := loadEventSinks(cfg) @@ -110,7 +110,7 @@ func TestLoadEventSink(t *testing.T) { } func TestLoadBlockStore(t *testing.T) { - testCfg, err := tmcfg.ResetTestRoot(t.Name()) + testCfg, err := config.ResetTestRoot(t.TempDir(), t.Name()) require.NoError(t, err) testCfg.DBBackend = "goleveldb" _, _, err = loadStateAndBlockStore(testCfg) @@ -152,11 +152,11 @@ func TestReIndexEvent(t *testing.T) { On("IndexTxEvents", mock.AnythingOfType("[]*types.TxResult")).Return(errors.New("")).Once(). On("IndexTxEvents", mock.AnythingOfType("[]*types.TxResult")).Return(nil) - dtx := abcitypes.ResponseDeliverTx{} + dtx := abcitypes.ExecTxResult{} abciResp := &prototmstate.ABCIResponses{ - DeliverTxs: []*abcitypes.ResponseDeliverTx{&dtx}, - EndBlock: &abcitypes.ResponseEndBlock{}, - BeginBlock: &abcitypes.ResponseBeginBlock{}, + FinalizeBlock: &abcitypes.ResponseFinalizeBlock{ + TxResults: []*abcitypes.ExecTxResult{&dtx}, + }, } mockStateStore. 
@@ -177,11 +177,22 @@ func TestReIndexEvent(t *testing.T) { {height, height, false}, } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + logger := log.NewNopLogger() + conf := config.DefaultConfig() + for _, tc := range testCases { - startHeight = tc.startHeight - endHeight = tc.endHeight + err := eventReIndex( + setupReIndexEventCmd(ctx, conf, logger), + eventReIndexArgs{ + sinks: []indexer.EventSink{mockEventSink}, + blockStore: mockBlockStore, + stateStore: mockStateStore, + startHeight: tc.startHeight, + endHeight: tc.endHeight, + }) - err := eventReIndex(setupReIndexEventCmd(), []indexer.EventSink{mockEventSink}, mockBlockStore, mockStateStore) if tc.reIndexErr { require.Error(t, err) } else { diff --git a/cmd/tenderdash/commands/replay.go b/cmd/tenderdash/commands/replay.go index 023921d9e4..fb6f19e55d 100644 --- a/cmd/tenderdash/commands/replay.go +++ b/cmd/tenderdash/commands/replay.go @@ -2,25 +2,30 @@ package commands import ( "github.com/spf13/cobra" + + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/consensus" + "github.com/tendermint/tendermint/libs/log" ) -// ReplayCmd allows replaying of messages from the WAL. -var ReplayCmd = &cobra.Command{ - Use: "replay", - Short: "Replay messages from WAL", - Run: func(cmd *cobra.Command, args []string) { - consensus.RunReplayFile(config.BaseConfig, config.Consensus, false) - }, +// MakeReplayCommand constructs a command to replay messages from the WAL into consensus. +func MakeReplayCommand(conf *config.Config, logger log.Logger) *cobra.Command { + return &cobra.Command{ + Use: "replay", + Short: "Replay messages from WAL", + RunE: func(cmd *cobra.Command, args []string) error { + return consensus.RunReplayFile(cmd.Context(), logger, conf.BaseConfig, conf.Consensus, false) + }, + } } -// ReplayConsoleCmd allows replaying of messages from the WAL in a -// console. -var ReplayConsoleCmd = &cobra.Command{ - Use: "replay-console", - Short: "Replay messages from WAL in a console", - Run: func(cmd *cobra.Command, args []string) { - consensus.RunReplayFile(config.BaseConfig, config.Consensus, true) - }, - PreRun: deprecateSnakeCase, +// MakeReplayConsoleCommand constructs a command to replay WAL messages to stdout. +func MakeReplayConsoleCommand(conf *config.Config, logger log.Logger) *cobra.Command { + return &cobra.Command{ + Use: "replay-console", + Short: "Replay messages from WAL in a console", + RunE: func(cmd *cobra.Command, args []string) error { + return consensus.RunReplayFile(cmd.Context(), logger, conf.BaseConfig, conf.Consensus, true) + }, + } } diff --git a/cmd/tenderdash/commands/reset.go b/cmd/tenderdash/commands/reset.go new file mode 100644 index 0000000000..38beffb629 --- /dev/null +++ b/cmd/tenderdash/commands/reset.go @@ -0,0 +1,179 @@ +package commands + +import ( + "os" + "path/filepath" + + "github.com/spf13/cobra" + + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/libs/log" + tmos "github.com/tendermint/tendermint/libs/os" + "github.com/tendermint/tendermint/privval" + "github.com/tendermint/tendermint/types" +) + +// MakeResetCommand constructs a command that removes the database of +// the specified Tendermint core instance. 
+func MakeResetCommand(conf *config.Config, logger log.Logger) *cobra.Command { + var keyType string + + resetCmd := &cobra.Command{ + Use: "reset", + Short: "Set of commands to conveniently reset tendermint related data", + } + + resetBlocksCmd := &cobra.Command{ + Use: "blockchain", + Short: "Removes all blocks, state, transactions and evidence stored by the tendermint node", + RunE: func(cmd *cobra.Command, args []string) error { + return ResetState(conf.DBDir(), logger) + }, + } + + resetPeersCmd := &cobra.Command{ + Use: "peers", + Short: "Removes all peer addresses", + RunE: func(cmd *cobra.Command, args []string) error { + return ResetPeerStore(conf.DBDir()) + }, + } + + resetSignerCmd := &cobra.Command{ + Use: "unsafe-signer", + Short: "esets private validator signer state", + Long: `Resets private validator signer state. +Only use in testing. This can cause the node to double sign`, + RunE: func(cmd *cobra.Command, args []string) error { + return ResetFilePV(conf.PrivValidator.KeyFile(), conf.PrivValidator.StateFile(), logger, keyType) + }, + } + + resetAllCmd := &cobra.Command{ + Use: "unsafe-all", + Short: "Removes all tendermint data including signing state", + Long: `Removes all tendermint data including signing state. +Only use in testing. This can cause the node to double sign`, + RunE: func(cmd *cobra.Command, args []string) error { + return ResetAll(conf.DBDir(), conf.PrivValidator.KeyFile(), + conf.PrivValidator.StateFile(), logger, keyType) + }, + } + + resetSignerCmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519, + "Signer key type. Options: ed25519, secp256k1") + + resetAllCmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519, + "Signer key type. Options: ed25519, secp256k1") + + resetCmd.AddCommand(resetBlocksCmd) + resetCmd.AddCommand(resetPeersCmd) + resetCmd.AddCommand(resetSignerCmd) + resetCmd.AddCommand(resetAllCmd) + + return resetCmd +} + +// ResetAll removes address book files plus all data, and resets the privValdiator data. +// Exported for extenal CLI usage +// XXX: this is unsafe and should only suitable for testnets. +func ResetAll(dbDir, privValKeyFile, privValStateFile string, logger log.Logger, keyType string) error { + if err := os.RemoveAll(dbDir); err == nil { + logger.Info("Removed all blockchain history", "dir", dbDir) + } else { + logger.Error("error removing all blockchain history", "dir", dbDir, "err", err) + } + + if err := tmos.EnsureDir(dbDir, 0700); err != nil { + logger.Error("unable to recreate dbDir", "err", err) + } + + // recreate the dbDir since the privVal state needs to live there + return ResetFilePV(privValKeyFile, privValStateFile, logger, keyType) +} + +// ResetState removes all blocks, tendermint state, indexed transactions and evidence. 
+func ResetState(dbDir string, logger log.Logger) error { + blockdb := filepath.Join(dbDir, "blockstore.db") + state := filepath.Join(dbDir, "state.db") + wal := filepath.Join(dbDir, "cs.wal") + evidence := filepath.Join(dbDir, "evidence.db") + txIndex := filepath.Join(dbDir, "tx_index.db") + + if tmos.FileExists(blockdb) { + if err := os.RemoveAll(blockdb); err == nil { + logger.Info("Removed all blockstore.db", "dir", blockdb) + } else { + logger.Error("error removing all blockstore.db", "dir", blockdb, "err", err) + } + } + + if tmos.FileExists(state) { + if err := os.RemoveAll(state); err == nil { + logger.Info("Removed all state.db", "dir", state) + } else { + logger.Error("error removing all state.db", "dir", state, "err", err) + } + } + + if tmos.FileExists(wal) { + if err := os.RemoveAll(wal); err == nil { + logger.Info("Removed all cs.wal", "dir", wal) + } else { + logger.Error("error removing all cs.wal", "dir", wal, "err", err) + } + } + + if tmos.FileExists(evidence) { + if err := os.RemoveAll(evidence); err == nil { + logger.Info("Removed all evidence.db", "dir", evidence) + } else { + logger.Error("error removing all evidence.db", "dir", evidence, "err", err) + } + } + + if tmos.FileExists(txIndex) { + if err := os.RemoveAll(txIndex); err == nil { + logger.Info("Removed tx_index.db", "dir", txIndex) + } else { + logger.Error("error removing tx_index.db", "dir", txIndex, "err", err) + } + } + + return tmos.EnsureDir(dbDir, 0700) +} + +// ResetFilePV loads the file private validator and resets the watermark to 0. If used on an existing network, +// this can cause the node to double sign. +// XXX: this is unsafe and should only suitable for testnets. +func ResetFilePV(privValKeyFile, privValStateFile string, logger log.Logger, keyType string) error { + if _, err := os.Stat(privValKeyFile); err == nil { + pv, err := privval.LoadFilePVEmptyState(privValKeyFile, privValStateFile) + if err != nil { + return err + } + if err := pv.Reset(); err != nil { + return err + } + logger.Info("Reset private validator file to genesis state", "keyFile", privValKeyFile, + "stateFile", privValStateFile) + } else { + pv := privval.GenFilePV(privValKeyFile, privValStateFile) + if err := pv.Save(); err != nil { + return err + } + logger.Info("Generated private validator file", "keyFile", privValKeyFile, + "stateFile", privValStateFile) + } + return nil +} + +// ResetPeerStore removes the peer store containing all information used by the tendermint networking layer +// In the case of a reset, new peers will need to be set either via the config or through the discovery mechanism +func ResetPeerStore(dbDir string) error { + peerstore := filepath.Join(dbDir, "peerstore.db") + if tmos.FileExists(peerstore) { + return os.RemoveAll(peerstore) + } + return nil +} diff --git a/cmd/tenderdash/commands/reset_priv_validator.go b/cmd/tenderdash/commands/reset_priv_validator.go deleted file mode 100644 index 06e18a19d2..0000000000 --- a/cmd/tenderdash/commands/reset_priv_validator.go +++ /dev/null @@ -1,94 +0,0 @@ -package commands - -import ( - "os" - - "github.com/spf13/cobra" - - "github.com/tendermint/tendermint/libs/log" - tmos "github.com/tendermint/tendermint/libs/os" - "github.com/tendermint/tendermint/privval" -) - -// ResetAllCmd removes the database of this Tendermint core -// instance. 
-var ResetAllCmd = &cobra.Command{ - Use: "unsafe-reset-all", - Short: "(unsafe) Remove all the data and WAL, reset this node's validator to genesis state", - RunE: resetAll, -} - -var keepAddrBook bool - -func init() { - ResetAllCmd.Flags().BoolVar(&keepAddrBook, "keep-addr-book", false, "keep the address book intact") -} - -// ResetPrivValidatorCmd resets the private validator files. -var ResetPrivValidatorCmd = &cobra.Command{ - Use: "unsafe-reset-priv-validator", - Short: "(unsafe) Reset this node's validator to genesis state", - RunE: resetPrivValidator, -} - -// XXX: this is totally unsafe. -// it's only suitable for testnets. -func resetAll(cmd *cobra.Command, args []string) error { - return ResetAll(config.DBDir(), config.P2P.AddrBookFile(), config.PrivValidator.KeyFile(), - config.PrivValidator.StateFile(), logger) -} - -// XXX: this is totally unsafe. -// it's only suitable for testnets. -func resetPrivValidator(cmd *cobra.Command, args []string) error { - return resetFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile(), logger) -} - -// ResetAll removes address book files plus all data, and resets the privValdiator data. -// Exported so other CLI tools can use it. -func ResetAll(dbDir, addrBookFile, privValKeyFile, privValStateFile string, logger log.Logger) error { - if keepAddrBook { - logger.Info("The address book remains intact") - } else { - removeAddrBook(addrBookFile, logger) - } - if err := os.RemoveAll(dbDir); err == nil { - logger.Info("Removed all blockchain history", "dir", dbDir) - } else { - logger.Error("Error removing all blockchain history", "dir", dbDir, "err", err) - } - // recreate the dbDir since the privVal state needs to live there - if err := tmos.EnsureDir(dbDir, 0700); err != nil { - logger.Error("unable to recreate dbDir", "err", err) - } - return resetFilePV(privValKeyFile, privValStateFile, logger) -} - -func resetFilePV(privValKeyFile, privValStateFile string, logger log.Logger) error { - if _, err := os.Stat(privValKeyFile); err == nil { - pv, err := privval.LoadFilePVEmptyState(privValKeyFile, privValStateFile) - if err != nil { - return err - } - pv.Reset() - logger.Info("Reset private validator file to genesis state", "keyFile", privValKeyFile, - "stateFile", privValStateFile) - } else { - pv := privval.GenFilePV(privValKeyFile, privValStateFile) - if err != nil { - return err - } - pv.Save() - logger.Info("Generated private validator file", "keyFile", privValKeyFile, - "stateFile", privValStateFile) - } - return nil -} - -func removeAddrBook(addrBookFile string, logger log.Logger) { - if err := os.Remove(addrBookFile); err == nil { - logger.Info("Removed existing address book", "file", addrBookFile) - } else if !os.IsNotExist(err) { - logger.Info("Error removing address book", "file", addrBookFile, "err", err) - } -} diff --git a/cmd/tenderdash/commands/reset_test.go b/cmd/tenderdash/commands/reset_test.go new file mode 100644 index 0000000000..fd3963e885 --- /dev/null +++ b/cmd/tenderdash/commands/reset_test.go @@ -0,0 +1,62 @@ +package commands + +import ( + "context" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" + + cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/privval" + "github.com/tendermint/tendermint/types" +) + +func Test_ResetAll(t *testing.T) { + config := cfg.TestConfig() + dir := t.TempDir() + config.SetRoot(dir) + logger := log.NewNopLogger() + cfg.EnsureRoot(dir) + require.NoError(t, 
initFilesWithConfig(context.Background(), nodeConfig{Config: config}, logger)) + pv, err := privval.LoadFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile()) + require.NoError(t, err) + pv.LastSignState.Height = 10 + require.NoError(t, pv.Save()) + require.NoError(t, ResetAll(config.DBDir(), config.PrivValidator.KeyFile(), + config.PrivValidator.StateFile(), logger, types.ABCIPubKeyTypeEd25519)) + require.DirExists(t, config.DBDir()) + require.NoFileExists(t, filepath.Join(config.DBDir(), "block.db")) + require.NoFileExists(t, filepath.Join(config.DBDir(), "state.db")) + require.NoFileExists(t, filepath.Join(config.DBDir(), "evidence.db")) + require.NoFileExists(t, filepath.Join(config.DBDir(), "tx_index.db")) + require.FileExists(t, config.PrivValidator.StateFile()) + pv, err = privval.LoadFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile()) + require.NoError(t, err) + require.Equal(t, int64(0), pv.LastSignState.Height) +} + +func Test_ResetState(t *testing.T) { + config := cfg.TestConfig() + dir := t.TempDir() + config.SetRoot(dir) + logger := log.NewNopLogger() + cfg.EnsureRoot(dir) + require.NoError(t, initFilesWithConfig(context.Background(), nodeConfig{Config: config}, logger)) + pv, err := privval.LoadFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile()) + require.NoError(t, err) + pv.LastSignState.Height = 10 + require.NoError(t, pv.Save()) + require.NoError(t, ResetState(config.DBDir(), logger)) + require.DirExists(t, config.DBDir()) + require.NoFileExists(t, filepath.Join(config.DBDir(), "block.db")) + require.NoFileExists(t, filepath.Join(config.DBDir(), "state.db")) + require.NoFileExists(t, filepath.Join(config.DBDir(), "evidence.db")) + require.NoFileExists(t, filepath.Join(config.DBDir(), "tx_index.db")) + require.FileExists(t, config.PrivValidator.StateFile()) + pv, err = privval.LoadFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile()) + require.NoError(t, err) + // private validator state should still be in tact. + require.Equal(t, int64(10), pv.LastSignState.Height) +} diff --git a/cmd/tenderdash/commands/rollback.go b/cmd/tenderdash/commands/rollback.go index 8391ee506a..a604341783 100644 --- a/cmd/tenderdash/commands/rollback.go +++ b/cmd/tenderdash/commands/rollback.go @@ -5,14 +5,15 @@ import ( "github.com/spf13/cobra" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/state" ) -var RollbackStateCmd = &cobra.Command{ - Use: "rollback", - Short: "rollback tendermint state by one height", - Long: ` +func MakeRollbackStateCommand(conf *config.Config) *cobra.Command { + return &cobra.Command{ + Use: "rollback", + Short: "rollback tendermint state by one height", + Long: ` A state rollback is performed to recover from an incorrect application state transition, when Tendermint has persisted an incorrect app hash and is thus unable to make progress. Rollback overwrites a state at height n with the state at height n - 1. @@ -20,21 +21,23 @@ The application should also roll back to height n - 1. No blocks are removed, so restarting Tendermint the transactions in block n will be re-executed against the application. 
`, - RunE: func(cmd *cobra.Command, args []string) error { - height, hash, err := RollbackState(config) - if err != nil { - return fmt.Errorf("failed to rollback state: %w", err) - } - - fmt.Printf("Rolled back state to height %d and hash %X", height, hash) - return nil - }, + RunE: func(cmd *cobra.Command, args []string) error { + height, hash, err := RollbackState(conf) + if err != nil { + return fmt.Errorf("failed to rollback state: %w", err) + } + + fmt.Printf("Rolled back state to height %d and hash %X", height, hash) + return nil + }, + } + } // RollbackState takes the state at the current height n and overwrites it with the state // at height n - 1. Note state here refers to tendermint state not application state. // Returns the latest state height and app hash alongside an error if there was one. -func RollbackState(config *cfg.Config) (int64, []byte, error) { +func RollbackState(config *config.Config) (int64, []byte, error) { // use the parsed config to load the block and state store blockStore, stateStore, err := loadStateAndBlockStore(config) if err != nil { diff --git a/cmd/tenderdash/commands/root.go b/cmd/tenderdash/commands/root.go index 02f260de57..fdee638bcb 100644 --- a/cmd/tenderdash/commands/root.go +++ b/cmd/tenderdash/commands/root.go @@ -2,73 +2,68 @@ package commands import ( "fmt" - "strings" + "os" + "path/filepath" "time" "github.com/spf13/cobra" "github.com/spf13/viper" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/libs/cli" "github.com/tendermint/tendermint/libs/log" ) -var ( - config = cfg.DefaultConfig() - logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false) - ctxTimeout = 4 * time.Second -) - -func init() { - registerFlagsRootCmd(RootCmd) -} - -func registerFlagsRootCmd(cmd *cobra.Command) { - cmd.PersistentFlags().String("log-level", config.LogLevel, "log level") -} +const ctxTimeout = 4 * time.Second // ParseConfig retrieves the default environment configuration, // sets up the Tendermint root and ensures that the root exists -func ParseConfig() (*cfg.Config, error) { - conf := cfg.DefaultConfig() - err := viper.Unmarshal(conf) - if err != nil { +func ParseConfig(conf *config.Config) (*config.Config, error) { + if err := viper.Unmarshal(conf); err != nil { return nil, err } + conf.SetRoot(conf.RootDir) - cfg.EnsureRoot(conf.RootDir) + if err := conf.ValidateBasic(); err != nil { - return nil, fmt.Errorf("error in config file: %v", err) + return nil, fmt.Errorf("error in config file: %w", err) } return conf, nil } -// RootCmd is the root command for Tendermint core. -var RootCmd = &cobra.Command{ - Use: "tendermint", - Short: "BFT state machine replication for applications in any programming languages", - PersistentPreRunE: func(cmd *cobra.Command, args []string) (err error) { - if cmd.Name() == VersionCmd.Name() { - return nil - } +// RootCommand constructs the root command-line entry point for Tendermint core. 
+func RootCommand(conf *config.Config, logger log.Logger) *cobra.Command { + cmd := &cobra.Command{ + Use: "tendermint", + Short: "BFT state machine replication for applications in any programming languages", + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + if cmd.Name() == VersionCmd.Name() { + return nil + } - config, err = ParseConfig() - if err != nil { - return err - } + if err := cli.BindFlagsLoadViper(cmd, args); err != nil { + return err + } - logger, err = log.NewDefaultLogger(config.LogFormat, config.LogLevel, false) - if err != nil { - return err - } + pconf, err := ParseConfig(conf) + if err != nil { + return err + } + *conf = *pconf + config.EnsureRoot(conf.RootDir) + if err := log.OverrideWithNewLogger(logger, conf.LogFormat, conf.LogLevel); err != nil { + return err + } + if warning := pconf.DeprecatedFieldWarning(); warning != nil { + logger.Info("WARNING", "deprecated field warning", warning) + } - logger = logger.With("module", "main") - return nil - }, -} - -// deprecateSnakeCase is a util function for 0.34.1. Should be removed in 0.35 -func deprecateSnakeCase(cmd *cobra.Command, args []string) { - if strings.Contains(cmd.CalledAs(), "_") { - fmt.Println("Deprecated: snake_case commands will be replaced by hyphen-case commands in the next major release") + return nil + }, } + cmd.PersistentFlags().StringP(cli.HomeFlag, "", os.ExpandEnv(filepath.Join("$HOME", config.DefaultTendermintDir)), "directory for config and data") + cmd.PersistentFlags().Bool(cli.TraceFlag, false, "print out full stack trace on errors") + cmd.PersistentFlags().String("log-level", conf.LogLevel, "log level") + cobra.OnInitialize(func() { cli.InitEnv("TM") }) + return cmd } diff --git a/cmd/tenderdash/commands/root_test.go b/cmd/tenderdash/commands/root_test.go index cd4bc9f5f7..a4f4fb08d5 100644 --- a/cmd/tenderdash/commands/root_test.go +++ b/cmd/tenderdash/commands/root_test.go @@ -1,11 +1,10 @@ package commands import ( + "context" "fmt" - "io/ioutil" "os" "path/filepath" - "strconv" "testing" "github.com/spf13/cobra" @@ -15,47 +14,54 @@ import ( cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/cli" + "github.com/tendermint/tendermint/libs/log" tmos "github.com/tendermint/tendermint/libs/os" ) -// clearConfig clears env vars, the given root dir, and resets viper. -func clearConfig(dir string) { - if err := os.Unsetenv("TMHOME"); err != nil { - panic(err) - } - if err := os.Unsetenv("TM_HOME"); err != nil { - panic(err) +// writeConfigVals writes a toml file with the given values. +// It returns an error if writing was impossible. +func writeConfigVals(dir string, vals map[string]string) error { + data := "" + for k, v := range vals { + data += fmt.Sprintf("%s = \"%s\"\n", k, v) } + cfile := filepath.Join(dir, "config.toml") + return os.WriteFile(cfile, []byte(data), 0600) +} + +// clearConfig clears env vars, the given root dir, and resets viper. 
+func clearConfig(t *testing.T, dir string) *cfg.Config { + t.Helper() + require.NoError(t, os.Unsetenv("TMHOME")) + require.NoError(t, os.Unsetenv("TM_HOME")) + require.NoError(t, os.RemoveAll(dir)) - if err := os.RemoveAll(dir); err != nil { - panic(err) - } viper.Reset() - config = cfg.DefaultConfig() + conf := cfg.DefaultConfig() + conf.RootDir = dir + return conf } // prepare new rootCmd -func testRootCmd() *cobra.Command { - rootCmd := &cobra.Command{ - Use: RootCmd.Use, - PersistentPreRunE: RootCmd.PersistentPreRunE, - Run: func(cmd *cobra.Command, args []string) {}, - } - registerFlagsRootCmd(rootCmd) +func testRootCmd(conf *cfg.Config) *cobra.Command { + logger := log.NewNopLogger() + cmd := RootCommand(conf, logger) + cmd.RunE = func(cmd *cobra.Command, args []string) error { return nil } + var l string - rootCmd.PersistentFlags().String("log", l, "Log") - return rootCmd + cmd.PersistentFlags().String("log", l, "Log") + return cmd } -func testSetup(rootDir string, args []string, env map[string]string) error { - clearConfig(rootDir) +func testSetup(ctx context.Context, t *testing.T, conf *cfg.Config, args []string, env map[string]string) error { + t.Helper() - rootCmd := testRootCmd() - cmd := cli.PrepareBaseCmd(rootCmd, "TM", rootDir) + cmd := testRootCmd(conf) + viper.Set(cli.HomeFlag, conf.RootDir) // run with the args and env - args = append([]string{rootCmd.Use}, args...) - return cli.RunWithArgs(cmd, args, env) + args = append([]string{cmd.Use}, args...) + return cli.RunWithArgs(ctx, cmd, args, env) } func TestRootHome(t *testing.T) { @@ -71,23 +77,29 @@ func TestRootHome(t *testing.T) { {nil, map[string]string{"TMHOME": newRoot}, newRoot}, } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for i, tc := range cases { - idxString := strconv.Itoa(i) + t.Run(fmt.Sprint(i), func(t *testing.T) { + conf := clearConfig(t, tc.root) - err := testSetup(defaultRoot, tc.args, tc.env) - require.Nil(t, err, idxString) + err := testSetup(ctx, t, conf, tc.args, tc.env) + require.NoError(t, err) - assert.Equal(t, tc.root, config.RootDir, idxString) - assert.Equal(t, tc.root, config.P2P.RootDir, idxString) - assert.Equal(t, tc.root, config.Consensus.RootDir, idxString) - assert.Equal(t, tc.root, config.Mempool.RootDir, idxString) + require.Equal(t, tc.root, conf.RootDir) + require.Equal(t, tc.root, conf.P2P.RootDir) + require.Equal(t, tc.root, conf.Consensus.RootDir) + require.Equal(t, tc.root, conf.Mempool.RootDir) + }) } } func TestRootFlagsEnv(t *testing.T) { - // defaults defaults := cfg.DefaultConfig() + defaultDir := t.TempDir() + defaultLogLvl := defaults.LogLevel cases := []struct { @@ -102,18 +114,25 @@ func TestRootFlagsEnv(t *testing.T) { {nil, map[string]string{"TM_LOG_LEVEL": "debug"}, "debug"}, // right env } - defaultRoot := t.TempDir() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for i, tc := range cases { - idxString := strconv.Itoa(i) + t.Run(fmt.Sprint(i), func(t *testing.T) { + conf := clearConfig(t, defaultDir) - err := testSetup(defaultRoot, tc.args, tc.env) - require.Nil(t, err, idxString) + err := testSetup(ctx, t, conf, tc.args, tc.env) + require.NoError(t, err) + + assert.Equal(t, tc.logLevel, conf.LogLevel) + }) - assert.Equal(t, tc.logLevel, config.LogLevel, idxString) } } func TestRootConfig(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // write non-default config nonDefaultLogLvl := "debug" @@ -122,9 +141,8 @@ func TestRootConfig(t *testing.T) { } cases := 
[]struct { - args []string - env map[string]string - + args []string + env map[string]string logLvl string }{ {nil, nil, nonDefaultLogLvl}, // should load config @@ -133,29 +151,31 @@ func TestRootConfig(t *testing.T) { } for i, tc := range cases { - defaultRoot := t.TempDir() - idxString := strconv.Itoa(i) - clearConfig(defaultRoot) - - // XXX: path must match cfg.defaultConfigPath - configFilePath := filepath.Join(defaultRoot, "config") - err := tmos.EnsureDir(configFilePath, 0700) - require.Nil(t, err) - - // write the non-defaults to a different path - // TODO: support writing sub configs so we can test that too - err = WriteConfigVals(configFilePath, cvals) - require.Nil(t, err) - - rootCmd := testRootCmd() - cmd := cli.PrepareBaseCmd(rootCmd, "TM", defaultRoot) - - // run with the args and env - tc.args = append([]string{rootCmd.Use}, tc.args...) - err = cli.RunWithArgs(cmd, tc.args, tc.env) - require.Nil(t, err, idxString) - - assert.Equal(t, tc.logLvl, config.LogLevel, idxString) + t.Run(fmt.Sprint(i), func(t *testing.T) { + defaultRoot := t.TempDir() + conf := clearConfig(t, defaultRoot) + conf.LogLevel = tc.logLvl + + // XXX: path must match cfg.defaultConfigPath + configFilePath := filepath.Join(defaultRoot, "config") + err := tmos.EnsureDir(configFilePath, 0700) + require.NoError(t, err) + + // write the non-defaults to a different path + // TODO: support writing sub configs so we can test that too + err = writeConfigVals(configFilePath, cvals) + require.NoError(t, err) + + cmd := testRootCmd(conf) + viper.Set(cli.HomeFlag, conf.RootDir) + + // run with the args and env + tc.args = append([]string{cmd.Use}, tc.args...) + err = cli.RunWithArgs(ctx, cmd, tc.args, tc.env) + require.NoError(t, err) + + require.Equal(t, tc.logLvl, conf.LogLevel) + }) } } @@ -167,5 +187,5 @@ func WriteConfigVals(dir string, vals map[string]string) error { data += fmt.Sprintf("%s = \"%s\"\n", k, v) } cfile := filepath.Join(dir, "config.toml") - return ioutil.WriteFile(cfile, []byte(data), 0600) + return os.WriteFile(cfile, []byte(data), 0600) } diff --git a/cmd/tenderdash/commands/run_node.go b/cmd/tenderdash/commands/run_node.go index 435ce9ea4e..347a04034e 100644 --- a/cmd/tenderdash/commands/run_node.go +++ b/cmd/tenderdash/commands/run_node.go @@ -3,155 +3,128 @@ package commands import ( "bytes" "crypto/sha256" - "errors" - "flag" "fmt" "io" "os" + "os/signal" + "syscall" "github.com/spf13/cobra" cfg "github.com/tendermint/tendermint/config" - tmos "github.com/tendermint/tendermint/libs/os" + "github.com/tendermint/tendermint/libs/log" ) var ( genesisHash []byte ) -// AddNodeFlags exposes some common configuration options on the command-line -// These are exposed for convenience of commands embedding a tendermint node -func AddNodeFlags(cmd *cobra.Command) { +// AddNodeFlags exposes some common configuration options from conf in the flag +// set for cmd. This is a convenience for commands embedding a Tendermint node. 
+func AddNodeFlags(cmd *cobra.Command, conf *cfg.Config) { // bind flags - cmd.Flags().String("moniker", config.Moniker, "node name") + cmd.Flags().String("moniker", conf.Moniker, "node name") // mode flags - cmd.Flags().String("mode", config.Mode, "node mode (full | validator | seed)") + cmd.Flags().String("mode", conf.Mode, "node mode (full | validator | seed)") // priv val flags cmd.Flags().String( "priv-validator-laddr", - config.PrivValidator.ListenAddr, + conf.PrivValidator.ListenAddr, "socket address to listen on for connections from external priv-validator process") // node flags - cmd.Flags().Bool("blocksync.enable", config.BlockSync.Enable, "enable fast blockchain syncing") - - // TODO (https://github.com/tendermint/tendermint/issues/6908): remove this check after the v0.35 release cycle - // This check was added to give users an upgrade prompt to use the new flag for syncing. - // - // The pflag package does not have a native way to print a depcrecation warning - // and return an error. This logic was added to print a deprecation message to the user - // and then crash if the user attempts to use the old --fast-sync flag. - fs := flag.NewFlagSet("", flag.ExitOnError) - fs.Func("fast-sync", "deprecated", - func(string) error { - return errors.New("--fast-sync has been deprecated, please use --blocksync.enable") - }) - cmd.Flags().AddGoFlagSet(fs) - - cmd.Flags().MarkHidden("fast-sync") //nolint:errcheck + cmd.Flags().BytesHexVar( &genesisHash, "genesis-hash", []byte{}, "optional SHA-256 hash of the genesis file") - cmd.Flags().Int64("consensus.double-sign-check-height", config.Consensus.DoubleSignCheckHeight, + cmd.Flags().Int64("consensus.double-sign-check-height", conf.Consensus.DoubleSignCheckHeight, "how many blocks to look back to check existence of the node's "+ "consensus votes before joining consensus") // abci flags cmd.Flags().String( "proxy-app", - config.ProxyApp, + conf.ProxyApp, "proxy app address, or one of: 'kvstore',"+ " 'persistent_kvstore', 'e2e' or 'noop' for local testing.") - cmd.Flags().String("abci", config.ABCI, "specify abci transport (socket | grpc)") + cmd.Flags().String("abci", conf.ABCI, "specify abci transport (socket | grpc)") // rpc flags - cmd.Flags().String("rpc.laddr", config.RPC.ListenAddress, "RPC listen address. Port required") - cmd.Flags().String( - "rpc.grpc-laddr", - config.RPC.GRPCListenAddress, - "GRPC listen address (BroadcastTx only). Port required") - cmd.Flags().Bool("rpc.unsafe", config.RPC.Unsafe, "enabled unsafe rpc methods") - cmd.Flags().String("rpc.pprof-laddr", config.RPC.PprofListenAddress, "pprof listen address (https://golang.org/pkg/net/http/pprof)") + cmd.Flags().String("rpc.laddr", conf.RPC.ListenAddress, "RPC listen address. Port required") + cmd.Flags().Bool("rpc.unsafe", conf.RPC.Unsafe, "enabled unsafe rpc methods") + cmd.Flags().String("rpc.pprof-laddr", conf.RPC.PprofListenAddress, "pprof listen address (https://golang.org/pkg/net/http/pprof)") // p2p flags cmd.Flags().String( "p2p.laddr", - config.P2P.ListenAddress, + conf.P2P.ListenAddress, "node listen address. 
(0.0.0.0:0 means any interface, any port)") - cmd.Flags().String("p2p.seeds", config.P2P.Seeds, "comma-delimited ID@host:port seed nodes") //nolint: staticcheck - cmd.Flags().String("p2p.persistent-peers", config.P2P.PersistentPeers, "comma-delimited ID@host:port persistent peers") - cmd.Flags().String("p2p.unconditional-peer-ids", - config.P2P.UnconditionalPeerIDs, "comma-delimited IDs of unconditional peers") - cmd.Flags().Bool("p2p.upnp", config.P2P.UPNP, "enable/disable UPNP port forwarding") - cmd.Flags().Bool("p2p.pex", config.P2P.PexReactor, "enable/disable Peer-Exchange") - cmd.Flags().String("p2p.private-peer-ids", config.P2P.PrivatePeerIDs, "comma-delimited private peer IDs") + cmd.Flags().String("p2p.seeds", conf.P2P.Seeds, "comma-delimited ID@host:port seed nodes") //nolint: staticcheck + cmd.Flags().String("p2p.persistent-peers", conf.P2P.PersistentPeers, "comma-delimited ID@host:port persistent peers") + cmd.Flags().Bool("p2p.upnp", conf.P2P.UPNP, "enable/disable UPNP port forwarding") + cmd.Flags().Bool("p2p.pex", conf.P2P.PexReactor, "enable/disable Peer-Exchange") + cmd.Flags().String("p2p.private-peer-ids", conf.P2P.PrivatePeerIDs, "comma-delimited private peer IDs") // consensus flags cmd.Flags().Bool( "consensus.create-empty-blocks", - config.Consensus.CreateEmptyBlocks, + conf.Consensus.CreateEmptyBlocks, "set this to false to only produce blocks when there are txs or when the AppHash changes") cmd.Flags().String( "consensus.create-empty-blocks-interval", - config.Consensus.CreateEmptyBlocksInterval.String(), + conf.Consensus.CreateEmptyBlocksInterval.String(), "the possible interval between empty blocks") - addDBFlags(cmd) + addDBFlags(cmd, conf) } -func addDBFlags(cmd *cobra.Command) { +func addDBFlags(cmd *cobra.Command, conf *cfg.Config) { cmd.Flags().String( "db-backend", - config.DBBackend, + conf.DBBackend, "database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb") cmd.Flags().String( "db-dir", - config.DBPath, + conf.DBPath, "database directory") } // NewRunNodeCmd returns the command that allows the CLI to start a node. // It can be used with a custom PrivValidator and in-process ABCI application. -func NewRunNodeCmd(nodeProvider cfg.ServiceProvider) *cobra.Command { +func NewRunNodeCmd(nodeProvider cfg.ServiceProvider, conf *cfg.Config, logger log.Logger) *cobra.Command { cmd := &cobra.Command{ Use: "start", Aliases: []string{"node", "run"}, Short: "Run the tendermint node", RunE: func(cmd *cobra.Command, args []string) error { - if err := checkGenesisHash(config); err != nil { + if err := checkGenesisHash(conf); err != nil { return err } - n, err := nodeProvider(config, logger) + ctx, cancel := signal.NotifyContext(cmd.Context(), os.Interrupt, syscall.SIGTERM) + defer cancel() + + n, err := nodeProvider(ctx, conf, logger) if err != nil { return fmt.Errorf("failed to create node: %w", err) } - if err := n.Start(); err != nil { + if err := n.Start(ctx); err != nil { return fmt.Errorf("failed to start node: %w", err) } - logger.Info("started node", "node", n.String()) - - // Stop upon receiving SIGTERM or CTRL-C. - tmos.TrapSignal(logger, func() { - if n.IsRunning() { - if err := n.Stop(); err != nil { - logger.Error("unable to stop the node", "error", err) - } - } - }) + logger.Info("started node", "chain", conf.ChainID()) - // Run forever. 
- select {} + <-ctx.Done() + return nil }, } - AddNodeFlags(cmd) + AddNodeFlags(cmd, conf) return cmd } diff --git a/cmd/tenderdash/commands/show_node_id.go b/cmd/tenderdash/commands/show_node_id.go index 488f4c3228..ffc6c4d5e0 100644 --- a/cmd/tenderdash/commands/show_node_id.go +++ b/cmd/tenderdash/commands/show_node_id.go @@ -4,21 +4,23 @@ import ( "fmt" "github.com/spf13/cobra" + + "github.com/tendermint/tendermint/config" ) -// ShowNodeIDCmd dumps node's ID to the standard output. -var ShowNodeIDCmd = &cobra.Command{ - Use: "show-node-id", - Short: "Show this node's ID", - RunE: showNodeID, -} +// MakeShowNodeIDCommand constructs a command to dump the node ID to stdout. +func MakeShowNodeIDCommand(conf *config.Config) *cobra.Command { + return &cobra.Command{ + Use: "show-node-id", + Short: "Show this node's ID", + RunE: func(cmd *cobra.Command, args []string) error { + nodeKeyID, err := conf.LoadNodeKeyID() + if err != nil { + return err + } -func showNodeID(cmd *cobra.Command, args []string) error { - nodeKeyID, err := config.LoadNodeKeyID() - if err != nil { - return err + fmt.Println(nodeKeyID) + return nil + }, } - - fmt.Println(nodeKeyID) - return nil } diff --git a/cmd/tenderdash/commands/show_validator.go b/cmd/tenderdash/commands/show_validator.go index 03ddecd9d6..548b2a3c51 100644 --- a/cmd/tenderdash/commands/show_validator.go +++ b/cmd/tenderdash/commands/show_validator.go @@ -6,74 +6,78 @@ import ( "github.com/spf13/cobra" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" - tmjson "github.com/tendermint/tendermint/libs/json" + "github.com/tendermint/tendermint/internal/jsontypes" + "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" tmos "github.com/tendermint/tendermint/libs/os" "github.com/tendermint/tendermint/privval" tmgrpc "github.com/tendermint/tendermint/privval/grpc" ) -// ShowValidatorCmd adds capabilities for showing the validator info. -var ShowValidatorCmd = &cobra.Command{ - Use: "show-validator", - Short: "Show this node's validator info", - RunE: showValidator, -} - -func showValidator(cmd *cobra.Command, args []string) error { - var ( - pubKey crypto.PubKey - err error - ) +// MakeShowValidatorCommand constructs a command to show the validator info. 
+func MakeShowValidatorCommand(conf *config.Config, logger log.Logger) *cobra.Command { + return &cobra.Command{ + Use: "show-validator", + Short: "Show this node's validator info", + RunE: func(cmd *cobra.Command, args []string) error { + var ( + pubKey crypto.PubKey + err error + bctx = cmd.Context() + ) + //TODO: remove once gRPC is the only supported protocol + protocol, _ := tmnet.ProtocolAndAddress(conf.PrivValidator.ListenAddr) + switch protocol { + case "grpc": + pvsc, err := tmgrpc.DialRemoteSigner( + bctx, + conf.PrivValidator, + conf.ChainID(), + logger, + conf.Instrumentation.Prometheus, + ) + if err != nil { + return fmt.Errorf("can't connect to remote validator %w", err) + } - //TODO: remove once gRPC is the only supported protocol - protocol, _ := tmnet.ProtocolAndAddress(config.PrivValidator.ListenAddr) - switch protocol { - case "grpc": - pvsc, err := tmgrpc.DialRemoteSigner( - config.PrivValidator, - config.ChainID(), - logger, - config.Instrumentation.Prometheus, - ) - if err != nil { - return fmt.Errorf("can't connect to remote validator %w", err) - } + ctx, cancel := context.WithTimeout(bctx, ctxTimeout) + defer cancel() - ctx, cancel := context.WithTimeout(context.TODO(), ctxTimeout) - defer cancel() + _, err = pvsc.GetProTxHash(ctx) + if err != nil { + return fmt.Errorf("can't get proTxHash: %w", err) + } + default: - proTxHash, err = pvsc.GetProTxHash(ctx) - if err != nil { - return fmt.Errorf("can't get proTxHash: %w", err) - } - default: + keyFilePath := conf.PrivValidator.KeyFile() + if !tmos.FileExists(keyFilePath) { + return fmt.Errorf("private validator file %s does not exist", keyFilePath) + } - keyFilePath := config.PrivValidator.KeyFile() - if !tmos.FileExists(keyFilePath) { - return fmt.Errorf("private validator file %s does not exist", keyFilePath) - } + pv, err := privval.LoadFilePV(keyFilePath, conf.PrivValidator.StateFile()) + if err != nil { + return err + } - pv, err := privval.LoadFilePV(keyFilePath, config.PrivValidator.StateFile()) - if err != nil { - return err - } + ctx, cancel := context.WithTimeout(bctx, ctxTimeout) + defer cancel() - ctx, cancel := context.WithTimeout(context.TODO(), ctxTimeout) - defer cancel() + _, err = pv.GetProTxHash(ctx) + if err != nil { + return fmt.Errorf("can't get proTxHash: %w", err) + } + } - proTxHash, err = pv.GetProTxHash(ctx) - if err != nil { - return fmt.Errorf("can't get proTxHash: %w", err) - } - } + bz, err := jsontypes.Marshal(pubKey) + if err != nil { + return fmt.Errorf("failed to marshal private validator pubkey: %w", err) + } - bz, err := tmjson.Marshal(pubKey) - if err != nil { - return fmt.Errorf("failed to marshal private validator pubkey: %w", err) + fmt.Println(string(bz)) + return nil + }, } - fmt.Println(string(bz)) - return nil } diff --git a/cmd/tenderdash/commands/testnet.go b/cmd/tenderdash/commands/testnet.go index af7fcd46f9..50d82b21b0 100644 --- a/cmd/tenderdash/commands/testnet.go +++ b/cmd/tenderdash/commands/testnet.go @@ -15,283 +15,318 @@ import ( cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/bytes" + "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" tmtime "github.com/tendermint/tendermint/libs/time" "github.com/tendermint/tendermint/privval" "github.com/tendermint/tendermint/types" ) -var ( - nValidators int - nNonValidators int - initialHeight int64 - configFile string - outputDir string - nodeDirPrefix string - - populatePersistentPeers bool - hostnamePrefix string - hostnameSuffix string - 
startingIPAddress string - hostnames []string - p2pPort int - randomMonikers bool -) - const ( nodeDirPerm = 0755 ) -func init() { - TestnetFilesCmd.Flags().IntVar(&nValidators, "v", 4, +// MakeTestnetFilesCommand constructs a command to generate testnet config files. +func MakeTestnetFilesCommand(conf *cfg.Config, logger log.Logger) *cobra.Command { + cmd := &cobra.Command{ + Use: "testnet", + Short: "Initialize files for a Tendermint testnet", + Long: `testnet will create "v" + "n" number of directories and populate each with +necessary files (private validator, genesis, config, etc.). + +Note, strict routability for addresses is turned off in the config file. + +Optionally, it will fill in persistent-peers list in config file using either hostnames or IPs. + +Example: + + tendermint testnet --v 4 --o ./output --populate-persistent-peers --starting-ip-address 192.168.10.2 + `, + } + var ( + nValidators int + nNonValidators int + initialHeight int64 + configFile string + outputDir string + nodeDirPrefix string + + populatePersistentPeers bool + hostnamePrefix string + hostnameSuffix string + startingIPAddress string + hostnames []string + p2pPort int + randomMonikers bool + keyType string + ) + + cmd.Flags().IntVar(&nValidators, "v", 4, "number of validators to initialize the testnet with") - TestnetFilesCmd.Flags().StringVar(&configFile, "config", "", + cmd.Flags().StringVar(&configFile, "config", "", "config file to use (note some options may be overwritten)") - TestnetFilesCmd.Flags().IntVar(&nNonValidators, "n", 0, + cmd.Flags().IntVar(&nNonValidators, "n", 0, "number of non-validators to initialize the testnet with") - TestnetFilesCmd.Flags().StringVar(&outputDir, "o", "./mytestnet", + cmd.Flags().StringVar(&outputDir, "o", "./mytestnet", "directory to store initialization data for the testnet") - TestnetFilesCmd.Flags().StringVar(&nodeDirPrefix, "node-dir-prefix", "node", + cmd.Flags().StringVar(&nodeDirPrefix, "node-dir-prefix", "node", "prefix the directory name for each node with (node results in node0, node1, ...)") - TestnetFilesCmd.Flags().Int64Var(&initialHeight, "initial-height", 0, + cmd.Flags().Int64Var(&initialHeight, "initial-height", 0, "initial height of the first block") - TestnetFilesCmd.Flags().BoolVar(&populatePersistentPeers, "populate-persistent-peers", true, + cmd.Flags().BoolVar(&populatePersistentPeers, "populate-persistent-peers", true, "update config of each node with the list of persistent peers build using either"+ " hostname-prefix or"+ " starting-ip-address") - TestnetFilesCmd.Flags().StringVar(&hostnamePrefix, "hostname-prefix", "node", + cmd.Flags().StringVar(&hostnamePrefix, "hostname-prefix", "node", "hostname prefix (\"node\" results in persistent peers list ID0@node0:26656, ID1@node1:26656, ...)") - TestnetFilesCmd.Flags().StringVar(&hostnameSuffix, "hostname-suffix", "", + cmd.Flags().StringVar(&hostnameSuffix, "hostname-suffix", "", "hostname suffix ("+ "\".xyz.com\""+ " results in persistent peers list ID0@node0.xyz.com:26656, ID1@node1.xyz.com:26656, ...)") - TestnetFilesCmd.Flags().StringVar(&startingIPAddress, "starting-ip-address", "", + cmd.Flags().StringVar(&startingIPAddress, "starting-ip-address", "", "starting IP address ("+ "\"192.168.0.1\""+ " results in persistent peers list ID0@192.168.0.1:26656, ID1@192.168.0.2:26656, ...)") - TestnetFilesCmd.Flags().StringArrayVar(&hostnames, "hostname", []string{}, + cmd.Flags().StringArrayVar(&hostnames, "hostname", []string{}, "manually override all hostnames of validators and non-validators 
(use --hostname multiple times for multiple hosts)") - TestnetFilesCmd.Flags().IntVar(&p2pPort, "p2p-port", 26656, + cmd.Flags().IntVar(&p2pPort, "p2p-port", 26656, "P2P Port") - TestnetFilesCmd.Flags().BoolVar(&randomMonikers, "random-monikers", false, + cmd.Flags().BoolVar(&randomMonikers, "random-monikers", false, "randomize the moniker for each generated node") -} - -// TestnetFilesCmd allows initialisation of files for a Tendermint testnet. -var TestnetFilesCmd = &cobra.Command{ - Use: "testnet", - Short: "Initialize files for a Tendermint testnet", - Long: `testnet will create "v" + "n" number of directories and populate each with -necessary files (private validator, genesis, config, etc.). - -Note, strict routability for addresses is turned off in the config file. - -Optionally, it will fill in persistent-peers list in config file using either hostnames or IPs. - -Example: - - tendermint testnet --v 4 --o ./output --populate-persistent-peers --starting-ip-address 192.168.10.2 - `, - RunE: testnetFiles, -} - -func testnetFiles(cmd *cobra.Command, args []string) error { - if len(hostnames) > 0 && len(hostnames) != (nValidators+nNonValidators) { - return fmt.Errorf( - "testnet needs precisely %d hostnames (number of validators plus non-validators) if --hostname parameter is used", - nValidators+nNonValidators, - ) - } + cmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519, + "Key type to generate privval file with. Options: ed25519, secp256k1") + + cmd.RunE = func(cmd *cobra.Command, args []string) error { + if len(hostnames) > 0 && len(hostnames) != (nValidators+nNonValidators) { + return fmt.Errorf( + "testnet needs precisely %d hostnames (number of validators plus non-validators) if --hostname parameter is used", + nValidators+nNonValidators, + ) + } - // set mode to validator for testnet - config := cfg.DefaultValidatorConfig() + // set mode to validator for testnet + config := cfg.DefaultValidatorConfig() - // overwrite default config if set and valid - if configFile != "" { - viper.SetConfigFile(configFile) - if err := viper.ReadInConfig(); err != nil { - return err - } - if err := viper.Unmarshal(config); err != nil { - return err - } - if err := config.ValidateBasic(); err != nil { - return err + // overwrite default config if set and valid + if configFile != "" { + viper.SetConfigFile(configFile) + if err := viper.ReadInConfig(); err != nil { + return err + } + if err := viper.Unmarshal(config); err != nil { + return err + } + if err := config.ValidateBasic(); err != nil { + return err + } } - } - genVals := make([]types.GenesisValidator, nValidators) + genVals := make([]types.GenesisValidator, nValidators) + ctx := cmd.Context() + for i := 0; i < nValidators; i++ { + nodeDirName := fmt.Sprintf("%s%d", nodeDirPrefix, i) + nodeDir := filepath.Join(outputDir, nodeDirName) + config.SetRoot(nodeDir) + + err := os.MkdirAll(filepath.Join(nodeDir, "config"), nodeDirPerm) + if err != nil { + _ = os.RemoveAll(outputDir) + return err + } + err = os.MkdirAll(filepath.Join(nodeDir, "data"), nodeDirPerm) + if err != nil { + _ = os.RemoveAll(outputDir) + return err + } - for i := 0; i < nValidators; i++ { - nodeDirName := fmt.Sprintf("%s%d", nodeDirPrefix, i) - nodeDir := filepath.Join(outputDir, nodeDirName) - config.SetRoot(nodeDir) + if err := initFilesWithConfig(ctx, nodeConfig{Config: config}, logger); err != nil { + return err + } - err := os.MkdirAll(filepath.Join(nodeDir, "config"), nodeDirPerm) - if err != nil { - _ = os.RemoveAll(outputDir) - return err - } - err = 
os.MkdirAll(filepath.Join(nodeDir, "data"), nodeDirPerm) - if err != nil { - _ = os.RemoveAll(outputDir) - return err - } + pvKeyFile := filepath.Join(nodeDir, config.PrivValidator.Key) + pvStateFile := filepath.Join(nodeDir, config.PrivValidator.State) + pv, err := privval.LoadFilePV(pvKeyFile, pvStateFile) + if err != nil { + return err + } - if err := initFilesWithConfig(config); err != nil { - return err - } + ctx, cancel := context.WithTimeout(ctx, ctxTimeout) + defer cancel() - pvKeyFile := filepath.Join(nodeDir, config.PrivValidator.Key) - pvStateFile := filepath.Join(nodeDir, config.PrivValidator.State) - pv, err := privval.LoadFilePV(pvKeyFile, pvStateFile) - if err != nil { - return err + pubKey, err := pv.GetPubKey(ctx, crypto.QuorumHash{}) + if err != nil { + return fmt.Errorf("can't get pubkey in testnet files: %w", err) + } + genVals[i] = types.GenesisValidator{ + PubKey: pubKey, + Power: 1, + Name: nodeDirName, + } } - ctx, cancel := context.WithTimeout(context.TODO(), ctxTimeout) - defer cancel() + for i := 0; i < nNonValidators; i++ { + nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i+nValidators)) + config.SetRoot(nodeDir) - pubKey, err := pv.GetPubKey(ctx, crypto.QuorumHash{}) - if err != nil { - return fmt.Errorf("can't get pubkey in testnet files: %w", err) - } - genVals[i] = types.GenesisValidator{ - PubKey: pubKey, - Power: 1, - Name: nodeDirName, - } - } + err := os.MkdirAll(filepath.Join(nodeDir, "config"), nodeDirPerm) + if err != nil { + _ = os.RemoveAll(outputDir) + return err + } - for i := 0; i < nNonValidators; i++ { - nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i+nValidators)) - config.SetRoot(nodeDir) + err = os.MkdirAll(filepath.Join(nodeDir, "data"), nodeDirPerm) + if err != nil { + _ = os.RemoveAll(outputDir) + return err + } - err := os.MkdirAll(filepath.Join(nodeDir, "config"), nodeDirPerm) - if err != nil { - _ = os.RemoveAll(outputDir) - return err + if err := initFilesWithConfig(ctx, nodeConfig{Config: conf}, logger); err != nil { + return err + } } - err = os.MkdirAll(filepath.Join(nodeDir, "data"), nodeDirPerm) - if err != nil { - _ = os.RemoveAll(outputDir) - return err + // Generate genesis doc from generated validators + genDoc := &types.GenesisDoc{ + ChainID: "chain-" + tmrand.Str(6), + GenesisTime: tmtime.Now(), + InitialHeight: initialHeight, + Validators: genVals, + ConsensusParams: types.DefaultConsensusParams(), } - - if err := initFilesWithConfig(config); err != nil { - return err + if keyType == "secp256k1" { + genDoc.ConsensusParams.Validator = types.ValidatorParams{ + PubKeyTypes: []string{types.ABCIPubKeyTypeSecp256k1}, + } } - } - // Generate genesis doc from generated validators - genDoc := &types.GenesisDoc{ - ChainID: "chain-" + tmrand.Str(6), - GenesisTime: tmtime.Now(), - InitialHeight: initialHeight, - Validators: genVals, - ConsensusParams: types.DefaultConsensusParams(), - } - if keyType == "secp256k1" { - genDoc.ConsensusParams.Validator = types.ValidatorParams{ - PubKeyTypes: []string{types.ABCIPubKeyTypeSecp256k1}, + // Write genesis file. + for i := 0; i < nValidators+nNonValidators; i++ { + nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i)) + if err := genDoc.SaveAs(filepath.Join(nodeDir, config.BaseConfig.Genesis)); err != nil { + _ = os.RemoveAll(outputDir) + return err + } } - } - // Write genesis file. 
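// Standalone sketch of the constructor pattern this file is migrated to:
// flags bound to closure-local variables instead of package-level globals,
// so two instances of a command never share mutable state. The "greet"
// command and its flag are hypothetical; only the pattern comes from this
// patch.
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func makeGreetCommand() *cobra.Command {
	var name string // closure-local, not a package global
	cmd := &cobra.Command{
		Use:   "greet",
		Short: "Print a greeting",
		RunE: func(cmd *cobra.Command, args []string) error {
			fmt.Println("hello,", name)
			return nil
		},
	}
	cmd.Flags().StringVar(&name, "name", "world", "who to greet")
	return cmd
}

func main() {
	if err := makeGreetCommand().Execute(); err != nil {
		panic(err)
	}
}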
- for i := 0; i < nValidators+nNonValidators; i++ { - nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i)) - if err := genDoc.SaveAs(filepath.Join(nodeDir, config.BaseConfig.Genesis)); err != nil { - _ = os.RemoveAll(outputDir) - return err + // Gather persistent peer addresses. + var ( + persistentPeers = make([]string, 0) + err error + ) + tpargs := testnetPeerArgs{ + numValidators: nValidators, + numNonValidators: nNonValidators, + peerToPeerPort: p2pPort, + nodeDirPrefix: nodeDirPrefix, + outputDir: outputDir, + hostnames: hostnames, + startingIPAddr: startingIPAddress, + hostnamePrefix: hostnamePrefix, + hostnameSuffix: hostnameSuffix, + randomMonikers: randomMonikers, } - } - // Gather persistent peer addresses. - var ( - persistentPeers = make([]string, 0) - err error - ) - if populatePersistentPeers { - persistentPeers, err = persistentPeersArray(config) - if err != nil { - _ = os.RemoveAll(outputDir) - return err + if populatePersistentPeers { + + persistentPeers, err = persistentPeersArray(config, tpargs) + if err != nil { + _ = os.RemoveAll(outputDir) + return err + } } - } - // Overwrite default config. - for i := 0; i < nValidators+nNonValidators; i++ { - nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i)) - config.SetRoot(nodeDir) - config.P2P.AddrBookStrict = false - config.P2P.AllowDuplicateIP = true - if populatePersistentPeers { - persistentPeersWithoutSelf := make([]string, 0) - for j := 0; j < len(persistentPeers); j++ { - if j == i { - continue + // Overwrite default config. + for i := 0; i < nValidators+nNonValidators; i++ { + nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i)) + config.SetRoot(nodeDir) + config.P2P.AllowDuplicateIP = true + if populatePersistentPeers { + persistentPeersWithoutSelf := make([]string, 0) + for j := 0; j < len(persistentPeers); j++ { + if j == i { + continue + } + persistentPeersWithoutSelf = append(persistentPeersWithoutSelf, persistentPeers[j]) } - persistentPeersWithoutSelf = append(persistentPeersWithoutSelf, persistentPeers[j]) + config.P2P.PersistentPeers = strings.Join(persistentPeersWithoutSelf, ",") } - config.P2P.PersistentPeers = strings.Join(persistentPeersWithoutSelf, ",") - } - config.Moniker = moniker(i) + config.Moniker = tpargs.moniker(i) - if err := cfg.WriteConfigFile(nodeDir, config); err != nil { - return err + if err := cfg.WriteConfigFile(nodeDir, config); err != nil { + return err + } } + + fmt.Printf("Successfully initialized %v node directories\n", nValidators+nNonValidators) + return nil } - fmt.Printf("Successfully initialized %v node directories\n", nValidators+nNonValidators) - return nil + return cmd } -func hostnameOrIP(i int) string { - if len(hostnames) > 0 && i < len(hostnames) { - return hostnames[i] +type testnetPeerArgs struct { + numValidators int + numNonValidators int + peerToPeerPort int + nodeDirPrefix string + outputDir string + hostnames []string + startingIPAddr string + hostnamePrefix string + hostnameSuffix string + randomMonikers bool +} + +func (args *testnetPeerArgs) hostnameOrIP(i int) (string, error) { + if len(args.hostnames) > 0 && i < len(args.hostnames) { + return args.hostnames[i], nil } - if startingIPAddress == "" { - return fmt.Sprintf("%s%d%s", hostnamePrefix, i, hostnameSuffix) + if args.startingIPAddr == "" { + return fmt.Sprintf("%s%d%s", args.hostnamePrefix, i, args.hostnameSuffix), nil } - ip := net.ParseIP(startingIPAddress) + ip := net.ParseIP(args.startingIPAddr) ip = ip.To4() if ip == nil { - 
fmt.Printf("%v: non ipv4 address\n", startingIPAddress) - os.Exit(1) + return "", fmt.Errorf("%v is non-ipv4 address", args.startingIPAddr) } for j := 0; j < i; j++ { ip[3]++ } - return ip.String() + return ip.String(), nil + } // get an array of persistent peers -func persistentPeersArray(config *cfg.Config) ([]string, error) { - peers := make([]string, nValidators+nNonValidators) - for i := 0; i < nValidators+nNonValidators; i++ { - nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i)) +func persistentPeersArray(config *cfg.Config, args testnetPeerArgs) ([]string, error) { + peers := make([]string, args.numValidators+args.numNonValidators) + for i := 0; i < len(peers); i++ { + nodeDir := filepath.Join(args.outputDir, fmt.Sprintf("%s%d", args.nodeDirPrefix, i)) config.SetRoot(nodeDir) nodeKey, err := config.LoadNodeKeyID() if err != nil { - return []string{}, err + return nil, err } - peers[i] = nodeKey.AddressString(fmt.Sprintf("%s:%d", hostnameOrIP(i), p2pPort)) + addr, err := args.hostnameOrIP(i) + if err != nil { + return nil, err + } + + peers[i] = nodeKey.AddressString(fmt.Sprintf("%s:%d", addr, args.peerToPeerPort)) } return peers, nil } -func moniker(i int) string { - if randomMonikers { +func (args *testnetPeerArgs) moniker(i int) string { + if args.randomMonikers { return randomMoniker() } - if len(hostnames) > 0 && i < len(hostnames) { - return hostnames[i] + if len(args.hostnames) > 0 && i < len(args.hostnames) { + return args.hostnames[i] } - if startingIPAddress == "" { - return fmt.Sprintf("%s%d%s", hostnamePrefix, i, hostnameSuffix) + if args.startingIPAddr == "" { + return fmt.Sprintf("%s%d%s", args.hostnamePrefix, i, args.hostnameSuffix) } return randomMoniker() } diff --git a/cmd/tenderdash/main.go b/cmd/tenderdash/main.go index f71b7538e1..7320267fbd 100644 --- a/cmd/tenderdash/main.go +++ b/cmd/tenderdash/main.go @@ -1,41 +1,48 @@ package main import ( - "os" - "path/filepath" + "context" - cmd "github.com/tendermint/tendermint/cmd/tenderdash/commands" + "github.com/tendermint/tendermint/cmd/tenderdash/commands" "github.com/tendermint/tendermint/cmd/tenderdash/commands/debug" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/cli" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/node" ) func main() { - initFilesCommand := cmd.InitFilesCmd - cmd.AddInitFlags(initFilesCommand) - - rootCmd := cmd.RootCmd - rootCmd.AddCommand( - cmd.GenValidatorCmd, - cmd.ReIndexEventCmd, - cmd.InitFilesCmd, - cmd.ProbeUpnpCmd, - cmd.LightCmd, - cmd.ReplayCmd, - cmd.ReplayConsoleCmd, - cmd.ResetAllCmd, - cmd.ResetPrivValidatorCmd, - cmd.ShowValidatorCmd, - cmd.TestnetFilesCmd, - cmd.ShowNodeIDCmd, - cmd.GenNodeKeyCmd, - cmd.VersionCmd, - cmd.InspectCmd, - cmd.RollbackStateCmd, - cmd.MakeKeyMigrateCommand(), - debug.DebugCmd, - cli.NewCompletionCmd(rootCmd, true), + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + conf, err := commands.ParseConfig(config.DefaultConfig()) + if err != nil { + panic(err) + } + + logger, err := log.NewDefaultLogger(conf.LogFormat, conf.LogLevel) + if err != nil { + panic(err) + } + + rcmd := commands.RootCommand(conf, logger) + rcmd.AddCommand( + commands.MakeGenValidatorCommand(), + commands.MakeReindexEventCommand(conf, logger), + commands.MakeInitFilesCommand(conf, logger), + commands.MakeLightCommand(conf, logger), + commands.MakeReplayCommand(conf, logger), + commands.MakeReplayConsoleCommand(conf, logger), + 
commands.MakeShowValidatorCommand(conf, logger), + commands.MakeTestnetFilesCommand(conf, logger), + commands.MakeShowNodeIDCommand(conf), + commands.GenNodeKeyCmd, + commands.VersionCmd, + commands.MakeInspectCommand(conf, logger), + commands.MakeRollbackStateCommand(conf), + commands.MakeKeyMigrateCommand(conf, logger), + debug.GetDebugCommand(logger), + commands.NewCompletionCmd(rcmd, true), ) // NOTE: @@ -49,10 +56,9 @@ func main() { nodeFunc := node.NewDefault // Create & start node - rootCmd.AddCommand(cmd.NewRunNodeCmd(nodeFunc)) + rcmd.AddCommand(commands.NewRunNodeCmd(nodeFunc, conf, logger)) - cmd := cli.PrepareBaseCmd(rootCmd, "TM", os.ExpandEnv(filepath.Join("$HOME", config.DefaultTendermintDir))) - if err := cmd.Execute(); err != nil { + if err := cli.RunWithTrace(ctx, rcmd); err != nil { panic(err) } } diff --git a/config/config.go b/config/config.go index 3af0a86be5..3ccab645ed 100644 --- a/config/config.go +++ b/config/config.go @@ -2,18 +2,18 @@ package config import ( "encoding/hex" + "encoding/json" "errors" "fmt" - "io/ioutil" "net/http" "os" "path/filepath" + "strings" "time" "github.com/dashevo/dashd-go/btcjson" "github.com/tendermint/tendermint/crypto" - tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" tmos "github.com/tendermint/tendermint/libs/os" "github.com/tendermint/tendermint/types" @@ -31,12 +31,6 @@ const ( ModeFull = "full" ModeValidator = "validator" ModeSeed = "seed" - - BlockSyncV0 = "v0" - BlockSyncV2 = "v2" - - MempoolV0 = "v0" - MempoolV1 = "v1" ) // NOTE: Most of the structs & relevant comments + the @@ -57,19 +51,14 @@ var ( defaultPrivValKeyName = "priv_validator_key.json" defaultPrivValStateName = "priv_validator_state.json" - defaultNodeKeyName = "node_key.json" - defaultAddrBookName = "addrbook.json" + defaultNodeKeyName = "node_key.json" defaultConfigFilePath = filepath.Join(defaultConfigDir, defaultConfigFileName) defaultGenesisJSONPath = filepath.Join(defaultConfigDir, defaultGenesisJSONName) defaultPrivValKeyPath = filepath.Join(defaultConfigDir, defaultPrivValKeyName) defaultPrivValStatePath = filepath.Join(defaultDataDir, defaultPrivValStateName) - defaultNodeKeyPath = filepath.Join(defaultConfigDir, defaultNodeKeyName) - defaultAddrBookPath = filepath.Join(defaultConfigDir, defaultAddrBookName) - - minSubscriptionBufferSize = 100 - defaultSubscriptionBufferSize = 200 + defaultNodeKeyPath = filepath.Join(defaultConfigDir, defaultNodeKeyName) ) // Config defines the top level configuration for a Tendermint node @@ -82,7 +71,6 @@ type Config struct { P2P *P2PConfig `mapstructure:"p2p"` Mempool *MempoolConfig `mapstructure:"mempool"` StateSync *StateSyncConfig `mapstructure:"statesync"` - BlockSync *BlockSyncConfig `mapstructure:"blocksync"` Consensus *ConsensusConfig `mapstructure:"consensus"` TxIndex *TxIndexConfig `mapstructure:"tx-index"` Instrumentation *InstrumentationConfig `mapstructure:"instrumentation"` @@ -97,7 +85,6 @@ func DefaultConfig() *Config { P2P: DefaultP2PConfig(), Mempool: DefaultMempoolConfig(), StateSync: DefaultStateSyncConfig(), - BlockSync: DefaultBlockSyncConfig(), Consensus: DefaultConsensusConfig(), TxIndex: DefaultTxIndexConfig(), Instrumentation: DefaultInstrumentationConfig(), @@ -120,7 +107,6 @@ func TestConfig() *Config { P2P: TestP2PConfig(), Mempool: TestMempoolConfig(), StateSync: TestStateSyncConfig(), - BlockSync: TestBlockSyncConfig(), Consensus: TestConsensusConfig(), TxIndex: TestTxIndexConfig(), Instrumentation: TestInstrumentationConfig(), @@ -148,18 
+134,12 @@ func (cfg *Config) ValidateBasic() error { if err := cfg.RPC.ValidateBasic(); err != nil { return fmt.Errorf("error in [rpc] section: %w", err) } - if err := cfg.P2P.ValidateBasic(); err != nil { - return fmt.Errorf("error in [p2p] section: %w", err) - } if err := cfg.Mempool.ValidateBasic(); err != nil { return fmt.Errorf("error in [mempool] section: %w", err) } if err := cfg.StateSync.ValidateBasic(); err != nil { return fmt.Errorf("error in [statesync] section: %w", err) } - if err := cfg.BlockSync.ValidateBasic(); err != nil { - return fmt.Errorf("error in [blocksync] section: %w", err) - } if err := cfg.Consensus.ValidateBasic(); err != nil { return fmt.Errorf("error in [consensus] section: %w", err) } @@ -169,6 +149,10 @@ func (cfg *Config) ValidateBasic() error { return nil } +func (cfg *Config) DeprecatedFieldWarning() error { + return cfg.Consensus.DeprecatedFieldWarning() +} + //----------------------------------------------------------------------------- // BaseConfig @@ -306,12 +290,12 @@ func (cfg BaseConfig) NodeKeyFile() string { // LoadNodeKey loads NodeKey located in filePath. func (cfg BaseConfig) LoadNodeKeyID() (types.NodeID, error) { - jsonBytes, err := ioutil.ReadFile(cfg.NodeKeyFile()) + jsonBytes, err := os.ReadFile(cfg.NodeKeyFile()) if err != nil { return "", err } nodeKey := types.NodeKey{} - err = tmjson.Unmarshal(jsonBytes, &nodeKey) + err = json.Unmarshal(jsonBytes, &nodeKey) if err != nil { return "", err } @@ -362,28 +346,6 @@ func (cfg BaseConfig) ValidateBasic() error { return fmt.Errorf("unknown mode: %v", cfg.Mode) } - // TODO (https://github.com/tendermint/tendermint/issues/6908) remove this check after the v0.35 release cycle. - // This check was added to give users an upgrade prompt to use the new - // configuration option in v0.35. In future release cycles they should no longer - // be using this configuration parameter so the check can be removed. - // The cfg.Other field can likely be removed at the same time if it is not referenced - // elsewhere as it was added to service this check. - if fs, ok := cfg.Other["fastsync"]; ok { - if _, ok := fs.(map[string]interface{}); ok { - return fmt.Errorf("a configuration section named 'fastsync' was found in the " + - "configuration file. The 'fastsync' section has been renamed to " + - "'blocksync', please update the 'fastsync' field in your configuration file to 'blocksync'") - } - } - if fs, ok := cfg.Other["fast-sync"]; ok { - if fs != "" { - return fmt.Errorf("a parameter named 'fast-sync' was found in the " + - "configuration file. The parameter to enable or disable quickly syncing with a blockchain" + - "has moved to the [blocksync] section of the configuration file as blocksync.enable. " + - "Please move the 'fast-sync' field in your configuration file to 'blocksync.enable'") - } - } - return nil } @@ -498,24 +460,10 @@ type RPCConfig struct { // A list of non simple headers the client is allowed to use with cross-domain requests. CORSAllowedHeaders []string `mapstructure:"cors-allowed-headers"` - // TCP or UNIX socket address for the gRPC server to listen on - // NOTE: This server only supports /broadcast_tx_commit - // Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36. - GRPCListenAddress string `mapstructure:"grpc-laddr"` - - // Maximum number of simultaneous connections. - // Does not include RPC (HTTP&WebSocket) connections. See max-open-connections - // If you want to accept a larger number than the default, make sure - // you increase your OS limits. 
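// Standalone sketch of the validation style Config.ValidateBasic keeps
// above: each section's error is wrapped with %w so the failing section is
// named in the message while errors.Is still reaches the cause. The section
// type and field here are stand-ins, not the real config types.
package main

import (
	"errors"
	"fmt"
)

var errNegative = errors.New("max-open-connections can't be negative")

type rpcSection struct{ maxOpenConnections int }

func (s rpcSection) validateBasic() error {
	if s.maxOpenConnections < 0 {
		return errNegative
	}
	return nil
}

func validate(rpc rpcSection) error {
	if err := rpc.validateBasic(); err != nil {
		return fmt.Errorf("error in [rpc] section: %w", err)
	}
	return nil
}

func main() {
	err := validate(rpcSection{maxOpenConnections: -1})
	fmt.Println(err)                         // error in [rpc] section: ...
	fmt.Println(errors.Is(err, errNegative)) // true
}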
- // 0 - unlimited. - // Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36. - GRPCMaxOpenConnections int `mapstructure:"grpc-max-open-connections"` - // Activate unsafe RPC commands like /dial-persistent-peers and /unsafe-flush-mempool Unsafe bool `mapstructure:"unsafe"` // Maximum number of simultaneous connections (including WebSocket). - // Does not include gRPC connections. See grpc-max-open-connections // If you want to accept a larger number than the default, make sure // you increase your OS limits. // 0 - unlimited. @@ -529,32 +477,36 @@ type RPCConfig struct { MaxSubscriptionClients int `mapstructure:"max-subscription-clients"` // Maximum number of unique queries a given client can /subscribe to - // If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set + // If you're using a Local RPC client and /broadcast_tx_commit, set this // to the estimated maximum number of broadcast_tx_commit calls per block. MaxSubscriptionsPerClient int `mapstructure:"max-subscriptions-per-client"` - // The number of events that can be buffered per subscription before - // returning `ErrOutOfCapacity`. - SubscriptionBufferSize int `mapstructure:"experimental-subscription-buffer-size"` - - // The maximum number of responses that can be buffered per WebSocket - // client. If clients cannot read from the WebSocket endpoint fast enough, - // they will be disconnected, so increasing this parameter may reduce the - // chances of them being disconnected (but will cause the node to use more - // memory). + // If true, disable the websocket interface to the RPC service. This has + // the effect of disabling the /subscribe, /unsubscribe, and /unsubscribe_all + // methods for event subscription. // - // Must be at least the same as `SubscriptionBufferSize`, otherwise - // connections may be dropped unnecessarily. - WebSocketWriteBufferSize int `mapstructure:"experimental-websocket-write-buffer-size"` - - // If a WebSocket client cannot read fast enough, at present we may - // silently drop events instead of generating an error or disconnecting the - // client. + // EXPERIMENTAL: This setting will be removed in Tendermint v0.37. + ExperimentalDisableWebsocket bool `mapstructure:"experimental-disable-websocket"` + + // The time window size for the event log. All events up to this long before + // the latest (up to EventLogMaxItems) will be available for subscribers to + // fetch via the /events method. If 0 (the default) the event log and the + // /events RPC method are disabled. + EventLogWindowSize time.Duration `mapstructure:"event-log-window-size"` + + // The maxiumum number of events that may be retained by the event log. If + // this value is 0, no upper limit is set. Otherwise, items in excess of + // this number will be discarded from the event log. // - // Enabling this parameter will cause the WebSocket connection to be closed - // instead if it cannot read fast enough, allowing for greater - // predictability in subscription behavior. - CloseOnSlowClient bool `mapstructure:"experimental-close-on-slow-client"` + // Warning: This setting is a safety valve. Setting it too low may cause + // subscribers to miss events. Try to choose a value higher than the + // maximum worst-case expected event load within the chosen window size in + // ordinary operation. + // + // For example, if the window size is 10 minutes and the node typically + // averages 1000 events per ten minutes, but with occasional known spikes of + // up to 2000, choose a value > 2000. 
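// Illustrative arithmetic for the two event-log settings documented above,
// using the example numbers from the comment: a 10-minute window averaging
// ~1000 events with spikes near 2000 wants a max-items cap above the spike.
// The 2.5x headroom factor is an assumption, not a recommendation from this
// patch.
package main

import (
	"fmt"
	"time"
)

func main() {
	window := 10 * time.Minute
	avgPerWindow := 1000.0
	headroom := 2.5 // stay above the observed worst case (~2000)

	maxItems := int(avgPerWindow * headroom)
	fmt.Printf("event-log-window-size = \"%s\"\n", window) // "10m0s"
	fmt.Printf("event-log-max-items = %d\n", maxItems)     // 2500
}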
+ EventLogMaxItems int `mapstructure:"event-log-max-items"` // How long to wait for a tx to be committed during /broadcast_tx_commit // WARNING: Using a value larger than 10s will result in increasing the @@ -593,21 +545,22 @@ type RPCConfig struct { // DefaultRPCConfig returns a default configuration for the RPC server func DefaultRPCConfig() *RPCConfig { return &RPCConfig{ - ListenAddress: "tcp://127.0.0.1:26657", - CORSAllowedOrigins: []string{}, - CORSAllowedMethods: []string{http.MethodHead, http.MethodGet, http.MethodPost}, - CORSAllowedHeaders: []string{"Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time"}, - GRPCListenAddress: "", - GRPCMaxOpenConnections: 900, + ListenAddress: "tcp://127.0.0.1:26657", + CORSAllowedOrigins: []string{}, + CORSAllowedMethods: []string{http.MethodHead, http.MethodGet, http.MethodPost}, + CORSAllowedHeaders: []string{"Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time"}, Unsafe: false, MaxOpenConnections: 900, - MaxSubscriptionClients: 100, - MaxSubscriptionsPerClient: 5, - SubscriptionBufferSize: defaultSubscriptionBufferSize, - TimeoutBroadcastTxCommit: 10 * time.Second, - WebSocketWriteBufferSize: defaultSubscriptionBufferSize, + // Settings for event subscription. + MaxSubscriptionClients: 100, + MaxSubscriptionsPerClient: 5, + ExperimentalDisableWebsocket: false, // compatible with TM v0.35 and earlier + EventLogWindowSize: 0, // disables /events RPC by default + EventLogMaxItems: 0, + + TimeoutBroadcastTxCommit: 10 * time.Second, MaxBodyBytes: int64(1000000), // 1MB MaxHeaderBytes: 1 << 20, // same as the net/http default @@ -621,7 +574,6 @@ func DefaultRPCConfig() *RPCConfig { func TestRPCConfig() *RPCConfig { cfg := DefaultRPCConfig() cfg.ListenAddress = "tcp://127.0.0.1:36657" - cfg.GRPCListenAddress = "tcp://127.0.0.1:36658" cfg.Unsafe = true return cfg } @@ -629,9 +581,6 @@ func TestRPCConfig() *RPCConfig { // ValidateBasic performs basic validation (checking param bounds, etc.) and // returns an error if any check fails. 
func (cfg *RPCConfig) ValidateBasic() error { - if cfg.GRPCMaxOpenConnections < 0 { - return errors.New("grpc-max-open-connections can't be negative") - } if cfg.MaxOpenConnections < 0 { return errors.New("max-open-connections can't be negative") } @@ -641,17 +590,11 @@ func (cfg *RPCConfig) ValidateBasic() error { if cfg.MaxSubscriptionsPerClient < 0 { return errors.New("max-subscriptions-per-client can't be negative") } - if cfg.SubscriptionBufferSize < minSubscriptionBufferSize { - return fmt.Errorf( - "experimental-subscription-buffer-size must be >= %d", - minSubscriptionBufferSize, - ) + if cfg.EventLogWindowSize < 0 { + return errors.New("event-log-window-size must not be negative") } - if cfg.WebSocketWriteBufferSize < cfg.SubscriptionBufferSize { - return fmt.Errorf( - "experimental-websocket-write-buffer-size must be >= experimental-subscription-buffer-size (%d)", - cfg.SubscriptionBufferSize, - ) + if cfg.EventLogMaxItems < 0 { + return errors.New("event-log-max-items must not be negative") } if cfg.TimeoutBroadcastTxCommit < 0 { return errors.New("timeout-broadcast-tx-commit can't be negative") @@ -723,25 +666,6 @@ type P2PConfig struct { //nolint: maligned // UPNP port forwarding UPNP bool `mapstructure:"upnp"` - // Path to address book - AddrBook string `mapstructure:"addr-book-file"` - - // Set true for strict address routability rules - // Set false for private or local networks - AddrBookStrict bool `mapstructure:"addr-book-strict"` - - // Maximum number of inbound peers - // - // TODO: Remove once p2p refactor is complete in favor of MaxConnections. - // ref: https://github.com/tendermint/tendermint/issues/5670 - MaxNumInboundPeers int `mapstructure:"max-num-inbound-peers"` - - // Maximum number of outbound peers to connect to, excluding persistent peers. - // - // TODO: Remove once p2p refactor is complete in favor of MaxConnections. - // ref: https://github.com/tendermint/tendermint/issues/5670 - MaxNumOutboundPeers int `mapstructure:"max-num-outbound-peers"` - // MaxConnections defines the maximum number of connected peers (inbound and // outbound). MaxConnections uint16 `mapstructure:"max-connections"` @@ -750,11 +674,15 @@ type P2PConfig struct { //nolint: maligned // attempts per IP address. MaxIncomingConnectionAttempts uint `mapstructure:"max-incoming-connection-attempts"` - // List of node IDs, to which a connection will be (re)established ignoring any existing limits - UnconditionalPeerIDs string `mapstructure:"unconditional-peer-ids"` + // Set true to enable the peer-exchange reactor + PexReactor bool `mapstructure:"pex"` + + // Comma separated list of peer IDs to keep private (will not be gossiped to + // other peers) + PrivatePeerIDs string `mapstructure:"private-peer-ids"` - // Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) - PersistentPeersMaxDialPeriod time.Duration `mapstructure:"persistent-peers-max-dial-period"` + // Toggle to disable guard against peers connecting from the same ip. 
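// Standalone sketch of how comma-separated peer lists such as
// persistent-peers are assembled; it mirrors the testnet generator earlier
// in this patch, which drops a node's own entry before writing the value
// back to its config. Peer addresses are hypothetical.
package main

import (
	"fmt"
	"strings"
)

func persistentPeersWithoutSelf(peers []string, self int) string {
	out := make([]string, 0, len(peers))
	for i, p := range peers {
		if i == self { // a node never lists itself
			continue
		}
		out = append(out, p)
	}
	return strings.Join(out, ",")
}

func main() {
	peers := []string{"id0@node0:26656", "id1@node1:26656", "id2@node2:26656"}
	fmt.Println(persistentPeersWithoutSelf(peers, 1))
	// id0@node0:26656,id2@node2:26656
}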
+ AllowDuplicateIP bool `mapstructure:"allow-duplicate-ip"` // Time to wait before flushing messages out on the connection FlushThrottleTimeout time.Duration `mapstructure:"flush-throttle-timeout"` @@ -768,16 +696,6 @@ type P2PConfig struct { //nolint: maligned // Rate at which packets can be received, in bytes/second RecvRate int64 `mapstructure:"recv-rate"` - // Set true to enable the peer-exchange reactor - PexReactor bool `mapstructure:"pex"` - - // Comma separated list of peer IDs to keep private (will not be gossiped to - // other peers) - PrivatePeerIDs string `mapstructure:"private-peer-ids"` - - // Toggle to disable guard against peers connecting from the same ip. - AllowDuplicateIP bool `mapstructure:"allow-duplicate-ip"` - // Peer connection configuration. HandshakeTimeout time.Duration `mapstructure:"handshake-timeout"` DialTimeout time.Duration `mapstructure:"dial-timeout"` @@ -786,13 +704,8 @@ type P2PConfig struct { //nolint: maligned // Force dial to fail TestDialFail bool `mapstructure:"test-dial-fail"` - // UseLegacy enables the "legacy" P2P implementation and - // disables the newer default implementation. This flag will - // be removed in a future release. - UseLegacy bool `mapstructure:"use-legacy"` - // Makes it possible to configure which queue backend the p2p - // layer uses. Options are: "fifo", "priority" and "wdrr", + // layer uses. Options are: "fifo" and "priority", // with the default being "priority". QueueType string `mapstructure:"queue-type"` } @@ -803,13 +716,8 @@ func DefaultP2PConfig() *P2PConfig { ListenAddress: "tcp://0.0.0.0:26656", ExternalAddress: "", UPNP: false, - AddrBook: defaultAddrBookPath, - AddrBookStrict: true, - MaxNumInboundPeers: 40, - MaxNumOutboundPeers: 10, MaxConnections: 64, MaxIncomingConnectionAttempts: 100, - PersistentPeersMaxDialPeriod: 0 * time.Second, FlushThrottleTimeout: 100 * time.Millisecond, // The MTU (Maximum Transmission Unit) for Ethernet is 1500 bytes. // The IP header and the TCP header take up 20 bytes each at least (unless @@ -825,39 +733,15 @@ func DefaultP2PConfig() *P2PConfig { DialTimeout: 3 * time.Second, TestDialFail: false, QueueType: "priority", - UseLegacy: false, } } -// TestP2PConfig returns a configuration for testing the peer-to-peer layer -func TestP2PConfig() *P2PConfig { - cfg := DefaultP2PConfig() - cfg.ListenAddress = "tcp://127.0.0.1:36656" - cfg.FlushThrottleTimeout = 10 * time.Millisecond - cfg.AllowDuplicateIP = true - return cfg -} - -// AddrBookFile returns the full path to the address book -func (cfg *P2PConfig) AddrBookFile() string { - return rootify(cfg.AddrBook, cfg.RootDir) -} - // ValidateBasic performs basic validation (checking param bounds, etc.) and // returns an error if any check fails. 
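// Worked version of the MTU arithmetic cited in the DefaultP2PConfig comment
// above: a 1500-byte Ethernet MTU minus at least 20 bytes each of IP and TCP
// headers bounds the useful payload per packet. The numbers come from that
// comment; the real default payload size also reserves protocol framing
// overhead on top of this.
package main

import "fmt"

func main() {
	const (
		ethernetMTU  = 1500
		ipHeaderMin  = 20
		tcpHeaderMin = 20
	)
	fmt.Println("max TCP payload per frame:", ethernetMTU-ipHeaderMin-tcpHeaderMin) // 1460
}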
func (cfg *P2PConfig) ValidateBasic() error { - if cfg.MaxNumInboundPeers < 0 { - return errors.New("max-num-inbound-peers can't be negative") - } - if cfg.MaxNumOutboundPeers < 0 { - return errors.New("max-num-outbound-peers can't be negative") - } if cfg.FlushThrottleTimeout < 0 { return errors.New("flush-throttle-timeout can't be negative") } - if cfg.PersistentPeersMaxDialPeriod < 0 { - return errors.New("persistent-peers-max-dial-period can't be negative") - } if cfg.MaxPacketMsgPayloadSize < 0 { return errors.New("max-packet-msg-payload-size can't be negative") } @@ -870,12 +754,20 @@ func (cfg *P2PConfig) ValidateBasic() error { return nil } +// TestP2PConfig returns a configuration for testing the peer-to-peer layer +func TestP2PConfig() *P2PConfig { + cfg := DefaultP2PConfig() + cfg.ListenAddress = "tcp://127.0.0.1:36656" + cfg.AllowDuplicateIP = true + cfg.FlushThrottleTimeout = 10 * time.Millisecond + return cfg +} + //----------------------------------------------------------------------------- // MempoolConfig // MempoolConfig defines the configuration options for the Tendermint mempool. type MempoolConfig struct { - Version string `mapstructure:"version"` RootDir string `mapstructure:"home"` Recheck bool `mapstructure:"recheck"` Broadcast bool `mapstructure:"broadcast"` @@ -925,7 +817,6 @@ type MempoolConfig struct { // DefaultMempoolConfig returns a default configuration for the Tendermint mempool. func DefaultMempoolConfig() *MempoolConfig { return &MempoolConfig{ - Version: MempoolV1, Recheck: true, Broadcast: true, // Each signature verification takes .5ms, Size reduced until we implement @@ -1092,42 +983,6 @@ func (cfg *StateSyncConfig) ValidateBasic() error { return nil } -//----------------------------------------------------------------------------- - -// BlockSyncConfig (formerly known as FastSync) defines the configuration for the Tendermint block sync service -// If this node is many blocks behind the tip of the chain, BlockSync -// allows them to catchup quickly by downloading blocks in parallel -// and verifying their commits. -type BlockSyncConfig struct { - Enable bool `mapstructure:"enable"` - Version string `mapstructure:"version"` -} - -// DefaultBlockSyncConfig returns a default configuration for the block sync service -func DefaultBlockSyncConfig() *BlockSyncConfig { - return &BlockSyncConfig{ - Enable: true, - Version: BlockSyncV0, - } -} - -// TestBlockSyncConfig returns a default configuration for the block sync. -func TestBlockSyncConfig() *BlockSyncConfig { - return DefaultBlockSyncConfig() -} - -// ValidateBasic performs basic validation. -func (cfg *BlockSyncConfig) ValidateBasic() error { - switch cfg.Version { - case BlockSyncV0: - return nil - case BlockSyncV2: - return errors.New("blocksync version v2 is no longer supported. 
Please use v0") - default: - return fmt.Errorf("unknown blocksync version %s", cfg.Version) - } -} - //----------------------------------------------------------------------------- // ConsensusConfig @@ -1138,42 +993,22 @@ type ConsensusConfig struct { WalPath string `mapstructure:"wal-file"` walFile string // overrides WalPath if set - // TODO: remove timeout configs, these should be global not local - // How long we wait for a proposal block before prevoting nil - TimeoutPropose time.Duration `mapstructure:"timeout-propose"` - // How much timeout-propose increases with each round - TimeoutProposeDelta time.Duration `mapstructure:"timeout-propose-delta"` - // How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) - TimeoutPrevote time.Duration `mapstructure:"timeout-prevote"` - // How much the timeout-prevote increases with each round - TimeoutPrevoteDelta time.Duration `mapstructure:"timeout-prevote-delta"` - // How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil) - TimeoutPrecommit time.Duration `mapstructure:"timeout-precommit"` - // How much the timeout-precommit increases with each round - TimeoutPrecommitDelta time.Duration `mapstructure:"timeout-precommit-delta"` - // How long we wait after committing a block, before starting on the new - // height (this gives us a chance to receive some more precommits, even - // though we already have +2/3). - TimeoutCommit time.Duration `mapstructure:"timeout-commit"` + // EmptyBlocks mode and possible interval between empty blocks + CreateEmptyBlocks bool `mapstructure:"create-empty-blocks"` + CreateEmptyBlocksInterval time.Duration `mapstructure:"create-empty-blocks-interval"` + // CreateProofBlockRange determines how many past blocks are inspected in order to determine if we need to create + // additional proof block. + CreateProofBlockRange int64 `mapstructure:"create-proof-block-range"` // The proposed block time window is doubling of the value in twice // that means for 10 sec the window will be 20 sec, 10 sec before NOW and 10 sec after // this value is used to validate a block time ProposedBlockTimeWindow time.Duration `mapstructure:"proposed-block-time-window"` - // Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) - SkipTimeoutCommit bool `mapstructure:"skip-timeout-commit"` // Don't propose a block if the node is set to the proposer, the block proposal instead // has to be manual (useful for tests) DontAutoPropose bool `mapstructure:"dont-auto-propose'"` - // EmptyBlocks mode and possible interval between empty blocks - CreateEmptyBlocks bool `mapstructure:"create-empty-blocks"` - CreateEmptyBlocksInterval time.Duration `mapstructure:"create-empty-blocks-interval"` - // CreateProofBlockRange determines how many past blocks are inspected in order to determine if we need to create - // additional proof block. - CreateProofBlockRange int64 `mapstructure:"create-proof-block-range"` - // Reactor sleep duration parameters PeerGossipSleepDuration time.Duration `mapstructure:"peer-gossip-sleep-duration"` PeerQueryMaj23SleepDuration time.Duration `mapstructure:"peer-query-maj23-sleep-duration"` @@ -1183,22 +1018,59 @@ type ConsensusConfig struct { QuorumType btcjson.LLMQType `mapstructure:"quorum-type"` AppHashSize int `mapstructure:"app-hash-size"` + + // TODO: The following fields are all temporary overrides that should exist only + // for the duration of the v0.36 release. 
The below fields should be completely + // removed in the v0.37 release of Tendermint. + // See: https://github.com/tendermint/tendermint/issues/8188 + + // UnsafeProposeTimeoutOverride provides an unsafe override of the Propose + // timeout consensus parameter. It configures how long the consensus engine + // will wait to receive a proposal block before prevoting nil. + UnsafeProposeTimeoutOverride time.Duration `mapstructure:"unsafe-propose-timeout-override"` + // UnsafeProposeTimeoutDeltaOverride provides an unsafe override of the + // ProposeDelta timeout consensus parameter. It configures how much the + // propose timeout increases with each round. + UnsafeProposeTimeoutDeltaOverride time.Duration `mapstructure:"unsafe-propose-timeout-delta-override"` + // UnsafeVoteTimeoutOverride provides an unsafe override of the Vote timeout + // consensus parameter. It configures how long the consensus engine will wait + // to gather additional votes after receiving +2/3 votes in a round. + UnsafeVoteTimeoutOverride time.Duration `mapstructure:"unsafe-vote-timeout-override"` + // UnsafeVoteTimeoutDeltaOverride provides an unsafe override of the VoteDelta + // timeout consensus parameter. It configures how much the vote timeout + // increases with each round. + UnsafeVoteTimeoutDeltaOverride time.Duration `mapstructure:"unsafe-vote-timeout-delta-override"` + // UnsafeCommitTimeoutOverride provides an unsafe override of the Commit timeout + // consensus parameter. It configures how long the consensus engine will wait + // after receiving +2/3 precommits before beginning the next height. + UnsafeCommitTimeoutOverride time.Duration `mapstructure:"unsafe-commit-timeout-override"` + + // UnsafeBypassCommitTimeoutOverride provides an unsafe override of the + // BypassCommitTimeout consensus parameter. It configures if the consensus + // engine will wait for the full Commit timeout before proceeding to the next height. + // If it is set to true, the consensus engine will proceed to the next height + // as soon as the node has gathered votes from all of the validators on the network. + UnsafeBypassCommitTimeoutOverride *bool `mapstructure:"unsafe-bypass-commit-timeout-override"` + + // Deprecated timeout parameters. These parameters are present in this struct + // so that they can be parsed so that validation can check if they have erroneously + // been included and provide a helpful error message. + // These fields should be completely removed in v0.37. 
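// Standalone sketch of why the deprecated fields below are typed
// *interface{}: a pointer stays nil when the key is absent, so validation can
// distinguish "field present at all" from any particular value.
// encoding/json stands in for mapstructure here to keep the example
// dependency-free; the presence-detection mechanism is the same.
package main

import (
	"encoding/json"
	"fmt"
)

type consensusConfig struct {
	DeprecatedTimeoutCommit *interface{} `json:"timeout-commit"`
}

func main() {
	var absent, present consensusConfig
	_ = json.Unmarshal([]byte(`{}`), &absent)
	_ = json.Unmarshal([]byte(`{"timeout-commit": "1s"}`), &present)
	fmt.Println(absent.DeprecatedTimeoutCommit == nil)  // true: key never set
	fmt.Println(present.DeprecatedTimeoutCommit == nil) // false: erroneously set
}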
+ // See: https://github.com/tendermint/tendermint/issues/8188 + DeprecatedTimeoutPropose *interface{} `mapstructure:"timeout-propose"` + DeprecatedTimeoutProposeDelta *interface{} `mapstructure:"timeout-propose-delta"` + DeprecatedTimeoutPrevote *interface{} `mapstructure:"timeout-prevote"` + DeprecatedTimeoutPrevoteDelta *interface{} `mapstructure:"timeout-prevote-delta"` + DeprecatedTimeoutPrecommit *interface{} `mapstructure:"timeout-precommit"` + DeprecatedTimeoutPrecommitDelta *interface{} `mapstructure:"timeout-precommit-delta"` + DeprecatedTimeoutCommit *interface{} `mapstructure:"timeout-commit"` + DeprecatedSkipTimeoutCommit *interface{} `mapstructure:"skip-timeout-commit"` } // DefaultConsensusConfig returns a default configuration for the consensus service func DefaultConsensusConfig() *ConsensusConfig { return &ConsensusConfig{ WalPath: filepath.Join(defaultDataDir, "cs.wal", "wal"), - TimeoutPropose: 3000 * time.Millisecond, - TimeoutProposeDelta: 500 * time.Millisecond, - TimeoutPrevote: 1000 * time.Millisecond, - TimeoutPrevoteDelta: 500 * time.Millisecond, - TimeoutPrecommit: 1000 * time.Millisecond, - TimeoutPrecommitDelta: 500 * time.Millisecond, - TimeoutCommit: 1000 * time.Millisecond, - ProposedBlockTimeWindow: 10 * time.Second, - SkipTimeoutCommit: false, - DontAutoPropose: false, CreateEmptyBlocks: true, CreateEmptyBlocksInterval: 0 * time.Second, CreateProofBlockRange: 1, @@ -1207,21 +1079,14 @@ func DefaultConsensusConfig() *ConsensusConfig { DoubleSignCheckHeight: int64(0), AppHashSize: crypto.DefaultAppHashSize, QuorumType: btcjson.LLMQType_5_60, + ProposedBlockTimeWindow: 10 * time.Second, + DontAutoPropose: false, } } // TestConsensusConfig returns a configuration for testing the consensus service func TestConsensusConfig() *ConsensusConfig { cfg := DefaultConsensusConfig() - cfg.TimeoutPropose = 80 * time.Millisecond - cfg.TimeoutProposeDelta = 5 * time.Millisecond - cfg.TimeoutPrevote = 50 * time.Millisecond - cfg.TimeoutPrevoteDelta = 5 * time.Millisecond - cfg.TimeoutPrecommit = 50 * time.Millisecond - cfg.TimeoutPrecommitDelta = 5 * time.Millisecond - // NOTE: when modifying, make sure to update time_iota_ms (testGenesisFmt) in toml.go - cfg.TimeoutCommit = 10 * time.Millisecond - cfg.SkipTimeoutCommit = true cfg.PeerGossipSleepDuration = 5 * time.Millisecond cfg.PeerQueryMaj23SleepDuration = 250 * time.Millisecond cfg.DoubleSignCheckHeight = int64(0) @@ -1235,33 +1100,6 @@ func (cfg *ConsensusConfig) WaitForTxs() bool { return !cfg.CreateEmptyBlocks || cfg.CreateEmptyBlocksInterval > 0 } -// Propose returns the amount of time to wait for a proposal -func (cfg *ConsensusConfig) Propose(round int32) time.Duration { - return time.Duration( - cfg.TimeoutPropose.Nanoseconds()+cfg.TimeoutProposeDelta.Nanoseconds()*int64(round), - ) * time.Nanosecond -} - -// Prevote returns the amount of time to wait for straggler votes after receiving any +2/3 prevotes -func (cfg *ConsensusConfig) Prevote(round int32) time.Duration { - return time.Duration( - cfg.TimeoutPrevote.Nanoseconds()+cfg.TimeoutPrevoteDelta.Nanoseconds()*int64(round), - ) * time.Nanosecond -} - -// Precommit returns the amount of time to wait for straggler votes after receiving any +2/3 precommits -func (cfg *ConsensusConfig) Precommit(round int32) time.Duration { - return time.Duration( - cfg.TimeoutPrecommit.Nanoseconds()+cfg.TimeoutPrecommitDelta.Nanoseconds()*int64(round), - ) * time.Nanosecond -} - -// Commit returns the amount of time to wait for straggler votes after receiving +2/3 precommits 
-// for a single block (ie. a commit). -func (cfg *ConsensusConfig) Commit(t time.Time) time.Time { - return t.Add(cfg.TimeoutCommit) -} - // WalFile returns the full path to the write-ahead log file func (cfg *ConsensusConfig) WalFile() string { if cfg.walFile != "" { @@ -1278,26 +1116,20 @@ func (cfg *ConsensusConfig) SetWalFile(walFile string) { // ValidateBasic performs basic validation (checking param bounds, etc.) and // returns an error if any check fails. func (cfg *ConsensusConfig) ValidateBasic() error { - if cfg.TimeoutPropose < 0 { - return errors.New("timeout-propose can't be negative") + if cfg.UnsafeProposeTimeoutOverride < 0 { + return errors.New("unsafe-propose-timeout-override can't be negative") } - if cfg.TimeoutProposeDelta < 0 { - return errors.New("timeout-propose-delta can't be negative") + if cfg.UnsafeProposeTimeoutDeltaOverride < 0 { + return errors.New("unsafe-propose-timeout-delta-override can't be negative") } - if cfg.TimeoutPrevote < 0 { - return errors.New("timeout-prevote can't be negative") + if cfg.UnsafeVoteTimeoutOverride < 0 { + return errors.New("unsafe-vote-timeout-override can't be negative") } - if cfg.TimeoutPrevoteDelta < 0 { - return errors.New("timeout-prevote-delta can't be negative") + if cfg.UnsafeVoteTimeoutDeltaOverride < 0 { + return errors.New("unsafe-vote-timeout-delta-override can't be negative") } - if cfg.TimeoutPrecommit < 0 { - return errors.New("timeout-precommit can't be negative") - } - if cfg.TimeoutPrecommitDelta < 0 { - return errors.New("timeout-precommit-delta can't be negative") - } - if cfg.TimeoutCommit < 0 { - return errors.New("timeout-commit can't be negative") + if cfg.UnsafeCommitTimeoutOverride < 0 { + return errors.New("unsafe-commit-timeout-override can't be negative") } if cfg.ProposedBlockTimeWindow < 0 { return errors.New("proposed-block-time can't be negative") @@ -1320,6 +1152,41 @@ func (cfg *ConsensusConfig) ValidateBasic() error { return nil } +func (cfg *ConsensusConfig) DeprecatedFieldWarning() error { + var fields []string + if cfg.DeprecatedSkipTimeoutCommit != nil { + fields = append(fields, "skip-timeout-commit") + } + if cfg.DeprecatedTimeoutPropose != nil { + fields = append(fields, "timeout-propose") + } + if cfg.DeprecatedTimeoutProposeDelta != nil { + fields = append(fields, "timeout-propose-delta") + } + if cfg.DeprecatedTimeoutPrevote != nil { + fields = append(fields, "timeout-prevote") + } + if cfg.DeprecatedTimeoutPrevoteDelta != nil { + fields = append(fields, "timeout-prevote-delta") + } + if cfg.DeprecatedTimeoutPrecommit != nil { + fields = append(fields, "timeout-precommit") + } + if cfg.DeprecatedTimeoutPrecommitDelta != nil { + fields = append(fields, "timeout-precommit-delta") + } + if cfg.DeprecatedTimeoutCommit != nil { + fields = append(fields, "timeout-commit") + } + if len(fields) != 0 { + return fmt.Errorf("the following deprecated fields were set in the "+ + "configuration file: %s. These fields were removed in v0.36. Timeout "+ + "configuration has been moved to the ConsensusParams. For more information see "+ + "https://tinyurl.com/adr074", strings.Join(fields, ", ")) + } + return nil +} + //----------------------------------------------------------------------------- // TxIndexConfig // Remember that Event has the following structure: @@ -1336,9 +1203,8 @@ type TxIndexConfig struct { // If list contains `null`, meaning no indexer service will be used. // // Options: - // 1) "null" - no indexer services. 
- // 2) "kv" (default) - the simplest possible indexer, - // backed by key-value storage (defaults to levelDB; see DBBackend). + // 1) "null" (default) - no indexer services. + // 2) "kv" - a simple indexer backed by key-value storage (see DBBackend) // 3) "psql" - the indexer services backed by PostgreSQL. Indexer []string `mapstructure:"indexer"` @@ -1349,14 +1215,12 @@ type TxIndexConfig struct { // DefaultTxIndexConfig returns a default configuration for the transaction indexer. func DefaultTxIndexConfig() *TxIndexConfig { - return &TxIndexConfig{ - Indexer: []string{"kv"}, - } + return &TxIndexConfig{Indexer: []string{"null"}} } // TestTxIndexConfig returns a default configuration for the transaction indexer. func TestTxIndexConfig() *TxIndexConfig { - return DefaultTxIndexConfig() + return &TxIndexConfig{Indexer: []string{"kv"}} } //----------------------------------------------------------------------------- diff --git a/config/config_test.go b/config/config_test.go index aa536dd61f..a86ab84636 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -10,46 +10,43 @@ import ( ) func TestDefaultConfig(t *testing.T) { - assert := assert.New(t) - // set up some defaults cfg := DefaultConfig() - assert.NotNil(cfg.P2P) - assert.NotNil(cfg.Mempool) - assert.NotNil(cfg.Consensus) + assert.NotNil(t, cfg.P2P) + assert.NotNil(t, cfg.Mempool) + assert.NotNil(t, cfg.Consensus) // check the root dir stuff... cfg.SetRoot("/foo") cfg.Genesis = "bar" cfg.DBPath = "/opt/data" - assert.Equal("/foo/bar", cfg.GenesisFile()) - assert.Equal("/opt/data", cfg.DBDir()) + assert.Equal(t, "/foo/bar", cfg.GenesisFile()) + assert.Equal(t, "/opt/data", cfg.DBDir()) } func TestConfigValidateBasic(t *testing.T) { cfg := DefaultConfig() assert.NoError(t, cfg.ValidateBasic()) - // tamper with timeout_propose - cfg.Consensus.TimeoutPropose = -10 * time.Second + // tamper with unsafe-propose-timeout-override + cfg.Consensus.UnsafeProposeTimeoutOverride = -10 * time.Second assert.Error(t, cfg.ValidateBasic()) } func TestTLSConfiguration(t *testing.T) { - assert := assert.New(t) cfg := DefaultConfig() cfg.SetRoot("/home/user") cfg.RPC.TLSCertFile = "file.crt" - assert.Equal("/home/user/config/file.crt", cfg.RPC.CertFile()) + assert.Equal(t, "/home/user/config/file.crt", cfg.RPC.CertFile()) cfg.RPC.TLSKeyFile = "file.key" - assert.Equal("/home/user/config/file.key", cfg.RPC.KeyFile()) + assert.Equal(t, "/home/user/config/file.key", cfg.RPC.KeyFile()) cfg.RPC.TLSCertFile = "/abs/path/to/file.crt" - assert.Equal("/abs/path/to/file.crt", cfg.RPC.CertFile()) + assert.Equal(t, "/abs/path/to/file.crt", cfg.RPC.CertFile()) cfg.RPC.TLSKeyFile = "/abs/path/to/file.key" - assert.Equal("/abs/path/to/file.key", cfg.RPC.KeyFile()) + assert.Equal(t, "/abs/path/to/file.key", cfg.RPC.KeyFile()) } func TestBaseConfigValidateBasic(t *testing.T) { @@ -66,7 +63,6 @@ func TestRPCConfigValidateBasic(t *testing.T) { assert.NoError(t, cfg.ValidateBasic()) fieldsToTest := []string{ - "GRPCMaxOpenConnections", "MaxOpenConnections", "MaxSubscriptionClients", "MaxSubscriptionsPerClient", @@ -82,26 +78,6 @@ func TestRPCConfigValidateBasic(t *testing.T) { } } -func TestP2PConfigValidateBasic(t *testing.T) { - cfg := TestP2PConfig() - assert.NoError(t, cfg.ValidateBasic()) - - fieldsToTest := []string{ - "MaxNumInboundPeers", - "MaxNumOutboundPeers", - "FlushThrottleTimeout", - "MaxPacketMsgPayloadSize", - "SendRate", - "RecvRate", - } - - for _, fieldName := range fieldsToTest { - reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(-1) 
- assert.Error(t, cfg.ValidateBasic()) - reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(0) - } -} - func TestMempoolConfigValidateBasic(t *testing.T) { cfg := TestMempoolConfig() assert.NoError(t, cfg.ValidateBasic()) @@ -125,42 +101,26 @@ func TestStateSyncConfigValidateBasic(t *testing.T) { require.NoError(t, cfg.ValidateBasic()) } -func TestBlockSyncConfigValidateBasic(t *testing.T) { - cfg := TestBlockSyncConfig() - assert.NoError(t, cfg.ValidateBasic()) - - // tamper with version - cfg.Version = "v2" - assert.Error(t, cfg.ValidateBasic()) - - cfg.Version = "invalid" - assert.Error(t, cfg.ValidateBasic()) -} - func TestConsensusConfig_ValidateBasic(t *testing.T) { testcases := map[string]struct { modify func(*ConsensusConfig) expectErr bool }{ - "TimeoutPropose": {func(c *ConsensusConfig) { c.TimeoutPropose = time.Second }, false}, - "TimeoutPropose negative": {func(c *ConsensusConfig) { c.TimeoutPropose = -1 }, true}, - "TimeoutProposeDelta": {func(c *ConsensusConfig) { c.TimeoutProposeDelta = time.Second }, false}, - "TimeoutProposeDelta negative": {func(c *ConsensusConfig) { c.TimeoutProposeDelta = -1 }, true}, - "TimeoutPrevote": {func(c *ConsensusConfig) { c.TimeoutPrevote = time.Second }, false}, - "TimeoutPrevote negative": {func(c *ConsensusConfig) { c.TimeoutPrevote = -1 }, true}, - "TimeoutPrevoteDelta": {func(c *ConsensusConfig) { c.TimeoutPrevoteDelta = time.Second }, false}, - "TimeoutPrevoteDelta negative": {func(c *ConsensusConfig) { c.TimeoutPrevoteDelta = -1 }, true}, - "TimeoutPrecommit": {func(c *ConsensusConfig) { c.TimeoutPrecommit = time.Second }, false}, - "TimeoutPrecommit negative": {func(c *ConsensusConfig) { c.TimeoutPrecommit = -1 }, true}, - "TimeoutPrecommitDelta": {func(c *ConsensusConfig) { c.TimeoutPrecommitDelta = time.Second }, false}, - "TimeoutPrecommitDelta negative": {func(c *ConsensusConfig) { c.TimeoutPrecommitDelta = -1 }, true}, - "TimeoutCommit": {func(c *ConsensusConfig) { c.TimeoutCommit = time.Second }, false}, - "TimeoutCommit negative": {func(c *ConsensusConfig) { c.TimeoutCommit = -1 }, true}, - "PeerGossipSleepDuration": {func(c *ConsensusConfig) { c.PeerGossipSleepDuration = time.Second }, false}, - "PeerGossipSleepDuration negative": {func(c *ConsensusConfig) { c.PeerGossipSleepDuration = -1 }, true}, - "PeerQueryMaj23SleepDuration": {func(c *ConsensusConfig) { c.PeerQueryMaj23SleepDuration = time.Second }, false}, - "PeerQueryMaj23SleepDuration negative": {func(c *ConsensusConfig) { c.PeerQueryMaj23SleepDuration = -1 }, true}, - "DoubleSignCheckHeight negative": {func(c *ConsensusConfig) { c.DoubleSignCheckHeight = -1 }, true}, + "UnsafeProposeTimeoutOverride": {func(c *ConsensusConfig) { c.UnsafeProposeTimeoutOverride = time.Second }, false}, + "UnsafeProposeTimeoutOverride negative": {func(c *ConsensusConfig) { c.UnsafeProposeTimeoutOverride = -1 }, true}, + "UnsafeProposeTimeoutDeltaOverride": {func(c *ConsensusConfig) { c.UnsafeProposeTimeoutDeltaOverride = time.Second }, false}, + "UnsafeProposeTimeoutDeltaOverride negative": {func(c *ConsensusConfig) { c.UnsafeProposeTimeoutDeltaOverride = -1 }, true}, + "UnsafePrevoteTimeoutOverride": {func(c *ConsensusConfig) { c.UnsafeVoteTimeoutOverride = time.Second }, false}, + "UnsafePrevoteTimeoutOverride negative": {func(c *ConsensusConfig) { c.UnsafeVoteTimeoutOverride = -1 }, true}, + "UnsafePrevoteTimeoutDeltaOverride": {func(c *ConsensusConfig) { c.UnsafeVoteTimeoutDeltaOverride = time.Second }, false}, + "UnsafePrevoteTimeoutDeltaOverride negative": {func(c 
*ConsensusConfig) { c.UnsafeVoteTimeoutDeltaOverride = -1 }, true}, + "UnsafeCommitTimeoutOverride": {func(c *ConsensusConfig) { c.UnsafeCommitTimeoutOverride = time.Second }, false}, + "UnsafeCommitTimeoutOverride negative": {func(c *ConsensusConfig) { c.UnsafeCommitTimeoutOverride = -1 }, true}, + "PeerGossipSleepDuration": {func(c *ConsensusConfig) { c.PeerGossipSleepDuration = time.Second }, false}, + "PeerGossipSleepDuration negative": {func(c *ConsensusConfig) { c.PeerGossipSleepDuration = -1 }, true}, + "PeerQueryMaj23SleepDuration": {func(c *ConsensusConfig) { c.PeerQueryMaj23SleepDuration = time.Second }, false}, + "PeerQueryMaj23SleepDuration negative": {func(c *ConsensusConfig) { c.PeerQueryMaj23SleepDuration = -1 }, true}, + "DoubleSignCheckHeight negative": {func(c *ConsensusConfig) { c.DoubleSignCheckHeight = -1 }, true}, } for desc, tc := range testcases { tc := tc // appease linter @@ -186,3 +146,21 @@ func TestInstrumentationConfigValidateBasic(t *testing.T) { cfg.MaxOpenConnections = -1 assert.Error(t, cfg.ValidateBasic()) } + +func TestP2PConfigValidateBasic(t *testing.T) { + cfg := TestP2PConfig() + assert.NoError(t, cfg.ValidateBasic()) + + fieldsToTest := []string{ + "FlushThrottleTimeout", + "MaxPacketMsgPayloadSize", + "SendRate", + "RecvRate", + } + + for _, fieldName := range fieldsToTest { + reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(-1) + assert.Error(t, cfg.ValidateBasic()) + reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(0) + } +} diff --git a/config/db.go b/config/db.go index 8f489a87aa..f508354e07 100644 --- a/config/db.go +++ b/config/db.go @@ -1,6 +1,8 @@ package config import ( + "context" + dbm "github.com/tendermint/tm-db" "github.com/tendermint/tendermint/libs/log" @@ -8,7 +10,7 @@ import ( ) // ServiceProvider takes a config and a logger and returns a ready to go Node. -type ServiceProvider func(*Config, log.Logger) (service.Service, error) +type ServiceProvider func(context.Context, *Config, log.Logger) (service.Service, error) // DBContext specifies config information for loading a new DB. type DBContext struct { diff --git a/config/toml.go b/config/toml.go index d5b432a7c6..ee5df22f6a 100644 --- a/config/toml.go +++ b/config/toml.go @@ -3,17 +3,17 @@ package config import ( "bytes" "fmt" - "io/ioutil" "os" "path/filepath" "strings" "text/template" tmos "github.com/tendermint/tendermint/libs/os" + tmrand "github.com/tendermint/tendermint/libs/rand" ) -// DefaultDirPerm is the default permissions used when creating directories. -const DefaultDirPerm = 0700 +// defaultDirPerm is the default permissions used when creating directories. +const defaultDirPerm = 0700 var configTemplate *template.Template @@ -32,13 +32,13 @@ func init() { // EnsureRoot creates the root, config, and data directories if they don't exist, // and panics if it fails. 
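// Standalone sketch of the updated ServiceProvider contract from config/db.go
// above, which now takes a context so node construction can observe
// cancellation. Config, Logger, and Service are local stand-ins for the real
// tendermint types, and the no-op service is hypothetical.
package main

import (
	"context"
	"fmt"
)

type (
	Config  struct{}
	Logger  interface{ Info(msg string, kv ...interface{}) }
	Service interface{ Start(context.Context) error }
)

// ServiceProvider mirrors the new signature: context first.
type ServiceProvider func(context.Context, *Config, Logger) (Service, error)

type noopService struct{}

func (noopService) Start(context.Context) error { return nil }

func newNode(ctx context.Context, _ *Config, _ Logger) (Service, error) {
	if err := ctx.Err(); err != nil { // fail fast if already canceled
		return nil, err
	}
	return noopService{}, nil
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // construction after cancellation should not proceed
	var provider ServiceProvider = newNode
	if _, err := provider(ctx, &Config{}, nil); err != nil {
		fmt.Println(err) // context canceled
	}
}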
func EnsureRoot(rootDir string) { - if err := tmos.EnsureDir(rootDir, DefaultDirPerm); err != nil { + if err := tmos.EnsureDir(rootDir, defaultDirPerm); err != nil { panic(err.Error()) } - if err := tmos.EnsureDir(filepath.Join(rootDir, defaultConfigDir), DefaultDirPerm); err != nil { + if err := tmos.EnsureDir(filepath.Join(rootDir, defaultConfigDir), defaultDirPerm); err != nil { panic(err.Error()) } - if err := tmos.EnsureDir(filepath.Join(rootDir, defaultDataDir), DefaultDirPerm); err != nil { + if err := tmos.EnsureDir(filepath.Join(rootDir, defaultDataDir), defaultDirPerm); err != nil { panic(err.Error()) } } @@ -209,26 +209,10 @@ cors-allowed-methods = [{{ range .RPC.CORSAllowedMethods }}{{ printf "%q, " . }} # A list of non simple headers the client is allowed to use with cross-domain requests cors-allowed-headers = [{{ range .RPC.CORSAllowedHeaders }}{{ printf "%q, " . }}{{end}}] -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -# Deprecated gRPC in the RPC layer of Tendermint will be deprecated in 0.36. -grpc-laddr = "{{ .RPC.GRPCListenAddress }}" - -# Maximum number of simultaneous connections. -# Does not include RPC (HTTP&WebSocket) connections. See max-open-connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -# Deprecated gRPC in the RPC layer of Tendermint will be deprecated in 0.36. -grpc-max-open-connections = {{ .RPC.GRPCMaxOpenConnections }} - # Activate unsafe RPC commands like /dial-seeds and /unsafe-flush-mempool unsafe = {{ .RPC.Unsafe }} # Maximum number of simultaneous connections (including WebSocket). -# Does not include gRPC connections. See grpc-max-open-connections # If you want to accept a larger number than the default, make sure # you increase your OS limits. # 0 - unlimited. @@ -242,36 +226,36 @@ max-open-connections = {{ .RPC.MaxOpenConnections }} max-subscription-clients = {{ .RPC.MaxSubscriptionClients }} # Maximum number of unique queries a given client can /subscribe to -# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to -# the estimated # maximum number of broadcast_tx_commit calls per block. +# If you're using a Local RPC client and /broadcast_tx_commit, set this +# to the estimated maximum number of broadcast_tx_commit calls per block. max-subscriptions-per-client = {{ .RPC.MaxSubscriptionsPerClient }} -# Experimental parameter to specify the maximum number of events a node will -# buffer, per subscription, before returning an error and closing the -# subscription. Must be set to at least 100, but higher values will accommodate -# higher event throughput rates (and will use more memory). -experimental-subscription-buffer-size = {{ .RPC.SubscriptionBufferSize }} - -# Experimental parameter to specify the maximum number of RPC responses that -# can be buffered per WebSocket client. If clients cannot read from the -# WebSocket endpoint fast enough, they will be disconnected, so increasing this -# parameter may reduce the chances of them being disconnected (but will cause -# the node to use more memory). +# If true, disable the websocket interface to the RPC service. This has +# the effect of disabling the /subscribe, /unsubscribe, and /unsubscribe_all +# methods for event subscription. 
 #
-# Must be at least the same as "experimental-subscription-buffer-size",
-# otherwise connections could be dropped unnecessarily. This value should
-# ideally be somewhat higher than "experimental-subscription-buffer-size" to
-# accommodate non-subscription-related RPC responses.
-experimental-websocket-write-buffer-size = {{ .RPC.WebSocketWriteBufferSize }}
-
-# If a WebSocket client cannot read fast enough, at present we may
-# silently drop events instead of generating an error or disconnecting the
-# client.
+# EXPERIMENTAL: This setting will be removed in Tendermint v0.37.
+experimental-disable-websocket = {{ .RPC.ExperimentalDisableWebsocket }}
+
+# The time window size for the event log. All events up to this long before
+# the latest (up to EventLogMaxItems) will be available for subscribers to
+# fetch via the /events method. If 0 (the default) the event log and the
+# /events RPC method are disabled.
+event-log-window-size = "{{ .RPC.EventLogWindowSize }}"
+
+# The maximum number of events that may be retained by the event log. If
+# this value is 0, no upper limit is set. Otherwise, items in excess of
+# this number will be discarded from the event log.
 #
-# Enabling this experimental parameter will cause the WebSocket connection to
-# be closed instead if it cannot read fast enough, allowing for greater
-# predictability in subscription behavior.
-experimental-close-on-slow-client = {{ .RPC.CloseOnSlowClient }}
+# Warning: This setting is a safety valve. Setting it too low may cause
+# subscribers to miss events. Try to choose a value higher than the
+# maximum worst-case expected event load within the chosen window size in
+# ordinary operation.
+#
+# For example, if the window size is 10 minutes and the node typically
+# averages 1000 events per ten minutes, but with occasional known spikes of
+# up to 2000, choose a value > 2000.
+event-log-max-items = {{ .RPC.EventLogMaxItems }}
 
 # How long to wait for a tx to be committed during /broadcast_tx_commit.
 # WARNING: Using a value larger than 10s will result in increasing the
@@ -308,9 +292,6 @@ pprof-laddr = "{{ .RPC.PprofListenAddress }}"
 #######################################################
 [p2p]
 
-# Enable the legacy p2p layer.
-use-legacy = {{ .P2P.UseLegacy }}
-
 # Select the p2p internal queue
 queue-type = "{{ .P2P.QueueType }}"
 
@@ -342,86 +323,48 @@ persistent-peers = "{{ .P2P.PersistentPeers }}"
 # UPNP port forwarding
 upnp = {{ .P2P.UPNP }}
 
-# Path to address book
-# TODO: Remove once p2p refactor is complete in favor of peer store.
-addr-book-file = "{{ js .P2P.AddrBook }}"
-
-# Set true for strict address routability rules
-# Set false for private or local networks
-addr-book-strict = {{ .P2P.AddrBookStrict }}
-
-# Maximum number of inbound peers
-#
-# TODO: Remove once p2p refactor is complete in favor of MaxConnections.
-# ref: https://github.com/tendermint/tendermint/issues/5670
-max-num-inbound-peers = {{ .P2P.MaxNumInboundPeers }}
-
-# Maximum number of outbound peers to connect to, excluding persistent peers
-#
-# TODO: Remove once p2p refactor is complete in favor of MaxConnections.
-# ref: https://github.com/tendermint/tendermint/issues/5670
-max-num-outbound-peers = {{ .P2P.MaxNumOutboundPeers }}
-
 # Maximum number of connections (inbound and outbound).
 max-connections = {{ .P2P.MaxConnections }}
 
 # Rate limits the number of incoming connection attempts per IP address.
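The event-log settings above replace the experimental WebSocket buffer knobs. A hedged sketch of setting them programmatically; DefaultConfig and the RPC field names mirror the template placeholders, and the chosen values are illustrative only:

```go
package main

import (
	"time"

	"github.com/tendermint/tendermint/config"
)

func main() {
	cfg := config.DefaultConfig()
	// Keep a ten-minute window of events available to /events consumers.
	cfg.RPC.EventLogWindowSize = 10 * time.Minute
	// Safety valve: cap retained items above the worst-case event volume
	// expected within that window, per the guidance in the template.
	cfg.RPC.EventLogMaxItems = 4000
}
```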
max-incoming-connection-attempts = {{ .P2P.MaxIncomingConnectionAttempts }} -# List of node IDs, to which a connection will be (re)established ignoring any existing limits -# TODO: Remove once p2p refactor is complete. -# ref: https://github.com/tendermint/tendermint/issues/5670 -unconditional-peer-ids = "{{ .P2P.UnconditionalPeerIDs }}" +# Set true to enable the peer-exchange reactor +pex = {{ .P2P.PexReactor }} -# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) -# TODO: Remove once p2p refactor is complete -# ref: https:#github.com/tendermint/tendermint/issues/5670 -persistent-peers-max-dial-period = "{{ .P2P.PersistentPeersMaxDialPeriod }}" +# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) +# Warning: IPs will be exposed at /net_info, for more information https://github.com/tendermint/tendermint/issues/3055 +private-peer-ids = "{{ .P2P.PrivatePeerIDs }}" + +# Toggle to disable guard against peers connecting from the same ip. +allow-duplicate-ip = {{ .P2P.AllowDuplicateIP }} + +# Peer connection configuration. +handshake-timeout = "{{ .P2P.HandshakeTimeout }}" +dial-timeout = "{{ .P2P.DialTimeout }}" # Time to wait before flushing messages out on the connection -# TODO: Remove once p2p refactor is complete -# ref: https:#github.com/tendermint/tendermint/issues/5670 +# TODO: Remove once MConnConnection is removed. flush-throttle-timeout = "{{ .P2P.FlushThrottleTimeout }}" # Maximum size of a message packet payload, in bytes -# TODO: Remove once p2p refactor is complete -# ref: https:#github.com/tendermint/tendermint/issues/5670 +# TODO: Remove once MConnConnection is removed. max-packet-msg-payload-size = {{ .P2P.MaxPacketMsgPayloadSize }} # Rate at which packets can be sent, in bytes/second -# TODO: Remove once p2p refactor is complete -# ref: https:#github.com/tendermint/tendermint/issues/5670 +# TODO: Remove once MConnConnection is removed. send-rate = {{ .P2P.SendRate }} # Rate at which packets can be received, in bytes/second -# TODO: Remove once p2p refactor is complete -# ref: https:#github.com/tendermint/tendermint/issues/5670 +# TODO: Remove once MConnConnection is removed. recv-rate = {{ .P2P.RecvRate }} -# Set true to enable the peer-exchange reactor -pex = {{ .P2P.PexReactor }} - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -# Warning: IPs will be exposed at /net_info, for more information https://github.com/tendermint/tendermint/issues/3055 -private-peer-ids = "{{ .P2P.PrivatePeerIDs }}" - -# Toggle to disable guard against peers connecting from the same ip. -allow-duplicate-ip = {{ .P2P.AllowDuplicateIP }} - -# Peer connection configuration. -handshake-timeout = "{{ .P2P.HandshakeTimeout }}" -dial-timeout = "{{ .P2P.DialTimeout }}" ####################################################### ### Mempool Configuration Option ### ####################################################### [mempool] -# Mempool version to use: -# 1) "v0" - The legacy non-prioritized mempool reactor. -# 2) "v1" (default) - The prioritized mempool reactor. -version = "{{ .Mempool.Version }}" - recheck = {{ .Mempool.Recheck }} broadcast = {{ .Mempool.Broadcast }} @@ -510,21 +453,6 @@ chunk-request-timeout = "{{ .StateSync.ChunkRequestTimeout }}" # The number of concurrent chunk and block fetchers to run (default: 4). 
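The four MConnection knobs kept in the p2p template above (flush throttle, packet payload size, send and receive rates) are exactly the fields TestP2PConfigValidateBasic flips negative. A sketch of tuning them in code, with illustrative values:

```go
package main

import (
	"time"

	"github.com/tendermint/tendermint/config"
)

func main() {
	cfg := config.DefaultConfig()
	// ValidateBasic rejects negative values for all four fields.
	cfg.P2P.FlushThrottleTimeout = 100 * time.Millisecond
	cfg.P2P.MaxPacketMsgPayloadSize = 1400 // bytes
	cfg.P2P.SendRate = 5_120_000           // bytes/second
	cfg.P2P.RecvRate = 5_120_000           // bytes/second
}
```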
fetchers = "{{ .StateSync.Fetchers }}" -####################################################### -### Block Sync Configuration Connections ### -####################################################### -[blocksync] - -# If this node is many blocks behind the tip of the chain, BlockSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -enable = {{ .BlockSync.Enable }} - -# Block Sync version to use: -# 1) "v0" (default) - the standard Block Sync implementation -# 2) "v2" - DEPRECATED, please use v0 -version = "{{ .BlockSync.Version }}" - ####################################################### ### Consensus Configuration Options ### ####################################################### @@ -532,22 +460,6 @@ version = "{{ .BlockSync.Version }}" wal-file = "{{ js .Consensus.WalPath }}" -# How long we wait for a proposal block before prevoting nil -timeout-propose = "{{ .Consensus.TimeoutPropose }}" -# How much timeout-propose increases with each round -timeout-propose-delta = "{{ .Consensus.TimeoutProposeDelta }}" -# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) -timeout-prevote = "{{ .Consensus.TimeoutPrevote }}" -# How much the timeout-prevote increases with each round -timeout-prevote-delta = "{{ .Consensus.TimeoutPrevoteDelta }}" -# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil) -timeout-precommit = "{{ .Consensus.TimeoutPrecommit }}" -# How much the timeout-precommit increases with each round -timeout-precommit-delta = "{{ .Consensus.TimeoutPrecommitDelta }}" -# How long we wait after committing a block, before starting on the new -# height (this gives us a chance to receive some more precommits, even -# though we already have +2/3). -timeout-commit = "{{ .Consensus.TimeoutCommit }}" # How long is the window for the min proposed block time proposed-block-time-window = "{{ .Consensus.ProposedBlockTimeWindow }}" @@ -557,9 +469,6 @@ proposed-block-time-window = "{{ .Consensus.ProposedBlockTimeWindow }}" # So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. double-sign-check-height = {{ .Consensus.DoubleSignCheckHeight }} -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip-timeout-commit = {{ .Consensus.SkipTimeoutCommit }} - # EmptyBlocks mode and possible interval between empty blocks create-empty-blocks = {{ .Consensus.CreateEmptyBlocks }} create-empty-blocks-interval = "{{ .Consensus.CreateEmptyBlocksInterval }}" @@ -571,6 +480,50 @@ create-proof-block-range = "{{ .Consensus.CreateProofBlockRange }}" peer-gossip-sleep-duration = "{{ .Consensus.PeerGossipSleepDuration }}" peer-query-maj23-sleep-duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}" +### Unsafe Timeout Overrides ### + +# These fields provide temporary overrides for the Timeout consensus parameters. +# Use of these parameters is strongly discouraged. Using these parameters may have serious +# liveness implications for the validator and for the chain. +# +# These fields will be removed from the configuration file in the v0.37 release of Tendermint. +# For additional information, see ADR-74: +# https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-074-timeout-params.md + +# This field provides an unsafe override of the Propose timeout consensus parameter. +# This field configures how long the consensus engine will wait for a proposal block before prevoting nil. 
+# If this field is set to a value greater than 0, it will take effect. +# unsafe-propose-timeout-override = {{ .Consensus.UnsafeProposeTimeoutOverride }} + +# This field provides an unsafe override of the ProposeDelta timeout consensus parameter. +# This field configures how much the propose timeout increases with each round. +# If this field is set to a value greater than 0, it will take effect. +# unsafe-propose-timeout-delta-override = {{ .Consensus.UnsafeProposeTimeoutDeltaOverride }} + +# This field provides an unsafe override of the Vote timeout consensus parameter. +# This field configures how long the consensus engine will wait after +# receiving +2/3 votes in a round. +# If this field is set to a value greater than 0, it will take effect. +# unsafe-vote-timeout-override = {{ .Consensus.UnsafeVoteTimeoutOverride }} + +# This field provides an unsafe override of the VoteDelta timeout consensus parameter. +# This field configures how much the vote timeout increases with each round. +# If this field is set to a value greater than 0, it will take effect. +# unsafe-vote-timeout-delta-override = {{ .Consensus.UnsafeVoteTimeoutDeltaOverride }} + +# This field provides an unsafe override of the Commit timeout consensus parameter. +# This field configures how long the consensus engine will wait after receiving +# +2/3 precommits before beginning the next height. +# If this field is set to a value greater than 0, it will take effect. +# unsafe-commit-timeout-override = {{ .Consensus.UnsafeCommitTimeoutOverride }} + +# This field provides an unsafe override of the BypassCommitTimeout consensus parameter. +# This field configures if the consensus engine will wait for the full Commit timeout +# before proceeding to the next height. +# If this field is set to true, the consensus engine will proceed to the next height +# as soon as the node has gathered votes from all of the validators on the network. +# unsafe-bypass-commit-timeout-override = + # Signing parameters quorum-type = "{{ .Consensus.QuorumType }}" @@ -589,8 +542,8 @@ app-hash-size = "{{ .Consensus.AppHashSize }}" # to decide which txs to index based on configuration set in the application. # # Options: -# 1) "null" -# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). +# 1) "null" (default) - no indexer services. +# 2) "kv" - a simple indexer backed by key-value storage (see DBBackend) # 3) "psql" - the indexer services backed by PostgreSQL. # When "kv" or "psql" is chosen "tx.height" and "tx.hash" will always be indexed. 
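Wiring-wise, the overrides described above are plain ConsensusConfig fields (their names appear in the validation test earlier in this diff); per the ADR-74 note, each takes effect only when set greater than zero. A sketch, strongly discouraged outside testing:

```go
package main

import (
	"time"

	"github.com/tendermint/tendermint/config"
)

func main() {
	cfg := config.DefaultConfig()
	// Zero defers to the Timeout consensus parameters; values > 0 override them.
	cfg.Consensus.UnsafeProposeTimeoutOverride = 3 * time.Second
	cfg.Consensus.UnsafeVoteTimeoutOverride = time.Second
	cfg.Consensus.UnsafeCommitTimeoutOverride = time.Second
}
```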
indexer = [{{ range $i, $e := .TxIndex.Indexer }}{{if $i}}, {{end}}{{ printf "%q" $e}}{{end}}] @@ -624,21 +577,21 @@ namespace = "{{ .Instrumentation.Namespace }}" /****** these are for test settings ***********/ -func ResetTestRoot(testName string) (*Config, error) { - return ResetTestRootWithChainID(testName, "") +func ResetTestRoot(dir, testName string) (*Config, error) { + return ResetTestRootWithChainID(dir, testName, "") } -func ResetTestRootWithChainID(testName string, chainID string) (*Config, error) { +func ResetTestRootWithChainID(dir, testName string, chainID string) (*Config, error) { // create a unique, concurrency-safe test directory under os.TempDir() - rootDir, err := ioutil.TempDir("", fmt.Sprintf("%s-%s_", chainID, testName)) + rootDir, err := os.MkdirTemp(dir, fmt.Sprintf("%s-%s_", chainID, testName)) if err != nil { return nil, err } // ensure config and data subdirs are created - if err := tmos.EnsureDir(filepath.Join(rootDir, defaultConfigDir), DefaultDirPerm); err != nil { + if err := tmos.EnsureDir(filepath.Join(rootDir, defaultConfigDir), defaultDirPerm); err != nil { return nil, err } - if err := tmos.EnsureDir(filepath.Join(rootDir, defaultDataDir), DefaultDirPerm); err != nil { + if err := tmos.EnsureDir(filepath.Join(rootDir, defaultDataDir), defaultDirPerm); err != nil { return nil, err } @@ -670,17 +623,18 @@ func ResetTestRootWithChainID(testName string, chainID string) (*Config, error) } config := TestConfig().SetRoot(rootDir) + config.Instrumentation.Namespace = fmt.Sprintf("%s_%s_%s", testName, chainID, tmrand.Str(16)) return config, nil } func writeFile(filePath string, contents []byte, mode os.FileMode) error { - if err := ioutil.WriteFile(filePath, contents, mode); err != nil { + if err := os.WriteFile(filePath, contents, mode); err != nil { return fmt.Errorf("failed to write file: %w", err) } return nil } -var testGenesisFmt = `{ +const testGenesisFmt = `{ "genesis_time": "2018-10-10T08:20:13.695936996Z", "chain_id": "%s", "initial_height": "1", @@ -691,6 +645,18 @@ var testGenesisFmt = `{ "max_gas": "-1", "time_iota_ms": "10" }, + "synchrony": { + "message_delay": "500000000", + "precision": "10000000" + }, + "timeout": { + "propose": "30000000", + "propose_delta": "50000", + "vote": "30000000", + "vote_delta": "50000", + "commit": "10000000", + "bypass_timeout_commit": true + }, "evidence": { "max_age_num_blocks": "100000", "max_age_duration": "172800000000000", @@ -709,7 +675,7 @@ var testGenesisFmt = `{ "type": "tendermint/PubKeyBLS12381", "value":"F5BjXeh0DppqaxX7a3LzoWr6CXPZcZeba6VHYdbiUCxQ23b00mFD8FRZpCz9Ug1E" }, - "power": "100", + "power": 100, "name": "", "pro_tx_hash": "51BF39CC1F41B9FC63DFA5B1EDF3F0CA3AD5CAFAE4B12B4FE9263B08BB50C45F" } @@ -744,7 +710,7 @@ var testPrivValidatorKey = `{ "pro_tx_hash": "51BF39CC1F41B9FC63DFA5B1EDF3F0CA3AD5CAFAE4B12B4FE9263B08BB50C45F" }` -var testPrivValidatorState = `{ +const testPrivValidatorState = `{ "height": "0", "round": 0, "step": 0 diff --git a/config/toml_test.go b/config/toml_test.go index 26376b72d2..cf27c4484a 100644 --- a/config/toml_test.go +++ b/config/toml_test.go @@ -1,7 +1,6 @@ package config import ( - "io/ioutil" "os" "path/filepath" "strings" @@ -15,26 +14,22 @@ func ensureFiles(t *testing.T, rootDir string, files ...string) { for _, f := range files { p := rootify(rootDir, f) _, err := os.Stat(p) - assert.Nil(t, err, p) + assert.NoError(t, err, p) } } func TestEnsureRoot(t *testing.T) { - require := require.New(t) - // setup temp dir for test - tmpDir, err := ioutil.TempDir("", 
"config-test") - require.NoError(err) - defer os.RemoveAll(tmpDir) + tmpDir := t.TempDir() // create root dir EnsureRoot(tmpDir) - require.NoError(WriteConfigFile(tmpDir, DefaultConfig())) + require.NoError(t, WriteConfigFile(tmpDir, DefaultConfig())) // make sure config is set properly - data, err := ioutil.ReadFile(filepath.Join(tmpDir, defaultConfigFilePath)) - require.NoError(err) + data, err := os.ReadFile(filepath.Join(tmpDir, defaultConfigFilePath)) + require.NoError(t, err) checkConfig(t, string(data)) @@ -42,19 +37,17 @@ func TestEnsureRoot(t *testing.T) { } func TestEnsureTestRoot(t *testing.T) { - require := require.New(t) - testName := "ensureTestRoot" // create root dir - cfg, err := ResetTestRoot(testName) - require.NoError(err) + cfg, err := ResetTestRoot(t.TempDir(), testName) + require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) rootDir := cfg.RootDir // make sure config is set properly - data, err := ioutil.ReadFile(filepath.Join(rootDir, defaultConfigFilePath)) - require.Nil(err) + data, err := os.ReadFile(filepath.Join(rootDir, defaultConfigFilePath)) + require.NoError(t, err) checkConfig(t, string(data)) @@ -71,7 +64,6 @@ func checkConfig(t *testing.T, configFile string) { "moniker", "seeds", "proxy-app", - "blocksync", "create-empty-blocks", "peer", "timeout", diff --git a/crypto/README.md b/crypto/README.md index 20346d7155..d60628d970 100644 --- a/crypto/README.md +++ b/crypto/README.md @@ -12,7 +12,7 @@ For any specific algorithm, use its specific module e.g. ## Binary encoding -For Binary encoding, please refer to the [Tendermint encoding specification](https://docs.tendermint.com/master/spec/blockchain/encoding.html). +For Binary encoding, please refer to the [Tendermint encoding specification](https://docs.tendermint.com/master/spec/core/encoding.html). ## JSON Encoding diff --git a/crypto/bls12381/bls12381.go b/crypto/bls12381/bls12381.go index e6297bd29a..cb554e8418 100644 --- a/crypto/bls12381/bls12381.go +++ b/crypto/bls12381/bls12381.go @@ -2,6 +2,8 @@ package bls12381 import ( "bytes" + "crypto/rand" + "crypto/sha256" "crypto/subtle" "encoding/hex" "errors" @@ -9,11 +11,10 @@ import ( "io" bls "github.com/dashpay/bls-signatures/go-bindings" + "github.com/tendermint/tendermint/internal/jsontypes" "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/crypto/tmhash" tmbytes "github.com/tendermint/tendermint/libs/bytes" - tmjson "github.com/tendermint/tendermint/libs/json" ) //------------------------------------- @@ -48,13 +49,16 @@ var ( ) func init() { - tmjson.RegisterType(PubKey{}, PubKeyName) - tmjson.RegisterType(PrivKey{}, PrivKeyName) + jsontypes.MustRegister(PubKey{}) + jsontypes.MustRegister(PrivKey{}) } // PrivKey implements crypto.PrivKey. type PrivKey []byte +// TypeTag satisfies the jsontypes.Tagged interface. +func (PrivKey) TypeTag() string { return PrivKeyName } + // Bytes returns the privkey byte format. func (privKey PrivKey) Bytes() []byte { return privKey @@ -145,7 +149,7 @@ func (privKey PrivKey) TypeValue() crypto.KeyType { // It uses OS randomness in conjunction with the current global random seed // in tendermint/libs/common to generate the private key. func GenPrivKey() PrivKey { - return genPrivKey(crypto.CReader()) + return genPrivKey(rand.Reader) } // genPrivKey generates a new bls12381 private key using the provided reader. @@ -168,8 +172,8 @@ func genPrivKey(rand io.Reader) PrivKey { // NOTE: secret should be the output of a KDF like bcrypt, // if it's derived from user input. 
func GenPrivKeyFromSecret(secret []byte) PrivKey { - seed := crypto.Sha256(secret) // Not Ripemd160 because we want 32 bytes. - privKey, err := bls.PrivateKeyFromSeed(seed) + seed := sha256.Sum256(secret) // Not Ripemd160 because we want 32 bytes. + privKey, err := bls.PrivateKeyFromSeed(seed[:]) if err != nil { panic(err) } @@ -205,7 +209,7 @@ func RecoverThresholdPublicKeyFromPublicKeys(publicKeys []crypto.PubKey, blsIds } for i, blsID := range blsIds { - if len(blsID) != tmhash.Size { + if len(blsID) != crypto.HashSize { return nil, fmt.Errorf("blsID incorrect size in public key recovery, expected 32 bytes (got %d)", len(blsID)) } var hash bls.Hash @@ -241,7 +245,7 @@ func RecoverThresholdSignatureFromShares(sigSharesData [][]byte, blsIds [][]byte } for i, blsID := range blsIds { - if len(blsID) != tmhash.Size { + if len(blsID) != crypto.HashSize { return nil, fmt.Errorf("blsID incorrect size in signature recovery, expected 32 bytes (got %d)", len(blsID)) } var hash bls.Hash @@ -263,12 +267,15 @@ var _ crypto.PubKey = PubKey{} // PubKey PubKeyBLS12381 implements crypto.PubKey for the bls12381 signature scheme. type PubKey []byte +// TypeTag satisfies the jsontypes.Tagged interface. +func (PubKey) TypeTag() string { return PubKeyName } + // Address is the SHA256-20 of the raw pubkey bytes. func (pubKey PubKey) Address() crypto.Address { if len(pubKey) != PubKeySize { panic("pubkey is incorrect size") } - return tmhash.SumTruncated(pubKey) + return crypto.AddressHash(pubKey) } // Bytes returns the PubKey byte format. diff --git a/crypto/crypto.go b/crypto/crypto.go index 9c5073a1f2..7572d2d074 100644 --- a/crypto/crypto.go +++ b/crypto/crypto.go @@ -2,18 +2,23 @@ package crypto import ( "bytes" + "crypto/sha256" + "encoding/json" "errors" "fmt" "github.com/dashevo/dashd-go/btcjson" - "github.com/tendermint/tendermint/crypto/tmhash" + "github.com/tendermint/tendermint/internal/jsontypes" tmbytes "github.com/tendermint/tendermint/libs/bytes" ) const ( + // HashSize is the size in bytes of an AddressHash. + HashSize = sha256.Size + // AddressSize is the size of a pubkey address. - AddressSize = tmhash.TruncatedSize + AddressSize = 20 DefaultHashSize = 32 LargeAppHashSize = DefaultHashSize SmallAppHashSize = 20 @@ -45,8 +50,23 @@ type ProTxHash = tmbytes.HexBytes type QuorumHash = tmbytes.HexBytes +// AddressHash computes a truncated SHA-256 hash of bz for use as +// a peer address. +// +// See: https://docs.tendermint.com/master/spec/core/data_structures.html#address +func AddressHash(bz []byte) Address { + h := sha256.Sum256(bz) + return Address(h[:AddressSize]) +} + +// Checksum returns the SHA256 of the bz. 
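The Checksum and AddressHash helpers introduced below consolidate what tmhash.Sum and tmhash.SumTruncated used to provide. A small illustrative program; the input bytes are arbitrary:

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto"
)

func main() {
	data := []byte("some canonical bytes")
	sum := crypto.Checksum(data)     // full 32-byte SHA-256 (replaces tmhash.Sum)
	addr := crypto.AddressHash(data) // first 20 bytes (replaces tmhash.SumTruncated)
	fmt.Println(len(sum), len(addr)) // 32 20
}
```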
+func Checksum(bz []byte) []byte { + h := sha256.Sum256(bz) + return h[:] +} + func ProTxHashFromSeedBytes(bz []byte) ProTxHash { - return tmhash.Sum(bz) + return Checksum(bz) } func RandProTxHash() ProTxHash { @@ -98,9 +118,50 @@ func (sptxh SortProTxHash) Swap(i, j int) { } type QuorumKeys struct { - PrivKey PrivKey `json:"priv_key"` - PubKey PubKey `json:"pub_key"` - ThresholdPublicKey PubKey `json:"threshold_public_key"` + PrivKey PrivKey + PubKey PubKey + ThresholdPublicKey PubKey +} + +type quorumKeysJSON struct { + PrivKey json.RawMessage `json:"priv_key"` + PubKey json.RawMessage `json:"pub_key"` + ThresholdPublicKey json.RawMessage `json:"threshold_public_key"` +} + +func (pvKey QuorumKeys) MarshalJSON() ([]byte, error) { + var keys quorumKeysJSON + var err error + keys.PrivKey, err = jsontypes.Marshal(pvKey.PrivKey) + if err != nil { + return nil, err + } + keys.PubKey, err = jsontypes.Marshal(pvKey.PubKey) + if err != nil { + return nil, err + } + keys.ThresholdPublicKey, err = jsontypes.Marshal(pvKey.ThresholdPublicKey) + if err != nil { + return nil, err + } + return json.Marshal(keys) +} + +func (pvKey *QuorumKeys) UnmarshalJSON(data []byte) error { + var keys quorumKeysJSON + err := json.Unmarshal(data, &keys) + if err != nil { + return err + } + err = jsontypes.Unmarshal(keys.PrivKey, &pvKey.PrivKey) + if err != nil { + return err + } + err = jsontypes.Unmarshal(keys.PubKey, &pvKey.PubKey) + if err != nil { + return err + } + return jsontypes.Unmarshal(keys.ThresholdPublicKey, &pvKey.ThresholdPublicKey) } // Validator is a validator interface @@ -109,7 +170,6 @@ type Validator interface { } type PubKey interface { - HexStringer Address() Address Bytes() []byte VerifySignature(msg []byte, sig []byte) bool @@ -118,8 +178,11 @@ type PubKey interface { VerifyAggregateSignature(msgs [][]byte, sig []byte) bool Equals(PubKey) bool Type() string - TypeValue() KeyType - String() string + + // Implementations must support tagged encoding in JSON. + jsontypes.Tagged + fmt.Stringer + HexStringer } type PrivKey interface { @@ -129,7 +192,9 @@ type PrivKey interface { PubKey() PubKey Equals(PrivKey) bool Type() string - TypeValue() KeyType + + // Implementations must support tagged encoding in JSON. 
+ jsontypes.Tagged } type Symmetric interface { diff --git a/crypto/crypto_test.go b/crypto/crypto_test.go new file mode 100644 index 0000000000..af89915f13 --- /dev/null +++ b/crypto/crypto_test.go @@ -0,0 +1,17 @@ +package crypto + +import ( + "encoding/hex" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestChecksum(t *testing.T) { + // since sha256 hash algorithm is critical for tenderdash, this test is needed to inform us + // if for any reason the hash algorithm is changed + actual := Checksum([]byte("dash is the best cryptocurrency in the world")) + want, err := hex.DecodeString("FFE75CFE38997723E7C33D0457521B0BA75AB48B39BC467413BDC853ACC7476F") + require.NoError(t, err) + require.Equal(t, want, actual) +} diff --git a/crypto/ed25519/bench_test.go b/crypto/ed25519/bench_test.go index e57cd393f5..49fcd15041 100644 --- a/crypto/ed25519/bench_test.go +++ b/crypto/ed25519/bench_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/internal/benchmarking" ) diff --git a/crypto/ed25519/ed25519.go b/crypto/ed25519/ed25519.go index f445808dd3..1b26a18d61 100644 --- a/crypto/ed25519/ed25519.go +++ b/crypto/ed25519/ed25519.go @@ -2,6 +2,8 @@ package ed25519 import ( "bytes" + "crypto/rand" + "crypto/sha256" "crypto/subtle" "encoding/hex" "errors" @@ -12,8 +14,7 @@ import ( "github.com/oasisprotocol/curve25519-voi/primitives/ed25519/extra/cache" "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/crypto/tmhash" - tmjson "github.com/tendermint/tendermint/libs/json" + "github.com/tendermint/tendermint/internal/jsontypes" ) //------------------------------------- @@ -57,13 +58,16 @@ const ( ) func init() { - tmjson.RegisterType(PubKey{}, PubKeyName) - tmjson.RegisterType(PrivKey{}, PrivKeyName) + jsontypes.MustRegister(PubKey{}) + jsontypes.MustRegister(PrivKey{}) } // PrivKey implements crypto.PrivKey. type PrivKey []byte +// TypeTag satisfies the jsontypes.Tagged interface. +func (PrivKey) TypeTag() string { return PrivKeyName } + // Bytes returns the privkey byte format. func (privKey PrivKey) Bytes() []byte { return []byte(privKey) @@ -138,7 +142,7 @@ func (privKey PrivKey) TypeValue() crypto.KeyType { // It uses OS randomness in conjunction with the current global random seed // in tendermint/libs/common to generate the private key. func GenPrivKey() PrivKey { - return genPrivKey(crypto.CReader()) + return genPrivKey(rand.Reader) } // genPrivKey generates a new ed25519 private key using the provided reader. @@ -156,9 +160,8 @@ func genPrivKey(rand io.Reader) PrivKey { // NOTE: secret should be the output of a KDF like bcrypt, // if it's derived from user input. func GenPrivKeyFromSecret(secret []byte) PrivKey { - seed := crypto.Sha256(secret) // Not Ripemd160 because we want 32 bytes. - - return PrivKey(ed25519.NewKeyFromSeed(seed)) + seed := sha256.Sum256(secret) + return PrivKey(ed25519.NewKeyFromSeed(seed[:])) } //------------------------------------- @@ -168,12 +171,15 @@ var _ crypto.PubKey = PubKey{} // PubKeyEd25519 implements crypto.PubKey for the Ed25519 signature scheme. type PubKey []byte +// TypeTag satisfies the jsontypes.Tagged interface. +func (PubKey) TypeTag() string { return PubKeyName } + // Address is the SHA256-20 of the raw pubkey bytes. 
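With PubKey and PrivKey now required to implement jsontypes.Tagged, QuorumKeys can round-trip through encoding/json while preserving the concrete key types. A minimal sketch; BLS keys are chosen arbitrarily, and using the same key for both public fields is for brevity only:

```go
package main

import (
	"encoding/json"

	"github.com/tendermint/tendermint/crypto"
	"github.com/tendermint/tendermint/crypto/bls12381"
)

func main() {
	priv := bls12381.GenPrivKey()
	keys := crypto.QuorumKeys{
		PrivKey:            priv,
		PubKey:             priv.PubKey(),
		ThresholdPublicKey: priv.PubKey(),
	}
	blob, err := json.Marshal(keys) // each key serializes as {"type": ..., "value": ...}
	if err != nil {
		panic(err)
	}
	var decoded crypto.QuorumKeys
	if err := json.Unmarshal(blob, &decoded); err != nil {
		panic(err) // jsontypes restores the concrete types via TypeTag
	}
}
```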
func (pubKey PubKey) Address() crypto.Address { if len(pubKey) != PubKeySize { panic("pubkey is incorrect size") } - return crypto.Address(tmhash.SumTruncated(pubKey)) + return crypto.AddressHash(pubKey) } // Bytes returns the PubKey byte format. @@ -268,5 +274,5 @@ func (b *BatchVerifier) Add(key crypto.PubKey, msg, signature []byte) error { } func (b *BatchVerifier) Verify() (bool, []bool) { - return b.BatchVerifier.Verify(crypto.CReader()) + return b.BatchVerifier.Verify(rand.Reader) } diff --git a/crypto/ed25519/ed25519_test.go b/crypto/ed25519/ed25519_test.go index e40acd27dc..db8ff81849 100644 --- a/crypto/ed25519/ed25519_test.go +++ b/crypto/ed25519/ed25519_test.go @@ -17,7 +17,7 @@ func TestSignAndValidateEd25519(t *testing.T) { msg := crypto.CRandBytes(128) sig, err := privKey.SignDigest(msg) - require.Nil(t, err) + require.NoError(t, err) // Test the signature assert.True(t, pubKey.VerifySignature(msg, sig)) diff --git a/crypto/encoding/codec.go b/crypto/encoding/codec.go index 3319d0e5a0..8ca540ecd2 100644 --- a/crypto/encoding/codec.go +++ b/crypto/encoding/codec.go @@ -8,15 +8,15 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/secp256k1" - "github.com/tendermint/tendermint/libs/json" + "github.com/tendermint/tendermint/internal/jsontypes" cryptoproto "github.com/tendermint/tendermint/proto/tendermint/crypto" ) func init() { - json.RegisterType((*cryptoproto.PublicKey)(nil), "tendermint.crypto.PublicKey") - json.RegisterType((*cryptoproto.PublicKey_Bls12381)(nil), "tendermint.crypto.PublicKey_Bls12381") - json.RegisterType((*cryptoproto.PublicKey_Ed25519)(nil), "tendermint.crypto.PublicKey_Ed25519") - json.RegisterType((*cryptoproto.PublicKey_Secp256K1)(nil), "tendermint.crypto.PublicKey_Secp256K1") + jsontypes.MustRegister((*cryptoproto.PublicKey)(nil)) + jsontypes.MustRegister((*cryptoproto.PublicKey_Bls12381)(nil)) + jsontypes.MustRegister((*cryptoproto.PublicKey_Ed25519)(nil)) + jsontypes.MustRegister((*cryptoproto.PublicKey_Secp256K1)(nil)) } // PubKeyToProto takes crypto.PubKey and transforms it to a protobuf Pubkey diff --git a/crypto/example_test.go b/crypto/example_test.go deleted file mode 100644 index f1d0013d48..0000000000 --- a/crypto/example_test.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2017 Tendermint. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package crypto_test - -import ( - "fmt" - - "github.com/tendermint/tendermint/crypto" -) - -func ExampleSha256() { - sum := crypto.Sha256([]byte("This is Tendermint")) - fmt.Printf("%x\n", sum) - // Output: - // f91afb642f3d1c87c17eb01aae5cb65c242dfdbe7cf1066cc260f4ce5d33b94e -} diff --git a/crypto/hash.go b/crypto/hash.go deleted file mode 100644 index e1d22523f2..0000000000 --- a/crypto/hash.go +++ /dev/null @@ -1,11 +0,0 @@ -package crypto - -import ( - "crypto/sha256" -) - -func Sha256(bytes []byte) []byte { - hasher := sha256.New() - hasher.Write(bytes) - return hasher.Sum(nil) -} diff --git a/crypto/merkle/hash.go b/crypto/merkle/hash.go index 9c6df1786e..0bb5448d71 100644 --- a/crypto/merkle/hash.go +++ b/crypto/merkle/hash.go @@ -3,7 +3,7 @@ package merkle import ( "hash" - "github.com/tendermint/tendermint/crypto/tmhash" + "github.com/tendermint/tendermint/crypto" ) // TODO: make these have a large predefined capacity @@ -14,12 +14,12 @@ var ( // returns tmhash() func emptyHash() []byte { - return tmhash.Sum([]byte{}) + return crypto.Checksum([]byte{}) } // returns tmhash(0x00 || leaf) func leafHash(leaf []byte) []byte { - return tmhash.Sum(append(leafPrefix, leaf...)) + return crypto.Checksum(append(leafPrefix, leaf...)) } // returns tmhash(0x00 || leaf) @@ -36,7 +36,7 @@ func innerHash(left []byte, right []byte) []byte { n := copy(data, innerPrefix) n += copy(data[n:], left) copy(data[n:], right) - return tmhash.Sum(data) + return crypto.Checksum(data)[:] } func innerHashOpt(s hash.Hash, left []byte, right []byte) []byte { diff --git a/crypto/merkle/proof.go b/crypto/merkle/proof.go index 80b289d231..8b98d1b21b 100644 --- a/crypto/merkle/proof.go +++ b/crypto/merkle/proof.go @@ -5,7 +5,7 @@ import ( "errors" "fmt" - "github.com/tendermint/tendermint/crypto/tmhash" + "github.com/tendermint/tendermint/crypto" tmcrypto "github.com/tendermint/tendermint/proto/tendermint/crypto" ) @@ -24,10 +24,10 @@ const ( // everything. This also affects the generalized proof system as // well. type Proof struct { - Total int64 `json:"total"` // Total number of items. - Index int64 `json:"index"` // Index of item to prove. - LeafHash []byte `json:"leaf_hash"` // Hash of item value. - Aunts [][]byte `json:"aunts"` // Hashes from leaf's sibling to a root's child. + Total int64 `json:"total,string"` // Total number of items. + Index int64 `json:"index,string"` // Index of item to prove. + LeafHash []byte `json:"leaf_hash"` // Hash of item value. + Aunts [][]byte `json:"aunts"` // Hashes from leaf's sibling to a root's child. } // ProofsFromByteSlices computes inclusion proof for given items. 
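For context on the proof-size checks in the next hunk, here is a hedged sketch of building and verifying an inclusion proof with this package; Verify is assumed to keep its usual (rootHash, leaf) signature:

```go
package main

import (
	"log"

	"github.com/tendermint/tendermint/crypto/merkle"
)

func main() {
	items := [][]byte{[]byte("tx1"), []byte("tx2"), []byte("tx3")}
	root, proofs := merkle.ProofsFromByteSlices(items)
	// Each proof re-hashes its leaf and folds in the aunt hashes until the
	// result matches (or fails to match) the SHA-256 Merkle root.
	if err := proofs[1].Verify(root, items[1]); err != nil {
		log.Fatal(err)
	}
}
```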
@@ -102,15 +102,15 @@ func (sp *Proof) ValidateBasic() error { if sp.Index < 0 { return errors.New("negative Index") } - if len(sp.LeafHash) != tmhash.Size { - return fmt.Errorf("expected LeafHash size to be %d, got %d", tmhash.Size, len(sp.LeafHash)) + if len(sp.LeafHash) != crypto.HashSize { + return fmt.Errorf("expected LeafHash size to be %d, got %d", crypto.HashSize, len(sp.LeafHash)) } if len(sp.Aunts) > MaxAunts { return fmt.Errorf("expected no more than %d aunts, got %d", MaxAunts, len(sp.Aunts)) } for i, auntHash := range sp.Aunts { - if len(auntHash) != tmhash.Size { - return fmt.Errorf("expected Aunts#%d size to be %d, got %d", i, tmhash.Size, len(auntHash)) + if len(auntHash) != crypto.HashSize { + return fmt.Errorf("expected Aunts#%d size to be %d, got %d", i, crypto.HashSize, len(auntHash)) } } return nil diff --git a/crypto/merkle/proof_key_path_test.go b/crypto/merkle/proof_key_path_test.go index 0cc947643f..13d26b3601 100644 --- a/crypto/merkle/proof_key_path_test.go +++ b/crypto/merkle/proof_key_path_test.go @@ -28,13 +28,13 @@ func TestKeyPath(t *testing.T) { case KeyEncodingHex: rand.Read(keys[i]) default: - panic("Unexpected encoding") + require.Fail(t, "Unexpected encoding") } path = path.AppendKey(keys[i], enc) } res, err := KeyPathToKeys(path.String()) - require.Nil(t, err) + require.NoError(t, err) require.Equal(t, len(keys), len(res)) for i, key := range keys { diff --git a/crypto/merkle/proof_test.go b/crypto/merkle/proof_test.go index f0d2f86896..05a5ca369a 100644 --- a/crypto/merkle/proof_test.go +++ b/crypto/merkle/proof_test.go @@ -79,58 +79,58 @@ func TestProofOperators(t *testing.T) { // Good popz := ProofOperators([]ProofOperator{op1, op2, op3, op4}) err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")}) - assert.Nil(t, err) + assert.NoError(t, err) err = popz.VerifyValue(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", bz("INPUT1")) - assert.Nil(t, err) + assert.NoError(t, err) // BAD INPUT err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1_WRONG")}) - assert.NotNil(t, err) + assert.Error(t, err) err = popz.VerifyValue(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", bz("INPUT1_WRONG")) - assert.NotNil(t, err) + assert.Error(t, err) // BAD KEY 1 err = popz.Verify(bz("OUTPUT4"), "/KEY3/KEY2/KEY1", [][]byte{bz("INPUT1")}) - assert.NotNil(t, err) + assert.Error(t, err) // BAD KEY 2 err = popz.Verify(bz("OUTPUT4"), "KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")}) - assert.NotNil(t, err) + assert.Error(t, err) // BAD KEY 3 err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1/", [][]byte{bz("INPUT1")}) - assert.NotNil(t, err) + assert.Error(t, err) // BAD KEY 4 err = popz.Verify(bz("OUTPUT4"), "//KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")}) - assert.NotNil(t, err) + assert.Error(t, err) // BAD KEY 5 err = popz.Verify(bz("OUTPUT4"), "/KEY2/KEY1", [][]byte{bz("INPUT1")}) - assert.NotNil(t, err) + assert.Error(t, err) // BAD OUTPUT 1 err = popz.Verify(bz("OUTPUT4_WRONG"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")}) - assert.NotNil(t, err) + assert.Error(t, err) // BAD OUTPUT 2 err = popz.Verify(bz(""), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")}) - assert.NotNil(t, err) + assert.Error(t, err) // BAD POPZ 1 popz = []ProofOperator{op1, op2, op4} err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")}) - assert.NotNil(t, err) + assert.Error(t, err) // BAD POPZ 2 popz = []ProofOperator{op4, op3, op2, op1} err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")}) - assert.NotNil(t, err) + assert.Error(t, err) // BAD POPZ 3 popz = 
[]ProofOperator{} err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")}) - assert.NotNil(t, err) + assert.Error(t, err) } func bz(s string) []byte { diff --git a/crypto/merkle/proof_value.go b/crypto/merkle/proof_value.go index ab776216b0..0f4f2eb3dd 100644 --- a/crypto/merkle/proof_value.go +++ b/crypto/merkle/proof_value.go @@ -2,9 +2,9 @@ package merkle import ( "bytes" + "crypto/sha256" "fmt" - "github.com/tendermint/tendermint/crypto/tmhash" tmcrypto "github.com/tendermint/tendermint/proto/tendermint/crypto" ) @@ -79,14 +79,13 @@ func (op ValueOp) Run(args [][]byte) ([][]byte, error) { return nil, fmt.Errorf("expected 1 arg, got %v", len(args)) } value := args[0] - hasher := tmhash.New() - hasher.Write(value) - vhash := hasher.Sum(nil) + + vhash := sha256.Sum256(value) bz := new(bytes.Buffer) // Wrap to hash the KVPair. - encodeByteSlice(bz, op.key) // nolint: errcheck // does not error - encodeByteSlice(bz, vhash) // nolint: errcheck // does not error + encodeByteSlice(bz, op.key) //nolint: errcheck // does not error + encodeByteSlice(bz, vhash[:]) //nolint: errcheck // does not error kvhash := leafHash(bz.Bytes()) if !bytes.Equal(kvhash, op.Proof.LeafHash) { diff --git a/crypto/merkle/rfc6962_test.go b/crypto/merkle/rfc6962_test.go index 571e5c75f5..f22a48a32e 100644 --- a/crypto/merkle/rfc6962_test.go +++ b/crypto/merkle/rfc6962_test.go @@ -20,7 +20,7 @@ import ( "encoding/hex" "testing" - "github.com/tendermint/tendermint/crypto/tmhash" + "github.com/tendermint/tendermint/crypto" ) func TestRFC6962Hasher(t *testing.T) { @@ -39,7 +39,7 @@ func TestRFC6962Hasher(t *testing.T) { // echo -n '' | sha256sum { desc: "RFC6962 Empty Tree", - want: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"[:tmhash.Size*2], + want: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"[:crypto.HashSize*2], got: emptyTreeHash, }, @@ -47,19 +47,19 @@ func TestRFC6962Hasher(t *testing.T) { // echo -n 00 | xxd -r -p | sha256sum { desc: "RFC6962 Empty Leaf", - want: "6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d"[:tmhash.Size*2], + want: "6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d"[:crypto.HashSize*2], got: emptyLeafHash, }, // echo -n 004C313233343536 | xxd -r -p | sha256sum { desc: "RFC6962 Leaf", - want: "395aa064aa4c29f7010acfe3f25db9485bbd4b91897b6ad7ad547639252b4d56"[:tmhash.Size*2], + want: "395aa064aa4c29f7010acfe3f25db9485bbd4b91897b6ad7ad547639252b4d56"[:crypto.HashSize*2], got: leafHash, }, // echo -n 014E3132334E343536 | xxd -r -p | sha256sum { desc: "RFC6962 Node", - want: "aa217fe888e47007fa15edab33c2b492a722cb106c64667fc2b044444de66bbb"[:tmhash.Size*2], + want: "aa217fe888e47007fa15edab33c2b492a722cb106c64667fc2b044444de66bbb"[:crypto.HashSize*2], got: innerHash([]byte("N123"), []byte("N456")), }, } { diff --git a/crypto/merkle/tree_test.go b/crypto/merkle/tree_test.go index 641c46b76c..72b260178f 100644 --- a/crypto/merkle/tree_test.go +++ b/crypto/merkle/tree_test.go @@ -7,7 +7,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/crypto/tmhash" + "github.com/tendermint/tendermint/crypto" ctest "github.com/tendermint/tendermint/internal/libs/test" tmrand "github.com/tendermint/tendermint/libs/rand" ) @@ -53,7 +53,7 @@ func TestProof(t *testing.T) { items := make([][]byte, total) for i := 0; i < total; i++ { - items[i] = testItem(tmrand.Bytes(tmhash.Size)) + items[i] = testItem(tmrand.Bytes(crypto.HashSize)) } rootHash = 
HashFromByteSlices(items) @@ -106,7 +106,7 @@ func TestHashAlternatives(t *testing.T) { items := make([][]byte, total) for i := 0; i < total; i++ { - items[i] = testItem(tmrand.Bytes(tmhash.Size)) + items[i] = testItem(tmrand.Bytes(crypto.HashSize)) } rootHash1 := HashFromByteSlicesIterative(items) @@ -119,7 +119,7 @@ func BenchmarkHashAlternatives(b *testing.B) { items := make([][]byte, total) for i := 0; i < total; i++ { - items[i] = testItem(tmrand.Bytes(tmhash.Size)) + items[i] = testItem(tmrand.Bytes(crypto.HashSize)) } b.ResetTimer() diff --git a/crypto/random.go b/crypto/random.go index 275fb1044f..352ea0a3ec 100644 --- a/crypto/random.go +++ b/crypto/random.go @@ -1,26 +1,20 @@ package crypto import ( - crand "crypto/rand" + "crypto/rand" "encoding/hex" - "io" ) // This only uses the OS's randomness -func randBytes(numBytes int) []byte { +func CRandBytes(numBytes int) []byte { b := make([]byte, numBytes) - _, err := crand.Read(b) + _, err := rand.Read(b) if err != nil { panic(err) } return b } -// This only uses the OS's randomness -func CRandBytes(numBytes int) []byte { - return randBytes(numBytes) -} - // CRandHex returns a hex encoded string that's floor(numDigits/2) * 2 long. // // Note: CRandHex(24) gives 96 bits of randomness that @@ -28,8 +22,3 @@ func CRandBytes(numBytes int) []byte { func CRandHex(numDigits int) string { return hex.EncodeToString(CRandBytes(numDigits / 2)) } - -// Returns a crand.Reader. -func CReader() io.Reader { - return crand.Reader -} diff --git a/crypto/secp256k1/secp256k1.go b/crypto/secp256k1/secp256k1.go index 52aee0d5d5..c520360b3c 100644 --- a/crypto/secp256k1/secp256k1.go +++ b/crypto/secp256k1/secp256k1.go @@ -2,6 +2,7 @@ package secp256k1 import ( "bytes" + "crypto/rand" "crypto/sha256" "crypto/subtle" "encoding/hex" @@ -13,10 +14,10 @@ import ( secp256k1 "github.com/btcsuite/btcd/btcec" "github.com/tendermint/tendermint/crypto" - tmjson "github.com/tendermint/tendermint/libs/json" + "github.com/tendermint/tendermint/internal/jsontypes" // necessary for Bitcoin address format - "golang.org/x/crypto/ripemd160" // nolint + "golang.org/x/crypto/ripemd160" //nolint:staticcheck ) //------------------------------------- @@ -29,8 +30,8 @@ const ( ) func init() { - tmjson.RegisterType(PubKey{}, PubKeyName) - tmjson.RegisterType(PrivKey{}, PrivKeyName) + jsontypes.MustRegister(PubKey{}) + jsontypes.MustRegister(PrivKey{}) } var _ crypto.PrivKey = PrivKey{} @@ -38,6 +39,9 @@ var _ crypto.PrivKey = PrivKey{} // PrivKey implements PrivKey. type PrivKey []byte +// TypeTag satisfies the jsontypes.Tagged interface. +func (PrivKey) TypeTag() string { return PrivKeyName } + // Bytes marshalls the private key using amino encoding. func (privKey PrivKey) Bytes() []byte { return []byte(privKey) @@ -73,7 +77,7 @@ func (privKey PrivKey) TypeValue() crypto.KeyType { // GenPrivKey generates a new ECDSA private key on curve secp256k1 private key. // It uses OS randomness to generate the private key. func GenPrivKey() PrivKey { - return genPrivKey(crypto.CReader()) + return genPrivKey(rand.Reader) } // genPrivKey generates a new secp256k1 private key using the provided reader. @@ -145,6 +149,9 @@ const PubKeySize = 33 // This prefix is followed with the x-coordinate. type PubKey []byte +// TypeTag satisfies the jsontypes.Tagged interface. 
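The slimmed-down random.go above keeps only the OS-randomness helpers, now reading from crypto/rand directly. A small usage sketch:

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto"
)

func main() {
	b := crypto.CRandBytes(32) // 32 bytes read straight from crypto/rand
	s := crypto.CRandHex(24)   // 24 hex digits, i.e. 12 random bytes (96 bits)
	fmt.Println(len(b), len(s))
}
```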
+func (PubKey) TypeTag() string { return PubKeyName } + // Address returns a Bitcoin style addresses: RIPEMD160(SHA256(pubkey)) func (pubKey PubKey) Address() crypto.Address { if len(pubKey) != PubKeySize { @@ -199,8 +206,8 @@ var secp256k1halfN = new(big.Int).Rsh(secp256k1.S256().N, 1) // The returned signature will be of the form R || S (in lower-S form). func (privKey PrivKey) Sign(msg []byte) ([]byte, error) { priv, _ := secp256k1.PrivKeyFromBytes(secp256k1.S256(), privKey) - - sig, err := priv.Sign(crypto.Sha256(msg)) + seed := sha256.Sum256(msg) + sig, err := priv.Sign(seed[:]) if err != nil { return nil, err } @@ -229,28 +236,8 @@ func (pubKey PubKey) VerifySignature(msg []byte, sigStr []byte) bool { return false } - return signature.Verify(crypto.Sha256(msg), pub) -} - -// Read Signature struct from R || S. Caller needs to ensure -// that len(sigStr) == 64. -func signatureFromBytes(sigStr []byte) *secp256k1.Signature { - return &secp256k1.Signature{ - R: new(big.Int).SetBytes(sigStr[:32]), - S: new(big.Int).SetBytes(sigStr[32:64]), - } -} - -// Serialize signature to R || S. -// R, S are padded to 32 bytes respectively. -func serializeSig(sig *secp256k1.Signature) []byte { - rBytes := sig.R.Bytes() - sBytes := sig.S.Bytes() - sigBytes := make([]byte, 64) - // 0 pad the byte arrays from the left if they aren't big enough. - copy(sigBytes[32-len(rBytes):32], rBytes) - copy(sigBytes[64-len(sBytes):64], sBytes) - return sigBytes + seed := sha256.Sum256(msg) + return signature.Verify(seed[:], pub) } // SignDigest creates an ECDSA signature on curve Secp256k1. @@ -278,3 +265,24 @@ func (pubKey PubKey) VerifyAggregateSignature(messages [][]byte, sig []byte) boo func (pubKey PubKey) VerifySignatureDigest(hash []byte, sig []byte) bool { return false } + +// Read Signature struct from R || S. Caller needs to ensure +// that len(sigStr) == 64. +func signatureFromBytes(sigStr []byte) *secp256k1.Signature { + return &secp256k1.Signature{ + R: new(big.Int).SetBytes(sigStr[:32]), + S: new(big.Int).SetBytes(sigStr[32:64]), + } +} + +// Serialize signature to R || S. +// R, S are padded to 32 bytes respectively. +func serializeSig(sig *secp256k1.Signature) []byte { + rBytes := sig.R.Bytes() + sBytes := sig.S.Bytes() + sigBytes := make([]byte, 64) + // 0 pad the byte arrays from the left if they aren't big enough. + copy(sigBytes[32-len(rBytes):32], rBytes) + copy(sigBytes[64-len(sBytes):64], sBytes) + return sigBytes +} diff --git a/crypto/secp256k1/secp256k1_test.go b/crypto/secp256k1/secp256k1_test.go index 7a11092939..6cd53704c5 100644 --- a/crypto/secp256k1/secp256k1_test.go +++ b/crypto/secp256k1/secp256k1_test.go @@ -52,7 +52,7 @@ func TestSignAndValidateSecp256k1(t *testing.T) { msg := crypto.CRandBytes(128) sig, err := privKey.Sign(msg) - require.Nil(t, err) + require.NoError(t, err) assert.True(t, pubKey.VerifySignature(msg, sig)) diff --git a/crypto/tmhash/hash.go b/crypto/tmhash/hash.go deleted file mode 100644 index f9b9582420..0000000000 --- a/crypto/tmhash/hash.go +++ /dev/null @@ -1,65 +0,0 @@ -package tmhash - -import ( - "crypto/sha256" - "hash" -) - -const ( - Size = sha256.Size - BlockSize = sha256.BlockSize -) - -// New returns a new hash.Hash. -func New() hash.Hash { - return sha256.New() -} - -// Sum returns the SHA256 of the bz. 
-func Sum(bz []byte) []byte { - h := sha256.Sum256(bz) - return h[:] -} - -//------------------------------------------------------------- - -const ( - TruncatedSize = 20 -) - -type sha256trunc struct { - sha256 hash.Hash -} - -func (h sha256trunc) Write(p []byte) (n int, err error) { - return h.sha256.Write(p) -} -func (h sha256trunc) Sum(b []byte) []byte { - shasum := h.sha256.Sum(b) - return shasum[:TruncatedSize] -} - -func (h sha256trunc) Reset() { - h.sha256.Reset() -} - -func (h sha256trunc) Size() int { - return TruncatedSize -} - -func (h sha256trunc) BlockSize() int { - return h.sha256.BlockSize() -} - -// NewTruncated returns a new hash.Hash. -func NewTruncated() hash.Hash { - return sha256trunc{ - sha256: sha256.New(), - } -} - -// SumTruncated returns the first 20 bytes of SHA256 of the bz. -func SumTruncated(bz []byte) []byte { - hash := sha256.Sum256(bz) - return hash[:TruncatedSize] -} diff --git a/crypto/tmhash/hash_test.go b/crypto/tmhash/hash_test.go deleted file mode 100644 index cf9991b3b2..0000000000 --- a/crypto/tmhash/hash_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package tmhash_test - -import ( - "crypto/sha256" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/crypto/tmhash" -) - -func TestHash(t *testing.T) { - testVector := []byte("abc") - hasher := tmhash.New() - _, err := hasher.Write(testVector) - require.NoError(t, err) - bz := hasher.Sum(nil) - - bz2 := tmhash.Sum(testVector) - - hasher = sha256.New() - _, err = hasher.Write(testVector) - require.NoError(t, err) - bz3 := hasher.Sum(nil) - - assert.Equal(t, bz, bz2) - assert.Equal(t, bz, bz3) -} - -func TestHashTruncated(t *testing.T) { - testVector := []byte("abc") - hasher := tmhash.NewTruncated() - _, err := hasher.Write(testVector) - require.NoError(t, err) - bz := hasher.Sum(nil) - - bz2 := tmhash.SumTruncated(testVector) - - hasher = sha256.New() - _, err = hasher.Write(testVector) - require.NoError(t, err) - bz3 := hasher.Sum(nil) - bz3 = bz3[:tmhash.TruncatedSize] - - assert.Equal(t, bz, bz2) - assert.Equal(t, bz, bz3) -} diff --git a/crypto/version.go b/crypto/version.go deleted file mode 100644 index 77c0bed8a2..0000000000 --- a/crypto/version.go +++ /dev/null @@ -1,3 +0,0 @@ -package crypto - -const Version = "0.9.0-dev" diff --git a/crypto/xchacha20poly1305/vector_test.go b/crypto/xchacha20poly1305/vector_test.go deleted file mode 100644 index c6ca9d8d23..0000000000 --- a/crypto/xchacha20poly1305/vector_test.go +++ /dev/null @@ -1,122 +0,0 @@ -package xchacha20poly1305 - -import ( - "bytes" - "encoding/hex" - "testing" -) - -func toHex(bits []byte) string { - return hex.EncodeToString(bits) -} - -func fromHex(bits string) []byte { - b, err := hex.DecodeString(bits) - if err != nil { - panic(err) - } - return b -} - -func TestHChaCha20(t *testing.T) { - for i, v := range hChaCha20Vectors { - var key [32]byte - var nonce [16]byte - copy(key[:], v.key) - copy(nonce[:], v.nonce) - - HChaCha20(&key, &nonce, &key) - if !bytes.Equal(key[:], v.keystream) { - t.Errorf("test %d: keystream mismatch:\n \t got: %s\n \t want: %s", i, toHex(key[:]), toHex(v.keystream)) - } - } -} - -var hChaCha20Vectors = []struct { - key, nonce, keystream []byte -}{ - { - fromHex("0000000000000000000000000000000000000000000000000000000000000000"), - fromHex("000000000000000000000000000000000000000000000000"), - fromHex("1140704c328d1d5d0e30086cdf209dbd6a43b8f41518a11cc387b669b2ee6586"), - }, - { - 
fromHex("8000000000000000000000000000000000000000000000000000000000000000"), - fromHex("000000000000000000000000000000000000000000000000"), - fromHex("7d266a7fd808cae4c02a0a70dcbfbcc250dae65ce3eae7fc210f54cc8f77df86"), - }, - { - fromHex("0000000000000000000000000000000000000000000000000000000000000001"), - fromHex("000000000000000000000000000000000000000000000002"), - fromHex("e0c77ff931bb9163a5460c02ac281c2b53d792b1c43fea817e9ad275ae546963"), - }, - { - fromHex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"), - fromHex("000102030405060708090a0b0c0d0e0f1011121314151617"), - fromHex("51e3ff45a895675c4b33b46c64f4a9ace110d34df6a2ceab486372bacbd3eff6"), - }, - { - fromHex("24f11cce8a1b3d61e441561a696c1c1b7e173d084fd4812425435a8896a013dc"), - fromHex("d9660c5900ae19ddad28d6e06e45fe5e"), - fromHex("5966b3eec3bff1189f831f06afe4d4e3be97fa9235ec8c20d08acfbbb4e851e3"), - }, -} - -func TestVectors(t *testing.T) { - for i, v := range vectors { - if len(v.plaintext) == 0 { - v.plaintext = make([]byte, len(v.ciphertext)) - } - - var nonce [24]byte - copy(nonce[:], v.nonce) - - aead, err := New(v.key) - if err != nil { - t.Error(err) - } - - dst := aead.Seal(nil, nonce[:], v.plaintext, v.ad) - if !bytes.Equal(dst, v.ciphertext) { - t.Errorf("test %d: ciphertext mismatch:\n \t got: %s\n \t want: %s", i, toHex(dst), toHex(v.ciphertext)) - } - open, err := aead.Open(nil, nonce[:], dst, v.ad) - if err != nil { - t.Error(err) - } - if !bytes.Equal(open, v.plaintext) { - t.Errorf("test %d: plaintext mismatch:\n \t got: %s\n \t want: %s", i, string(open), string(v.plaintext)) - } - } -} - -var vectors = []struct { - key, nonce, ad, plaintext, ciphertext []byte -}{ - { - []byte{ - 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, - 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, - 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, - }, - []byte{0x07, 0x00, 0x00, 0x00, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b}, - []byte{0x50, 0x51, 0x52, 0x53, 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7}, - []byte( - "Ladies and Gentlemen of the class of '99: If I could offer you only one tip for the future, sunscreen would be it.", - ), - []byte{ - 0x45, 0x3c, 0x06, 0x93, 0xa7, 0x40, 0x7f, 0x04, 0xff, 0x4c, 0x56, - 0xae, 0xdb, 0x17, 0xa3, 0xc0, 0xa1, 0xaf, 0xff, 0x01, 0x17, 0x49, - 0x30, 0xfc, 0x22, 0x28, 0x7c, 0x33, 0xdb, 0xcf, 0x0a, 0xc8, 0xb8, - 0x9a, 0xd9, 0x29, 0x53, 0x0a, 0x1b, 0xb3, 0xab, 0x5e, 0x69, 0xf2, - 0x4c, 0x7f, 0x60, 0x70, 0xc8, 0xf8, 0x40, 0xc9, 0xab, 0xb4, 0xf6, - 0x9f, 0xbf, 0xc8, 0xa7, 0xff, 0x51, 0x26, 0xfa, 0xee, 0xbb, 0xb5, - 0x58, 0x05, 0xee, 0x9c, 0x1c, 0xf2, 0xce, 0x5a, 0x57, 0x26, 0x32, - 0x87, 0xae, 0xc5, 0x78, 0x0f, 0x04, 0xec, 0x32, 0x4c, 0x35, 0x14, - 0x12, 0x2c, 0xfc, 0x32, 0x31, 0xfc, 0x1a, 0x8b, 0x71, 0x8a, 0x62, - 0x86, 0x37, 0x30, 0xa2, 0x70, 0x2b, 0xb7, 0x63, 0x66, 0x11, 0x6b, - 0xed, 0x09, 0xe0, 0xfd, 0x5c, 0x6d, 0x84, 0xb6, 0xb0, 0xc1, 0xab, - 0xaf, 0x24, 0x9d, 0x5d, 0xd0, 0xf7, 0xf5, 0xa7, 0xea, - }, - }, -} diff --git a/crypto/xchacha20poly1305/xchachapoly.go b/crypto/xchacha20poly1305/xchachapoly.go deleted file mode 100644 index 2578520a5a..0000000000 --- a/crypto/xchacha20poly1305/xchachapoly.go +++ /dev/null @@ -1,259 +0,0 @@ -// Package xchacha20poly1305 creates an AEAD using hchacha, chacha, and poly1305 -// This allows for randomized nonces to be used in conjunction with chacha. 
-package xchacha20poly1305 - -import ( - "crypto/cipher" - "encoding/binary" - "errors" - "fmt" - - "golang.org/x/crypto/chacha20poly1305" -) - -// Implements crypto.AEAD -type xchacha20poly1305 struct { - key [KeySize]byte -} - -const ( - // KeySize is the size of the key used by this AEAD, in bytes. - KeySize = 32 - // NonceSize is the size of the nonce used with this AEAD, in bytes. - NonceSize = 24 - // TagSize is the size added from poly1305 - TagSize = 16 - // MaxPlaintextSize is the max size that can be passed into a single call of Seal - MaxPlaintextSize = (1 << 38) - 64 - // MaxCiphertextSize is the max size that can be passed into a single call of Open, - // this differs from plaintext size due to the tag - MaxCiphertextSize = (1 << 38) - 48 - - // sigma are constants used in xchacha. - // Unrolled from a slice so that they can be inlined, as slices can't be constants. - sigma0 = uint32(0x61707865) - sigma1 = uint32(0x3320646e) - sigma2 = uint32(0x79622d32) - sigma3 = uint32(0x6b206574) -) - -// New returns a new xchachapoly1305 AEAD -func New(key []byte) (cipher.AEAD, error) { - if len(key) != KeySize { - return nil, errors.New("xchacha20poly1305: bad key length") - } - ret := new(xchacha20poly1305) - copy(ret.key[:], key) - return ret, nil -} - -func (c *xchacha20poly1305) NonceSize() int { - return NonceSize -} - -func (c *xchacha20poly1305) Overhead() int { - return TagSize -} - -func (c *xchacha20poly1305) Seal(dst, nonce, plaintext, additionalData []byte) []byte { - if len(nonce) != NonceSize { - panic("xchacha20poly1305: bad nonce length passed to Seal") - } - - if uint64(len(plaintext)) > MaxPlaintextSize { - panic("xchacha20poly1305: plaintext too large") - } - - var subKey [KeySize]byte - var hNonce [16]byte - var subNonce [chacha20poly1305.NonceSize]byte - copy(hNonce[:], nonce[:16]) - - HChaCha20(&subKey, &hNonce, &c.key) - - // This can't error because we always provide a correctly sized key - chacha20poly1305, _ := chacha20poly1305.New(subKey[:]) - - copy(subNonce[4:], nonce[16:]) - - return chacha20poly1305.Seal(dst, subNonce[:], plaintext, additionalData) -} - -func (c *xchacha20poly1305) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) { - if len(nonce) != NonceSize { - return nil, fmt.Errorf("xchacha20poly1305: bad nonce length passed to Open") - } - if uint64(len(ciphertext)) > MaxCiphertextSize { - return nil, fmt.Errorf("xchacha20poly1305: ciphertext too large") - } - var subKey [KeySize]byte - var hNonce [16]byte - var subNonce [chacha20poly1305.NonceSize]byte - copy(hNonce[:], nonce[:16]) - - HChaCha20(&subKey, &hNonce, &c.key) - - // This can't error because we always provide a correctly sized key - chacha20poly1305, _ := chacha20poly1305.New(subKey[:]) - - copy(subNonce[4:], nonce[16:]) - - return chacha20poly1305.Open(dst, subNonce[:], ciphertext, additionalData) -} - -// HChaCha exported from -// https://github.com/aead/chacha20/blob/8b13a72661dae6e9e5dea04f344f0dc95ea29547/chacha/chacha_generic.go#L194 -// TODO: Add support for the different assembly instructions used there. 
- -// The MIT License (MIT) - -// Copyright (c) 2016 Andreas Auernhammer - -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: - -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. - -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -// HChaCha20 generates 32 pseudo-random bytes from a 128 bit nonce and a 256 bit secret key. -// It can be used as a key-derivation-function (KDF). -func HChaCha20(out *[32]byte, nonce *[16]byte, key *[32]byte) { hChaCha20Generic(out, nonce, key) } - -func hChaCha20Generic(out *[32]byte, nonce *[16]byte, key *[32]byte) { - v00 := sigma0 - v01 := sigma1 - v02 := sigma2 - v03 := sigma3 - v04 := binary.LittleEndian.Uint32(key[0:]) - v05 := binary.LittleEndian.Uint32(key[4:]) - v06 := binary.LittleEndian.Uint32(key[8:]) - v07 := binary.LittleEndian.Uint32(key[12:]) - v08 := binary.LittleEndian.Uint32(key[16:]) - v09 := binary.LittleEndian.Uint32(key[20:]) - v10 := binary.LittleEndian.Uint32(key[24:]) - v11 := binary.LittleEndian.Uint32(key[28:]) - v12 := binary.LittleEndian.Uint32(nonce[0:]) - v13 := binary.LittleEndian.Uint32(nonce[4:]) - v14 := binary.LittleEndian.Uint32(nonce[8:]) - v15 := binary.LittleEndian.Uint32(nonce[12:]) - - for i := 0; i < 20; i += 2 { - v00 += v04 - v12 ^= v00 - v12 = (v12 << 16) | (v12 >> 16) - v08 += v12 - v04 ^= v08 - v04 = (v04 << 12) | (v04 >> 20) - v00 += v04 - v12 ^= v00 - v12 = (v12 << 8) | (v12 >> 24) - v08 += v12 - v04 ^= v08 - v04 = (v04 << 7) | (v04 >> 25) - v01 += v05 - v13 ^= v01 - v13 = (v13 << 16) | (v13 >> 16) - v09 += v13 - v05 ^= v09 - v05 = (v05 << 12) | (v05 >> 20) - v01 += v05 - v13 ^= v01 - v13 = (v13 << 8) | (v13 >> 24) - v09 += v13 - v05 ^= v09 - v05 = (v05 << 7) | (v05 >> 25) - v02 += v06 - v14 ^= v02 - v14 = (v14 << 16) | (v14 >> 16) - v10 += v14 - v06 ^= v10 - v06 = (v06 << 12) | (v06 >> 20) - v02 += v06 - v14 ^= v02 - v14 = (v14 << 8) | (v14 >> 24) - v10 += v14 - v06 ^= v10 - v06 = (v06 << 7) | (v06 >> 25) - v03 += v07 - v15 ^= v03 - v15 = (v15 << 16) | (v15 >> 16) - v11 += v15 - v07 ^= v11 - v07 = (v07 << 12) | (v07 >> 20) - v03 += v07 - v15 ^= v03 - v15 = (v15 << 8) | (v15 >> 24) - v11 += v15 - v07 ^= v11 - v07 = (v07 << 7) | (v07 >> 25) - v00 += v05 - v15 ^= v00 - v15 = (v15 << 16) | (v15 >> 16) - v10 += v15 - v05 ^= v10 - v05 = (v05 << 12) | (v05 >> 20) - v00 += v05 - v15 ^= v00 - v15 = (v15 << 8) | (v15 >> 24) - v10 += v15 - v05 ^= v10 - v05 = (v05 << 7) | (v05 >> 25) - v01 += v06 - v12 ^= v01 - v12 = (v12 << 16) | (v12 >> 16) - v11 += v12 - v06 ^= v11 - v06 = (v06 << 12) | (v06 >> 20) - v01 += v06 - v12 ^= v01 - v12 = (v12 << 8) | (v12 >> 24) - v11 += v12 - v06 ^= v11 - v06 = (v06 << 7) | 
(v06 >> 25) - v02 += v07 - v13 ^= v02 - v13 = (v13 << 16) | (v13 >> 16) - v08 += v13 - v07 ^= v08 - v07 = (v07 << 12) | (v07 >> 20) - v02 += v07 - v13 ^= v02 - v13 = (v13 << 8) | (v13 >> 24) - v08 += v13 - v07 ^= v08 - v07 = (v07 << 7) | (v07 >> 25) - v03 += v04 - v14 ^= v03 - v14 = (v14 << 16) | (v14 >> 16) - v09 += v14 - v04 ^= v09 - v04 = (v04 << 12) | (v04 >> 20) - v03 += v04 - v14 ^= v03 - v14 = (v14 << 8) | (v14 >> 24) - v09 += v14 - v04 ^= v09 - v04 = (v04 << 7) | (v04 >> 25) - } - - binary.LittleEndian.PutUint32(out[0:], v00) - binary.LittleEndian.PutUint32(out[4:], v01) - binary.LittleEndian.PutUint32(out[8:], v02) - binary.LittleEndian.PutUint32(out[12:], v03) - binary.LittleEndian.PutUint32(out[16:], v12) - binary.LittleEndian.PutUint32(out[20:], v13) - binary.LittleEndian.PutUint32(out[24:], v14) - binary.LittleEndian.PutUint32(out[28:], v15) -} diff --git a/crypto/xchacha20poly1305/xchachapoly_test.go b/crypto/xchacha20poly1305/xchachapoly_test.go deleted file mode 100644 index 6e42e50ace..0000000000 --- a/crypto/xchacha20poly1305/xchachapoly_test.go +++ /dev/null @@ -1,113 +0,0 @@ -package xchacha20poly1305 - -import ( - "bytes" - crand "crypto/rand" - mrand "math/rand" - "testing" -) - -// The following test is taken from -// https://github.com/golang/crypto/blob/master/chacha20poly1305/chacha20poly1305_test.go#L69 -// It requires the below copyright notice, where "this source code" refers to the following function. -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found at the bottom of this file. -func TestRandom(t *testing.T) { - // Some random tests to verify Open(Seal) == Plaintext - for i := 0; i < 256; i++ { - var nonce [24]byte - var key [32]byte - - al := mrand.Intn(128) - pl := mrand.Intn(16384) - ad := make([]byte, al) - plaintext := make([]byte, pl) - _, err := crand.Read(key[:]) - if err != nil { - t.Errorf("error on read: %s", err) - } - _, err = crand.Read(nonce[:]) - if err != nil { - t.Errorf("error on read: %s", err) - } - _, err = crand.Read(ad) - if err != nil { - t.Errorf("error on read: %s", err) - } - _, err = crand.Read(plaintext) - if err != nil { - t.Errorf("error on read: %s", err) - } - - aead, err := New(key[:]) - if err != nil { - t.Fatal(err) - } - - ct := aead.Seal(nil, nonce[:], plaintext, ad) - - plaintext2, err := aead.Open(nil, nonce[:], ct, ad) - if err != nil { - t.Errorf("random #%d: Open failed", i) - continue - } - - if !bytes.Equal(plaintext, plaintext2) { - t.Errorf("random #%d: plaintext's don't match: got %x vs %x", i, plaintext2, plaintext) - continue - } - - if len(ad) > 0 { - alterAdIdx := mrand.Intn(len(ad)) - ad[alterAdIdx] ^= 0x80 - if _, err := aead.Open(nil, nonce[:], ct, ad); err == nil { - t.Errorf("random #%d: Open was successful after altering additional data", i) - } - ad[alterAdIdx] ^= 0x80 - } - - alterNonceIdx := mrand.Intn(aead.NonceSize()) - nonce[alterNonceIdx] ^= 0x80 - if _, err := aead.Open(nil, nonce[:], ct, ad); err == nil { - t.Errorf("random #%d: Open was successful after altering nonce", i) - } - nonce[alterNonceIdx] ^= 0x80 - - alterCtIdx := mrand.Intn(len(ct)) - ct[alterCtIdx] ^= 0x80 - if _, err := aead.Open(nil, nonce[:], ct, ad); err == nil { - t.Errorf("random #%d: Open was successful after altering ciphertext", i) - } - ct[alterCtIdx] ^= 0x80 - } -} - -// AFOREMENTIONED LICENSE -// Copyright (c) 2009 The Go Authors. All rights reserved. 
-// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/crypto/xsalsa20symmetric/symmetric.go b/crypto/xsalsa20symmetric/symmetric.go deleted file mode 100644 index 74cb4b1033..0000000000 --- a/crypto/xsalsa20symmetric/symmetric.go +++ /dev/null @@ -1,54 +0,0 @@ -package xsalsa20symmetric - -import ( - "errors" - "fmt" - - "golang.org/x/crypto/nacl/secretbox" - - "github.com/tendermint/tendermint/crypto" -) - -// TODO, make this into a struct that implements crypto.Symmetric. - -const nonceLen = 24 -const secretLen = 32 - -// secret must be 32 bytes long. Use something like Sha256(Bcrypt(passphrase)) -// The ciphertext is (secretbox.Overhead + 24) bytes longer than the plaintext. -func EncryptSymmetric(plaintext []byte, secret []byte) (ciphertext []byte) { - if len(secret) != secretLen { - panic(fmt.Sprintf("Secret must be 32 bytes long, got len %v", len(secret))) - } - nonce := crypto.CRandBytes(nonceLen) - nonceArr := [nonceLen]byte{} - copy(nonceArr[:], nonce) - secretArr := [secretLen]byte{} - copy(secretArr[:], secret) - ciphertext = make([]byte, nonceLen+secretbox.Overhead+len(plaintext)) - copy(ciphertext, nonce) - secretbox.Seal(ciphertext[nonceLen:nonceLen], plaintext, &nonceArr, &secretArr) - return ciphertext -} - -// secret must be 32 bytes long. Use something like Sha256(Bcrypt(passphrase)) -// The ciphertext is (secretbox.Overhead + 24) bytes longer than the plaintext. 
-func DecryptSymmetric(ciphertext []byte, secret []byte) (plaintext []byte, err error) { - if len(secret) != secretLen { - panic(fmt.Sprintf("Secret must be 32 bytes long, got len %v", len(secret))) - } - if len(ciphertext) <= secretbox.Overhead+nonceLen { - return nil, errors.New("ciphertext is too short") - } - nonce := ciphertext[:nonceLen] - nonceArr := [nonceLen]byte{} - copy(nonceArr[:], nonce) - secretArr := [secretLen]byte{} - copy(secretArr[:], secret) - plaintext = make([]byte, len(ciphertext)-nonceLen-secretbox.Overhead) - _, ok := secretbox.Open(plaintext[:0], ciphertext[nonceLen:], &nonceArr, &secretArr) - if !ok { - return nil, errors.New("ciphertext decryption failed") - } - return plaintext, nil -} diff --git a/crypto/xsalsa20symmetric/symmetric_test.go b/crypto/xsalsa20symmetric/symmetric_test.go deleted file mode 100644 index 160d49a9ef..0000000000 --- a/crypto/xsalsa20symmetric/symmetric_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package xsalsa20symmetric - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "golang.org/x/crypto/bcrypt" - - "github.com/tendermint/tendermint/crypto" -) - -func TestSimple(t *testing.T) { - - plaintext := []byte("sometext") - secret := []byte("somesecretoflengththirtytwo===32") - ciphertext := EncryptSymmetric(plaintext, secret) - plaintext2, err := DecryptSymmetric(ciphertext, secret) - - require.Nil(t, err, "%+v", err) - assert.Equal(t, plaintext, plaintext2) -} - -func TestSimpleWithKDF(t *testing.T) { - - plaintext := []byte("sometext") - secretPass := []byte("somesecret") - secret, err := bcrypt.GenerateFromPassword(secretPass, 12) - if err != nil { - t.Error(err) - } - secret = crypto.Sha256(secret) - - ciphertext := EncryptSymmetric(plaintext, secret) - plaintext2, err := DecryptSymmetric(ciphertext, secret) - - require.Nil(t, err, "%+v", err) - assert.Equal(t, plaintext, plaintext2) -} diff --git a/dashcore/rpc/client.go b/dash/core/client.go similarity index 92% rename from dashcore/rpc/client.go rename to dash/core/client.go index 3f3546fd9e..ae514f50da 100644 --- a/dashcore/rpc/client.go +++ b/dash/core/client.go @@ -1,4 +1,4 @@ -package dashcore +package core import ( "fmt" @@ -13,7 +13,22 @@ import ( const ModuleName = "rpcclient" +// QuorumVerifier represents subset of priv validator features that +// allows verification of threshold signatures. 
+type QuorumVerifier interface { + // QuorumVerify verifies quorum signature + QuorumVerify( + quorumType btcjson.LLMQType, + requestID bytes.HexBytes, + messageHash bytes.HexBytes, + signature bytes.HexBytes, + quorumHash bytes.HexBytes, + ) (bool, error) +} + type Client interface { + QuorumVerifier + // QuorumInfo returns quorum info QuorumInfo(quorumType btcjson.LLMQType, quorumHash crypto.QuorumHash) (*btcjson.QuorumInfoResult, error) // MasternodeStatus returns masternode status @@ -29,7 +44,6 @@ type Client interface { messageHash bytes.HexBytes, quorumHash bytes.HexBytes, ) (*btcjson.QuorumSignResult, error) - // QuorumVerify verifies quorum signature QuorumVerify( quorumType btcjson.LLMQType, requestID bytes.HexBytes, diff --git a/dashcore/rpc/mock.go b/dash/core/mock.go similarity index 92% rename from dashcore/rpc/mock.go rename to dash/core/mock.go index 213d24a4ea..49063527b0 100644 --- a/dashcore/rpc/mock.go +++ b/dash/core/mock.go @@ -1,4 +1,4 @@ -package dashcore +package core import ( "context" @@ -47,12 +47,13 @@ func (mc *MockClient) QuorumInfo( quorumType btcjson.LLMQType, quorumHash crypto.QuorumHash, ) (*btcjson.QuorumInfoResult, error) { + ctx := context.Background() var members []btcjson.QuorumMember - proTxHash, err := mc.localPV.GetProTxHash(context.Background()) + proTxHash, err := mc.localPV.GetProTxHash(ctx) if err != nil { panic(err) } - pk, err := mc.localPV.GetPubKey(context.Background(), quorumHash) + pk, err := mc.localPV.GetPubKey(ctx, quorumHash) if err != nil { panic(err) } @@ -64,11 +65,11 @@ func (mc *MockClient) QuorumInfo( PubKeyShare: pk.HexString(), }) } - tpk, err := mc.localPV.GetThresholdPublicKey(context.Background(), quorumHash) + tpk, err := mc.localPV.GetThresholdPublicKey(ctx, quorumHash) if err != nil { panic(err) } - height, err := mc.localPV.GetHeight(context.Background(), quorumHash) + height, err := mc.localPV.GetHeight(ctx, quorumHash) if err != nil { panic(err) } @@ -82,7 +83,8 @@ func (mc *MockClient) QuorumInfo( } func (mc *MockClient) MasternodeStatus() (*btcjson.MasternodeStatusResult, error) { - proTxHash, err := mc.localPV.GetProTxHash(context.Background()) + ctx := context.Background() + proTxHash, err := mc.localPV.GetProTxHash(ctx) if err != nil { panic(err) } diff --git a/dash/llmq/llmq.go b/dash/llmq/llmq.go index 170403c37f..65cc23896a 100644 --- a/dash/llmq/llmq.go +++ b/dash/llmq/llmq.go @@ -1,6 +1,7 @@ package llmq import ( + cryptorand "crypto/rand" "errors" "fmt" "io" @@ -91,7 +92,7 @@ func Generate(proTxHashes []crypto.ProTxHash, opts ...optionFunc) (*Data, error) conf := llmqConfig{ proTxHashes: bls12381.ReverseProTxHashes(proTxHashes), threshold: len(proTxHashes)*2/3 + 1, - seedReader: crypto.CReader(), + seedReader: cryptorand.Reader, } for _, opt := range opts { opt(&conf) diff --git a/dash/quorum/mock/dash_dialer.go b/dash/quorum/mock/dash_dialer.go index aff3cabce4..59e4b30753 100644 --- a/dash/quorum/mock/dash_dialer.go +++ b/dash/quorum/mock/dash_dialer.go @@ -3,8 +3,8 @@ package mock import ( "encoding/binary" "encoding/hex" + "sync" - "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/types" ) diff --git a/dash/quorum/selectpeers/dip6.go b/dash/quorum/selectpeers/dip6.go index d4621e10d8..2999bc7328 100644 --- a/dash/quorum/selectpeers/dip6.go +++ b/dash/quorum/selectpeers/dip6.go @@ -25,7 +25,7 @@ func NewDIP6ValidatorSelector(quorumHash bytes.HexBytes) ValidatorSelector { return &dip6PeerSelector{quorumHash: quorumHash} 
} -// SelectValidator implements ValidtorSelector. +// SelectValidators implements ValidatorSelector. // SelectValidators selects some validators from `validatorSetMembers`, according to the algorithm // described in DIP-6 https://github.com/dashpay/dips/blob/master/dip-0006.md func (s *dip6PeerSelector) SelectValidators( diff --git a/dash/quorum/selectpeers/sortable_validator.go b/dash/quorum/selectpeers/sortable_validator.go index 78e86e18fb..35ff19f8f8 100644 --- a/dash/quorum/selectpeers/sortable_validator.go +++ b/dash/quorum/selectpeers/sortable_validator.go @@ -2,7 +2,6 @@ package selectpeers import ( "bytes" - "crypto/sha256" "github.com/tendermint/tendermint/crypto" tmbytes "github.com/tendermint/tendermint/libs/bytes" @@ -45,6 +44,5 @@ func calculateDIP6SortKey(proTxHash, quorumHash tmbytes.HexBytes) []byte { keyBytes := make([]byte, 0, len(proTxHash)+len(quorumHash)) keyBytes = append(keyBytes, proTxHash...) keyBytes = append(keyBytes, quorumHash...) - keySHA := sha256.Sum256(keyBytes) - return keySHA[:] + return crypto.Checksum(keyBytes) } diff --git a/dash/quorum/selectpeers/sorted_validator_list.go b/dash/quorum/selectpeers/sorted_validator_list.go index 592f902d49..5bdfb6a8b8 100644 --- a/dash/quorum/selectpeers/sorted_validator_list.go +++ b/dash/quorum/selectpeers/sorted_validator_list.go @@ -23,7 +23,7 @@ func newSortedValidatorList(validators []*types.Validator, quorumHash tmbytes.He return ret } -// Sort() sorts this sortableValidatorList +// Sort sorts this sortableValidatorList func (vl sortedValidatorList) Sort() { sort.Sort(vl) } diff --git a/dash/quorum/validator_conn_executor.go b/dash/quorum/validator_conn_executor.go index 7521d27690..20de216f46 100644 --- a/dash/quorum/validator_conn_executor.go +++ b/dash/quorum/validator_conn_executor.go @@ -4,14 +4,16 @@ import ( "context" "errors" "fmt" + "sync" "time" "github.com/hashicorp/go-multierror" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/dash/quorum/selectpeers" - "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/p2p" + tmpubsub "github.com/tendermint/tendermint/internal/pubsub" tmbytes "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" @@ -41,11 +43,12 @@ type optionFunc func(vc *ValidatorConnExecutor) error // Note that we mark peers that are members of active validator set as Persistent, so p2p subsystem // will retry the connection if it fails. type ValidatorConnExecutor struct { - service.BaseService + *service.BaseService + logger log.Logger proTxHash types.ProTxHash - eventBus *types.EventBus + eventBus *eventbus.EventBus dialer p2p.DashDialer - subscription types.Subscription + subscription eventbus.Subscription // validatorSetMembers contains validators active in the current Validator Set, indexed by node ID validatorSetMembers validatorMap @@ -72,11 +75,12 @@ var ( // Don't forget to Start() and Stop() the service.
func NewValidatorConnExecutor( proTxHash types.ProTxHash, - eventBus *types.EventBus, + eventBus *eventbus.EventBus, connMgr p2p.DashDialer, opts ...optionFunc, ) (*ValidatorConnExecutor, error) { vc := &ValidatorConnExecutor{ + logger: log.NewNopLogger(), proTxHash: proTxHash, eventBus: eventBus, dialer: connMgr, @@ -89,8 +93,7 @@ func NewValidatorConnExecutor( resolverAddressBook: vc.dialer, resolverTCP: NewTCPNodeIDResolver(), } - baseService := service.NewBaseService(log.NewNopLogger(), validatorConnExecutorName, vc) - vc.BaseService = *baseService + vc.BaseService = service.NewBaseService(log.NewNopLogger(), validatorConnExecutorName, vc) for _, opt := range opts { err := opt(vc) @@ -119,27 +122,27 @@ func WithValidatorsSet(valSet *types.ValidatorSet) func(vc *ValidatorConnExecuto // WithLogger sets a logger func WithLogger(logger log.Logger) func(vc *ValidatorConnExecutor) error { return func(vc *ValidatorConnExecutor) error { - vc.Logger = logger + vc.logger = logger return nil } } // OnStart implements Service to subscribe to Validator Update events -func (vc *ValidatorConnExecutor) OnStart() error { +func (vc *ValidatorConnExecutor) OnStart(ctx context.Context) error { if err := vc.subscribe(); err != nil { return err } err := vc.updateConnections() if err != nil { - vc.Logger.Error("Warning: ValidatorConnExecutor OnStart failed", "error", err) + vc.logger.Error("Warning: ValidatorConnExecutor OnStart failed", "error", err) } go func() { var err error for err == nil { - err = vc.receiveEvents() + err = vc.receiveEvents(ctx) } - vc.Logger.Error("ValidatorConnExecutor goroutine finished", "reason", err) + vc.logger.Error("ValidatorConnExecutor goroutine finished", "reason", err) }() return nil } @@ -151,7 +154,7 @@ func (vc *ValidatorConnExecutor) OnStop() { defer cancel() err := vc.eventBus.UnsubscribeAll(ctx, validatorConnExecutorName) if err != nil { - vc.Logger.Error("cannot unsubscribe from channels", "error", err) + vc.logger.Error("cannot unsubscribe from channels", "error", err) } vc.eventBus = nil } @@ -161,11 +164,13 @@ func (vc *ValidatorConnExecutor) OnStop() { func (vc *ValidatorConnExecutor) subscribe() error { ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout) defer cancel() - updatesSub, err := vc.eventBus.Subscribe( + updatesSub, err := vc.eventBus.SubscribeWithArgs( ctx, - validatorConnExecutorName, - types.EventQueryValidatorSetUpdates, - vc.EventBusCapacity, + tmpubsub.SubscribeArgs{ + ClientID: validatorConnExecutorName, + Query: types.EventQueryValidatorSetUpdates, + Limit: vc.EventBusCapacity, + }, ) if err != nil { return err @@ -177,45 +182,46 @@ func (vc *ValidatorConnExecutor) subscribe() error { // receiveEvents processes received events and executes all the logic. // Returns non-nil error only if fatal error occurred and the main goroutine should be terminated. 
-func (vc *ValidatorConnExecutor) receiveEvents() error { - vc.Logger.Debug("ValidatorConnExecutor: waiting for an event") - select { - case msg := <-vc.subscription.Out(): - event, ok := msg.Data().(types.EventDataValidatorSetUpdate) - if !ok { - return fmt.Errorf("invalid type of validator set update message: %T", event) - } - if err := vc.handleValidatorUpdateEvent(event); err != nil { - vc.Logger.Error("cannot handle validator update", "error", err) - return nil // non-fatal, so no error returned to continue the loop +func (vc *ValidatorConnExecutor) receiveEvents(ctx context.Context) error { + vc.logger.Debug("ValidatorConnExecutor: waiting for an event") + sCtx, cancel := context.WithCancel(ctx) // TODO check value for correctness + defer cancel() + msg, err := vc.subscription.Next(sCtx) + if err != nil { + if errors.Is(err, context.Canceled) { + return fmt.Errorf("subscription canceled due to error: %w", sCtx.Err()) } - vc.Logger.Debug("validator updates processed successfully", "event", event) - case <-vc.subscription.Canceled(): - return fmt.Errorf("subscription canceled due to error: %w", vc.subscription.Err()) - case <-vc.BaseService.Quit(): - return fmt.Errorf("quit signal received") + return err } - + event, ok := msg.Data().(types.EventDataValidatorSetUpdate) + if !ok { + return fmt.Errorf("invalid type of validator set update message: %T", event) + } + if err := vc.handleValidatorUpdateEvent(event); err != nil { + vc.logger.Error("cannot handle validator update", "error", err) + return nil // non-fatal, so no error returned to continue the loop + } + vc.logger.Debug("validator updates processed successfully", "event", event) return nil } -// handleValidatorUpdateEvent checks and executes event of type EventDataValidatorSetUpdates, received from event bus. +// handleValidatorUpdateEvent checks and executes event of type EventDataValidatorSetUpdate, received from event bus. 
func (vc *ValidatorConnExecutor) handleValidatorUpdateEvent(event types.EventDataValidatorSetUpdate) error { vc.mux.Lock() defer vc.mux.Unlock() if len(event.ValidatorSetUpdates) < 1 { - vc.Logger.Debug("no validators in ValidatorUpdates") + vc.logger.Debug("no validators in ValidatorUpdates") return nil // not really an error } vc.validatorSetMembers = newValidatorMap(event.ValidatorSetUpdates) if len(event.QuorumHash) > 0 { if err := vc.setQuorumHash(event.QuorumHash); err != nil { - vc.Logger.Error("received invalid quorum hash", "error", err) + vc.logger.Error("received invalid quorum hash", "error", err) return fmt.Errorf("received invalid quorum hash: %w", err) } } else { - vc.Logger.Debug("received empty quorum hash") + vc.logger.Debug("received empty quorum hash") } if err := vc.updateConnections(); err != nil { return fmt.Errorf("inter-validator set connections error: %w", err) @@ -257,7 +263,7 @@ func (vc *ValidatorConnExecutor) resolveNodeID(va *types.ValidatorAddress) error va.NodeID = address.NodeID return nil // success } - vc.Logger.Debug( + vc.logger.Debug( "warning: validator node id lookup method failed", "url", va.String(), "method", method, @@ -298,7 +304,7 @@ func (vc *ValidatorConnExecutor) ensureValidatorsHaveNodeIDs(validators []*types for _, validator := range validators { err := vc.resolveNodeID(&validator.NodeAddress) if err != nil { - vc.Logger.Error("cannot determine node id for validator, skipping", "url", validator.String(), "error", err) + vc.logger.Error("cannot determine node id for validator, skipping", "url", validator.String(), "error", err) continue } results = append(results, validator) @@ -311,7 +317,7 @@ func (vc *ValidatorConnExecutor) disconnectValidator(validator types.Validator) return err } id := validator.NodeAddress.NodeID - vc.Logger.Debug("disconnecting Validator", "validator", validator, "id", id, "address", validator.NodeAddress.String()) + vc.logger.Debug("disconnecting Validator", "validator", validator, "id", id, "address", validator.NodeAddress.String()) if err := vc.dialer.DisconnectAsync(id); err != nil { return err } @@ -327,10 +333,10 @@ func (vc *ValidatorConnExecutor) disconnectValidators(exceptions validatorMap) e if err := vc.disconnectValidator(validator); err != nil { if !errors.Is(err, errPeerNotFound) { // no return, as we see it as non-fatal - vc.Logger.Error("cannot disconnect Validator", "error", err) + vc.logger.Error("cannot disconnect Validator", "error", err) continue } - vc.Logger.Debug("Validator already disconnected", "error", err) + vc.logger.Debug("Validator already disconnected", "error", err) // We still delete the validator from vc.connectedValidators } delete(vc.connectedValidators, currentKey) @@ -350,7 +356,7 @@ func (vc *ValidatorConnExecutor) isValidator() bool { func (vc *ValidatorConnExecutor) updateConnections() error { // We only do something if we are part of new ValidatorSet if !vc.isValidator() { - vc.Logger.Debug("not a member of active ValidatorSet") + vc.logger.Debug("not a member of active ValidatorSet") // We need to disconnect connected validators. It needs to be done explicitly // because they are marked as persistent and will never disconnect themselves. 
return vc.disconnectValidators(validatorMap{}) @@ -359,22 +365,22 @@ func (vc *ValidatorConnExecutor) updateConnections() error { // Find new newValidators newValidators, err := vc.selectValidators() if err != nil { - vc.Logger.Error("cannot determine list of validators to connect", "error", err) + vc.logger.Error("cannot determine list of validators to connect", "error", err) // no return, as we still need to disconnect unused validators } // Disconnect existing validators unless they are selected to be connected again if err := vc.disconnectValidators(newValidators); err != nil { return fmt.Errorf("cannot disconnect unused validators: %w", err) } - vc.Logger.Debug("filtering validators", "validators", newValidators.String()) + vc.logger.Debug("filtering validators", "validators", newValidators.String()) // ensure that we can connect to all validators newValidators = vc.filterAddresses(newValidators) // Connect to new validators - vc.Logger.Debug("dialing validators", "validators", newValidators.String()) + vc.logger.Debug("dialing validators", "validators", newValidators.String()) if err := vc.dial(newValidators); err != nil { return fmt.Errorf("cannot dial validators: %w", err) } - vc.Logger.Debug("connected to Validators", "validators", newValidators.String()) + vc.logger.Debug("connected to Validators", "validators", newValidators.String()) return nil } @@ -383,20 +389,20 @@ func (vc *ValidatorConnExecutor) filterAddresses(validators validatorMap) valida filtered := make(validatorMap, len(validators)) for id, validator := range validators { if vc.proTxHash != nil && string(id) == vc.proTxHash.String() { - vc.Logger.Debug("validator is ourself", "id", id, "address", validator.NodeAddress.String()) + vc.logger.Debug("validator is ourself", "id", id, "address", validator.NodeAddress.String()) continue } if err := validator.ValidateBasic(); err != nil { - vc.Logger.Debug("validator address is invalid", "id", id, "address", validator.NodeAddress.String()) + vc.logger.Debug("validator address is invalid", "id", id, "address", validator.NodeAddress.String()) continue } if vc.connectedValidators.contains(validator) { - vc.Logger.Debug("validator already connected", "id", id) + vc.logger.Debug("validator already connected", "id", id) continue } if vc.dialer.IsDialingOrConnected(validator.NodeAddress.NodeID) { - vc.Logger.Debug("already dialing this validator", "id", id, "address", validator.NodeAddress.String()) + vc.logger.Debug("already dialing this validator", "id", id, "address", validator.NodeAddress.String()) continue } @@ -416,7 +422,7 @@ func (vc *ValidatorConnExecutor) dial(vals validatorMap) error { vc.connectedValidators[id] = validator address := nodeAddress(validator.NodeAddress) if err := vc.dialer.ConnectAsync(address); err != nil { - vc.Logger.Error("cannot dial validator", "address", address.String(), "err", err) + vc.logger.Error("cannot dial validator", "address", address.String(), "err", err) return fmt.Errorf("cannot dial validator %s: %w", address.String(), err) } } diff --git a/dash/quorum/validator_conn_executor_test.go b/dash/quorum/validator_conn_executor_test.go index 78fdbc84e9..376376fa93 100644 --- a/dash/quorum/validator_conn_executor_test.go +++ b/dash/quorum/validator_conn_executor_test.go @@ -7,6 +7,7 @@ import ( "time" "github.com/stretchr/testify/assert" + testifymock "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" @@ -15,9 +16,11 @@ import ( "github.com/tendermint/tendermint/crypto" 
"github.com/tendermint/tendermint/dash/quorum/mock" "github.com/tendermint/tendermint/dash/quorum/selectpeers" - mmock "github.com/tendermint/tendermint/internal/mempool/mock" + "github.com/tendermint/tendermint/internal/eventbus" + "github.com/tendermint/tendermint/internal/mempool/mocks" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/proxy" + "github.com/tendermint/tendermint/internal/pubsub" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/store" tmbytes "github.com/tendermint/tendermint/libs/bytes" @@ -27,12 +30,8 @@ import ( ) const ( - mySeedID uint16 = math.MaxUint16 - 1 -) - -var ( + mySeedID uint16 = math.MaxUint16 - 1 chainID = "execution_chain" - testPartSize uint32 = 65536 nTxsPerBlock = 10 ) @@ -342,54 +341,64 @@ func TestValidatorConnExecutor_ValidatorUpdatesSequence(t *testing.T) { // TestEndBlock verifies if ValidatorConnExecutor is called correctly during processing of EndBlock // message from the ABCI app. -func TestEndBlock(t *testing.T) { +func TestFinalizeBlock(t *testing.T) { const timeout = 3 * time.Second // how long we'll wait for connection + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + app := newTestApp() + logger := log.NewTestingLogger(t) - clientCreator := abciclient.NewLocalCreator(app) - require.NotNil(t, clientCreator) - proxyApp := proxy.NewAppConns(clientCreator, proxy.NopMetrics()) + client := abciclient.NewLocalClient(logger, app) + require.NotNil(t, client) + proxyApp := proxy.New(client, logger, proxy.NopMetrics()) require.NotNil(t, proxyApp) - err := proxyApp.Start() + err := proxyApp.Start(ctx) require.Nil(t, err) - defer proxyApp.Stop() //nolint:errcheck // ignore for tests state, stateDB, _ := makeState(3, 1) nodeProTxHash := state.Validators.Validators[0].ProTxHash stateStore := sm.NewStore(stateDB) blockStore := store.NewBlockStore(dbm.NewMemDB()) + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) + + mp := mocks.NewMempool(t) + mp.On("Lock").Return() + mp.On("Unlock").Return() + mp.On("FlushAppConn", testifymock.Anything).Return(nil) + mp.On("Update", + testifymock.Anything, + testifymock.Anything, + testifymock.Anything, + testifymock.Anything, + testifymock.Anything, + testifymock.Anything).Return(nil) blockExec := sm.NewBlockExecutor( stateStore, - log.TestingLogger(), - proxyApp.Consensus(), - proxyApp.Query(), - mmock.Mempool{}, + logger, + proxyApp, + mp, sm.EmptyEvidencePool{}, blockStore, - nil, + eventBus, + sm.NopMetrics(), ) - eventBus := types.NewEventBus() - err = eventBus.Start() - require.NoError(t, err) - defer eventBus.Stop() //nolint:errcheck // ignore for tests - - blockExec.SetEventBus(eventBus) - - updatesSub, err := eventBus.Subscribe( - context.Background(), - "TestEndBlockValidatorUpdates", - types.EventQueryValidatorSetUpdates, + updatesSub, err := eventBus.SubscribeWithArgs( + ctx, + pubsub.SubscribeArgs{ + ClientID: "TestEndBlockValidatorUpdates", + Query: types.EventQueryValidatorSetUpdates, + }, ) require.NoError(t, err) block := makeBlock(state, 1, new(types.Commit)) - blockID := types.BlockID{ - Hash: block.Hash(), - PartSetHeader: block.MakePartSet(testPartSize).Header(), - } + blockID, err := block.BlockID() + require.NoError(t, err) vals := state.Validators proTxHashes := vals.GetProTxHashes() @@ -411,13 +420,12 @@ func TestEndBlock(t *testing.T) { proTxHash := newVals.Validators[0].ProTxHash vc, err := NewValidatorConnExecutor(proTxHash, eventBus, sw) 
require.NoError(t, err) - err = vc.Start() + err = vc.Start(ctx) require.NoError(t, err) - defer func() { err := vc.Stop(); require.NoError(t, err) }() app.ValidatorSetUpdates[1] = newVals.ABCIEquivalentValidatorUpdates() - state, err = blockExec.ApplyBlock(state, nodeProTxHash, blockID, block) + state, err = blockExec.ApplyBlock(ctx, state, nodeProTxHash, blockID, block) require.Nil(t, err) // test new validator was added to NextValidators require.Equal(t, state.Validators.Size()+100, state.NextValidators.Size()) @@ -426,31 +434,29 @@ func TestEndBlock(t *testing.T) { assert.Contains(t, nextValidatorsProTxHashes, addProTxHash) } + sCtx, sCancel := context.WithTimeout(ctx, 1*time.Second) + defer sCancel() // test we threw an event - select { - case msg := <-updatesSub.Out(): - event, ok := msg.Data().(types.EventDataValidatorSetUpdate) - require.True( + msg, err := updatesSub.Next(sCtx) + require.NoError(t, err) + + event, ok := msg.Data().(types.EventDataValidatorSetUpdate) + require.True( + t, + ok, + "Expected event of type EventDataValidatorSetUpdate, got %T", + msg.Data(), + ) + if assert.NotEmpty(t, event.ValidatorSetUpdates) { + for _, addProTxHash := range addProTxHashes { + assert.Contains(t, mock.ValidatorsProTxHashes(event.ValidatorSetUpdates), addProTxHash) + } + assert.EqualValues( t, - ok, - "Expected event of type EventDataValidatorSetUpdates, got %T", - msg.Data(), + types.DefaultDashVotingPower, + event.ValidatorSetUpdates[1].VotingPower, ) - if assert.NotEmpty(t, event.ValidatorSetUpdates) { - for _, addProTxHash := range addProTxHashes { - assert.Contains(t, mock.ValidatorsProTxHashes(event.ValidatorSetUpdates), addProTxHash) - } - assert.EqualValues( - t, - types.DefaultDashVotingPower, - event.ValidatorSetUpdates[1].VotingPower, - ) - assert.NotEmpty(t, event.QuorumHash) - } - case <-updatesSub.Canceled(): - t.Fatalf("updatesSub was canceled (reason: %v)", updatesSub.Err()) - case <-time.After(1 * time.Second): - t.Fatal("Did not receive EventValidatorSetUpdates within 1 sec.") + assert.NotEmpty(t, event.QuorumHash) } // ensure some history got generated inside the Switch; we expect 1 dial event @@ -475,7 +481,10 @@ func executeTestCase(t *testing.T, tc testCase) { // const TIMEOUT = 100 * time.Millisecond const TIMEOUT = 5 * time.Second - eventBus, sw, vc := setup(t, tc.me) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + eventBus, sw, vc := setup(ctx, t, tc.me) defer cleanup(t, eventBus, sw, vc) for updateID, update := range tc.validatorUpdates { @@ -560,28 +569,29 @@ func allowedParamsDefaults( // setup creates ValidatorConnExecutor and some dependencies. // Use `defer cleanup()` to free the resources. 
func setup( + ctx context.Context, t *testing.T, me *types.Validator, -) (eventBus *types.EventBus, sw *mock.DashDialer, vc *ValidatorConnExecutor) { - eventBus = types.NewEventBus() - err := eventBus.Start() +) (eventBus *eventbus.EventBus, sw *mock.DashDialer, vc *ValidatorConnExecutor) { + logger := log.NewTestingLogger(t) + eventBus = eventbus.NewDefault(logger) + err := eventBus.Start(ctx) require.NoError(t, err) sw = mock.NewDashDialer() - proTxHash := me.ProTxHash - vc, err = NewValidatorConnExecutor(proTxHash, eventBus, sw, WithLogger(log.TestingLogger())) + vc, err = NewValidatorConnExecutor(me.ProTxHash, eventBus, sw, WithLogger(logger)) require.NoError(t, err) - err = vc.Start() + err = vc.Start(ctx) require.NoError(t, err) return eventBus, sw, vc } // cleanup frees some resources allocated for tests -func cleanup(t *testing.T, bus *types.EventBus, dialer p2p.DashDialer, vc *ValidatorConnExecutor) { - assert.NoError(t, bus.Stop()) - assert.NoError(t, vc.Stop()) +func cleanup(t *testing.T, bus *eventbus.EventBus, dialer p2p.DashDialer, vc *ValidatorConnExecutor) { + bus.Stop() + vc.Stop() } // SOME UTILS // @@ -629,9 +639,10 @@ func makeState(nVals int, height int64) (sm.State, dbm.DB, map[string]types.Priv } func makeBlock(state sm.State, height int64, commit *types.Commit) *types.Block { - block, _ := state.MakeBlock(height, nil, makeTxs(state.LastBlockHeight), - commit, nil, state.Validators.GetProposer().ProTxHash, 0) - return block + return state.MakeBlock( + height, nil, makeTxs(state.LastBlockHeight), + commit, nil, state.Validators.GetProposer().ProTxHash, 0, + ) } // TEST APP // @@ -640,48 +651,47 @@ func makeBlock(state sm.State, height int64, commit *types.Commit) *types.Block type testApp struct { abci.BaseApplication - ByzantineValidators []abci.Evidence + ByzantineValidators []abci.Misbehavior ValidatorSetUpdates map[int64]*abci.ValidatorSetUpdate } func newTestApp() *testApp { return &testApp{ - ByzantineValidators: []abci.Evidence{}, + ByzantineValidators: []abci.Misbehavior{}, ValidatorSetUpdates: map[int64]*abci.ValidatorSetUpdate{}, } } var _ abci.Application = (*testApp)(nil) -func (app *testApp) Info(req abci.RequestInfo) (resInfo abci.ResponseInfo) { - return abci.ResponseInfo{} +func (app *testApp) Info(context.Context, *abci.RequestInfo) (*abci.ResponseInfo, error) { + return &abci.ResponseInfo{}, nil } -func (app *testApp) BeginBlock(req abci.RequestBeginBlock) abci.ResponseBeginBlock { +func (app *testApp) FinalizeBlock(_ context.Context, req *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) { app.ByzantineValidators = req.ByzantineValidators - return abci.ResponseBeginBlock{} -} - -func (app *testApp) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock { - return abci.ResponseEndBlock{ + txs := make([]*abci.ExecTxResult, 0, len(req.Txs)) + for _, tx := range req.Txs { + txs = append(txs, &abci.ExecTxResult{Data: tx}) + } + return &abci.ResponseFinalizeBlock{ + Events: []abci.Event{}, + TxResults: txs, ValidatorSetUpdate: app.ValidatorSetUpdates[req.Height], ConsensusParamUpdates: &tmproto.ConsensusParams{ - Version: &tmproto.VersionParams{ - AppVersion: 1}}} -} - -func (app *testApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx { - return abci.ResponseDeliverTx{Events: []abci.Event{}} + Version: &tmproto.VersionParams{AppVersion: 1}, + }, + }, nil } -func (app *testApp) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx { - return abci.ResponseCheckTx{} +func (app *testApp) CheckTx(_ context.Context, req 
*abci.RequestCheckTx) (*abci.ResponseCheckTx, error) { + return &abci.ResponseCheckTx{Code: abci.CodeTypeOK}, nil } -func (app *testApp) Commit() abci.ResponseCommit { - return abci.ResponseCommit{RetainHeight: 1} +func (app *testApp) Commit(_ context.Context) (*abci.ResponseCommit, error) { + return &abci.ResponseCommit{RetainHeight: 1}, nil } -func (app *testApp) Query(reqQuery abci.RequestQuery) (resQuery abci.ResponseQuery) { - return +func (app *testApp) Query(_ context.Context, req *abci.RequestQuery) (*abci.ResponseQuery, error) { + return &abci.ResponseQuery{}, nil } diff --git a/docs/DOCS_README.md b/docs/DOCS_README.md index c1ab1580ab..da06785d57 100644 --- a/docs/DOCS_README.md +++ b/docs/DOCS_README.md @@ -11,9 +11,9 @@ and other supported release branches. There is a [GitHub Actions workflow](https://github.com/tendermint/docs/actions/workflows/deployment.yml) in the `tendermint/docs` repository that clones and builds the documentation -site from the contents of this `docs` directory, for `master` and for each -supported release branch. Under the hood, this workflow runs `make build-docs` -from the [Makefile](../Makefile#L214). +site from the contents of this `docs` directory, for `master` and for the +backport branch of each supported release. Under the hood, this workflow runs +`make build-docs` from the [Makefile](../Makefile#L214). The list of supported versions are defined in [`config.js`](./.vuepress/config.js), which defines the UI menu on the documentation site, and also in diff --git a/docs/README.md b/docs/README.md index a9b6925323..3137d611a7 100644 --- a/docs/README.md +++ b/docs/README.md @@ -21,7 +21,7 @@ Tendermint?](introduction/what-is-tendermint.md). To get started quickly with an example application, see the [quick start guide](introduction/quick-start.md). -To learn about application development on Tendermint, see the [Application Blockchain Interface](https://github.com/tendermint/spec/tree/master/spec/abci). +To learn about application development on Tendermint, see the [Application Blockchain Interface](../spec/abci). For more details on using Tendermint, see the respective documentation for [Tendermint Core](tendermint-core/), [benchmarking and monitoring](tools/), and [network deployments](nodes/). 
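Several of the hunks above and below reflect the same underlying ABCI change: `BeginBlock`, `DeliverTx`, and `EndBlock` were folded into a single `FinalizeBlock` call. A minimal sketch of an application in the new shape, assembled from the `testApp` hunks above (the package name and `abci` import path are assumptions, since the diff elides them):

```go
package quorum_test

import (
	"context"

	abci "github.com/tendermint/tendermint/abci/types"
)

// testApp mirrors the migrated test application: FinalizeBlock returns
// per-transaction results plus the validator-set update that was
// previously returned from EndBlock.
type testApp struct {
	abci.BaseApplication // no-op defaults for the methods not shown

	ValidatorSetUpdates map[int64]*abci.ValidatorSetUpdate
}

func (app *testApp) FinalizeBlock(
	_ context.Context,
	req *abci.RequestFinalizeBlock,
) (*abci.ResponseFinalizeBlock, error) {
	// One ExecTxResult per transaction replaces the per-tx DeliverTx responses.
	txs := make([]*abci.ExecTxResult, 0, len(req.Txs))
	for _, tx := range req.Txs {
		txs = append(txs, &abci.ExecTxResult{Data: tx})
	}
	return &abci.ResponseFinalizeBlock{
		TxResults:          txs,
		ValidatorSetUpdate: app.ValidatorSetUpdates[req.Height],
	}, nil
}
```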
diff --git a/docs/app-dev/abci-cli.md b/docs/app-dev/abci-cli.md index 9768c32950..7649b7cde7 100644 --- a/docs/app-dev/abci-cli.md +++ b/docs/app-dev/abci-cli.md @@ -27,17 +27,17 @@ Usage: abci-cli [command] Available Commands: - batch Run a batch of abci commands against an application - check_tx Validate a tx - commit Commit the application state and return the Merkle root hash - console Start an interactive abci console for multiple commands - deliver_tx Deliver a new tx to the application - kvstore ABCI demo example - echo Have the application echo a message - help Help about any command - info Get some info about the application - query Query the application state - set_option Set an options on the application + batch Run a batch of abci commands against an application + check_tx Validate a tx + commit Commit the application state and return the Merkle root hash + console Start an interactive abci console for multiple commands + finalize_block Send a set of transactions to the application + kvstore ABCI demo example + echo Have the application echo a message + help Help about any command + info Get some info about the application + query Query the application state + set_option Set an option on the application Flags: --abci string socket or grpc (default "socket") @@ -53,7 +53,7 @@ Use "abci-cli [command] --help" for more information about a command. The `abci-cli` tool lets us send ABCI messages to our application, to help build and debug them. -The most important messages are `deliver_tx`, `check_tx`, and `commit`, +The most important messages are `finalize_block`, `check_tx`, and `commit`, but there are others for convenience, configuration, and information purposes. @@ -83,19 +83,19 @@ func cmdKVStore(cmd *cobra.Command, args []string) error { if err != nil { return err } + + // Stop upon receiving SIGTERM or CTRL-C. + ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM) + defer cancel() + srv.SetLogger(logger.With("module", "abci-server")) - if err := srv.Start(); err != nil { + if err := srv.Start(ctx); err != nil { return err } - // Stop upon receiving SIGTERM or CTRL-C. - tmos.TrapSignal(logger, func() { - // Cleanup - srv.Stop() - }) - - // Run forever. - select {} + // Run until shutdown. + <-ctx.Done() + srv.Wait() } ``` @@ -173,7 +173,7 @@ Try running these commands: -> code: OK -> data.hex: 0x0000000000000000 -> deliver_tx "abc" +> finalize_block "abc" -> code: OK > info @@ -192,7 +192,7 @@ Try running these commands: -> value: abc -> value.hex: 616263 -> deliver_tx "def=xyz" +> finalize_block "def=xyz" -> code: OK > commit @@ -207,8 +207,8 @@ Try running these commands: -> value.hex: 78797A ``` -Note that if we do `deliver_tx "abc"` it will store `(abc, abc)`, but if -we do `deliver_tx "abc=efg"` it will store `(abc, efg)`. +Note that if we do `finalize_block "abc"` it will store `(abc, abc)`, but if +we do `finalize_block "abc=efg"` it will store `(abc, efg)`. Similarly, you could put the commands in a file and run `abci-cli --verbose batch < myfile`.
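The `cmdKVStore` excerpt above shows only the changed lines. The shutdown idiom it adopts, a signal-scoped context in place of `TrapSignal` plus `select {}`, can be illustrated as a free-standing sketch; the `dummyServer` type below is a hypothetical stand-in for the ABCI socket server, included only so the example compiles and runs:

```go
package main

import (
	"context"
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

// dummyServer stands in for the ABCI server; only the Start/Wait shape
// matters for the shutdown pattern.
type dummyServer struct{ done chan struct{} }

func (s *dummyServer) Start(ctx context.Context) error {
	// Stop serving once the context is canceled.
	go func() { <-ctx.Done(); close(s.done) }()
	return nil
}

func (s *dummyServer) Wait() { <-s.done }

func main() {
	// Cancel the context upon receiving SIGTERM or CTRL-C, replacing the
	// old TrapSignal callback and the "run forever" select {}.
	ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
	defer cancel()

	srv := &dummyServer{done: make(chan struct{})}
	if err := srv.Start(ctx); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	<-ctx.Done() // block until a signal arrives
	srv.Wait()   // wait for the server to finish shutting down
}
```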
diff --git a/docs/app-dev/app-architecture.md b/docs/app-dev/app-architecture.md index ec2822688c..f478547bca 100644 --- a/docs/app-dev/app-architecture.md +++ b/docs/app-dev/app-architecture.md @@ -57,4 +57,4 @@ See the following for more extensive documentation: - [Interchain Standard for the Light-Client REST API](https://github.com/cosmos/cosmos-sdk/pull/1028) - [Tendermint RPC Docs](https://docs.tendermint.com/master/rpc/) - [Tendermint in Production](../tendermint-core/running-in-production.md) -- [ABCI spec](https://github.com/tendermint/spec/tree/95cf253b6df623066ff7cd4074a94e7a3f147c7a/spec/abci) +- [ABCI spec](https://github.com/tendermint/tendermint/tree/95cf253b6df623066ff7cd4074a94e7a3f147c7a/spec/abci) diff --git a/docs/app-dev/getting-started.md b/docs/app-dev/getting-started.md index 2f5739e0f1..a480137cac 100644 --- a/docs/app-dev/getting-started.md +++ b/docs/app-dev/getting-started.md @@ -96,25 +96,21 @@ like: ```json { - "jsonrpc": "2.0", - "id": "", - "result": { - "check_tx": {}, - "deliver_tx": { - "tags": [ - { - "key": "YXBwLmNyZWF0b3I=", - "value": "amFl" - }, - { - "key": "YXBwLmtleQ==", - "value": "YWJjZA==" - } - ] - }, - "hash": "9DF66553F98DE3C26E3C3317A3E4CED54F714E39", - "height": 14 - } + "check_tx": { ... }, + "deliver_tx": { + "tags": [ + { + "key": "YXBwLmNyZWF0b3I=", + "value": "amFl" + }, + { + "key": "YXBwLmtleQ==", + "value": "YWJjZA==" + } + ] + }, + "hash": "9DF66553F98DE3C26E3C3317A3E4CED54F714E39", + "height": 14 } ``` @@ -129,15 +125,11 @@ The result should look like: ```json { - "jsonrpc": "2.0", - "id": "", - "result": { - "response": { - "log": "exists", - "index": "-1", - "key": "YWJjZA==", - "value": "YWJjZA==" - } + "response": { + "log": "exists", + "index": "-1", + "key": "YWJjZA==", + "value": "YWJjZA==" } } ``` @@ -190,7 +182,7 @@ node example/counter.js In another window, reset and start `tendermint`: ```sh -tendermint unsafe-reset-all +tendermint reset unsafe-all tendermint start ``` diff --git a/docs/app-dev/indexing-transactions.md b/docs/app-dev/indexing-transactions.md index b8b06d01b9..67d17c8794 100644 --- a/docs/app-dev/indexing-transactions.md +++ b/docs/app-dev/indexing-transactions.md @@ -15,7 +15,7 @@ the block itself is never stored. Each event contains a type and a list of attributes, which are key-value pairs denoting something about what happened during the method's execution. For more details on `Events`, see the -[ABCI](https://github.com/tendermint/spec/blob/master/spec/abci/abci.md#events) +[ABCI](https://github.com/tendermint/tendermint/blob/master/spec/abci/abci.md#events) documentation. An `Event` has a composite key associated with it. A `compositeKey` is diff --git a/docs/app-dev/readme.md b/docs/app-dev/readme.md index 51e88fc34a..46ce06ca00 100644 --- a/docs/app-dev/readme.md +++ b/docs/app-dev/readme.md @@ -1,7 +1,6 @@ --- order: false parent: + title: "Building Applications" order: 3 ---- - -# Apps +--- \ No newline at end of file diff --git a/docs/architecture/adr-073-libp2p.md b/docs/architecture/adr-073-libp2p.md new file mode 100644 index 0000000000..080fecbcdf --- /dev/null +++ b/docs/architecture/adr-073-libp2p.md @@ -0,0 +1,235 @@ +# ADR 073: Adopt LibP2P + +## Changelog + +- 2021-11-02: Initial Draft (@tychoish) + +## Status + +Proposed. + +## Context + + +As part of the 0.35 development cycle, the Tendermint team completed +the first phase of the work described in ADRs 61 and 62, which included a +large scale refactoring of the reactors and the p2p message +routing. 
This replaced the switch and many of the other legacy +components without breaking protocol or network-level +interoperability and left the legacy connection/socket handling code. + +Following the release, the team has reexamined the state of the code +and the design, as well as Tendermint's requirements. The notes +from that process are available in the [P2P Roadmap +RFC][rfc]. + +This ADR supersedes the decisions made in ADRs 61 and 62, but +builds on the completed portions of this work. Previously, the +boundaries of peer management, message handling, and the higher level +business logic (e.g., "the reactors") were intermingled, and core +elements of the p2p system were responsible for the orchestration of +higher-level business logic. Refactoring the legacy components +made it more obvious that this entanglement of responsibilities +had outsized influence on the entire implementation, making +it difficult to iterate within the current abstractions. +It would not be viable to maintain interoperability with legacy +systems while also achieving many of our broader objectives. + +LibP2P is a thoroughly-specified implementation of a peer-to-peer +networking stack, designed specifically for systems such as +ours. Adopting LibP2P as the basis of Tendermint will allow the +Tendermint team to focus more of their time on other differentiating +aspects of the system, and make it possible for the ecosystem as a +whole to take advantage of the tooling and efforts of the LibP2P +platform. + +## Alternative Approaches + +As discussed in the [P2P Roadmap RFC][rfc], the primary alternative would be to +continue development of Tendermint's home-grown peer-to-peer +layer. While that would give the Tendermint team maximal control +over the peer system, the current design is unexceptional on its +own merits, and the prospective maintenance burden for this system +exceeds our tolerances for the medium term. + +Tendermint can and should differentiate itself not on the basis of +its networking implementation or peer management tools, but by providing +a consistent operator experience, a battle-tested consensus algorithm, +and an ergonomic user experience. + +## Decision + +Tendermint will adopt libp2p during the 0.37 development cycle, +replacing the bespoke Tendermint P2P stack. This will remove the +`Endpoint`, `Transport`, `Connection`, and `PeerManager` abstractions +and leave the reactors, `p2p.Router` and `p2p.Channel` +abstractions. + +LibP2P may obviate the need for a dedicated peer exchange (PEX) +reactor, which would in turn obviate the need for a dedicated +seed mode. If this is the case, then all of this functionality would +be removed. + +If it turns out (based on the advice of Protocol Labs) that it makes +sense to maintain separate pubsub or gossipsub topics +per-message-type, then the `Router` abstraction could also +be entirely subsumed. + +## Detailed Design + +### Implementation Changes + +The seams in the P2P implementation between the higher level +constructs (reactors), the routing layer (`Router`) and the lower +level connection and peer management code make this operation +relatively straightforward to implement. A key +goal in this design is to minimize, and ideally eliminate, the impact +on the reactors, and to completely remove the lower level +components (e.g., `Transport`, `Connection` and `PeerManager`) using the +separation afforded by the `Router` layer.
The current state of the +code makes these changes relatively surgical, and limited to a small +number of methods: + +- `p2p.Router.OpenChannel` will still return a `Channel` structure + which will continue to serve as a pipe between the reactors and the + `Router`. The implementation will no longer need the queue + implementation, and will instead start goroutines that + are responsible for routing the messages from the channel to libp2p + fundamentals, replacing the current `p2p.Router.routeChannel`. + +- The current `p2p.Router.dialPeers` and `p2p.Router.acceptPeers` + are responsible for establishing outbound and inbound connections, + respectively. These methods will be removed, along with + `p2p.Router.openConnection`, and the libp2p connection manager will + be responsible for maintaining network connectivity. + +- The `p2p.Channel` interface will change to replace Go + channels with a more functional interface for sending messages. + New methods on this object will take contexts to support safe + cancellation, return errors, and block rather than + running asynchronously. The `Out` channel, through which + reactors send messages to Peers, will be replaced by a `Send` + method, and the Error channel will be replaced by an `Error` + method. + +- Reactors will be passed an interface that will allow them to + access Peer information from libp2p. This will supplant the + `p2p.PeerUpdates` subscription. + +- Add some kind of heartbeat message at the application level + (e.g., with a reactor), potentially connected to libp2p's DHT to be + used by reactors for service discovery, message targeting, or other + features. + +- Replace the existing/legacy handshake protocol with [Noise](http://www.noiseprotocol.org/noise.html). + +This project will initially use the TCP-based transport protocols within +libp2p. QUIC is also available as an option that we may implement later. +We will not support mixed networks in the initial release, but will +revisit that possibility later if there is a demonstrated need. + +### Upgrade and Compatibility + +Because the routers and all current P2P libraries are `internal` +packages and not part of the public API, the only changes to the public +API surface area of Tendermint will be different configuration +file options, replacing the current P2P options with options relevant +to libp2p. + +However, it will not be possible to run a network with both networking +stacks active at once, so the upgrade to this version of Tendermint +will need to be coordinated between all nodes of the network. This is +consistent with the expectations around upgrades for Tendermint moving +forward, and will help manage both the complexity of the project and +the implementation timeline. + +## Open Questions + +- What is the role of Protocol Labs in the implementation of libp2p in + Tendermint, both during the initial implementation and on an ongoing + basis thereafter? + +- Should all P2P traffic for a given node be pushed to a single topic, + so that a topic maps to a specific ChainID, or should + each reactor (or type of message) have its own topic? How many + topics can a libp2p network support? Is there testing that validates + the capabilities? + +- Tendermint presently provides very coarse QoS-like functionality + using priorities based on message-type. + This intuitively/theoretically ensures that evidence and consensus + messages don't get starved by blocksync/statesync messages. It's + unclear if we can or should attempt to replicate this with libp2p.
+
+- What kind of QoS functionality does libp2p provide, and what kind of
+  metrics does libp2p provide about its QoS functionality?
+
+- Is it possible to store additional (and potentially arbitrary)
+  information in the DHT as part of the heartbeats between nodes,
+  such as the latest height, and then access that in the
+  reactors? How frequently can the DHT be updated?
+
+- Does it make sense to have reactors continue to consume inbound
+  messages from a Channel (`In`), or is there another interface or
+  pattern that we should consider?
+
+  - We should avoid exposing Go channels when possible, and
+    some kind of alternate iterator likely makes sense for processing
+    messages within the reactors.
+
+- What are the security and protocol implications of tracking
+  information from peer heartbeats and exposing that to reactors?
+
+- How much (or how little) configuration can Tendermint provide for
+  libp2p, particularly in the first release?
+
+  - In general, we should not support bring-your-own functionality for libp2p
+    components within Tendermint, and should reduce the configuration surface
+    area as much as possible.
+
+- What are the best ways to provide request/response semantics for
+  reactors on top of libp2p? Will it be possible to add
+  request/response semantics in a future release, or is there
+  anticipatory work that needs to be done as part of the initial
+  release?
+
+## Consequences
+
+### Positive
+
+- Reduce the maintenance burden for the Tendermint Core team by
+  removing a large swath of legacy code that has proven to be
+  difficult to modify safely.
+
+- Remove the responsibility for maintaining and developing the entire
+  peer management system (p2p) and stack.
+
+- By providing users with a more stable peer and networking system,
+  Tendermint can improve operator experience and network stability.
+
+### Negative
+
+- By deferring to library implementations for peer management and
+  networking, Tendermint loses some flexibility for innovating at the
+  peer and networking level. However, Tendermint should be innovating
+  primarily at the consensus layer, and libp2p does not preclude
+  optimization or development in the peer layer.
+
+- Libp2p is a large dependency, and Tendermint would become dependent
+  upon Protocol Labs' release cycle and prioritization for bug
+  fixes. If this proves onerous, it's possible to maintain a vendored
+  fork of relevant components as needed.
+
+### Neutral
+
+- N/A
+
+## References
+
+- [ADR 61: P2P Refactor Scope][adr61]
+- [ADR 62: P2P Architecture][adr62]
+- [P2P Roadmap RFC][rfc]
+
+[adr61]: ./adr-061-p2p-refactor-scope.md
+[adr62]: ./adr-062-p2p-architecture.md
+[rfc]: ../rfc/rfc-000-p2p-roadmap.rst
diff --git a/docs/architecture/adr-074-timeout-params.md b/docs/architecture/adr-074-timeout-params.md
new file mode 100644
index 0000000000..22fd784bd9
--- /dev/null
+++ b/docs/architecture/adr-074-timeout-params.md
@@ -0,0 +1,203 @@
+# ADR 74: Migrate Timeout Parameters to Consensus Parameters
+
+## Changelog
+
+- 03-Jan-2022: Initial draft (@williambanfield)
+- 13-Jan-2022: Updated to indicate work on upgrade path needed (@williambanfield)
+
+## Status
+
+Proposed
+
+## Context
+
+### Background
+
+Tendermint's consensus timeout parameters are currently configured locally by each validator
+in the validator's [config.toml][config-toml].
+This means that the validators on a Tendermint network may have different timeouts
+from each other. There is no reason for validators on the same network to configure
+different timeout values.
+Proper functioning of the Tendermint consensus algorithm
+relies on these parameters being uniform across validators.
+
+The configurable values are as follows:
+
+* `TimeoutPropose`
+  * How long the consensus algorithm waits for a proposal block before issuing a prevote.
+  * If no proposal arrives by `TimeoutPropose`, then the consensus algorithm will issue a nil prevote.
+* `TimeoutProposeDelta`
+  * How much the `TimeoutPropose` grows each round.
+* `TimeoutPrevote`
+  * How long the consensus algorithm waits after receiving +2/3 prevotes with
+  no quorum for a value before issuing a precommit for nil.
+  (See the [arXiv paper][arxiv-paper], Algorithm 1, Line 34)
+* `TimeoutPrevoteDelta`
+  * How much the `TimeoutPrevote` increases with each round.
+* `TimeoutPrecommit`
+  * How long the consensus algorithm waits after receiving +2/3 precommits that
+  do not have a quorum for a value before entering the next round.
+  (See the [arXiv paper][arxiv-paper], Algorithm 1, Line 47)
+* `TimeoutPrecommitDelta`
+  * How much the `TimeoutPrecommit` increases with each round.
+* `TimeoutCommit`
+  * How long the consensus algorithm waits after committing a block but before starting the new height.
+  * This gives a validator a chance to receive slow precommits.
+* `SkipTimeoutCommit`
+  * Make progress as soon as the node has 100% of the precommits.
+
+
+### Overview of Change
+
+We will consolidate the timeout parameters and migrate them from the node-local
+`config.toml` file into the network-global consensus parameters.
+
+The 8 timeout parameters will be consolidated down to 6. These will be as follows:
+
+* `TimeoutPropose`
+  * Same as current `TimeoutPropose`.
+* `TimeoutProposeDelta`
+  * Same as current `TimeoutProposeDelta`.
+* `TimeoutVote`
+  * How long validators wait for votes in both the prevote
+  and precommit phases of the consensus algorithm. This parameter subsumes
+  the current `TimeoutPrevote` and `TimeoutPrecommit` parameters.
+* `TimeoutVoteDelta`
+  * How much the `TimeoutVote` will grow each successive round.
+  This parameter subsumes the current `TimeoutPrevoteDelta` and `TimeoutPrecommitDelta`
+  parameters.
+* `TimeoutCommit`
+  * Same as current `TimeoutCommit`.
+* `BypassCommitTimeout`
+  * Same as current `SkipTimeoutCommit`, renamed for clarity.
+
+A safe default will be provided by Tendermint for each of these parameters, and
+networks will be able to update the parameters as they see fit. Local updates
+to these parameters will no longer be possible; instead, the application will control
+updating the parameters. Applications using the Cosmos SDK will automatically be
+able to change the values of these consensus parameters [via a governance proposal][cosmos-sdk-consensus-params].
+
+This change is low-risk. While parameters are locally configurable, many running chains
+do not change them from their default values. For example, initializing
+a node on Osmosis, Terra, or the Cosmos Hub using their `init` command produces
+a `config.toml` with Tendermint's default values for these parameters.
+
+### Why this parameter consolidation?
+
+Reducing the number of parameters is good for UX. Fewer superfluous parameters make
+running and operating a Tendermint network less confusing.
+
+The Prevote and Precommit messages are of similar size and require similar amounts
+of processing, so there is no strong need for them to be configured separately.
+
+The `TimeoutPropose` parameter governs how long Tendermint will wait for the proposed
+block to be gossiped.
+Blocks are much larger than votes and therefore tend to be
+gossiped much more slowly. It therefore makes sense to keep `TimeoutPropose` and
+`TimeoutProposeDelta` as parameters separate from the vote timeouts.
+
+`TimeoutCommit` is used by chains to ensure that the network waits for the votes from
+slower validators before proceeding to the next height. Without this timeout, the votes
+from slower validators would consistently not be included in blocks, and those validators
+would not be counted as 'up' from the chain's perspective. Being down damages a validator's
+reputation and causes potential stakers to think twice before delegating to that validator.
+
+`TimeoutCommit` also prevents the network from producing the next height as soon as validators
+on the fastest hardware with a summed voting power of +2/3 of the network's total have
+completed execution of the block. Allowing the network to proceed as soon as the fastest
++2/3 have completed execution would have a cumulative effect over heights, eventually
+leaving slower validators unable to participate in consensus at all. `TimeoutCommit`
+therefore allows networks to have greater variability in hardware. Additional
+discussion of this can be found in [tendermint issue 5911][tendermint-issue-5911-comment]
+and [spec issue 359][spec-issue-359].
+
+## Alternative Approaches
+
+### Hardcode the parameters
+
+Many Tendermint networks run on similar cloud-hosted infrastructure. Therefore,
+they have similar bandwidth and machine resources. The timings for propagating votes
+and blocks are likely to be reasonably similar across networks. As a result, the
+timeout parameters are good candidates for being hardcoded. Hardcoding the timeouts
+in Tendermint would mean entirely removing these parameters from any configuration
+that could be altered by either an application or a node operator. Instead,
+Tendermint would ship with a set of timeouts and all applications using Tendermint
+would use this exact same set of values.
+
+While Tendermint nodes often run with similar bandwidth and on similar cloud-hosted
+machines, there are enough points of variability to make configuring
+consensus timeouts meaningful. Namely, Tendermint network topologies are likely to be
+very different from chain to chain. Additionally, applications may vary greatly in
+how long the `Commit` phase may take. Applications that perform more work during `Commit`
+require a longer `TimeoutCommit` to allow the application to complete its work
+and be prepared for the next height.
+
+## Decision
+
+The decision has been made to implement this work, with the caveat that discussion
+of the specific mechanism for introducing the new parameters to chains is still ongoing.
+
+## Detailed Design
+
+### New Consensus Parameters
+
+A new `TimeoutParams` `message` will be added to the [params.proto file][consensus-params-proto].
+This message will have the following form:
+
+```proto
+message TimeoutParams {
+  google.protobuf.Duration propose = 1;
+  google.protobuf.Duration propose_delta = 2;
+  google.protobuf.Duration vote = 3;
+  google.protobuf.Duration vote_delta = 4;
+  google.protobuf.Duration commit = 5;
+  bool bypass_commit_timeout = 6;
+}
+```
+
+This new message will be added as a field into the [`ConsensusParams`
+message][consensus-params-proto]. The same default values that are [currently
+set for these parameters][current-timeout-defaults] in the local configuration
+file will be used as the defaults for these new consensus parameters in the
+[consensus parameter defaults][default-consensus-params].
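+For illustration, a corresponding Go representation with those defaults
+carried over might look like the sketch below. The exact Go types, names,
+and values will be settled during implementation; this assumes the current
+`config.toml` defaults (3s propose, 500ms deltas, 1s vote and commit
+timeouts) map across unchanged.
+
+```go
+package types
+
+import "time"
+
+// TimeoutParams mirrors the proto message above; names are illustrative.
+type TimeoutParams struct {
+  Propose             time.Duration
+  ProposeDelta        time.Duration
+  Vote                time.Duration
+  VoteDelta           time.Duration
+  Commit              time.Duration
+  BypassCommitTimeout bool
+}
+
+// DefaultTimeoutParams assumes the current local-config defaults carry over.
+func DefaultTimeoutParams() TimeoutParams {
+  return TimeoutParams{
+    Propose:             3 * time.Second,
+    ProposeDelta:        500 * time.Millisecond,
+    Vote:                time.Second,
+    VoteDelta:           500 * time.Millisecond,
+    Commit:              time.Second,
+    BypassCommitTimeout: false,
+  }
+}
+```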
+The new consensus parameters will be subject to the same
+[validity rules][time-param-validation] as the current configuration values,
+namely, each value must be non-negative.
+
+### Migration
+
+The new `ConsensusParameters` will be added during an upcoming release. In this
+release, the old `config.toml` parameters will cease to control the timeouts, and
+an error will be logged on nodes that continue to specify these values. The specific
+mechanism by which these parameters will be added to a chain is being discussed in
+[RFC-009][rfc-009] and will be decided ahead of the next release.
+
+The specific mechanism for adding these parameters depends on work related to
+[soft upgrades][soft-upgrades], which is still ongoing.
+
+## Consequences
+
+### Positive
+
+* Timeout parameters will be equal across all of the validators in a Tendermint network.
+* Remove superfluous timeout parameters.
+
+### Negative
+
+### Neutral
+
+* Timeout parameters now require a consensus parameter update to change.
+
+## References
+
+[consensus-params-proto]: https://github.com/tendermint/spec/blob/a00de7199f5558cdd6245bbbcd1d8405ccfb8129/proto/tendermint/types/params.proto#L11
+[default-consensus-params]: https://github.com/tendermint/tendermint/blob/7cdf560173dee6773b80d1c574a06489d4c394fe/types/params.go#L79
+[current-timeout-defaults]: https://github.com/tendermint/tendermint/blob/7cdf560173dee6773b80d1c574a06489d4c394fe/config/config.go#L955
+[config-toml]: https://github.com/tendermint/tendermint/blob/5cc980698a3402afce76b26693ab54b8f67f038b/config/toml.go#L425-L440
+[cosmos-sdk-consensus-params]: https://github.com/cosmos/cosmos-sdk/issues/6197
+[time-param-validation]: https://github.com/tendermint/tendermint/blob/7cdf560173dee6773b80d1c574a06489d4c394fe/config/config.go#L1038
+[tendermint-issue-5911-comment]: https://github.com/tendermint/tendermint/issues/5911#issuecomment-973560381
+[spec-issue-359]: https://github.com/tendermint/spec/issues/359
+[arxiv-paper]: https://arxiv.org/pdf/1807.04938.pdf
+[soft-upgrades]: https://github.com/tendermint/spec/pull/222
+[rfc-009]: https://github.com/tendermint/tendermint/pull/7524
diff --git a/docs/architecture/adr-075-rpc-subscription.md b/docs/architecture/adr-075-rpc-subscription.md
new file mode 100644
index 0000000000..1ca48e7123
--- /dev/null
+++ b/docs/architecture/adr-075-rpc-subscription.md
@@ -0,0 +1,684 @@
+# ADR 075: RPC Event Subscription Interface
+
+## Changelog
+
+- 01-Mar-2022: Update long-polling interface (@creachadair).
+- 10-Feb-2022: Updates to reflect implementation.
+- 26-Jan-2022: Marked accepted.
+- 22-Jan-2022: Updated and expanded (@creachadair).
+- 20-Nov-2021: Initial draft (@creachadair).
+
+---
+## Status
+
+Accepted
+
+---
+## Background & Context
+
+For context, see [RFC 006: Event Subscription][rfc006].
+
+The [Tendermint RPC service][rpc-service] permits clients to subscribe to the
+event stream generated by a consensus node. This allows clients to observe the
+state of the consensus network, including details of the consensus algorithm
+state machine, proposals, transaction delivery, and block completion.
The +application may also attach custom key-value attributes to events to expose +application-specific details to clients. + +The event subscription API in the RPC service currently comprises three methods: + +1. `subscribe`: A request to subscribe to the events matching a specific + [query expression][query-grammar]. Events can be filtered by their key-value + attributes, including custom attributes provided by the application. + +2. `unsubscribe`: A request to cancel an existing subscription based on its + query expression. + +3. `unsubscribe_all`: A request to cancel all existing subscriptions belonging + to the client. + +There are some important technical and UX issues with the current RPC event +subscription API. The rest of this ADR outlines these problems in detail, and +proposes a new API scheme intended to address them. + +### Issue 1: Persistent connections + +To subscribe to a node's event stream, a client needs a persistent connection +to the node. Unlike the other methods of the service, for which each call is +serviced by a short-lived HTTP round trip, subscription delivers a continuous +stream of events to the client by hijacking the HTTP channel for a websocket. +The stream (and hence the HTTP request) persists until either the subscription +is explicitly cancelled, or the connection is closed. + +There are several problems with this API: + +1. **Expensive per-connection state**: The server must maintain a substantial + amount of state per subscriber client: + + - The current implementation uses a [WebSocket][ws] for each active + subscriber. The connection must be maintained even if there are no + matching events for a given client. + + The server can drop idle connections to save resources, but doing so + terminates all subscriptions on those connections and forces those clients + to re-connect, adding additional resource churn for the server. + + - In addition, the server maintains a separate buffer of undelivered events + for each client. This is to reduce the dual risks that a client will miss + events, and that a slow client could "push back" on the publisher, + impeding the progress of consensus. + + Because event traffic is quite bursty, queues can potentially take up a + lot of memory. Moreover, each subscriber may have a different filter + query, so the server winds up having to duplicate the same events among + multiple subscriber queues. Not only does this add memory pressure, but it + does so most at the worst possible time, i.e., when the server is already + under load from high event traffic. + +2. **Operational access control is difficult**: The server's websocket + interface exposes _all_ the RPC service endpoints, not only the subscription + methods. This includes methods that allow callers to inject arbitrary + transactions (`broadcast_tx_*`) and evidence (`broadcast_evidence`) into the + network, remove transactions (`remove_tx`), and request arbitrary amounts of + chain state. + + Filtering requests to the GET endpoint is straightforward: A reverse proxy + like [nginx][nginx] can easily filter methods by URL path. Filtering POST + requests takes a bit more work, but can be managed with a filter program + that speaks [FastCGI][fcgi] and parses JSON-RPC request bodies. + + Filtering the websocket interface requires a dedicated proxy implementation. + Although nginx can [reverse-proxy websockets][rp-ws], it does not support + filtering websocket traffic via FastCGI. 
The operator would need to either + implement a custom [nginx extension module][ng-xm] or build and run a + standalone proxy that implements websocket and filters each session. Apart + from the work, this also makes the system even more resource intensive, as + well as introducing yet another connection that could potentially time out + or stall on full buffers. + + Even for the simple case of restricting access to only event subscription, + there is no easy solution currently: Once a caller has access to the + websocket endpoint, it has complete access to the RPC service. + +### Issue 2: Inconvenient client API + +The subscription interface has some inconvenient features for the client as +well as the server. These include: + +1. **Non-standard protocol:** The RPC service is mostly [JSON-RPC 2.0][jsonrpc2], + but the subscription interface diverges from the standard. + + In a standard JSON-RPC 2.0 call, the client initiates a request to the + server with a unique ID, and the server concludes the call by sending a + reply for that ID. The `subscribe` implementation, however, sends multiple + responses to the client's request: + + - The client sends `subscribe` with some ID `x` and the desired query + + - The server responds with ID `x` and an empty confirmation response. + + - The server then (repeatedly) sends event result responses with ID `x`, one + for each item with a matching event. + + Standard JSON-RPC clients will reject the subsequent replies, as they + announce a request ID (`x`) that is already complete. This means a caller + has to implement Tendermint-specific handling for these responses. + + Moreover, the result format is different between the initial confirmation + and the subsequent responses. This means a caller has to implement special + logic for decoding the first response versus the subsequent ones. + +2. **No way to detect data loss:** The subscriber connection can be terminated + for many reasons. Even ignoring ordinary network issues (e.g., packet loss): + + - The server will drop messages and/or close the websocket if its write + buffer fills, or if the queue of undelivered matching events is not + drained fast enough. The client has no way to discover that messages were + dropped even if the connection remains open. + + - Either the client or the server may close the websocket if the websocket + PING and PONG exchanges are not handled correctly, or frequently enough. + Even if correctly implemented, this may fail if the system is under high + load and cannot service those control messages in a timely manner. + + When the connection is terminated, the server drops all the subscriptions + for that client (as if it had called `unsubscribe_all`). Even if the client + reconnects, any events that were published during the period between the + disconnect and re-connect and re-subscription will be silently lost, and the + client has no way to discover that it missed some relevant messages. + +3. **No way to replay old events:** Even if a client knew it had missed some + events (due to a disconnection, for example), the API provides no way for + the client to "play back" events it may have missed. + +4. **Large response sizes:** Some event data can be quite large, and there can + be substantial duplication across items. The API allows the client to select + _which_ events are reported, but has no way to control which parts of a + matching event it wishes to receive. 
+ + This can be costly on the server (which has to marshal those data into + JSON), the network, and the client (which has to unmarshal the result and + then pick through for the components that are relevant to it). + + Besides being inefficient, this also contributes to some of the persistent + connection issues mentioned above, e.g., filling up the websocket write + buffer and forcing the server to queue potentially several copies of a large + value in memory. + +5. **Client identity is tied to network address:** The Tendermint event API + identifies each subscriber by a (Client ID, Query) pair. In the RPC service, + the query is provided by the client, but the client ID is set to the TCP + address of the client (typically "host:port" or "ip:port"). + + This means that even if the server did _not_ drop subscriptions immediately + when the websocket connection is closed, a client may not be able to + reattach to its existing subscription. Dialing a new connection is likely + to result in a different port (and, depending on their own proxy setup, + possibly a different public IP). + + In isolation, this problem would be easy to work around with a new + subscription parameter, but it would require several other changes to the + handling of event subscriptions for that workaround to become useful. + +--- +## Decision + +To address the described problems, we will: + +1. Introduce a new API for event subscription to the Tendermint RPC service. + The proposed API is described in [Detailed Design](#detailed-design) below. + +2. This new API will target the Tendermint v0.36 release, during which the + current ("streaming") API will remain available as-is, but deprecated. + +3. The streaming API will be entirely removed in release v0.37, which will + require all users of event subscription to switch to the new API. + +> **Point for discussion:** Given that ABCI++ and PBTS are the main priorities +> for v0.36, it would be fine to slip the first phase of this work to v0.37. +> Unless there is a time problem, however, the proposed design does not disrupt +> the work on ABCI++ or PBTS, and will not increase the scope of breaking +> changes. Therefore the plan is to begin in v0.36 and slip only if necessary. + +--- +## Detailed Design + +### Design Goals + +Specific goals of this design include: + +1. Remove the need for a persistent connection to each subscription client. + Subscribers should use the same HTTP request flow for event subscription + requests as for other RPC calls. + +2. The server retains minimal state (possibly none) per-subscriber. In + particular: + + - The server does not buffer unconsumed writes nor queue undelivered events + on a per-client basis. + - A client that stalls or goes idle does not cost the server any resources. + - Any event data that is buffered or stored is shared among _all_ + subscribers, and is not duplicated per client. + +3. Slow clients have no impact (or minimal impact) on the rate of progress of + the consensus algorithm, beyond the ambient overhead of servicing individual + RPC requests. + +4. Clients can tell when they have missed events matching their subscription, + within some reasonable (configurable) window of time, and can "replay" + events within that window to catch up. + +5. Nice to have: It should be easy to use the event subscription API from + existing standard tools and libraries, including command-line use for + testing and experimentation. 
+
+### Definitions
+
+- The **event stream** of a node is a single, time-ordered, heterogeneous
+  stream of event items.
+
+- Each **event item** comprises an **event datum** (for example, block header
+  metadata for a new-block event), and zero or more optional **events**.
+
+- An **event** means the [ABCI `Event` data type][abci-event], which comprises
+  a string type and zero or more string key-value **event attributes**.
+
+  The use of the new terms "event item" and "event datum" is to avert confusion
+  between the values that are published to the event bus (what we call here
+  "event items") and the ABCI `Event` data type.
+
+- The node assigns each event item a unique identifier string called a
+  **cursor**. A cursor must be unique among all events published by a single
+  node, but it is not required to be unique globally across nodes.
+
+  Cursors are time-ordered so that given event items A and B, if A was
+  published before B, then cursor(A) < cursor(B) in lexicographic order.
+
+  A minimum viable cursor implementation is a tuple consisting of a timestamp
+  and a sequence number (e.g., `16CCC798FB5F4670-0123`). However, it may also
+  be useful to append basic type information to a cursor, to allow efficient
+  filtering (e.g., `16CCC87E91869050-0091:BeginBlock`).
+
+  The initial implementation will use the minimum viable format.
+
+### Discussion
+
+The node maintains an **event log**, a shared ordered record of the events
+published to its event bus within an operator-configurable time window. The
+initial implementation will store the event log in memory, and the operator
+will be given two per-node configuration settings. Note that these names are
+provisional:
+
+- `[rpc] event-log-window-size`: A duration before the latest published event,
+  during which the node will retain event items published. Setting this value
+  to zero disables event subscription.
+
+- `[rpc] event-log-max-items`: A maximum number of event items that the node
+  will retain within the time window. If the number of items exceeds this
+  value, the node discards the oldest items in the window. Setting this value
+  to zero means that no limit is imposed on the number of items.
+
+The node will retain all events within the time window, provided they do not
+exceed the maximum number. These config parameters allow the operator to
+loosely regulate how much memory and storage the node allocates to the event
+log. The client can use the server reply to tell whether the events it wants
+are still available from the event log.
+
+The event log is shared among all subscribers to the node.
+
+> **Discussion point:** Should events persist across node restarts?
+>
+> The current event API does not persist events across restarts, so this new
+> design does not either. Note, however, that we may "spill" older event data
+> to disk as a way of controlling memory use. Such usage is ephemeral, however,
+> and does not need to be tracked as node data (e.g., it could be temp files).
+
+### Query API
+
+To retrieve event data, the client will call the (new) RPC method `events`.
+The parameters of this method will correspond to the following Go types:
+
+```go
+type EventParams struct {
+  // Optional filter spec. If nil or empty, all items are eligible.
+  Filter *Filter `json:"filter"`
+
+  // The maximum number of eligible results to return.
+  // If zero or negative, the server will report a default number.
+  MaxResults int `json:"max_results"`
+
+  // Return only items after this cursor. If empty, the limit is just
+  // before the beginning of the event log.
+  After string `json:"after"`
+
+  // Return only items before this cursor. If empty, the limit is just
+  // after the head of the event log.
+  Before string `json:"before"`
+
+  // Wait for up to this long for events to be available.
+  WaitTime time.Duration `json:"wait_time"`
+}
+
+type Filter struct {
+  Query string `json:"query"`
+}
+```
+
+> **Discussion point:** The initial implementation will not cache filter
+> queries for the client. If this turns out to be a performance issue in
+> production, the service can keep a small shared cache of compiled queries.
+> Given the improvements from #7319 et seq., this should not be necessary.
+
+> **Discussion point:** For the initial implementation, the new API will use
+> the existing query language as-is. Future work may extend the Filter message
+> with a more structured and/or expressive query surface, but that is beyond
+> the scope of this design.
+
+The semantics of the request are as follows: An item in the event log is
+**eligible** for a query if:
+
+- It is newer than the `after` cursor (if set).
+- It is older than the `before` cursor (if set).
+- It matches the filter (if set).
+
+Among the eligible items in the log, the server returns up to `max_results` of
+the newest items, in reverse order of cursor. If `max_results` is unset, the
+server chooses a number to return, and will cap `max_results` at a sensible
+limit.
+
+The `wait_time` parameter is used to effect polling. If `before` is empty and
+no items are available, the server will wait for up to `wait_time` for matching
+items to arrive at the head of the log. If `wait_time` is zero or negative, the
+server will wait for a default (positive) interval.
+
+If `before` is non-empty, `wait_time` is ignored: new results are only added to
+the head of the log, so there is no need to wait. This allows the client to
+poll for new data, and "page" backward through matching event items. This is
+discussed in more detail below.
+
+The server will set a sensible cap on the maximum `wait_time`, overriding
+client-requested intervals longer than that.
+
+A successful reply from the `events` request corresponds to the following Go
+types:
+
+```go
+type EventReply struct {
+  // The items matching the request parameters, from newest
+  // to oldest, if any were available within the timeout.
+  Items []*EventItem `json:"items"`
+
+  // This is true if there is at least one older matching item
+  // available in the log that was not returned.
+  More bool `json:"more"`
+
+  // The cursor of the oldest item in the log at the time of this reply,
+  // or "" if the log is empty.
+  Oldest string `json:"oldest"`
+
+  // The cursor of the newest item in the log at the time of this reply,
+  // or "" if the log is empty.
+  Newest string `json:"newest"`
+}
+
+type EventItem struct {
+  // The cursor of this item.
+  Cursor string `json:"cursor"`
+
+  // The encoded event data for this item.
+  // The type identifies the structure of the value.
+  Data struct {
+    Type  string          `json:"type"`
+    Value json.RawMessage `json:"value"`
+  } `json:"data"`
+}
+```
+
+The `oldest` and `newest` fields of the reply report the cursors of the oldest
+and newest items (of any kind) recorded in the event log at the time of the
+reply, or are `""` if the log is empty.
+
+The `data` field contains the type-specific event datum. The datum carries any
+ABCI events that may have been defined.
+
+> **Discussion point**: Based on [issue #7273][i7273], I did not include a
+> separate field in the response for the ABCI events, since it duplicates data
+> already stored elsewhere in the event data.
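+Putting the request and reply types together, a client's long-poll loop might
+look like the following sketch. The endpoint address, the nanosecond JSON
+encoding of `wait_time`, and the minimal error handling are assumptions for
+illustration; the reply semantics it relies on are spelled out below.
+
+```go
+package main
+
+import (
+  "bytes"
+  "encoding/json"
+  "fmt"
+  "net/http"
+  "time"
+)
+
+func main() {
+  after := "" // cursor of the newest item seen so far
+  for {
+    req, _ := json.Marshal(map[string]interface{}{
+      "jsonrpc": "2.0", "id": 1, "method": "events",
+      "params": map[string]interface{}{
+        "max_results": 100,
+        "after":       after,
+        "wait_time":   30 * time.Second, // time.Duration marshals as nanoseconds
+      },
+    })
+    resp, err := http.Post("http://localhost:26657", "application/json", bytes.NewReader(req))
+    if err != nil {
+      time.Sleep(time.Second) // transient failure: back off and retry
+      continue
+    }
+    var reply struct {
+      Result struct {
+        Items []struct {
+          Cursor string `json:"cursor"`
+        } `json:"items"`
+      } `json:"result"`
+    }
+    if err := json.NewDecoder(resp.Body).Decode(&reply); err == nil {
+      for _, it := range reply.Result.Items {
+        fmt.Println("item at cursor", it.Cursor)
+      }
+      if len(reply.Result.Items) > 0 {
+        after = reply.Result.Items[0].Cursor // items are ordered newest-first
+      }
+    }
+    resp.Body.Close()
+  }
+}
+```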
+ +> **Discussion point**: Based on [issue #7273][i7273], I did not include a +> separate field in the response for the ABCI events, since it duplicates data +> already stored elsewhere in the event data. + +The semantics of the reply are as follows: + +- If `items` is non-empty: + + - Items are ordered from newest to oldest. + + - If `more` is true, there is at least one additional, older item in the + event log that was not returned (in excess of `max_results`). + + In this case the client can fetch the next page by setting `before` in a + new request, to the cursor of the oldest item fetched (i.e., the last one + in `items`). + + - Otherwise (if `more` is false), all the matching results have been + reported (pagination is complete). + + - The first element of `items` identifies the newest item considered. + Subsequent poll requests can set `after` to this cursor to skip items + that were already retrieved. + +- If `items` is empty: + + - If the `before` was set in the request, there are no further eligible + items for this query in the log (pagination is complete). + + This is just a safety case; the client can detect this without issuing + another call by consulting the `more` field of the previous reply. + + - If the `before` was empty in the request, no eligible items were + available before the `wait_time` expired. The client may poll again to + wait for more event items. + +A client can store cursor values to detect data loss and to recover from +crashes and connectivity issues: + +- After a crash, the client requests events after the newest cursor it has + seen. If the reply indicates that cursor is no longer in range, the client + may (conservatively) conclude some event data may have been lost. + +- On the other hand, if it _is_ in range, the client can then page back through + the results that it missed, and then resume polling. As long as its recovery + cursor does not age out before it finishes, the client can be sure it has all + the relevant results. + +### Other Notes + +- The new API supports two general "modes" of operation: + + 1. In ordinary operation, clients will **long-poll** the head of the event + log for new events matching their criteria (by setting a `wait_time` and + no `before`). + + 2. If there are more events than the client requested, or if the client needs + to to read older events to recover from a stall or crash, clients will + **page** backward through the event log (by setting `before` and `after`). + +- While the new API requires explicit polling by the client, it makes better + use of the node's existing HTTP infrastructure (e.g., connection pools). + Moreover, the direct implementation is easier to use from standard tools and + client libraries for HTTP and JSON-RPC. + + Explicit polling does shift the burden of timeliness to the client. That is + arguably preferable, however, given that the RPC service is ancillary to the + node's primary goal, viz., consensus. The details of polling can be easily + hidden from client applications with simple libraries. + +- The format of a cursor is considered opaque to the client. Clients must not + parse cursor values, but they may rely on their ordering properties. + +- To maintain the event log, the server must prune items outside the time + window and in excess of the item limit. + + The initial implementation will do this by checking the tail of the event log + after each new item is published. 
+### Migration Plan
+
+This design requires that clients eventually migrate to the new event
+subscription API, but provides a full release cycle with both APIs in place to
+make this burden more tractable. The migration strategy is broadly:
+
+**Phase 1**: Release v0.36.
+
+- Implement the new `events` endpoint, keeping the existing methods as they are.
+- Update the Go clients to support the new `events` endpoint, and handle polling.
+- Update the old endpoints to log annoyingly about their own deprecation.
+- Write tutorials about how to migrate client usage.
+
+At or shortly after release, we should proactively update the Cosmos SDK to use
+the new API, to remove a disincentive to upgrading.
+
+**Phase 2**: Release v0.37.
+
+- During development, we should actively seek out any existing users of the
+  streaming event subscription API and help them migrate.
+- Possibly also: Spend some time writing clients for JS, Rust, et al.
+- Release: Delete the old implementation and all the websocket support code.
+
+> **Discussion point**: Even though the plan is to keep the existing service,
+> we might take the opportunity to restrict the websocket endpoint to _only_
+> the event streaming service, removing the other endpoints.
+> To minimize the disruption for users in the v0.36 cycle, I have decided not
+> to do this for the first phase.
+>
+> If we wind up pushing this design into v0.37, however, we should re-evaluate
+> this partial turn-down of the websocket.
+
+### Future Work
+
+- This design does not immediately address the problem of allowing the client
+  to control which data are reported back for event items. That concern is
+  deferred to future work. However, it would be straightforward to extend the
+  filter and/or the request parameters to allow more control.
+
+- The node currently stores a subset of event data (specifically the block and
+  transaction events) for use in reindexing. While these data are redundant
+  with the event log described in this document, they are not sufficient to
+  cover event subscription, as they omit other event types.
+
+  In the future we should investigate consolidating or removing event data from
+  the state store entirely. For now this issue is out of scope for purposes of
+  updating the RPC API. We may be able to piggyback on the database unification
+  plans (see [RFC 001][rfc001]) to store the event log separately, so its
+  pruning policy does not need to be tied to the block and state stores.
+
+- This design reuses the existing filter query language from the old API. In
+  the future we may want to use a more structured and/or expressive query. The
+  Filter object can be extended with more fields as needed to support this.
+
+- Some users have trouble communicating with the RPC service because of
+  configuration problems like improperly-set CORS policies. While this design
+  does not address those issues directly, we might want to revisit how we set
+  policies in the RPC service to make it less susceptible to confusing errors
+  caused by misconfiguration.
+
+---
+## Consequences
+
+- ✅ Reduces the number of transport options for RPC. Supports [RFC 002][rfc002].
+- ✅ Removes the primary non-standard use of JSON-RPC.
+- ⛔️ Forces clients to migrate to a different API (eventually).
+- ↕️ API requires clients to poll, but this reduces client state on the server.
+- ↕️ We have to maintain both implementations for a whole release, but this
+  gives clients time to migrate.
+
+---
+## Alternative Approaches
+
+The following alternative approaches were considered:
+
+1. **Leave it alone.** Since existing tools mostly already work with the API as
+   it stands today, we could leave it alone and do our best to improve its
+   performance and reliability.
+
+   Based on many issues reported by users and node operators (e.g.,
+   [#3380][i3380], [#6439][i6439], [#6729][i6729], [#7247][i7247]), the
+   problems described here affect even the existing use that works. Investing
+   further incremental effort in the existing API is unlikely to address these
+   issues.
+
+2. **Design a better streaming API.** Instead of polling, we might try to
+   design a better "streaming" API for event subscription.
+
+   A significant advantage of switching away from streaming is to remove the
+   need for persistent connections between the node and subscribers. A new
+   streaming protocol design would lose that advantage, and would still need a
+   way to let clients recover and replay.
+
+   This approach might look better if we decided to use a different protocol
+   for event subscription, say gRPC instead of JSON-RPC. That choice, however,
+   would be just as breaking for existing clients, for marginal benefit.
+ Moreover, this option increases both the complexity and the resource cost on + the node implementation. + + Given that resource consumption and complexity are important considerations, + this option was not chosen. + +3. **Defer to an external event broker.** We might remove the entire event + subscription infrastructure from the node, and define an optional interface + to allow the node to publish all its events to an external event broker, + such as Apache Kafka. + + This has the advantage of greatly simplifying the node, but at a great cost + to the node operator: To enable event subscription in this design, the + operator has to stand up and maintain a separate process in communion with + the node, and configuration changes would have to be coordinated across + both. + + Moreover, this approach would be highly disruptive to existing client use, + and migration would probably require switching to third-party libraries. + Despite the potential benefits for the node itself, the costs to operators + and clients seems too large for this to be the best option. + + Publishing to an external event broker might be a worthwhile future project, + if there is any demand for it. That decision is out of scope for this design, + as it interacts with the design of the indexer as well. + +--- +## References + +- [RFC 006: Event Subscription][rfc006] +- [Tendermint RPC service][rpc-service] +- [Event query grammar][query-grammar] +- [RFC 6455: The WebSocket protocol][ws] +- [JSON-RPC 2.0 Specification][jsonrpc2] +- [Nginx proxy server][nginx] + - [Proxying websockets][rp-ws] + - [Extension modules][ng-xm] +- [FastCGI][fcgi] +- [RFC 001: Storage Engines & Database Layer][rfc001] +- [RFC 002: Interprocess Communication in Tendermint][rfc002] +- Issues: + - [rpc/client: test that client resubscribes upon disconnect][i3380] (#3380) + - [Too high memory usage when creating many events subscriptions][i6439] (#6439) + - [Tendermint emits events faster than clients can pull them][i6729] (#6729) + - [indexer: unbuffered event subscription slow down the consensus][i7247] (#7247) + - [rpc: remove duplication of events when querying][i7273] (#7273) + +[rfc006]: https://github.com/tendermint/tendermint/blob/master/docs/rfc/rfc-006-event-subscription.md +[rpc-service]: https://github.com/tendermint/tendermint/blob/master/rpc/openapi/openapi.yaml +[query-grammar]: https://pkg.go.dev/github.com/tendermint/tendermint@master/internal/pubsub/query/syntax +[ws]: https://datatracker.ietf.org/doc/html/rfc6455 +[jsonrpc2]: https://www.jsonrpc.org/specification +[nginx]: https://nginx.org/en/docs/ +[fcgi]: http://www.mit.edu/~yandros/doc/specs/fcgi-spec.html +[rp-ws]: https://nginx.org/en/docs/http/websocket.html + +[ng-xm]: https://www.nginx.com/resources/wiki/extending/ +[abci-event]: https://pkg.go.dev/github.com/tendermint/tendermint/abci/types#Event +[rfc001]: https://github.com/tendermint/tendermint/blob/master/docs/rfc/rfc-001-storage-engine.rst +[rfc002]: https://github.com/tendermint/tendermint/blob/master/docs/rfc/rfc-002-ipc-ecosystem.md +[i3380]: https://github.com/tendermint/tendermint/issues/3380 +[i6439]: https://github.com/tendermint/tendermint/issues/6439 +[i6729]: https://github.com/tendermint/tendermint/issues/6729 +[i7247]: https://github.com/tendermint/tendermint/issues/7247 +[i7273]: https://github.com/tendermint/tendermint/issues/7273 diff --git a/docs/architecture/adr-076-combine-spec-repo.md b/docs/architecture/adr-076-combine-spec-repo.md new file mode 100644 index 0000000000..a6365da5b8 --- 
/dev/null
+++ b/docs/architecture/adr-076-combine-spec-repo.md
@@ -0,0 +1,112 @@
+# ADR 076: Combine Spec and Tendermint Repositories
+
+## Changelog
+
+- 2022-02-04: Initial Draft. (@tychoish)
+
+## Status
+
+Accepted.
+
+## Context
+
+While the specification for Tendermint was originally in the same
+repository as the Go implementation, at some point the specification
+was split from the core repository and maintained separately from the
+implementation. While this makes sense in promoting a conceptual
+separation of specification and implementation, in practice this
+separation was a premature optimization, apparently aimed at supporting
+alternate implementations of Tendermint.
+
+The operational and documentary burden of maintaining a separate
+spec repo has not returned value to justify its cost. There are no active
+projects to develop alternate implementations of Tendermint based on the
+common specification, and having separate repositories creates an ongoing
+burden to coordinate versions, documentation, and releases.
+
+## Decision
+
+The specification repository will be merged back into the Tendermint
+core repository.
+
+Stakeholders, including representatives from the maintainers of the
+spec, the Go implementation, and the Tendermint Rust library, agreed
+to merge the repositories in the Tendermint core dev meeting on 27
+January 2022; these included @williambanfield, @cmwaters, @creachadair, and
+@thanethomson.
+
+## Alternative Approaches
+
+The main alternative we considered was to keep separate repositories,
+and to introduce a coordinated versioning scheme between the two, so
+that users could figure out which spec versions go with which versions
+of the core implementation.
+
+We decided against this on the grounds that it would further complicate
+the release process for _both_ repositories, without mitigating any of
+the other existing issues.
+
+## Detailed Design
+
+Clone and merge the master branch of the `tendermint/spec` repository
+as a branch of the `tendermint/tendermint` repository, to ensure the commit
+history of both repositories remains intact.
+
+### Implementation Instructions
+
+1. Within the `tendermint` repository, execute the following commands
+   to add a new branch with the history of the master branch of `spec`:
+
+   ```bash
+   git remote add spec git@github.com:tendermint/spec.git
+   git fetch spec
+   git checkout -b spec-master spec/master
+   mkdir spec
+   git ls-tree -z --name-only HEAD | xargs -0 -I {} git mv {} spec/
+   git commit -m "spec: organize specification prior to merge"
+   git checkout -b spec-merge-mainline origin/master
+   git merge --allow-unrelated-histories spec-master
+   ```
+
+   This merges the spec into the `tendermint/tendermint` repository as
+   a normal branch. This commit can also be backported to the 0.35
+   branch, if needed.
+
+2. Migrate outstanding issues from `tendermint/spec` to the
+   `tendermint/tendermint` repository.
+
+3. In the specification repository, add a redirect to the README and mark
+   the repository as archived.
+
+
+## Consequences
+
+### Positive
+
+Easier maintenance for the specification will obviate a number of
+complicated and annoying versioning problems, and will help prevent the
+possibility of the specification and the implementation drifting apart.
+
+Additionally, co-locating the specification will help encourage
+cross-pollination and collaboration between engineers focusing on the
+specification and the protocol and engineers focusing on the implementation.
+ +### Negative + +Co-locating the spec and Go implementation has the potential effect of +prioritizing the Go implementation with regards to the spec, and +making it difficult to think about alternate implementations of the +Tendermint algorithm. Although we may want to foster additional +Tendermint implementations in the future, this isn't an active goal +in our current roadmap, and *not* merging these repos doesn't +change the fact that the Go implementation of Tendermint is already the +primary implementation. + +### Neutral + +N/A + +## References + +- https://github.com/tendermint/spec +- https://github.com/tendermint/tendermint diff --git a/docs/architecture/adr-077-block-retention.md b/docs/architecture/adr-077-block-retention.md new file mode 100644 index 0000000000..714b4810af --- /dev/null +++ b/docs/architecture/adr-077-block-retention.md @@ -0,0 +1,109 @@ +# ADR 077: Configurable Block Retention + +## Changelog + +- 2020-03-23: Initial draft (@erikgrinaker) +- 2020-03-25: Use local config for snapshot interval (@erikgrinaker) +- 2020-03-31: Use ABCI commit response for block retention hint +- 2020-04-02: Resolved open questions +- 2021-02-11: Migrate to tendermint repo (Originally [RFC 001](https://github.com/tendermint/spec/pull/84)) + +## Author(s) + +- Erik Grinaker (@erikgrinaker) + +## Context + +Currently, all Tendermint nodes contain the complete sequence of blocks from genesis up to some height (typically the latest chain height). This will no longer be true when the following features are released: + +- [Block pruning](https://github.com/tendermint/tendermint/issues/3652): removes historical blocks and associated data (e.g. validator sets) up to some height, keeping only the most recent blocks. + +- [State sync](https://github.com/tendermint/tendermint/issues/828): bootstraps a new node by syncing state machine snapshots at a given height, but not historical blocks and associated data. + +To maintain the integrity of the chain, the use of these features must be coordinated such that necessary historical blocks will not become unavailable or lost forever. In particular: + +- Some nodes should have complete block histories, for auditability, querying, and bootstrapping. + +- The majority of nodes should retain blocks longer than the Cosmos SDK unbonding period, for light client verification. + +- Some nodes must take and serve state sync snapshots with snapshot intervals less than the block retention periods, to allow new nodes to state sync and then replay blocks to catch up. + +- Applications may not persist their state on commit, and require block replay on restart. + +- Only a minority of nodes can be state synced within the unbonding period, for light client verification and to serve block histories for catch-up. + +However, it is unclear if and how we should enforce this. It may not be possible to technically enforce all of these without knowing the state of the entire network, but it may also be unrealistic to expect this to be enforced entirely through social coordination. This is especially unfortunate since the consequences of misconfiguration can be permanent chain-wide data loss. 
+
+## Proposal
+
+Add a new field `retain_height` to the ABCI `ResponseCommit` message:
+
+```proto
+service ABCIApplication {
+  rpc Commit(RequestCommit) returns (ResponseCommit);
+}
+
+message RequestCommit {}
+
+message ResponseCommit {
+  // reserve 1
+  bytes data = 2; // the Merkle root hash
+  uint64 retain_height = 3; // the oldest block height to retain
+}
+```
+
+Upon ABCI `Commit`, which finalizes execution of a block in the state machine, Tendermint removes all data for heights lower than `retain_height`. This allows the state machine to control block retention, which is preferable since only it can determine the significance of historical blocks. By default (i.e. with `retain_height=0`) all historical blocks are retained.
+
+Removed data includes not only blocks, but also headers, commit info, consensus params, validator sets, and so on. In the first iteration this will be done synchronously, since the number of heights removed for each run is assumed to be small (often 1) in the typical case. It can be made asynchronous at a later time if this is shown to be necessary.
+
+Since `retain_height` is dynamic, it is possible for it to refer to a height which has already been removed. For example, commit at height 100 may return `retain_height=90` while commit at height 101 may return `retain_height=80`. This is allowed and will be ignored; it is the application's responsibility to return appropriate values.
+
+State sync will eventually support backfilling heights, via e.g. a snapshot metadata field `backfill_height`, but in the initial version it will have a fully truncated block history.
+
+## Cosmos SDK Example
+
+As an example, we'll consider how the Cosmos SDK might make use of this. The specific details should be discussed in a separate SDK proposal.
+
+The returned `retain_height` would be the lowest height that satisfies the following constraints (a sketch of the computation follows the diagram below):
+
+- Unbonding time: the time interval in which validators can be economically punished for misbehavior. Blocks in this interval must be auditable e.g. by the light client.
+
+- IAVL snapshot interval: the block interval at which the underlying IAVL database is persisted to disk, e.g. every 10000 heights. Blocks since the last IAVL snapshot must be available for replay on application restart.
+
+- State sync snapshots: blocks since the _oldest_ available snapshot must be available for state sync nodes to catch up (oldest because a node may be restoring an old snapshot while a new snapshot was taken).
+
+- Local config: archive nodes may want to retain more or all blocks, e.g. via a local config option `min-retain-blocks`. There may also be a need to vary retention for other nodes, e.g. sentry nodes which do not need historical blocks.
+
+![Cosmos SDK block retention diagram](img/block-retention.png)
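+A sketch of how an application might combine these constraints follows; the
+parameter names and inputs are illustrative assumptions, and the real logic
+belongs in the separate SDK proposal.
+
+```go
+package app
+
+// retainHeight returns the lowest height satisfying the constraints above.
+func retainHeight(height, unbondingBlocks, iavlSnapshotInterval, oldestSnapshotHeight, minRetainBlocks int64) int64 {
+  retain := height
+  keep := func(h int64) {
+    if h < retain {
+      retain = h
+    }
+  }
+  keep(height - unbondingBlocks) // light client / evidence window
+  if iavlSnapshotInterval > 0 {
+    keep(height - (height % iavlSnapshotInterval)) // replay since the last IAVL snapshot
+  }
+  keep(oldestSnapshotHeight)     // catch-up from the oldest state sync snapshot
+  keep(height - minRetainBlocks) // local operator override, e.g. archive nodes
+  if retain <= 0 {
+    return 0 // retain_height=0 retains all blocks
+  }
+  return retain
+}
+```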
+
+## Status
+
+Accepted
+
+## Consequences
+
+### Positive
+
+- Application-specified block retention allows the application to take all relevant factors into account and prevent necessary blocks from being accidentally removed.
+
+- Node operators can independently decide whether they want to provide complete block histories (if local configuration for this is provided) and snapshots.
+
+### Negative
+
+- Social coordination is required to run archival nodes; failure to do so may lead to permanent loss of historical blocks.
+
+- Social coordination is required to run snapshot nodes; failure to do so may lead to inability to run state sync, and inability to bootstrap new nodes at all if no archival nodes are online.
+
+### Neutral
+
+- Reduced block retention requires application changes, and cannot be controlled directly in Tendermint.
+
+- Application-specified block retention may set a lower bound on disk space requirements for all nodes.
+
+## References
+
+- State sync ADR: <https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-042-state-sync.md>
+
+- State sync issue: <https://github.com/tendermint/tendermint/issues/828>
+
+- Block pruning issue: <https://github.com/tendermint/tendermint/issues/3652>
diff --git a/docs/architecture/adr-078-nonzero-genesis.md b/docs/architecture/adr-078-nonzero-genesis.md
new file mode 100644
index 0000000000..bd9c030f0a
--- /dev/null
+++ b/docs/architecture/adr-078-nonzero-genesis.md
@@ -0,0 +1,82 @@
+# ADR 078: Non-Zero Genesis
+
+## Changelog
+
+- 2020-07-26: Initial draft (@erikgrinaker)
+- 2020-07-28: Use weak chain linking, i.e. `predecessor` field (@erikgrinaker)
+- 2020-07-31: Drop chain linking (@erikgrinaker)
+- 2020-08-03: Add `State.InitialHeight` (@erikgrinaker)
+- 2021-02-11: Migrate to tendermint repo (Originally [RFC 002](https://github.com/tendermint/spec/pull/119))
+
+## Author(s)
+
+- Erik Grinaker (@erikgrinaker)
+
+## Context
+
+The recommended upgrade path for block protocol-breaking upgrades is currently to hard fork the
+chain (see e.g. the [`cosmoshub-3` upgrade](https://blog.cosmos.network/cosmos-hub-3-upgrade-announcement-39c9da941aee)).
+This is done by halting all validators at a predetermined height, exporting the application
+state via application-specific tooling, and creating an entirely new chain using the exported
+application state.
+
+As far as Tendermint is concerned, the upgraded chain is a completely separate chain, with e.g.
+a new chain ID and genesis file. Notably, the new chain starts at height 1, and has none of the
+old chain's block history. This causes problems for integrators, e.g. coin exchanges and
+wallets, that assume a monotonically increasing height for a given blockchain. Users also find
+it confusing that a given height can now refer to distinct states depending on the chain
+version.
+
+An ideal solution would be to always retain block backwards compatibility in such a way that chain
+history is never lost on upgrades. However, this may require a significant amount of engineering
+work that is not viable for the planned Stargate release (Tendermint 0.34), and may prove too
+restrictive for future development.
+
+As a first step, allowing the new chain to start from an initial height specified in the genesis
+file would at least provide monotonically increasing heights. There was a proposal to include the
+last block header of the previous chain as well, but since the genesis file is not verified and
+hashed (only specific fields are) this would not be trustworthy.
+
+External tooling will be required to map historical heights onto e.g. archive nodes that contain
+blocks from previous chain versions. Tendermint will not include any such functionality.
+
+## Proposal
+
+Tendermint will allow chains to start from an arbitrary initial height:
+
+- A new field `initial_height` is added to the genesis file, defaulting to `1`. It can be set to any
+non-negative integer, and `0` is considered equivalent to `1` (see the sketch below).
+
+- A new field `InitialHeight` is added to the ABCI `RequestInitChain` message, with the same value
+and semantics as the genesis field.
+
+- A new field `InitialHeight` is added to the `state.State` struct, where `0` is considered invalid.
+  Including the field here simplifies implementation, since the genesis value does not have to be
+  propagated throughout the code base separately, but it is not strictly necessary.
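+As a small illustration of the genesis-field semantics, a hypothetical
+normalization helper (not part of the proposal itself) might look like:
+
+```go
+package genesis
+
+import "fmt"
+
+// normalizeInitialHeight applies the proposed semantics: a negative value
+// is invalid, and 0 is treated as equivalent to 1.
+func normalizeInitialHeight(h int64) (int64, error) {
+  if h < 0 {
+    return 0, fmt.Errorf("initial_height must be non-negative, got %d", h)
+  }
+  if h == 0 {
+    return 1, nil
+  }
+  return h, nil
+}
+```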
+
+ABCI applications may have to be updated to handle arbitrary initial heights; otherwise, processing
+of the initial block may fail.
+
+## Status
+
+Accepted
+
+## Consequences
+
+### Positive
+
+- Heights can be unique throughout the history of a "logical" chain, across hard fork upgrades.
+
+### Negative
+
+- Upgrades still cause loss of block history.
+
+- Integrators will have to map height ranges to specific archive nodes/networks to query history.
+
+### Neutral
+
+- There is no explicit link to the last block of the previous chain.
+
+## References
+
+- [#2543: Allow genesis file to start from non-zero height w/ prev block header](https://github.com/tendermint/tendermint/issues/2543)
diff --git a/docs/architecture/adr-079-ed25519-verification.md b/docs/architecture/adr-079-ed25519-verification.md
new file mode 100644
index 0000000000..c20869e6c4
--- /dev/null
+++ b/docs/architecture/adr-079-ed25519-verification.md
@@ -0,0 +1,57 @@
+# ADR 079: Ed25519 Verification
+
+## Changelog
+
+- 2020-08-21: Initial RFC
+- 2021-02-11: Migrate RFC to tendermint repo (Originally [RFC 003](https://github.com/tendermint/spec/pull/144))
+
+## Author(s)
+
+- Marko (@marbar3778)
+
+## Context
+
+Ed25519 keys are currently the only supported key type for Tendermint validators. Tendermint-Go wraps the ed25519 key implementation from the Go standard library. As more clients are implemented to communicate with the canonical Tendermint implementation (Tendermint-Go), different implementations of ed25519 will be used. Because [RFC 8032](https://www.rfc-editor.org/rfc/rfc8032.html) does not guarantee implementation compatibility, Tendermint clients must come to an agreement on how to guarantee it. [Zcash](https://z.cash/) has multiple implementations of its client and has identified this as a problem as well. The team at Zcash has made a proposal to address this issue, [Zcash improvement proposal 215](https://zips.z.cash/zip-0215).
+
+## Proposal
+
+- Tendermint-Go would adopt [hdevalence/ed25519consensus](https://github.com/hdevalence/ed25519consensus).
+  - This library implements `ed25519.Verify()` in accordance with ZIP 215. Tendermint-Go will continue to use `crypto/ed25519` for signing and key generation.
+
+- Tendermint-rs would adopt [ed25519-zebra](https://github.com/ZcashFoundation/ed25519-zebra)
+  - related [issue](https://github.com/informalsystems/tendermint-rs/issues/355)
+
+Signature verification is one of the major bottlenecks of Tendermint-Go. Batch verification cannot be used unless all implementations follow the same consensus rules, and ZIP 215 makes verification safe in consensus-critical areas.
+
+This change constitutes a breaking change, and therefore must be done in a major release. No changes to validator keys or operations will be needed for this change to be enabled.
+
+This change has no impact on signature aggregation. To enable signature aggregation, Tendermint would have to use a different signature scheme (Schnorr, BLS, ...). Secondly, this change will enable safe batch verification for the Tendermint-Go client. Batch verification for the Rust client is already supported in the library being used.
+
+As part of the acceptance of this proposal, it would be best to contract or discuss with a third party the process of conducting a security review of the Go library.
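+
+For illustration, a minimal sketch of the proposed split in Go: signing and key
+generation stay on the standard library, while verification goes through the
+ZIP-215-compliant library (a sketch assuming the published `ed25519consensus`
+API, not Tendermint's actual integration):
+
+```go
+package main
+
+import (
+	"crypto/ed25519"
+	"fmt"
+
+	"github.com/hdevalence/ed25519consensus"
+)
+
+func main() {
+	// Key generation and signing continue to use crypto/ed25519.
+	pub, priv, err := ed25519.GenerateKey(nil)
+	if err != nil {
+		panic(err)
+	}
+	msg := []byte("vote bytes")
+	sig := ed25519.Sign(priv, msg)
+
+	// Verification uses the ZIP-215-compliant implementation, so all clients
+	// agree on exactly which signatures are valid.
+	fmt.Println(ed25519consensus.Verify(pub, msg, sig)) // true
+
+	// Batch verification, which ZIP 215 makes safe for consensus use.
+	v := ed25519consensus.NewBatchVerifier()
+	v.Add(pub, msg, sig)
+	fmt.Println(v.Verify()) // true
+}
+```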
+
+## Status
+
+Proposed
+
+## Consequences
+
+### Positive
+
+- Consistent signature verification across implementations
+- Enable safe batch verification
+
+### Negative
+
+#### Tendermint-Go
+
+- Third-party dependency
+  - The library has not gone through a security review.
+  - Unclear maintenance schedule
+- Fragmentation of the ed25519 implementation for Go: verification is done using a third-party library while the rest
+  uses the Go standard library
+
+### Neutral
+
+## References
+
+[It’s 255:19AM. Do you know what your validation criteria are?](https://hdevalence.ca/blog/2020-10-04-its-25519am)
diff --git a/docs/architecture/adr-080-reverse-sync.md b/docs/architecture/adr-080-reverse-sync.md
new file mode 100644
index 0000000000..57d747fc8d
--- /dev/null
+++ b/docs/architecture/adr-080-reverse-sync.md
@@ -0,0 +1,203 @@
+# ADR 080: ReverseSync - fetching historical data
+
+## Changelog
+
+- 2021-02-11: Migrate to tendermint repo (Originally [RFC 005](https://github.com/tendermint/spec/pull/224))
+- 2021-04-19: Use P2P to gossip necessary data for reverse sync.
+- 2021-03-03: Simplify proposal to the state sync case.
+- 2021-02-17: Add notes on asynchronicity of processes.
+- 2020-12-10: Rename backfill blocks to reverse sync.
+- 2020-11-25: Initial draft.
+
+## Author(s)
+
+- Callum Waters (@cmwaters)
+
+## Context
+
+Two new features: [Block pruning](https://github.com/tendermint/tendermint/issues/3652)
+and [State sync](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-042-state-sync.md)
+meant nodes no longer needed a complete history of the blockchain. This
+introduced some challenges of its own which were covered and subsequently
+tackled with [RFC-001](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-077-block-retention.md).
+The RFC allowed applications to set a block retention height, an upper bound on
+which blocks would be pruned. However, nodes that state sync past this upper bound
+(which is necessary as snapshots must be saved within the trusting period for
+the assisting light client to verify) have no means of backfilling the blocks
+to meet the retention limit. This could be a problem, as nodes that state sync and
+then eventually switch to consensus (or fast sync) may not have the block and
+validator history to verify evidence, causing them to panic if they see a 2/3
+commit on what they believe to be an invalid block.
+
+Thus, this RFC sets out to instil a minimum block history invariant amongst
+honest nodes.
+
+## Proposal
+
+A backfill mechanism can simply be defined as an algorithm for fetching,
+verifying, and storing headers and validator sets of a height prior to the
+current base of the node's blockchain. To match the terminology used for
+other data-retrieval protocols (i.e. fast sync and state sync), we
+call this method **ReverseSync**.
+
+We will define the mechanism in four sections:
+
+- Usage
+- Design
+- Verification
+- Termination
+
+### Usage
+
+For now, we focus purely on the case of a state syncing node, which, after
+syncing to a height, will need to verify historical data in order to be capable
+of processing new blocks. We can denote the earliest height that the node will
+need to verify and store in order to be able to verify any evidence that might
+arise as the `max_historical_height`/`time`. Both height and time are necessary
+as this maps to the BFT time used for evidence expiration. After acquiring
+`State`, we calculate these parameters as:
+
+```go
+max_historical_height = max(state.InitialHeight, state.LastBlockHeight - state.ConsensusParams.EvidenceAgeHeight)
+max_historical_time = max(GenesisTime, state.LastBlockTime.Add(-state.ConsensusParams.EvidenceAgeTime))
+```
+
+Before starting either fast sync or consensus, we then run the following
+synchronous process:
+
+```go
+func ReverseSync(max_historical_height int64, max_historical_time time.Time) error
+```
+
+Where we fetch and verify blocks until a block `A` where
+`A.Height <= max_historical_height` and `A.Time <= max_historical_time`.
+
+Upon successfully reverse syncing, a node can now safely continue. As this
+feature is only used as part of state sync, one can think of this as merely an
+extension to it.
+
+In the future we may want to extend this functionality to allow nodes to fetch
+historical blocks for reasons of accountability or data accessibility.
+
+### Design
+
+This section will provide a high level overview of some of the more important
+characteristics of the design, saving the more tedious details for an ADR.
+
+#### P2P
+
+Implementation of this RFC will require the addition of a new channel and two
+new messages.
+
+```proto
+message LightBlockRequest {
+  uint64 height = 1;
+}
+```
+
+```proto
+message LightBlockResponse {
+  Header header = 1;
+  Commit commit = 2;
+  ValidatorSet validator_set = 3;
+}
+```
+
+The P2P path may also enable P2P-networked light clients and a state sync that
+doesn't need to rely on RPC.
+
+### Verification
+
+ReverseSync is used to fetch the following data structures:
+
+- `Header`
+- `Commit`
+- `ValidatorSet`
+
+Nodes will also need to be able to verify these. This can be achieved by first
+retrieving the header at the base height from the block store. From this trusted
+header, the node hashes each of the three data structures and checks that they are correct.
+
+1. The trusted header's last block ID matches the hash of the new header
+
+    ```go
+    header[height].LastBlockID == hash(header[height-1])
+    ```
+
+2. The trusted header's last commit hash matches the hash of the new commit
+
+    ```go
+    header[height].LastCommitHash == hash(commit[height-1])
+    ```
+
+3. Given that the node now trusts the new header, check that the header's validator set
+   hash matches the hash of the validator set
+
+    ```go
+    header[height-1].ValidatorsHash == hash(validatorSet[height-1])
+    ```
+
+### Termination
+
+ReverseSync draws a lot of parallels with fast sync. An important consideration
+for fast sync that also extends to ReverseSync is termination. ReverseSync will
+finish its task when one of the following conditions has been met:
+
+1. It reaches a block `A` where `A.Height <= max_historical_height` and
+`A.Time <= max_historical_time`.
+2. None of its peers report having the block at the height below the
+process's current block.
+3. A global timeout.
+
+This implies that we can't guarantee adequate history and thus the term
+"invariant" can't be used in the strictest sense. In the case that the first
+condition isn't met, the node will log an error and optimistically attempt
+to continue with either fast sync or consensus.
+
+## Alternative Solutions
+
+The need for a minimum block history invariant stems purely from the need to
+validate evidence (although there may be some application-relevant needs as
+well). Because of this, an alternative could be to simply trust whatever the
+2/3+ majority has agreed upon and, in the case where a node is at the head of the
+blockchain, simply abstain from voting.
+
+As it stands, if 2/3+ vote on evidence a node can't verify, or, in the same manner,
+if 2/3+ vote on a header that the node sees as invalid (perhaps due to a different
+app hash), the node will halt.
+
+Another alternative is the method with which the relevant data is retrieved.
+Instead of introducing new messages to the P2P layer, RPC could have been
+used.
+
+The aforementioned data is already available via the following RPC endpoints:
+`/commit` for `Header`s and `/validators` for `ValidatorSet`s. It was
+decided predominantly due to the instability of the current RPC infrastructure
+that P2P be used instead.
+
+## Status
+
+Proposed
+
+## Consequences
+
+### Positive
+
+- Ensures a minimum block history invariant for honest nodes. This will allow
+  nodes to verify evidence.
+
+### Negative
+
+- State sync will be slower, as more processing is required.
+
+### Neutral
+
+- By having validator sets served through p2p, it becomes easier to
+extend p2p support to light clients and state sync.
+- In the future, it may also be possible to extend this feature to allow for
+nodes to freely fetch and verify prior blocks.
+
+## References
+
+- [RFC-001: Block retention](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-077-block-retention.md)
+- [Original issue](https://github.com/tendermint/tendermint/issues/4629)
diff --git a/docs/architecture/adr-081-protobuf-mgmt.md b/docs/architecture/adr-081-protobuf-mgmt.md
new file mode 100644
index 0000000000..1199cff1b4
--- /dev/null
+++ b/docs/architecture/adr-081-protobuf-mgmt.md
@@ -0,0 +1,201 @@
+# ADR 081: Protocol Buffers Management
+
+## Changelog
+
+- 2022-02-28: First draft
+
+## Status
+
+Accepted
+
+[Tracking issue](https://github.com/tendermint/tendermint/issues/8121)
+
+## Context
+
+At present, we manage the [Protocol Buffers] schema files ("protos") that define
+our wire-level data formats within the Tendermint repository itself (see the
+[`proto`](../../proto/) directory). Recently, we have been making use of [Buf],
+both locally and in CI, in order to generate Go stubs, and lint and check
+`.proto` files for breaking changes.
+
+The version of Buf used at the time of this decision was `v1beta1`, and it was
+discussed in [\#7975] and in weekly calls whether we should upgrade to
+`v1` and harmonize our approach with that used by the Cosmos SDK. The team
+managing the Cosmos SDK was primarily interested in having our protos versioned
+and easily accessible from the [Buf] registry.
+
+The three main sets of stakeholders for the `.proto` files and their needs, as
+currently understood, are as follows.
+
+1. Tendermint needs Go code generated from `.proto` files.
+2. Consumers of Tendermint's `.proto` files, specifically projects that want to
+   interoperate with Tendermint and need to generate code for their own
+   programming language, want to be able to access these files in a reliable and
+   efficient way.
+3. The Tendermint Core team wants to provide stable interfaces that are as easy
+   as possible to maintain, on which consumers can depend, and to be able to
+   notify those consumers promptly when those interfaces change. To this end, we
+   want to:
+   1. Prevent any breaking changes from being introduced in minor/patch releases
+      of Tendermint.
Only major version updates should be able to contain + breaking interface changes. + 2. Prevent generated code from diverging from the Protobuf schema files. + +There was also discussion surrounding the notion of automated documentation +generation and hosting, but it is not clear at this time whether this would be +that valuable to any of our stakeholders. What will, of course, be valuable at +minimum would be better documentation (in comments) of the `.proto` files +themselves. + +## Alternative Approaches + +### Meeting stakeholders' needs + +1. Go stub generation from protos. We could use: + 1. [Buf]. This approach has been rather cumbersome up to this point, and it + is not clear what Buf really provides beyond that which `protoc` provides + to justify the additional complexity in configuring Buf for stub + generation. + 2. [protoc] - the Protocol Buffers compiler. +2. Notification of breaking changes: + 1. Buf in CI for all pull requests to *release* branches only (and not on + `master`). + 2. Buf in CI on every pull request to every branch (this was the case at the + time of this decision, and the team decided that the signal-to-noise ratio + for this approach was too low to be of value). +3. `.proto` linting: + 1. Buf in CI on every pull request +4. `.proto` formatting: + 1. [clang-format] locally and a [clang-format GitHub Action] in CI to check + that files are formatted properly on every pull request. +5. Sharing of `.proto` files in a versioned, reliable manner: + 1. Consumers could simply clone the Tendermint repository, check out a + specific commit, tag or branch and manually copy out all of the `.proto` + files they need. This requires no effort from the Tendermint Core team and + will continue to be an option for consumers. The drawback of this approach + is that it requires manual coding/scripting to implement and is brittle in + the face of bigger changes. + 2. Uploading our `.proto` files to Buf's registry on every release. This is + by far the most seamless for consumers of our `.proto` files, but requires + the dependency on Buf. This has the additional benefit that the Buf + registry will automatically [generate and host + documentation][buf-docs-gen] for these protos. + 3. We could create a process that, upon release, creates a `.zip` file + containing our `.proto` files. + +### Popular alternatives to Buf + +[Prototool] was not considered as it appears deprecated, and the ecosystem seems +to be converging on Buf at this time. + +### Tooling complexity + +The more tools we have in our build/CI processes, the more complex and fragile +repository/CI management becomes, and the longer it takes to onboard new team +members. Maintainability is a core concern here. + +### Buf sustainability and costs + +One of the primary considerations regarding the usage of Buf is whether, for +example, access to its registry will eventually become a +paid-for/subscription-based service and whether this is valuable enough for us +and the ecosystem to pay for such a service. At this time, it appears as though +Buf will never charge for hosting open source projects' protos. + +Another consideration was Buf's sustainability as a project - what happens when +their resources run out? Will there be a strong and broad enough open source +community to continue maintaining it? + +### Local Buf usage options + +Local usage of Buf (i.e. not in CI) can be accomplished in two ways: + +1. Installing the relevant tools individually. +2. By way of its [Docker image][buf-docker]. 
+ +Local installation of Buf requires developers to manually keep their toolchains +up-to-date. The Docker option comes with a number of complexities, including +how the file system permissions of code generated by a Docker container differ +between platforms (e.g. on Linux, Buf-generated code ends up being owned by +`root`). + +The trouble with the Docker-based approach is that we make use of the +[gogoprotobuf] plugin for `protoc`. Continuing to use the Docker-based approach +to using Buf will mean that we will have to continue building our own custom +Docker image with embedded gogoprotobuf. + +Along these lines, we could eventually consider coming up with a [Nix]- or +[redo]-based approach to developer tooling to ensure tooling consistency across +the team and for anyone who wants to be able to contribute to Tendermint. + +## Decision + +1. We will adopt Buf for now for proto generation, linting, breakage checking + and its registry (mainly in CI, with optional usage locally). +2. Failing CI when checking for breaking changes in `.proto` files will only + happen when performing minor/patch releases. +3. Local tooling will be favored over Docker-based tooling. + +## Detailed Design + +We currently aim to: + +1. Update to Buf `v1` to facilitate linting, breakage checking and uploading to + the Buf registry. +2. Configure CI appropriately for proto management: + 1. Uploading protos to the Buf registry on every release (e.g. the + [approach][cosmos-sdk-buf-registry-ci] used by the Cosmos SDK). + 2. Linting on every pull request (e.g. the + [approach][cosmos-sdk-buf-linting-ci] used by the Cosmos SDK). The linter + passing should be considered a requirement for accepting PRs. + 3. Checking for breaking changes in minor/patch version releases and failing + CI accordingly - see [\#8003]. + 4. Add [clang-format GitHub Action] to check `.proto` file formatting. Format + checking should be considered a requirement for accepting PRs. +3. Update the Tendermint [`Makefile`](../../Makefile) to primarily facilitate + local Protobuf stub generation, linting, formatting and breaking change + checking. More specifically: + 1. This includes removing the dependency on Docker and introducing the + dependency on local toolchain installation. CI-based equivalents, where + relevant, will rely on specific GitHub Actions instead of the Makefile. + 2. Go code generation will rely on `protoc` directly. + +## Consequences + +### Positive + +- We will still offer Go stub generation, proto linting and breakage checking. +- Breakage checking will only happen on minor/patch releases to increase the + signal-to-noise ratio in CI. +- Versioned protos will be made available via Buf's registry upon every release. + +### Negative + +- Developers/contributors will need to install the relevant Protocol + Buffers-related tooling (Buf, gogoprotobuf, clang-format) locally in order to + build, lint, format and check `.proto` files for breaking changes. 
+ +### Neutral + +## References + +- [Protocol Buffers] +- [Buf] +- [\#7975] +- [protoc] - The Protocol Buffers compiler + +[Protocol Buffers]: https://developers.google.com/protocol-buffers +[Buf]: https://buf.build/ +[\#7975]: https://github.com/tendermint/tendermint/pull/7975 +[protoc]: https://github.com/protocolbuffers/protobuf +[clang-format]: https://clang.llvm.org/docs/ClangFormat.html +[clang-format GitHub Action]: https://github.com/marketplace/actions/clang-format-github-action +[buf-docker]: https://hub.docker.com/r/bufbuild/buf +[cosmos-sdk-buf-registry-ci]: https://github.com/cosmos/cosmos-sdk/blob/e6571906043b6751951a42b6546431b1c38b05bd/.github/workflows/proto-registry.yml +[cosmos-sdk-buf-linting-ci]: https://github.com/cosmos/cosmos-sdk/blob/e6571906043b6751951a42b6546431b1c38b05bd/.github/workflows/proto.yml#L15 +[\#8003]: https://github.com/tendermint/tendermint/issues/8003 +[Nix]: https://nixos.org/ +[gogoprotobuf]: https://github.com/gogo/protobuf +[Prototool]: https://github.com/uber/prototool +[buf-docs-gen]: https://docs.buf.build/bsr/documentation +[redo]: https://redo.readthedocs.io/en/latest/ diff --git a/docs/introduction/architecture.md b/docs/introduction/architecture.md index 3b70e70151..27e1b34c66 100644 --- a/docs/introduction/architecture.md +++ b/docs/introduction/architecture.md @@ -61,7 +61,7 @@ Here are some relevant facts about TCP: ![tcp](../imgs/tcp-window.png) -In order to have performant TCP connections under the conditions created in Tendermint, we've created the `mconnection`, or the multiplexing connection. It is our own protocol built on top of TCP. It lets us reuse TCP connections to minimize overhead, and it keeps the window size high by sending auxiliary messages when necessary. +In order to have performant TCP connections under the conditions created in Tendermint, we've created the `mconnection`, or the multiplexing connection. It is our own protocol built on top of TCP. It lets us reuse TCP connections to minimize overhead, and it keeps the window size high by sending auxiliary messages when necessary. The `mconnection` is represented by a struct, which contains a batch of messages, read and write buffers, and a map of channel IDs to reactors. It communicates with TCP via file descriptors, which it can write to. There is one `mconnection` per peer connection. diff --git a/docs/introduction/what-is-tendermint.md b/docs/introduction/what-is-tendermint.md index 2386626eac..417152d748 100644 --- a/docs/introduction/what-is-tendermint.md +++ b/docs/introduction/what-is-tendermint.md @@ -68,10 +68,10 @@ Tendermint is in essence similar software, but with two key differences: - It is Byzantine Fault Tolerant, meaning it can only tolerate up to a 1/3 of failures, but those failures can include arbitrary behaviour - - including hacking and malicious attacks. -- It does not specify a particular application, like a fancy key-value - store. Instead, it focuses on arbitrary state machine replication, - so developers can build the application logic that's right for them, + including hacking and malicious attacks. +- It does not specify a particular application, like a fancy key-value + store. Instead, it focuses on arbitrary state machine replication, + so developers can build the application logic that's right for them, from key-value store to cryptocurrency to e-voting platform and beyond. 
### Bitcoin, Ethereum, etc

@@ -104,12 +104,10 @@ to Tendermint, but is more opinionated about how the state is managed, and
requires that all application behaviour runs in potentially many docker
containers, modules it calls "chaincode". It uses an implementation of
[PBFT](http://pmg.csail.mit.edu/papers/osdi99.pdf).
-from a team at IBM that is [augmented to handle potentially
-non-deterministic
-chaincode](https://www.zurich.ibm.com/~cca/papers/sieve.pdf) It is
-possible to implement this docker-based behaviour as a ABCI app in
-Tendermint, though extending Tendermint to handle non-determinism
-remains for future work.
+from a team at IBM that is augmented to handle potentially non-deterministic
+chaincode. It is possible to implement this docker-based behaviour as an ABCI app
+in Tendermint, though extending Tendermint to handle non-determinism remains
+for future work.

[Burrow](https://github.com/hyperledger/burrow) is an implementation of
the Ethereum Virtual Machine and Ethereum transaction mechanics, with
diff --git a/docs/networks/README.md b/docs/networks/README.md
deleted file mode 100644
index 0b14e391be..0000000000
--- a/docs/networks/README.md
+++ /dev/null
@@ -1,16 +0,0 @@
----
-order: 1
-parent:
-  title: Networks
-  order: 1
----
-
-# Overview
-
-Use [Docker Compose](./docker-compose.md) to spin up Tendermint testnets on your
-local machine.
-
-Use [Terraform and Ansible](./terraform-and-ansible.md) to deploy Tendermint
-testnets to the cloud.
-
-See the `tendermint testnet --help` command for more help initializing testnets.
diff --git a/docs/nodes/README.md b/docs/nodes/README.md
index 9be6febf03..fd9056e0dd 100644
--- a/docs/nodes/README.md
+++ b/docs/nodes/README.md
@@ -1,7 +1,7 @@
---
order: 1
parent:
-  title: Nodes
+  title: Node Operators
  order: 4
---
diff --git a/docs/nodes/configuration.md b/docs/nodes/configuration.md
index e0cfe501a5..a55bfb63a2 100644
--- a/docs/nodes/configuration.md
+++ b/docs/nodes/configuration.md
@@ -16,7 +16,8 @@ the parameters set with their default values. It will look something
like the file below, however, double check by inspecting the
`config.toml` created with your version of `tendermint` installed:
-```toml# This is a TOML config file.
+```toml
+# This is a TOML config file.
# For more information, see https://github.com/toml-lang/toml
# NOTE: Any path below can be absolute (e.g.
"/var/myawesomeapp/data") or @@ -33,11 +34,10 @@ like the file below, however, double check by inspecting the proxy-app = "tcp://127.0.0.1:26658" # A custom human readable name for this node -moniker = "ape" - +moniker = "sidewinder" -# Mode of Node: full | validator | seed (default: "validator") -# * validator node (default) +# Mode of Node: full | validator | seed +# * validator node # - all reactors # - with priv_validator_key.json, priv_validator_state.json # * full node @@ -48,11 +48,6 @@ moniker = "ape" # - No priv_validator_key.json, priv_validator_state.json mode = "validator" -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast-sync = true - # Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb # * goleveldb (github.com/syndtr/goleveldb - most popular implementation) # - pure go @@ -120,10 +115,10 @@ laddr = "" client-certificate-file = "" # Client key generated while creating certificates for secure connection -validator-client-key-file = "" +client-key-file = "" # Path to the Root Certificate Authority used to sign both client and server certificates -certificate-authority = "" +root-ca-file = "" ####################################################################### @@ -149,26 +144,10 @@ cors-allowed-methods = ["HEAD", "GET", "POST", ] # A list of non simple headers the client is allowed to use with cross-domain requests cors-allowed-headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -# Deprecated gRPC in the RPC layer of Tendermint will be deprecated in 0.36. -grpc-laddr = "" - -# Maximum number of simultaneous connections. -# Does not include RPC (HTTP&WebSocket) connections. See max-open-connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -# Deprecated gRPC in the RPC layer of Tendermint will be deprecated in 0.36. -grpc-max-open-connections = 900 - # Activate unsafe RPC commands like /dial-seeds and /unsafe-flush-mempool unsafe = false # Maximum number of simultaneous connections (including WebSocket). -# Does not include gRPC connections. See grpc-max-open-connections # If you want to accept a larger number than the default, make sure # you increase your OS limits. # 0 - unlimited. @@ -182,10 +161,37 @@ max-open-connections = 900 max-subscription-clients = 100 # Maximum number of unique queries a given client can /subscribe to -# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to -# the estimated # maximum number of broadcast_tx_commit calls per block. +# If you're using a Local RPC client and /broadcast_tx_commit, set this +# to the estimated maximum number of broadcast_tx_commit calls per block. max-subscriptions-per-client = 5 +# If true, disable the websocket interface to the RPC service. This has +# the effect of disabling the /subscribe, /unsubscribe, and /unsubscribe_all +# methods for event subscription. +# +# EXPERIMENTAL: This setting will be removed in Tendermint v0.37. +experimental-disable-websocket = false + +# The time window size for the event log. 
All events up to this long before
+# the latest (up to EventLogMaxItems) will be available for subscribers to
+# fetch via the /events method. If 0 (the default) the event log and the
+# /events RPC method are disabled.
+event-log-window-size = "0s"
+
+# The maximum number of events that may be retained by the event log. If
+# this value is 0, no upper limit is set. Otherwise, items in excess of
+# this number will be discarded from the event log.
+#
+# Warning: This setting is a safety valve. Setting it too low may cause
+# subscribers to miss events. Try to choose a value higher than the
+# maximum worst-case expected event load within the chosen window size in
+# ordinary operation.
+#
+# For example, if the window size is 10 minutes and the node typically
+# averages 1000 events per ten minutes, but with occasional known spikes of
+# up to 2000, choose a value > 2000.
+event-log-max-items = 0
+
# How long to wait for a tx to be committed during /broadcast_tx_commit.
# WARNING: Using a value larger than 10s will result in increasing the
# global HTTP write timeout, which applies to all connections and endpoints.
@@ -221,9 +227,6 @@ pprof-laddr = ""
#######################################################
[p2p]
-# Enable the legacy p2p layer.
-use-legacy = false
-
# Select the p2p internal queue
queue-type = "priority"
@@ -255,87 +258,48 @@ persistent-peers = ""
# UPNP port forwarding
upnp = false
-# Path to address book
-# TODO: Remove once p2p refactor is complete
-# ref: https:#github.com/tendermint/tendermint/issues/5670
-addr-book-file = "config/addrbook.json"
-
-# Set true for strict address routability rules
-# Set false for private or local networks
-addr-book-strict = true
-
-# Maximum number of inbound peers
-#
-# TODO: Remove once p2p refactor is complete in favor of MaxConnections.
-# ref: https://github.com/tendermint/tendermint/issues/5670
-max-num-inbound-peers = 40
-
-# Maximum number of outbound peers to connect to, excluding persistent peers
-#
-# TODO: Remove once p2p refactor is complete in favor of MaxConnections.
-# ref: https://github.com/tendermint/tendermint/issues/5670
-max-num-outbound-peers = 10
-
# Maximum number of connections (inbound and outbound).
max-connections = 64
# Rate limits the number of incoming connection attempts per IP address.
max-incoming-connection-attempts = 100
-# List of node IDs, to which a connection will be (re)established ignoring any existing limits
-# TODO: Remove once p2p refactor is complete
-# ref: https:#github.com/tendermint/tendermint/issues/5670
-unconditional-peer-ids = ""
+# Set true to enable the peer-exchange reactor
+pex = true
-# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used)
-# TODO: Remove once p2p refactor is complete
-# ref: https:#github.com/tendermint/tendermint/issues/5670
-persistent-peers-max-dial-period = "0s"
+# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
+# Warning: IPs will be exposed at /net_info, for more information https://github.com/tendermint/tendermint/issues/3055
+private-peer-ids = ""
+
+# Toggle to disable guard against peers connecting from the same ip.
+allow-duplicate-ip = false
+
+# Peer connection configuration.
+handshake-timeout = "20s"
+dial-timeout = "3s"
# Time to wait before flushing messages out on the connection
-# TODO: Remove once p2p refactor is complete
-# ref: https:#github.com/tendermint/tendermint/issues/5670
+# TODO: Remove once MConnConnection is removed.
flush-throttle-timeout = "100ms" # Maximum size of a message packet payload, in bytes -# TODO: Remove once p2p refactor is complete -# ref: https:#github.com/tendermint/tendermint/issues/5670 +# TODO: Remove once MConnConnection is removed. max-packet-msg-payload-size = 1400 # Rate at which packets can be sent, in bytes/second -# TODO: Remove once p2p refactor is complete -# ref: https:#github.com/tendermint/tendermint/issues/5670 +# TODO: Remove once MConnConnection is removed. send-rate = 5120000 # Rate at which packets can be received, in bytes/second -# TODO: Remove once p2p refactor is complete -# ref: https:#github.com/tendermint/tendermint/issues/5670 +# TODO: Remove once MConnConnection is removed. recv-rate = 5120000 -# Set true to enable the peer-exchange reactor -pex = true - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -# Warning: IPs will be exposed at /net_info, for more information https://github.com/tendermint/tendermint/issues/3055 -private-peer-ids = "" - -# Toggle to disable guard against peers connecting from the same ip. -allow-duplicate-ip = false - -# Peer connection configuration. -handshake-timeout = "20s" -dial-timeout = "3s" ####################################################### ### Mempool Configuration Option ### ####################################################### [mempool] -# Mempool version to use: -# 1) "v0" - The legacy non-prioritized mempool reactor. -# 2) "v1" (default) - The prioritized mempool reactor. -version = "v1" - recheck = true broadcast = true @@ -391,22 +355,30 @@ ttl-num-blocks = 0 # starting from the height of the snapshot. enable = false -# RPC servers (comma-separated) for light client verification of the synced state machine and -# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding -# header hash obtained from a trusted source, and a period during which validators can be trusted. -# -# For Cosmos SDK-based chains, trust-period should usually be about 2/3 of the unbonding time (~2 -# weeks) during which they can be financially punished (slashed) for misbehavior. +# State sync uses light client verification to verify state. This can be done either through the +# P2P layer or RPC layer. Set this to true to use the P2P layer. If false (default), RPC layer +# will be used. +use-p2p = false + +# If using RPC, at least two addresses need to be provided. They should be compatible with net.Dial, +# for example: "host.example.com:2125" rpc-servers = "" + +# The hash and height of a trusted block. Must be within the trust-period. trust-height = 0 trust-hash = "" + +# The trust period should be set so that Tendermint can detect and gossip misbehavior before +# it is considered expired. For chains based on the Cosmos SDK, one day less than the unbonding +# period should suffice. trust-period = "168h0m0s" # Time to spend discovering snapshots before initiating a restore. discovery-time = "15s" -# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). -# Will create a new, randomly named directory within, and remove it when done. +# Temporary directory for state sync snapshot chunks, defaults to os.TempDir(). +# The synchronizer will create a new, randomly named directory within this directory +# and remove it when the sync is complete. 
temp-dir = "" # The timeout duration before re-requesting a chunk, possibly from a different @@ -416,21 +388,6 @@ chunk-request-timeout = "15s" # The number of concurrent chunk and block fetchers to run (default: 4). fetchers = "4" -####################################################### -### Block Sync Configuration Connections ### -####################################################### -[blocksync] - -# If this node is many blocks behind the tip of the chain, BlockSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -enable = true - -# Block Sync version to use: -# 1) "v0" (default) - the standard block sync implementation -# 2) "v2" - DEPRECATED, please use v0 -version = "v0" - ####################################################### ### Consensus Configuration Options ### ####################################################### @@ -438,32 +395,12 @@ version = "v0" wal-file = "data/cs.wal/wal" -# How long we wait for a proposal block before prevoting nil -timeout-propose = "3s" -# How much timeout-propose increases with each round -timeout-propose-delta = "500ms" -# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) -timeout-prevote = "1s" -# How much the timeout-prevote increases with each round -timeout-prevote-delta = "500ms" -# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil) -timeout-precommit = "1s" -# How much the timeout-precommit increases with each round -timeout-precommit-delta = "500ms" -# How long we wait after committing a block, before starting on the new -# height (this gives us a chance to receive some more precommits, even -# though we already have +2/3). -timeout-commit = "1s" - # How many blocks to look back to check existence of the node's consensus votes before joining consensus # When non-zero, the node will panic upon restart # if the same consensus key was used to sign {double-sign-check-height} last blocks. # So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. double-sign-check-height = 0 -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip-timeout-commit = false - # EmptyBlocks mode and possible interval between empty blocks create-empty-blocks = true create-empty-blocks-interval = "0s" @@ -472,6 +409,50 @@ create-empty-blocks-interval = "0s" peer-gossip-sleep-duration = "100ms" peer-query-maj23-sleep-duration = "2s" +### Unsafe Timeout Overrides ### + +# These fields provide temporary overrides for the Timeout consensus parameters. +# Use of these parameters is strongly discouraged. Using these parameters may have serious +# liveness implications for the validator and for the chain. +# +# These fields will be removed from the configuration file in the v0.37 release of Tendermint. +# For additional information, see ADR-74: +# https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-074-timeout-params.md + +# This field provides an unsafe override of the Propose timeout consensus parameter. +# This field configures how long the consensus engine will wait for a proposal block before prevoting nil. +# If this field is set to a value greater than 0, it will take effect. +# unsafe-propose-timeout-override = 0s + +# This field provides an unsafe override of the ProposeDelta timeout consensus parameter. +# This field configures how much the propose timeout increases with each round. 
+# If this field is set to a value greater than 0, it will take effect.
+# unsafe-propose-timeout-delta-override = 0s
+
+# This field provides an unsafe override of the Vote timeout consensus parameter.
+# This field configures how long the consensus engine will wait after
+# receiving +2/3 votes in a round.
+# If this field is set to a value greater than 0, it will take effect.
+# unsafe-vote-timeout-override = 0s
+
+# This field provides an unsafe override of the VoteDelta timeout consensus parameter.
+# This field configures how much the vote timeout increases with each round.
+# If this field is set to a value greater than 0, it will take effect.
+# unsafe-vote-timeout-delta-override = 0s
+
+# This field provides an unsafe override of the Commit timeout consensus parameter.
+# This field configures how long the consensus engine will wait after receiving
+# +2/3 precommits before beginning the next height.
+# If this field is set to a value greater than 0, it will take effect.
+# unsafe-commit-timeout-override = 0s
+
+# This field provides an unsafe override of the BypassCommitTimeout consensus parameter.
+# This field configures if the consensus engine will wait for the full Commit timeout
+# before proceeding to the next height.
+# If this field is set to true, the consensus engine will proceed to the next height
+# as soon as the node has gathered votes from all of the validators on the network.
+# unsafe-bypass-commit-timeout-override =
+
#######################################################
### Transaction Indexer Configuration Options ###
#######################################################
@@ -546,46 +527,6 @@ transactions every `create-empty-blocks-interval`. For instance, with
Tendermint will only create blocks if there are transactions, or after waiting
30 seconds without receiving any transactions.
-## Consensus timeouts explained
-
-There's a variety of information about timeouts in [Running in
-production](../tendermint-core/running-in-production.md)
-
-You can also find more detailed technical explanation in the spec: [The latest
-gossip on BFT consensus](https://arxiv.org/abs/1807.04938).
-
-```toml
-[consensus]
-...
-
-timeout-propose = "3s"
-timeout-propose-delta = "500ms"
-timeout-prevote = "1s"
-timeout-prevote-delta = "500ms"
-timeout-precommit = "1s"
-timeout-precommit-delta = "500ms"
-timeout-commit = "1s"
-```
-
-Note that in a successful round, the only timeout that we absolutely wait no
-matter what is `timeout-commit`.
-
-Here's a brief summary of the timeouts:
-
-- `timeout-propose` = how long we wait for a proposal block before prevoting
-  nil
-- `timeout-propose-delta` = how much timeout-propose increases with each round
-- `timeout-prevote` = how long we wait after receiving +2/3 prevotes for
-  anything (ie. not a single block or nil)
-- `timeout-prevote-delta` = how much the timeout-prevote increases with each
-  round
-- `timeout-precommit` = how long we wait after receiving +2/3 precommits for
-  anything (ie. not a single block or nil)
-- `timeout-precommit-delta` = how much the timeout-precommit increases with
-  each round
-- `timeout-commit` = how long we wait after committing a block, before starting
-  on the new height (this gives us a chance to receive some more precommits,
-  even though we already have +2/3)

## P2P settings

@@ -597,7 +538,7 @@ This section will cover settings within the p2p section of the `config.toml`.
- `pex` = turns the peer exchange reactor on or off.
Validator node will want the `pex` turned off so it would not begin gossiping to unknown peers on the network. PeX can also be turned off for statically configured networks with fixed network connectivity. For full nodes on open, dynamic networks, it should be turned on.
- `private-peer-ids` = is a comma-separated list of node ids that will _not_ be exposed to other peers (i.e., you will not tell other peers about the ids in this list). This can be filled with a validator's node id.
-Recently the Tendermint Team conducted a refactor of the p2p layer. This lead to multiple config paramters being deprecated and/or replaced.
+Recently the Tendermint Team conducted a refactor of the p2p layer. This led to multiple config parameters being deprecated and/or replaced.
We will cover the new and deprecated parameters below.

### New Parameters

@@ -651,3 +592,27 @@ Example:

```shell
$ psql ... -f state/indexer/sink/psql/schema.sql
```
+
+## Unsafe Consensus Timeout Overrides
+
+Tendermint version v0.36 provides a set of unsafe overrides for the consensus
+timing parameters. These parameters are provided as a safety measure in case of
+unusual timing issues during the upgrade to v0.36 so that an operator may
+override the timings for a single node. These overrides will be completely
+removed in Tendermint v0.37.
+
+- `unsafe-propose-timeout-override`: How long the Tendermint consensus engine will wait
+  for a proposal block before prevoting nil.
+- `unsafe-propose-timeout-delta-override`: How much the propose timeout increases with
+  each round.
+- `unsafe-vote-timeout-override`: How long the consensus engine will wait after
+  receiving +2/3 votes in a round.
+- `unsafe-vote-timeout-delta-override`: How much the vote timeout increases with each
+  round.
+- `unsafe-commit-timeout-override`: How long the consensus engine will wait after
+  receiving +2/3 precommits before beginning the next height.
+- `unsafe-bypass-commit-timeout-override`: Configures if the consensus engine
+  will wait for the full commit timeout before proceeding to the next height. If
+  this field is set to true, the consensus engine will proceed to the next
+  height as soon as the node has gathered votes from all of the validators on
+  the network.
diff --git a/docs/nodes/logging.md b/docs/nodes/logging.md
index 31a9d08d20..9261dd0edf 100644
--- a/docs/nodes/logging.md
+++ b/docs/nodes/logging.md
@@ -50,7 +50,7 @@ little overview what they do.
  they are coming from peers or the application.
- `p2p` Provides an abstraction around peer-to-peer communication. For
  more details, please check out the
-  [README](https://github.com/tendermint/spec/tree/master/spec/p2p).
+  [README](https://github.com/tendermint/tendermint/tree/master/spec/p2p).
- `rpc-server` RPC server. For implementation details, please read the
  [doc.go](https://github.com/tendermint/tendermint/blob/v0.35.x/rpc/jsonrpc/doc.go).
- `state` Represents the latest state and execution submodule, which
@@ -120,7 +120,7 @@ Next follows a standard block creation cycle, where we enter a new round,
propose a block, receive more than 2/3 of prevotes, then precommits and finally
have a chance to commit a block. For details, please refer to [Byzantine
Consensus
-Algorithm](https://github.com/tendermint/spec/blob/master/spec/consensus/consensus.md).
+Algorithm](https://github.com/tendermint/tendermint/blob/master/spec/consensus/consensus.md).

```sh
I[10-04|13:54:30.393] enterNewRound(91/0).
Current: 91/0/RoundStepNewHeight module=consensus
diff --git a/docs/nodes/metrics.md b/docs/nodes/metrics.md
index 6589e044aa..1b2e9f0070 100644
--- a/docs/nodes/metrics.md
+++ b/docs/nodes/metrics.md
@@ -40,6 +40,7 @@ The following metrics are available:
| consensus_fast_syncing | gauge | | either 0 (not fast syncing) or 1 (syncing) |
| consensus_state_syncing | gauge | | either 0 (not state syncing) or 1 (syncing) |
| consensus_block_size_bytes | Gauge | | Block size in bytes |
+| evidence_pool_num_evidence | Gauge | | Number of evidence items in the evidence pool |
| p2p_peers | Gauge | | Number of peers node's connected to |
| p2p_peer_receive_bytes_total | counter | peer_id, chID | number of bytes per channel received from a given peer |
| p2p_peer_send_bytes_total | counter | peer_id, chID | number of bytes per channel sent to a given peer |
diff --git a/docs/nodes/remote-signer.md b/docs/nodes/remote-signer.md
index e7dfccacdb..39a38e1b7a 100644
--- a/docs/nodes/remote-signer.md
+++ b/docs/nodes/remote-signer.md
@@ -37,7 +37,7 @@ There are two ways to generate certificates, [openssl](https://www.openssl.org/)
- Install `Certstrap`:
  ```sh
-  go get github.com/square/certstrap@v1.2.0
+  go install github.com/square/certstrap@v1.2.0
  ```
- Create certificate authority for self signing.
diff --git a/docs/nodes/validators.md b/docs/nodes/validators.md
index b787fa8a46..e7c3a3cf43 100644
--- a/docs/nodes/validators.md
+++ b/docs/nodes/validators.md
@@ -109,9 +109,9 @@ Currently Tendermint uses [Ed25519](https://ed25519.cr.yp.to/) keys which are wi
> **+2/3 is short for "more than 2/3"**
A block is committed when +2/3 of the validator set sign [precommit
-votes](https://github.com/tendermint/spec/blob/953523c3cb99fdb8c8f7a2d21e3a99094279e9de/spec/blockchain/blockchain.md#vote) for that block at the same `round`.
+votes](https://github.com/tendermint/tendermint/blob/953523c3cb99fdb8c8f7a2d21e3a99094279e9de/spec/blockchain/blockchain.md#vote) for that block at the same `round`.
The +2/3 set of precommit votes is called a
-[_commit_](https://github.com/tendermint/spec/blob/953523c3cb99fdb8c8f7a2d21e3a99094279e9de/spec/blockchain/blockchain.md#commit). While any +2/3 set of
+[_commit_](https://github.com/tendermint/tendermint/blob/953523c3cb99fdb8c8f7a2d21e3a99094279e9de/spec/blockchain/blockchain.md#commit). While any +2/3 set of
precommits for the same block at the same height&round can serve as
validation, the canonical commit is included in the next block (see
-[LastCommit](https://github.com/tendermint/spec/blob/953523c3cb99fdb8c8f7a2d21e3a99094279e9de/spec/blockchain/blockchain.md#lastcommit)).
+[LastCommit](https://github.com/tendermint/tendermint/blob/953523c3cb99fdb8c8f7a2d21e3a99094279e9de/spec/blockchain/blockchain.md#lastcommit)).
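+
+As a small illustration of the +2/3 rule above, a quorum check might look like
+this (a sketch; Tendermint's actual implementation tracks voting power per
+validator in its vote-set machinery):
+
+```go
+// hasQuorum reports whether the gathered precommits represent strictly more
+// than 2/3 of the total voting power, using integer arithmetic to avoid
+// floating-point rounding.
+func hasQuorum(signedPower, totalPower int64) bool {
+	return signedPower*3 > totalPower*2
+}
+```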
diff --git a/docs/package-lock.json b/docs/package-lock.json index 8bbdae8cc6..447c8c27d0 100644 --- a/docs/package-lock.json +++ b/docs/package-lock.json @@ -3037,9 +3037,9 @@ } }, "node_modules/async": { - "version": "2.6.3", - "resolved": "https://registry.npmjs.org/async/-/async-2.6.3.tgz", - "integrity": "sha512-zflvls11DCy+dQWzTW2dzuilv8Z5X/pjfmZOWba6TNIVDm+2UDaJmXSOXlasHKfNBs8oo3M0aT50fDEWfKZjXg==", + "version": "2.6.4", + "resolved": "https://registry.npmjs.org/async/-/async-2.6.4.tgz", + "integrity": "sha512-mzo5dfJYwAn29PeiJ0zvwTo04zj8HDJj0Mn8TD7sno7q12prdbnasKJHhkm2c1LgrhlJ0teaea8860oxi51mGA==", "dependencies": { "lodash": "^4.17.14" } @@ -8876,9 +8876,9 @@ } }, "node_modules/minimist": { - "version": "1.2.5", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz", - "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==" + "version": "1.2.6", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.6.tgz", + "integrity": "sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q==" }, "node_modules/mississippi": { "version": "3.0.0", @@ -10389,9 +10389,9 @@ } }, "node_modules/prismjs": { - "version": "1.26.0", - "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.26.0.tgz", - "integrity": "sha512-HUoH9C5Z3jKkl3UunCyiD5jwk0+Hz0fIgQ2nbwU2Oo/ceuTAQAg+pPVnfdt2TJWRVLcxKh9iuoYDUSc8clb5UQ==", + "version": "1.27.0", + "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.27.0.tgz", + "integrity": "sha512-t13BGPUlFDR7wRB5kQDG4jjl7XeuH6jbJGt11JHPL96qwsEHNX2+68tFXqc1/k+/jALsbSWJKUOT/hcYAZ5LkA==", "engines": { "node": ">=6" } @@ -13045,9 +13045,9 @@ } }, "node_modules/url-parse": { - "version": "1.5.7", - "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.7.tgz", - "integrity": "sha512-HxWkieX+STA38EDk7CE9MEryFeHCKzgagxlGvsdS7WBImq9Mk+PGwiT56w82WI3aicwJA8REp42Cxo98c8FZMA==", + "version": "1.5.10", + "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.10.tgz", + "integrity": "sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ==", "dependencies": { "querystringify": "^2.1.1", "requires-port": "^1.0.0" @@ -16588,9 +16588,9 @@ "integrity": "sha1-WWZ/QfrdTyDMvCu5a41Pf3jsA2c=" }, "async": { - "version": "2.6.3", - "resolved": "https://registry.npmjs.org/async/-/async-2.6.3.tgz", - "integrity": "sha512-zflvls11DCy+dQWzTW2dzuilv8Z5X/pjfmZOWba6TNIVDm+2UDaJmXSOXlasHKfNBs8oo3M0aT50fDEWfKZjXg==", + "version": "2.6.4", + "resolved": "https://registry.npmjs.org/async/-/async-2.6.4.tgz", + "integrity": "sha512-mzo5dfJYwAn29PeiJ0zvwTo04zj8HDJj0Mn8TD7sno7q12prdbnasKJHhkm2c1LgrhlJ0teaea8860oxi51mGA==", "requires": { "lodash": "^4.17.14" } @@ -21113,9 +21113,9 @@ } }, "minimist": { - "version": "1.2.5", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz", - "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==" + "version": "1.2.6", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.6.tgz", + "integrity": "sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q==" }, "mississippi": { "version": "3.0.0", @@ -22350,9 +22350,9 @@ "integrity": "sha512-28iF6xPQrP8Oa6uxE6a1biz+lWeTOAPKggvjB8HAs6nVMKZwf5bG++632Dx614hIWgUPkgivRfG+a8uAXGTIbA==" }, "prismjs": { - "version": "1.26.0", - "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.26.0.tgz", - "integrity": 
"sha512-HUoH9C5Z3jKkl3UunCyiD5jwk0+Hz0fIgQ2nbwU2Oo/ceuTAQAg+pPVnfdt2TJWRVLcxKh9iuoYDUSc8clb5UQ==" + "version": "1.27.0", + "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.27.0.tgz", + "integrity": "sha512-t13BGPUlFDR7wRB5kQDG4jjl7XeuH6jbJGt11JHPL96qwsEHNX2+68tFXqc1/k+/jALsbSWJKUOT/hcYAZ5LkA==" }, "process": { "version": "0.11.10", @@ -24536,9 +24536,9 @@ } }, "url-parse": { - "version": "1.5.7", - "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.7.tgz", - "integrity": "sha512-HxWkieX+STA38EDk7CE9MEryFeHCKzgagxlGvsdS7WBImq9Mk+PGwiT56w82WI3aicwJA8REp42Cxo98c8FZMA==", + "version": "1.5.10", + "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.10.tgz", + "integrity": "sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ==", "requires": { "querystringify": "^2.1.1", "requires-port": "^1.0.0" diff --git a/docs/pre.sh b/docs/pre.sh index 37193d265b..76a1cff99a 100755 --- a/docs/pre.sh +++ b/docs/pre.sh @@ -1,3 +1,4 @@ #!/bin/bash cp -a ../rpc/openapi/ .vuepress/public/rpc/ +cp -r ../spec . diff --git a/docs/presubmit.sh b/docs/presubmit.sh new file mode 100755 index 0000000000..19e931a4f2 --- /dev/null +++ b/docs/presubmit.sh @@ -0,0 +1,39 @@ +#!/bin/bash +# +# This script verifies that each document in the docs and architecture +# directory has a corresponding table-of-contents entry in its README file. +# +# This can be run manually from the command line. +# It is also run in CI via the docs-toc.yml workflow. +# +set -euo pipefail + +readonly base="$(dirname $0)" +cd "$base" + +readonly workdir="$(mktemp -d)" +trap "rm -fr -- '$workdir'" EXIT + +checktoc() { + local dir="$1" + local tag="$2"'-*-*' + local out="$workdir/${dir}.out.txt" + ( + cd "$dir" >/dev/null + find . -type f -maxdepth 1 -name "$tag" -not -exec grep -q "({})" README.md ';' -print + ) > "$out" + if [[ -s "$out" ]] ; then + echo "-- The following files in $dir lack a ToC entry: +" + cat "$out" + return 1 + fi +} + +err=0 + +# Verify that each RFC and ADR has a ToC entry in its README file. +checktoc architecture adr || ((err++)) +checktoc rfc rfc || ((err++)) + +exit $err diff --git a/docs/rfc/images/abci++.png b/docs/rfc/images/abci++.png new file mode 100644 index 0000000000..d5146f9957 Binary files /dev/null and b/docs/rfc/images/abci++.png differ diff --git a/docs/rfc/images/abci.png b/docs/rfc/images/abci.png new file mode 100644 index 0000000000..10039ab5ce Binary files /dev/null and b/docs/rfc/images/abci.png differ diff --git a/docs/rfc/rfc-006-event-subscription.md b/docs/rfc/rfc-006-event-subscription.md new file mode 100644 index 0000000000..4372f8d287 --- /dev/null +++ b/docs/rfc/rfc-006-event-subscription.md @@ -0,0 +1,204 @@ +# RFC 006: Event Subscription + +## Changelog + +- 30-Oct-2021: Initial draft (@creachadair) + +## Abstract + +The Tendermint consensus node allows clients to subscribe to its event stream +via methods on its RPC service. The ability to view the event stream is +valuable for clients, but the current implementation has some deficiencies that +make it difficult for some clients to use effectively. This RFC documents these +issues and discusses possible approaches to solving them. + + +## Background + +A running Tendermint consensus node exports a [JSON-RPC service][rpc-service] +that provides a [large set of methods][rpc-methods] for inspecting and +interacting with the node. 
One important cluster of these methods are the +`subscribe`, `unsubscribe`, and `unsubscribe_all` methods, which permit clients +to subscribe to a filtered stream of the [events generated by the node][events] +as it runs. + +Unlike the other methods of the service, the methods in the "event +subscription" cluster are not accessible via [ordinary HTTP GET or POST +requests][rpc-transport], but require upgrading the HTTP connection to a +[websocket][ws]. This is necessary because the `subscribe` request needs a +persistent channel to deliver results back to the client, and an ordinary HTTP +connection does not reliably persist across multiple requests. Since these +methods do not work properly without a persistent channel, they are _only_ +exported via a websocket connection, and are not routed for plain HTTP. + + +## Discussion + +There are some operational problems with the current implementation of event +subscription in the RPC service: + +- **Event delivery is not valid JSON-RPC.** When a client issues a `subscribe` + request, the server replies (correctly) with an initial empty acknowledgement + (`{}`). After that, each matching event is delivered "unsolicited" (without + another request from the client), as a separate [response object][json-response] + with the same ID as the initial request. + + This matters because it means a standard JSON-RPC client library can't + interact correctly with the event subscription mechanism. + + Even for clients that can handle unsolicited values pushed by the server, + these responses are invalid: They have an ID, so they cannot be treated as + [notifications][json-notify]; but the ID corresponds to a request that was + already completed. In practice, this means that general-purpose JSON-RPC + libraries cannot use this method correctly -- it requires a custom client. + + The Go RPC client from the Tendermint core can support this case, but clients + in other languages have no easy solution. + + This is the cause of issue [#2949][issue2949]. + +- **Subscriptions are terminated by disconnection.** When the connection to the + client is interrupted, the subscription is silently dropped. + + This is a reasonable behavior, but it matters because a client whose + subscription is dropped gets no useful error feedback, just a closed + connection. Should they try again? Is the node overloaded? Was the client + too slow? Did the caller forget to respond to pings? Debugging these kinds + of failures is unnecessarily painful. + + Websockets compound this, because websocket connections time out if no + traffic is seen for a while, and keeping them alive requires active + cooperation between the client and server. With a plain TCP socket, liveness + is handled transparently by the keepalive mechanism. On a websocket, + however, one side has to occasionally send a PING (if the connection is + otherwise idle). The other side must return a matching PONG in time, or the + connection is dropped. Apart from being tedious, this is highly susceptible + to CPU load. + + The Tendermint Go implementation automatically sends and responds to pings. + Clients in other languages (or not wanting to use the Tendermint libraries) + need to handle it explicitly. This burdens the client for no practical + benefit: A subscriber has no information about when matching events may be + available, so it shouldn't have to participate in keeping the connection + alive. 
+
+- **Mismatched load profiles.** Most of the RPC service is important mainly for
+  low-volume local use, either by the application the node serves (e.g., the
+  ABCI methods) or by the node operator (e.g., the info methods). Event
+  subscription is important for remote clients, and may represent a much higher
+  volume of traffic.
+
+  This matters because both are using the same JSON-RPC mechanism. For
+  low-volume local use, the ergonomics of JSON-RPC are a good fit: It's easy to
+  issue queries from the command line (e.g., using `curl`) or to write scripts
+  that call the RPC methods to monitor the running node.
+
+  For high-volume remote use, JSON-RPC is not such a good fit: Even leaving
+  aside the non-standard delivery protocol mentioned above, the time and memory
+  cost of encoding event data matters for the stability of the node when there
+  can be potentially hundreds of subscribers. Moreover, a subscription is
+  long-lived compared to most RPC methods, in that it may persist as long as
+  the node is active.
+
+- **Mismatched security profiles.** The RPC service exports several methods
+  that should not be open to arbitrary remote callers, both for correctness
+  reasons (e.g., `remove_tx` and `broadcast_tx_*`) and for operational
+  stability reasons (e.g., `tx_search`). A node may still need to expose
+  events, however, to support UI tools.
+
+  This matters, because all the methods share the same network endpoint. While
+  it is possible to block the top-level GET and POST handlers with a proxy,
+  exposing the `/websocket` handler exposes not _only_ the event subscription
+  methods, but the rest of the service as well.
+
+### Possible Improvements
+
+There are several things we could do to improve the experience of developers
+who need to subscribe to events from the consensus node. These are not all
+mutually exclusive.
+
+1. **Split event subscription into a separate service**. Instead of exposing
+   event subscription on the same endpoint as the rest of the RPC service,
+   dedicate a separate endpoint on the node for _only_ event subscription. The
+   rest of the RPC services (_sans_ events) would remain as-is.
+
+   This would make it easy to disable or firewall outside access to sensitive
+   RPC methods, without blocking access to event subscription (and vice versa).
+   This is probably worth doing, even if we don't take any of the other steps
+   described here.
+
+2. **Use a different protocol for event subscription.** There are various ways
+   we could approach this, depending on how much we're willing to shake up the
+   current API. Here are sketches of a few options:
+
+   - Keep the websocket, but rework the API to be more JSON-RPC compliant,
+     perhaps by converting event delivery into notifications. This is less
+     up-front change for existing clients, but retains all of the existing
+     implementation complexity, and doesn't contribute much toward more serious
+     performance and UX improvements later.
+
+   - Switch from websocket to plain HTTP, and rework the subscription API to
+     use a more conventional request/response pattern instead of streaming.
+     This is a little more up-front work for existing clients, but leverages
+     better library support for clients not written in Go (a rough sketch of
+     such an API appears after this list).
+
+     The protocol would become more chatty, but we could mitigate that with
+     batching, and in return we would get more control over what to do about
+     slow clients: Instead of simply silently dropping them, as we do now, we
+     could drop messages and signal the client that they missed some data ("M
+     dropped messages since your last poll").
+
+     This option is probably the best balance between work, API change, and
+     benefit, and has a nice incidental effect that it would be easier to debug
+     subscriptions from the command line, like the other RPC methods.
+
+   - Switch to gRPC: Preserves a persistent connection and gives us a more
+     efficient binary wire format (protobuf), at the cost of much more work for
+     clients and harder debugging. This may be the best option if performance
+     and server load are our top concerns.
+
+     Given that we are currently using JSON-RPC, however, I'm not convinced the
+     costs of encoding and sending messages on the event subscription channel
+     are the limiting factor on subscription efficiency.
+
+3. **Delegate event subscriptions to a proxy.** Give responsibility for
+   managing event subscription to a proxy that runs separately from the node,
+   and switch the node to push events to the proxy (like a webhook) instead of
+   serving subscribers directly. This is more work for the operator (another
+   process to configure and run) but may scale better for big networks.
+
+   I mention this option for completeness, but making this change would be a
+   fairly substantial project. If we want to consider shifting responsibility
+   for event subscription outside the node anyway, we should probably be more
+   systematic about it. For a more principled approach, see point (4) below.
+
+4. **Move event subscription downstream of indexing.** We are already planning
+   to give applications more control over event indexing. By extension, we
+   might allow the application to also control how events are filtered,
+   queried, and subscribed. Having the application control these concerns,
+   rather than the node, might make life easier for developers building UI and
+   tools for that application.
+
+   This is a much larger change, so I don't think it is likely to be practical
+   in the near-term, but it's worth considering as a broader option. Some of
+   the existing code for filtering and selection could be made more reusable,
+   so applications would not need to reinvent everything.
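+
+To make the polling option in (2) concrete, here is a rough Go sketch of what
+the request and response shapes for such an API might look like. This is a
+hypothetical illustration, not an implemented Tendermint API: every type and
+field name is an assumption, and the `Dropped` field marks where the server
+could report "M dropped messages since your last poll".
+
+```go
+package eventsapi
+
+// EventsRequest asks for event items matching Filter that arrived after
+// the opaque cursor After. MaxItems bounds the size of a single reply,
+// and WaitTime lets the server hold the request open (long-poll) rather
+// than returning an empty page immediately.
+type EventsRequest struct {
+	Filter   string `json:"filter"`
+	After    string `json:"after,omitempty"`
+	MaxItems int    `json:"maxItems"`
+	WaitTime int64  `json:"waitTimeSec"`
+}
+
+// EventItem is one matching event plus the cursor to resume from.
+type EventItem struct {
+	Cursor string `json:"cursor"`
+	Type   string `json:"type"`
+	Data   []byte `json:"data"`
+}
+
+// EventsResponse carries the matching items, plus Dropped: how many
+// items were discarded because the client polled too slowly.
+type EventsResponse struct {
+	Items   []EventItem `json:"items"`
+	Dropped int64       `json:"dropped"`
+}
+```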
+
+
+## References
+
+- [Tendermint RPC service][rpc-service]
+- [Tendermint RPC routes][rpc-methods]
+- [Discussion of the event system][events]
+- [Discussion about RPC transport options][rpc-transport] (from RFC 002)
+- [RFC 6455: The websocket protocol][ws]
+- [JSON-RPC 2.0 Specification](https://www.jsonrpc.org/specification)
+
+[rpc-service]: https://docs.tendermint.com/master/rpc/
+[rpc-methods]: https://github.com/tendermint/tendermint/blob/master/internal/rpc/core/routes.go#L12
+[events]: ./rfc-005-event-system.rst
+[rpc-transport]: ./rfc-002-ipc-ecosystem.md#rpc-transport
+[ws]: https://datatracker.ietf.org/doc/html/rfc6455
+[json-response]: https://www.jsonrpc.org/specification#response_object
+[json-notify]: https://www.jsonrpc.org/specification#notification
+[issue2949]: https://github.com/tendermint/tendermint/issues/2949
diff --git a/docs/rfc/rfc-007-deterministic-proto-bytes.md b/docs/rfc/rfc-007-deterministic-proto-bytes.md
new file mode 100644
index 0000000000..0b55c22283
--- /dev/null
+++ b/docs/rfc/rfc-007-deterministic-proto-bytes.md
@@ -0,0 +1,140 @@
+# RFC 007: Deterministic Proto Byte Serialization
+
+## Changelog
+
+- 09-Dec-2021: Initial draft (@williambanfield).
+
+## Abstract
+
+This document discusses the issue of stable byte-representation of serialized messages
+within Tendermint and describes a few possible routes that could be taken to address it.
+
+## Background
+
+We use the byte representations of wire-format proto messages to produce
+and verify hashes of data within the Tendermint codebase, as well as to produce
+and verify cryptographic signatures over those bytes.
+
+The protocol buffer [encoding spec][proto-spec-encoding] does not guarantee that the byte representation
+of a protocol buffer message will be the same between two calls to an encoder.
+While there is a mode to force the encoder to produce the same byte representation
+of messages within a single binary, these guarantees are not good enough for our
+use case in Tendermint. We require multiple different versions of a binary running
+Tendermint to be able to inter-operate. Additionally, we require that multiple different
+systems written in _different languages_ be able to participate in different aspects
+of the protocols of Tendermint and be able to verify the integrity of the messages
+they each produce.
+
+While this has not yet created a problem that we know of in a running network, we should
+make sure to provide stronger guarantees around the serialized representation of the messages
+used within the Tendermint consensus algorithm to prevent any issue from occurring.
+
+
+## Discussion
+
+Proto has the following points of variability that can produce a non-deterministic byte representation:
+
+1. Encoding order of fields within a message.
+
+Proto allows fields to be encoded in any order and even to be repeated.
+
+2. Encoding order of elements of a repeated field.
+
+`repeated` fields in a proto message can be serialized in any order.
+
+3. Presence or absence of default values.
+
+Types in proto have defined default values similar to Go's zero values.
+Writing or omitting a default value are both legal ways of encoding a wire message.
+
+4. Serialization of 'unknown' fields.
+
+Unknown fields can be present when a message is created by a binary with a newer
+version of the proto that contains fields that the deserializer in a different
+binary does not yet know about.
Deserializers in binaries that do not know about the field
+will maintain the bytes of the unknown field but not place them into the deserialized structure.
+
+We have a few options to consider when producing this stable representation.
+
+### Options for deterministic byte representation
+
+#### Use only compliant serializers and constrain field usage
+
+According to [Cosmos-SDK ADR-27][cosmos-sdk-adr-27], when message types obey a simple
+set of rules, gogoproto produces a consistent byte representation of serialized messages.
+This seems promising, although more research is needed to guarantee gogoproto always
+produces a consistent set of bytes on serialized messages. This would solve the problem
+within Tendermint as written in Go, but would require ensuring that there are similar
+serializers written in other languages that produce the same output as gogoproto.
+
+#### Reorder serialized bytes to ensure determinism
+
+The serialized form of a proto message can be transformed into a canonical representation
+by applying simple rules to the serialized bytes. Re-ordering the serialized bytes
+would allow Tendermint to produce a canonical byte representation without having to
+simultaneously maintain a custom proto marshaller.
+
+This could be implemented as a function, in many languages, that performs the
+following operations when producing bytes to sign or hash:
+
+1. Omit the data from any unknown fields when producing the bytes to hash.
+
+Tendermint should not run into a case where it needs to verify the integrity of
+data with unknown fields, for the following reasons:
+
+The purpose of checking hash equality within Tendermint is to ensure that
+its local copy of data matches the data that the network agreed on. There should
+therefore not be a case where a process is checking hash equality using data that it did not expect
+to receive. What the data represent may be opaque to the process, such as when checking the
+transactions in a block, _but the process will still have expected to receive this data_,
+despite not understanding what their internal structure is. It's not clear what it would
+mean to verify that a block contains data that a process does not know about.
+
+The same reasoning applies for signature verification within Tendermint. A process
+verifies a digital signature over a set of bytes by locally reconstructing, from its
+own data, the data structure that the signature was produced over.
+
+2. Reorder all message fields into tag-sorted order.
+
+Tag-sorting top-level fields places all fields with the same tag adjacent
+to each other within the serialized representation.
+
+3. Reorder the contents of all `repeated` fields into lexicographically sorted order.
+
+`repeated` fields will appear in a message as having the same tag but will contain different
+contents. Therefore, lexicographical sorting produces a stable ordering of
+fields with the same tag.
+
+4. Delete all default values from the byte representation.
+
+Encoders can include default values or omit them. Most encoders appear to omit them,
+but we may wish to delete them just to be safe.
+
+5. Recursively perform these operations on any length-delimited subfields.
+
+Length-delimited fields may contain messages, strings, or just bytes. However,
+it's not possible to know what data is being represented by such a field.
+A 'string' may happen to have the same structure as an embedded message and we cannot
+disambiguate.
For this reason, we must apply these same rules to all subfields that
+may contain messages. Because we cannot know if we have totally mangled the interior 'string'
+or not, this data should never be deserialized or used for anything beyond hashing.
+
+A **prototype** implementation by @creachadair of this can be found in [the wirepb repo][wire-pb].
+This could be implemented in multiple languages more simply than ensuring that there are
+canonical proto serializers that match in each language.
+
+### Future work
+
+We should add clear documentation to the Tendermint codebase everywhere we
+compare hashes of proto messages or use proto-serialized bytes to produce a
+digital signature, noting that we have been careful to ensure that the hashes
+are computed properly.
+
+### References
+
+[proto-spec-encoding]: https://developers.google.com/protocol-buffers/docs/encoding
+[spec-issue]: https://github.com/tendermint/tendermint/issues/5005
+[cosmos-sdk-adr-27]: https://github.com/cosmos/cosmos-sdk/blob/master/docs/architecture/adr-027-deterministic-protobuf-serialization.md
+[cer-proto-3]: https://github.com/regen-network/canonical-proto3
+[wire-pb]: https://github.com/creachadair/wirepb
+
diff --git a/docs/rfc/rfc-008-do-not-panic.md b/docs/rfc/rfc-008-do-not-panic.md
new file mode 100644
index 0000000000..ec8c08f5e7
--- /dev/null
+++ b/docs/rfc/rfc-008-do-not-panic.md
@@ -0,0 +1,139 @@
+# RFC 008: Don't Panic
+
+## Changelog
+
+- 2021-12-17: initial draft (@tychoish)
+
+## Abstract
+
+Today, the Tendermint core codebase has panics in a number of cases as
+a response to exceptional situations. These panics complicate testing,
+and might make Tendermint components difficult to use as a library in
+some circumstances. This document outlines a project of converting
+panics to errors and describes the situations where it's safe to
+panic.
+
+## Background
+
+Panics in Go are a great mechanism for aborting the current execution
+for truly exceptional situations (e.g. memory errors, data corruption,
+process initialization); however, because they resemble exceptions
+in other languages, it can be easy to overuse them when implementing
+software. This certainly happened in
+the history of Tendermint, and as we embark on the project of
+stabilizing the package, we find ourselves in the right moment to
+reexamine our use of panics, and more broadly where panics happen in the
+code base.
+
+There are still some situations where panics are acceptable and
+desirable, but it's important that Tendermint, as a project, comes to
+consensus--perhaps in the text of this document--on the situations
+where it is acceptable to panic.
+
+### References
+
+- [Defer Panic and Recover](https://go.dev/blog/defer-panic-and-recover)
+- [Why Go gets exceptions right](https://dave.cheney.net/tag/panic)
+- [Don't panic](https://dave.cheney.net/practical-go/presentations/gophercon-singapore-2019.html#_dont_panic)
+
+## Discussion
+
+### Acceptable Panics
+
+#### Initialization
+
+It is unambiguously safe (and desirable) to panic in `init()`
+functions in response to any kind of error. These errors are caught by
+tests, and occur early enough in process initialization that they
+won't cause unexpected runtime crashes.
+
+Other code that is called early in process initialization MAY panic,
+in some situations, if it's not possible to return an error or cause
+the process to abort early, although these situations should be
+vanishingly rare.
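+
+As a concrete illustration of the initialization case, the sketch below shows
+the standard Go idioms this policy blesses. `regexp.MustCompile` is the
+canonical deliberately-panicking initializer in the standard library; the
+compiled-in default and the check in `init` are hypothetical, invented for
+this example.
+
+```go
+package example
+
+import (
+	"fmt"
+	"regexp"
+)
+
+// MustCompile panics at package initialization if the pattern is
+// malformed, so a bad pattern can never survive into a running node.
+var versionPattern = regexp.MustCompile(`^v\d+\.\d+\.\d+$`)
+
+// defaultListenAddr is a hypothetical compiled-in default.
+const defaultListenAddr = "tcp://127.0.0.1:26657"
+
+func init() {
+	// Panicking here surfaces a broken compiled-in default immediately,
+	// in tests and at startup, rather than as a confusing crash later
+	// in the process lifetime.
+	if !regexp.MustCompile(`^(tcp|unix)://`).MatchString(defaultListenAddr) {
+		panic(fmt.Sprintf("invalid default listen address %q", defaultListenAddr))
+	}
+}
+```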
+
+#### Data Corruption
+
+If Tendermint code encounters an inconsistency that could be
+attributed to data corruption or a logical impossibility, it is safer
+to panic and crash the process than to continue to attempt to make
+progress in these situations.
+
+Examples include reading data out of the storage engine that
+is invalid or corrupt, or encountering an ambiguous situation where
+the process should halt. Generally these forms of corruption are
+detected after interacting with a trusted but external data source,
+and reflect situations where the author thinks it's safer to terminate
+the process immediately rather than allow execution to continue.
+
+#### Unrecoverable Consensus Failure
+
+In general, a panic should be used in the case of unrecoverable
+consensus failures. If a process detects that the network is
+behaving in an incoherent way and it does not have a clearly defined
+mechanism for recovering, the process should panic.
+
+#### Static Validity
+
+It is acceptable to panic for invariant violations, within a library
+or package, in situations that should be statically impossible,
+because there is no way to make these kinds of assertions at compile
+time.
+
+For example, type-asserting `interface{}` values returned by
+`container/list` and `container/heap` (and similar) is acceptable,
+because package authors should have exclusive control of the inputs to
+these containers. Packages should not expose the ability to add
+arbitrary values to these data structures.
+
+#### Controlled Panics Within Libraries
+
+In some algorithms with highly recursive structures or very nested
+call patterns, using a panic in combination with conditional recovery
+handlers results in more manageable code. Ultimately this is a limited
+application, and implementations that use panics internally should
+only recover conditionally, filtering out panics rather than ignoring
+or handling all panics.
+
+#### Request Handling
+
+Code that handles responses to incoming/external requests
+(e.g. `http.Handler`) should avoid panics, but in practice this isn't
+totally possible, and it makes sense that request handlers have some
+kind of default recovery mechanism that will prevent one request from
+terminating a service.
+
+### Unacceptable Panics
+
+In **no** other situation is it acceptable for the code to panic:
+
+- there should be **no** controlled panics that callers are required
+  to handle across library/package boundaries.
+- callers of library functions should not expect panics.
+- arbitrary goroutines must not be able to panic.
+- there should be no arbitrary panics in core production code,
+  especially code that can run at any time during the lifetime of a
+  process.
+- all test code and fixtures should report normal test assertions with
+  a mechanism like testify's `require` assertion rather than calling
+  panic directly.
+
+The goal of this increased "panic rigor" is to ensure that any escaped
+panic reflects a fixable bug in Tendermint.
+
+### Removing Panics
+
+The process for removing panics involves a few steps, and will be part
+of an ongoing process of code modernization:
+
+- converting existing explicit panics to errors in cases where it's
+  possible to return an error, the errors can and should be handled, and returning
+  an error would not lead to data corruption or cover up data
+  corruption.
+
+- increasing rigor around operations that can cause runtime errors--type
+  assertions, nil pointer dereferences, out-of-bounds array accesses--and
+  either avoiding these situations or returning errors where possible.
+
+- removing generic panic handlers which could mask known
+  panics.
diff --git a/docs/rfc/rfc-009-consensus-parameter-upgrades.md b/docs/rfc/rfc-009-consensus-parameter-upgrades.md
new file mode 100644
index 0000000000..60be878df1
--- /dev/null
+++ b/docs/rfc/rfc-009-consensus-parameter-upgrades.md
@@ -0,0 +1,128 @@
+# RFC 009: Consensus Parameter Upgrade Considerations
+
+## Changelog
+
+- 06-Jan-2022: Initial draft (@williambanfield).
+
+## Abstract
+
+This document discusses the challenges of adding additional consensus parameters
+to Tendermint and proposes a few solutions that can enable addition of consensus
+parameters in a backwards-compatible way.
+
+## Background
+
+This section provides an overview of the issues of adding consensus parameters
+to Tendermint.
+
+### Hash Compatibility
+
+Tendermint produces a hash of a subset of the consensus parameters. The values
+that are currently hashed are `BlockMaxBytes` and `BlockMaxGas`. These
+are currently in the [HashedParams struct][hashed-params]. This hash is included
+in the block and validators use it to validate that their local view of the consensus
+parameters matches what the rest of the network is configured with.
+
+Any new consensus parameters added to Tendermint should be included in this
+hash. This presents a challenge for verification of historical blocks when consensus
+parameters are added. If a network produced blocks with a version of Tendermint that
+did not yet have the new consensus parameters, the parameter hash it produced will
+not reference the new parameters. Any nodes joining the network with the newer
+version of Tendermint will have the new consensus parameters. Tendermint will need
+to handle this case so that new versions of Tendermint with new consensus parameters
+can still validate old blocks correctly without having to do anything overly complex
+or hacky.
+
+### Allowing Developer-Defined Values and the `EndBlock` Problem
+
+When new consensus parameters are added, application developers may wish to set
+values for them so that the developer-defined values may be used as soon as the
+software upgrades. We do not currently have a clean mechanism for handling this.
+
+Consensus parameter updates are communicated from the application to Tendermint
+within `EndBlock` of some height `H` and take effect at the next height, `H+1`.
+This means that for updates that add a consensus parameter, there is a single
+height where the new parameters cannot take effect. The parameters did not exist
+in the version of the software that emitted the `EndBlock` response for height `H-1`,
+so they cannot take effect at height `H`. The first height that the updated params
+can take effect is height `H+1`. As of now, height `H` must run with the defaults.
+
+## Discussion
+
+### Hash Compatibility
+
+This section discusses possible solutions to the problem of maintaining backwards-compatibility
+of hashed parameters while adding new parameters.
+
+#### Never Hash Defaults
+
+One solution to the problem of backwards-compatibility is to never include parameters
+in the hash if they are using the default value. This means that blocks produced
+before the parameters existed will have implicitly been created with the defaults.
+This works because any software with newer versions of Tendermint must be using the
+defaults for new parameters when validating old blocks, since the defaults cannot
+have been updated until a height at which the parameters existed.
+
+#### Only Update HashedParams on Hash-Breaking Releases
+
+An alternate solution to never hashing defaults is to not update the hashed
+parameters on non-hash-breaking releases. This means that when new consensus
+parameters are added to Tendermint, there may be a release that makes use of the
+parameters but does not verify that they are the same across all validators by
+referencing them in the hash. This seems reasonably safe given the fact that
+only a very small subset of the consensus parameters is currently verified at all.
+
+#### Version The Consensus Parameter Hash Scheme
+
+The upcoming work on [soft upgrades](https://github.com/tendermint/spec/pull/222)
+proposes applying different hashing rules depending on the active block version.
+The consensus parameter hash could be versioned in the same way. When different
+block versions are used, a different set of consensus parameters will be included
+in the hash.
+
+### Developer-Defined Values
+
+This section discusses possible solutions to the problem of allowing application
+developers to define values for the new parameters during the upgrade that adds
+the parameters.
+
+#### Using `InitChain` for New Values
+
+One solution to the problem of allowing application developers to define values
+for new consensus parameters is to call the `InitChain` ABCI method on application
+startup and fetch the value for any new consensus parameters. The [response object][init-chain-response]
+contains a field for `ConsensusParams` updates, so this may serve as a natural place
+to put this logic.
+
+This poses a few difficulties. Nodes replaying old blocks while running new
+software do not ever call `InitChain` after the initial time. They will therefore
+not have a way to determine that the parameters changed at some height by using a
+call to `InitChain`. The `EndBlock` response is how parameter changes at a height
+are currently communicated to Tendermint, and conflating these cases seems risky.
+
+#### Force Defaults For Single Height
+
+An alternate option is to not use `InitChain` and instead require chains to use the
+default values of the new parameters for a single height.
+
+As documented in the upcoming [ADR-74][adr-74], popular chains often simply use the default
+values. Additionally, great care is being taken to ensure that logic governed by upcoming
+consensus parameters is not liveness-breaking. This means that, in the worst case,
+chains will experience a single slow height while waiting for the new values to
+be applied.
+
+#### Add a new `UpgradeChain` method
+
+An additional method for allowing chains to update consensus parameters that
+do not yet exist is to add a new `UpgradeChain` method to ABCI. The `UpgradeChain`
+method would be called when the chain detects that the version of the block that it
+is about to produce does not match the previous block. This method would be called
+after `EndBlock` and would return the set of consensus parameters to use at the
+next height. It would therefore give an application the chance to set the new
+consensus parameters before running a height with these new parameters.
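+
+To make the proposal concrete, a rough Go sketch of what such a method's
+request and response might look like follows. Nothing here exists in ABCI
+today; all message shapes are hypothetical illustrations of the idea, and
+`ConsensusParams` is a stand-in for the existing Tendermint type of that name.
+
+```go
+package abcisketch
+
+// ConsensusParams stands in for Tendermint's existing consensus
+// parameter type, including any newly added parameters.
+type ConsensusParams struct {
+	BlockMaxBytes int64
+	BlockMaxGas   int64
+	// ... new parameters would appear here ...
+}
+
+// RequestUpgradeChain would be sent when the node detects that the
+// next block's version differs from the previous block's version.
+type RequestUpgradeChain struct {
+	Height     int64  // first height produced with the new version
+	OldVersion uint64 // block protocol version of the previous block
+	NewVersion uint64 // block protocol version about to take effect
+}
+
+// ResponseUpgradeChain would carry the full parameter set, including
+// developer-defined values for parameters that are new in NewVersion,
+// to take effect at Height.
+type ResponseUpgradeChain struct {
+	Params ConsensusParams
+}
+```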
+
+### References
+
+[hashed-params]: https://github.com/tendermint/tendermint/blob/0ae974e63911804d4a2007bd8a9b3ad81d6d2a90/types/params.go#L49
+[init-chain-response]: https://github.com/tendermint/tendermint/blob/0ae974e63911804d4a2007bd8a9b3ad81d6d2a90/abci/types/types.pb.go#L1616
+[adr-74]: https://github.com/tendermint/tendermint/pull/7503
diff --git a/docs/rfc/rfc-010-p2p-light-client.rst b/docs/rfc/rfc-010-p2p-light-client.rst
new file mode 100644
index 0000000000..b5f465589f
--- /dev/null
+++ b/docs/rfc/rfc-010-p2p-light-client.rst
@@ -0,0 +1,145 @@
+==================================
+RFC 010: Peer to Peer Light Client
+==================================
+
+Changelog
+---------
+
+- 2022-01-21: Initial draft (@tychoish)
+
+Abstract
+--------
+
+The dependency on access to the RPC system makes running or using the light
+client more complicated than it should be, because in practice node operators
+choose to restrict access to these endpoints (often correctly.) There is no
+deep dependency for the light client on the RPC system, and there is a
+persistent notion that "make a p2p light client" is a solution to this
+operational limitation. This document explores the implications and
+requirements of implementing a p2p-based light client, as well as the
+possibilities afforded by this implementation.
+
+Background
+----------
+
+High Level Design
+~~~~~~~~~~~~~~~~~
+
+From a high level, the light client P2P implementation is relatively
+straightforward, but is orthogonal to the P2P-backed statesync implementation that
+took place during the 0.35 cycle. The light client only really needs to be
+able to request (and receive) a ``LightBlock`` at a given height. To support
+this, a new Reactor would run on every full node and validator, which would be
+able to service these requests. The workload would be entirely
+request-response, so the implementation of the reactor would likely be very
+straightforward, and the implementation of the provider is similarly simple.
+
+The complexity of the project centers on peer discovery, handling when
+peers disconnect from the light clients, and how to change the current P2P
+code to appropriately handle specialized nodes.
+
+I believe it's safe to assume that much of the functionality of the
+current ``light`` mode would *not* need to be maintained: there is no need to
+proxy the RPC endpoints over the P2P layer and there may be no need to run a
+node/process for the p2p light client (e.g. all use of this will be as a
+client.)
+
+The ability to run light clients using the RPC system will continue to be
+maintained.
+
+LibP2P
+~~~~~~
+
+While some aspects of the P2P light client implementation are orthogonal to
+the LibP2P project, it's useful to think about the ways that these efforts may
+combine or interact.
+
+We expect to be able to leverage libp2p tools to provide some kind of service
+discovery for tendermint-based networks. This means that it will be possible
+for the p2p stack to easily identify specialized nodes (e.g. light clients),
+thus obviating many of the design challenges with providing this feature in
+the context of the current stack.
+
+Similarly, libp2p makes it possible for projects to back their non-Go
+light clients without the major task of first implementing Tendermint's p2p
+connection handling. We should identify whether there exist users (e.g. the Go IBC
+relayer, its maintainers, and operators) who would be able to take advantage
+of a p2p light client before switching to libp2p.
To our knowledge there are
+limited implementations of this p2p protocol (a simple implementation without
+secret connection support exists in Rust, but it has not been used in
+production), and it seems unlikely that a team would implement this directly
+ahead of its impending removal.
+
+Use Cases
+~~~~~~~~~
+
+This RFC makes a few assumptions about the use cases and users of light
+clients in tendermint.
+
+The most active and delicate use case for light clients is the
+implementation of the IBC relayer. Thus, we expect that providing P2P light
+clients might increase the reliability of relayers and reduce the cost of
+running a relayer, because relayer operators won't have to decide between
+relying on public RPC endpoints (unreliable) or running their own full nodes
+(expensive.) This also assumes that there are *no* other uses of the RPC in
+the relayer, and unless the relayers have the option of dropping all RPC use,
+it's unclear if a P2P light client will actually be able to successfully
+remove the dependency on the RPC system.
+
+Given that the primary relayer implementation is Hermes (Rust), it might be
+safe to deliver a version of Tendermint that adds a light client reactor to
+the full nodes, but that does not provide an implementation of a Go light
+client. This means that the Rust implementation would either need support for
+the legacy P2P connection protocol or wait for the libp2p implementation.
+
+Client-side light client users (e.g. wallets, etc.) may always want to use (a
+subset of) the RPC rather than connect to the P2P network for an ephemeral
+use.
+
+Discussion
+----------
+
+Implementation Questions
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Most of the complication here is in how to have a long-lived light client node
+that *only* runs the light client reactor, as this raises a few questions:
+
+- would users specify a single P2P node to connect to when creating a light
+  client or would they also need/want to discover peers?
+
+  - **answer**: most light client use cases won't care much about selecting
+    peers (and those that do can either disable PEX and specify persistent
+    peers, *or* use the RPC light client.)
+
+- how do we prevent the peer slots of full nodes and validators, which are
+  typically limited, from filling up with light clients? If light clients
+  aren't limited, how do we prevent light clients from consuming
+  resources on consensus nodes?
+
+  - **answer**: I think we can institute an internal cap on the number of light
+    client connections to accept and also elide light client nodes from PEX
+    (pre-libp2p, if we implement this.) I believe that libp2p should provide
+    us with the kind of service discovery semantics for network connectivity
+    that would obviate this issue.
+
+- when a light client disconnects from its peers will it need to reset its
+  internal state (cache)? does this change if it connects to the same peers?
+
+  - **answer**: no, the internal state only needs to be reset if the light
+    client detects an invalid block or other divergence, and changing
+    witnesses--which will be more common with a p2p light client--need not
+    invalidate the cache.
+
+These issues are primarily present because the current peer management layer
+does not have a particularly good service discovery mechanism, nor does it have
+a very sophisticated way of identifying nodes of different types or modes.
+
+Report Evidence
+~~~~~~~~~~~~~~~
+
+The current light client implementation has the ability to report
+observed evidence.
Either the notional light client reactor needs to be able
+to handle these kinds of requests *or* all light client nodes need to also run
+the evidence reactor. This could be configured at runtime.
diff --git a/docs/rfc/rfc-011-delete-gas.md b/docs/rfc/rfc-011-delete-gas.md
new file mode 100644
index 0000000000..a4e643ef2f
--- /dev/null
+++ b/docs/rfc/rfc-011-delete-gas.md
@@ -0,0 +1,162 @@
+# RFC 011: Remove Gas From Tendermint
+
+## Changelog
+
+- 03-Feb-2022: Initial draft (@williambanfield).
+- 10-Feb-2022: Update in response to feedback (@williambanfield).
+- 11-Feb-2022: Add reflection on MaxGas during consensus (@williambanfield).
+
+## Abstract
+
+In the v0.25.0 release, Tendermint added a mechanism for tracking 'Gas' in the mempool.
+At a high level, Gas allows applications to specify how much it will cost the network,
+often in compute resources, to execute a given transaction. While such a mechanism is common
+in blockchain applications, it is not generalizable enough to be maintained as a part
+of Tendermint. This RFC explores the possibility of removing the concept of Gas from
+Tendermint while still allowing applications the power to control the contents of
+blocks to achieve similar goals.
+
+## Background
+
+The notion of Gas was included in the original Ethereum whitepaper and exists as
+an important feature of the Ethereum blockchain.
+
+The [whitepaper describes Gas][eth-whitepaper-messages] as an anti-DoS mechanism. The Ethereum Virtual Machine
+provides a Turing-complete execution platform. Without any limitations, malicious
+actors could waste computation resources by directing the EVM to perform large
+or even infinite computations. Gas serves as a metering mechanism to prevent this.
+
+Gas appears to have been added to Tendermint multiple times, initially as part of
+a now-defunct `/vm` package, and in its most recent iteration [as part of v0.25.0][gas-add-pr]
+as a mechanism to limit the transactions that will be included in the block by an additional
+parameter.
+
+Gas has gained adoption within the Cosmos ecosystem [as part of the Cosmos SDK][cosmos-sdk-gas].
+The SDK provides facilities for tracking how much 'Gas' a transaction is expected to take
+and a mechanism for tracking how much gas a transaction has already taken.
+
+Non-SDK applications also make use of the concept of Gas. Anoma appears to implement
+[a gas system][anoma-gas] to meter the transactions it executes.
+
+While the notion of gas is present in projects that make use of Tendermint, it is
+not a concern of Tendermint's. Tendermint's value and goal is producing blocks
+via a distributed consensus algorithm. Tendermint relies on the application-specific
+code to decide how to handle the transactions Tendermint has produced (or if the
+application wants to consider them at all). Gas is an application concern.
+
+Our implementation of Gas is not currently enforced by consensus. Our current validation check that
+occurs during block propagation does not verify that the block is under the configured `MaxGas`.
+Ensuring that the transactions in a proposed block do not exceed `MaxGas` would require
+input from the application during propagation. The `ProcessProposal` method introduced
+as part of ABCI++ would enable such input, but would further entwine Tendermint and
+the application. The issue of checking `MaxGas` during block propagation is important
+because it demonstrates that the feature as it currently exists is not implemented
+as fully as it perhaps should be.
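+
+For illustration, the sketch below shows how an application could use a
+`ProcessProposal`-style hook to enforce a gas cap during propagation. This is
+a hypothetical sketch: the final ABCI++ `ProcessProposal` API may look
+different, and `gasFor` stands in for whatever cost model the application
+keeps once gas bookkeeping moves out of Tendermint.
+
+```go
+package gascap
+
+type Tx = []byte
+
+// maxBlockGas is a hypothetical application-defined cap, playing the
+// role that the consensus parameter MaxGas plays today.
+const maxBlockGas = 10_000_000
+
+// gasFor returns the application's own gas estimate for a transaction.
+// The length-based cost model here is a placeholder for illustration.
+func gasFor(tx Tx) int64 {
+	return int64(len(tx))
+}
+
+// processProposal accepts a proposed block only if its total estimated
+// gas fits under the application's cap, making the limit part of block
+// validity rather than a mempool-only heuristic.
+func processProposal(txs []Tx) (accept bool) {
+	var total int64
+	for _, tx := range txs {
+		total += gasFor(tx)
+		if total > maxBlockGas {
+			return false // reject: the proposer exceeded the gas cap
+		}
+	}
+	return true
+}
+```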
+
+Our implementation of Gas is causing issues for node operators and relayers. At
+the moment, transactions that overflow the configured 'MaxGas' can be silently rejected
+from the mempool. Overflowing MaxGas is the _only_ way that a transaction can be considered
+invalid that is not directly a result of failing `CheckTx`. Operators, and the application,
+do not know that a transaction was removed from the mempool for this reason. A stateless check
+of this nature is exactly what `CheckTx` exists for, and there is no reason for the mempool
+to keep track of this data separately. A special [MempoolError][add-mempool-error] field
+was added in v0.35 to communicate to clients that a transaction failed after `CheckTx`.
+While this should alleviate the pain for operators wishing to understand if their
+transaction was included in the mempool, it highlights that the abstraction of
+what is included in the mempool is not currently well defined.
+
+Removing Gas from Tendermint and the mempool would allow for the mempool to be a better
+abstraction: any transaction that arrived at `CheckTx` and passed the check will either be
+a candidate for a later block or evicted after a TTL is reached or to make room for
+other, higher-priority transactions. All other transactions are completely invalid and can be discarded forever.
+
+Removing gas will not be completely straightforward. It will mean ensuring that
+equivalent functionality can be implemented outside of the mempool using the mempool's API.
+
+## Discussion
+
+This section catalogs the functionality that will need to exist within the Tendermint
+mempool to allow Gas to be removed and replaced by application-side bookkeeping.
+
+### Requirement: Provide Mempool Tx Sorting Mechanism
+
+Gas produces a market for inclusion in a block. On many networks, a [gas fee][cosmos-sdk-fees] is
+included in pending transactions. This fee indicates how much a user is willing to
+pay per unit of execution, and the fees are distributed to validators.
+
+Validators wishing to extract higher gas fees are incentivized to include transactions
+with the highest listed gas fees in each block. This produces a natural ordering
+of the pending transactions. Applications wishing to implement a gas mechanism need
+to be able to order the transactions in the mempool. This can trivially be accomplished
+by sorting transactions using the `priority` field available to applications as part of
+v0.35's `ResponseCheckTx` message.
+
+### Requirement: Allow Application-Defined Block Resizing
+
+When creating a block proposal, Tendermint pulls a set of possible transactions out of
+the mempool to include in the next block. Tendermint uses MaxGas to limit the set of transactions
+it pulls out of the mempool, fetching a set of transactions whose total gas is less than MaxGas.
+
+By removing gas tracking from Tendermint's mempool, Tendermint will need to provide a way for
+applications to determine an acceptable set of transactions to include in the block.
+
+This is what the new ABCI++ `PrepareProposal` method is useful for. Applications
+that wish to limit the contents of a block by an application-defined limit may
+do so by removing transactions from the proposal they are passed during `PrepareProposal`.
+Applications wishing to reach parity with the current Gas implementation may do
+so by creating an application-side limit: filtering out of the proposal, during
+`PrepareProposal`, any transactions that would cause it to exceed the maximum gas
+(a rough sketch appears at the end of this Discussion section).
Additionally,
+applications can currently opt to have all transactions in the mempool delivered
+during `PrepareProposal` by passing `-1` for `MaxGas` and `MaxBytes` into
+[ReapMaxBytesMaxGas][reap-max-bytes-max-gas].
+
+### Requirement: Handle Transaction Metadata
+
+Moving the gas mechanism into applications adds an additional piece of complexity
+to applications. The application must now track how much gas it expects a transaction
+to consume. The mempool currently handles this bookkeeping responsibility and uses the estimated
+gas to determine the set of transactions to include in the block. In order to task
+the application with keeping track of this metadata, we should make it easier for the
+application to do so. In general, we'll want to keep only one copy of this type
+of metadata in the program at a time, either in the application or in Tendermint.
+
+The following sections are possible solutions to the problem of storing transaction
+metadata without duplication.
+
+#### Metadata Handling: EvictTx Callback
+
+A possible approach to handling transaction metadata is by adding a new `EvictTx`
+ABCI method. Whenever the mempool is removing a transaction, either because it has
+reached its TTL or because it failed `RecheckTx`, `EvictTx` would be called with
+the transaction hash. This would indicate to the application that it could free any
+metadata it was storing about the transaction, such as the computed gas fee.
+
+Eviction callbacks are pretty common in caching systems, so this would be very
+well-worn territory.
+
+#### Metadata Handling: Application-Specific Metadata Field(s)
+
+An alternative approach to handling transaction metadata would be the
+addition of a new application-metadata field in `ResponseCheckTx`. This field
+would be a protocol buffer message whose contents were entirely opaque to Tendermint.
+The application would be responsible for marshalling and unmarshalling whatever data
+it stored in this field. During `PrepareProposal`, the application would be passed
+this metadata along with the transaction, allowing the application to use it to perform
+any necessary filtering.
+
+If either of these proposed metadata handling techniques is selected, it's likely
+useful to enable applications to gossip metadata along with the transaction. This
+could easily take the form of an opaque proto message that is
+gossiped along with the transaction.
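+
+Pulling these requirements together, the sketch below shows how an application
+might replace the mempool's gas bookkeeping: `CheckTx` records an estimate and
+returns it as the v0.35 `priority` field, and a `PrepareProposal`-style hook
+trims the proposal to an application-defined gas cap. The shapes of the hooks
+and every helper here are hypothetical illustrations, not a finalized ABCI++
+API.
+
+```go
+package appgas
+
+import "crypto/sha256"
+
+type Tx = []byte
+
+// maxBlockGas is a hypothetical app-defined cap, replacing MaxGas.
+const maxBlockGas = 10_000_000
+
+// estimates is app-side metadata recorded at CheckTx time, keyed by
+// transaction hash; an EvictTx-style callback would delete entries.
+var estimates = map[[sha256.Size]byte]int64{}
+
+// checkTx records the gas estimate and returns it as the priority, so
+// the mempool reaps higher-fee transactions first.
+func checkTx(tx Tx) (priority int64) {
+	gas := int64(len(tx)) // placeholder cost model
+	estimates[sha256.Sum256(tx)] = gas
+	return gas
+}
+
+// prepareProposal keeps transactions, in the order the mempool reaped
+// them, until the application's gas cap would be exceeded.
+func prepareProposal(candidates []Tx) []Tx {
+	var (
+		block []Tx
+		used  int64
+	)
+	for _, tx := range candidates {
+		gas := estimates[sha256.Sum256(tx)]
+		if used+gas > maxBlockGas {
+			continue // leave it in the mempool for a later block
+		}
+		used += gas
+		block = append(block, tx)
+	}
+	return block
+}
+```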
+ +## References + +[eth-whitepaper-messages]: https://ethereum.org/en/whitepaper/#messages-and-transactions +[gas-add-pr]: https://github.com/tendermint/tendermint/pull/2360 +[cosmos-sdk-gas]: https://github.com/cosmos/cosmos-sdk/blob/c00cedb1427240a730d6eb2be6f7cb01f43869d3/docs/basics/gas-fees.md +[cosmos-sdk-fees]: https://github.com/cosmos/cosmos-sdk/blob/c00cedb1427240a730d6eb2be6f7cb01f43869d3/docs/basics/tx-lifecycle.md#gas-and-fees +[anoma-gas]: https://github.com/anoma/anoma/blob/6974fe1532a59db3574fc02e7f7e65d1216c1eb2/docs/src/specs/ledger.md#transaction-execution +[cosmos-sdk-fee]: https://github.com/cosmos/cosmos-sdk/blob/c00cedb1427240a730d6eb2be6f7cb01f43869d3/types/tx/tx.pb.go#L780-L794 +[issue-7750]: https://github.com/tendermint/tendermint/issues/7750 +[reap-max-bytes-max-gas]: https://github.com/tendermint/tendermint/blob/1ac58469f32a98f1c0e2905ca1773d9eac7b7103/internal/mempool/types.go#L45 +[add-mempool-error]: https://github.com/tendermint/tendermint/blob/205bfca66f6da1b2dded381efb9ad3792f9404cf/rpc/coretypes/responses.go#L239 diff --git a/docs/rfc/rfc-012-custom-indexing.md b/docs/rfc/rfc-012-custom-indexing.md new file mode 100644 index 0000000000..9dc9bdbd60 --- /dev/null +++ b/docs/rfc/rfc-012-custom-indexing.md @@ -0,0 +1,352 @@ +# RFC 012: Event Indexing Revisited + +## Changelog + +- 11-Feb-2022: Add terminological notes. +- 10-Feb-2022: Updated from review feedback. +- 07-Feb-2022: Initial draft (@creachadair) + +## Abstract + +A Tendermint node allows ABCI events associated with block and transaction +processing to be "indexed" into persistent storage. The original Tendermint +implementation provided a fixed, built-in [proprietary indexer][kv-index] for +such events. + +In response to user requests to customize indexing, [ADR 065][adr065] +introduced an "event sink" interface that allows developers (at least in +theory) to plug in alternative index storage. + +Although ADR-065 was a good first step toward customization, its implementation +model does not satisfy all the user requirements. Moreover, this approach +leaves some existing technical issues with indexing unsolved. + +This RFC documents these concerns, and discusses some potential approaches to +solving them. This RFC does _not_ propose a specific technical decision. It is +meant to unify and focus some of the disparate discussions of the topic. + + +## Background + +We begin with some important terminological context. The term "event" in +Tendermint can be confusing, as the same word is used for multiple related but +distinct concepts: + +1. **ABCI Events** refer to the key-value metadata attached to blocks and + transactions by the application. These values are represented by the ABCI + `Event` protobuf message type. + +2. **Consensus Events** refer to the data published by the Tendermint node to + its pubsub bus in response to various consensus state transitions and other + important activities, such as round updates, votes, transaction delivery, + and block completion. + +This confusion is compounded because some "consensus event" values also have +"ABCI event" metadata attached to them. Notably, block and transaction items +typically have ABCI metadata assigned by the application. + +Indexers and RPC clients subscribed to the pubsub bus receive **consensus +events**, but they identify which ones to care about using query expressions +that match against the **ABCI events** associated with them. 
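+
+As a small illustration of the first kind, an ABCI event is just typed
+key-value metadata. The Go sketch below mirrors the shape of the ABCI `Event`
+protobuf message; the field names are paraphrased from the protobuf
+definitions rather than quoted from generated code.
+
+```go
+package abcievents
+
+// Event mirrors the ABCI Event message: a type tag plus a list of
+// key-value attributes that the application attaches to a block or
+// transaction, e.g. Type "transfer" with sender/recipient attributes.
+type Event struct {
+	Type       string
+	Attributes []EventAttribute
+}
+
+// EventAttribute is one key-value pair of application metadata. The
+// Index flag marks attributes the node's indexer should index.
+type EventAttribute struct {
+	Key   string
+	Value string
+	Index bool
+}
+```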
+
+In the discussion that follows, we will use the term **event item** to refer to
+a datum published to or received from the pubsub bus, and **ABCI event** or
+**event metadata** to refer to the key/value annotations.
+
+**Indexing** in this context means recording the association between certain
+ABCI metadata and the blocks or transactions they're attached to. The ABCI
+metadata typically carry application-specific details like sender and recipient
+addresses, category tags, and so forth, that are not part of consensus but are
+used by UI tools to find and display transactions of interest.
+
+The consensus node records the blocks and transactions as part of its block
+store, but does not persist the application metadata. Metadata persistence is
+the task of the indexer, which can be (optionally) enabled by the node
+operator.
+
+### History
+
+The [original indexer][kv-index] built into Tendermint stored index data in an
+embedded [`tm-db` database][tmdb] with a proprietary key layout.
+In [ADR 065][adr065], we noted that this implementation has both performance
+and scaling problems under load. Moreover, the only practical way to query the
+index data is via the [query filter language][query] used for event
+subscription. [Issue #1161][i1161] appears to be part of the motivating context for that ADR.
+
+To mitigate both of these concerns, we introduced the [`EventSink`][esink]
+interface, combining the original transaction and block indexer interfaces
+along with some service plumbing. Using this interface, a developer can plug
+in an indexer that uses a more efficient storage engine, and provides a more
+expressive query language. As a proof-of-concept, we built a [PostgreSQL event
+sink][psql] that exports data to a [PostgreSQL database][postgres].
+
+Although this approach addressed some of the immediate concerns, there are
+several issues for custom indexing that have not been fully addressed. Here we
+will discuss them in more detail.
+
+For further context, including links to user reports and related work, see also
+the [Pluggable custom event indexing][i7135] tracking issue.
+
+### Issue 1: Tight Coupling
+
+The `EventSink` interface supports multiple implementations, but plugging in
+implementations still requires tight integration with the node. In particular:
+
+- Any custom indexer must either be written in Go and compiled into the
+  Tendermint binary, or the developer must write a Go shim to communicate with
+  the implementation and build that into the Tendermint binary.
+
+- This means to support a custom indexer, it either has to be integrated into
+  the Tendermint core repository, or every installation that uses that indexer
+  must fetch or build a patched version of Tendermint.
+
+The problem with integrating indexers into Tendermint Core is that every user
+of Tendermint Core takes a dependency on all supported indexers, including
+those they never use. Even if the unused code is disabled with build tags,
+users have to remember to do this or potentially be exposed to security issues
+that may arise in any of the custom indexers. This is a risk for Tendermint,
+which is a trust-critical component of all applications built on it.
+
+The problem with _not_ integrating indexers into Tendermint Core is that any
+developer who wants to use a particular indexer must now fetch or build a
+patched version of the core code that includes the custom indexer.
Besides
+being inconvenient, this makes it harder for users to upgrade their node, since
+they need to either re-apply their patches directly or wait for an intermediary
+to do it for them.
+
+Even for developers who have written their applications in Go and link with the
+consensus node directly (e.g., using the [Cosmos SDK][sdk]), these issues add a
+potentially significant complication to the build process.
+
+### Issue 2: Legacy Compatibility
+
+The `EventSink` interface retains several limitations of the original
+proprietary indexer. These include:
+
+- The indexer has no control over which event items are reported. Only the
+  exact block and transaction events that were reported to the original indexer
+  are reported to a custom indexer.
+
+- The interface requires the implementation to define methods for the legacy
+  search and query API. This requirement comes from the integration with the
+  [event subscription RPC API][event-rpc], but actually supporting these
+  methods is not trivial.
+
+At present, only the original KV indexer implements the query methods. Even the
+proof-of-concept PostgreSQL implementation simply reports errors for all calls
+to these methods.
+
+Even for a plugin written in Go, implementing these methods "correctly" would
+require parsing and translating the custom query language over whatever storage
+platform the indexer uses.
+
+For a plugin _not_ written in Go, even beyond the cost of integration the
+developer would have to re-implement the entire query language.
+
+### Issue 3: Indexing Delays Consensus
+
+Within the node, indexing hooks into the same internal pubsub dispatcher that
+is used to export event items to the [event subscription RPC API][event-rpc].
+In contrast with RPC subscribers, however, indexing is a "privileged"
+subscriber: If an RPC subscriber is "too slow", the node may terminate the
+subscription and disconnect the client. That means that RPC subscribers may
+lose (miss) event items. The indexer, however, is "unbuffered", and the
+publisher will never drop or disconnect from it. If the indexer is slow, the
+publisher will block until it returns, to ensure that no event items are lost.
+
+In practice, this means that the performance of the indexer has a direct effect
+on the performance of the consensus node: If the indexer is slow or stalls, it
+will slow or halt the progress of consensus. Users have already reported this
+problem even with the built-in indexer (see, for example, [#7247][i7247]).
+Extending this concern to arbitrary user-defined custom indexers gives that
+risk a much larger surface area.
+
+
+## Discussion
+
+It is not possible to simultaneously guarantee that publishing event items will
+not delay consensus, and also that all event items of interest are always
+completely indexed.
+
+Therefore, our choice is between eliminating delay (and minimizing loss) or
+eliminating loss (and minimizing delay). Currently, we take the second
+approach, which has led to user complaints about consensus delays due to
+indexing and subscription overhead.
+
+- If we agree that consensus performance supersedes index completeness, our
+  design choices are to constrain the likelihood and frequency of missing event
+  items.
+
+- If we decide that index completeness supersedes consensus performance, our
+  option is to minimize overhead on the event delivery path and document that
+  indexer plugins constrain the rate of consensus.
+
+Since we have user reports requesting both properties, we have to choose one or
+the other.
Since the primary job
+of the consensus engine is to correctly, robustly, reliably, and efficiently
+replicate application state across the network, I believe the correct choice
+is to favor consensus performance.
+
+An important consideration for this decision is that a node does not index
+application metadata separately: If indexing is disabled, there is no built-in
+mechanism to go back and replay or reconstruct the data that an indexer would
+have stored. The node _does_ store the blockchain itself (i.e., the blocks and
+their transactions), so potentially some use cases currently handled by the
+indexer could be handled by the node. For example, allowing clients to ask
+whether a given transaction ID has been committed to a block could in principle
+be done without an indexer, since it does not depend on application metadata.
+
+Inevitably, a question will arise whether we could implement both strategies
+and toggle between them with a flag. That would be a worst-case scenario,
+requiring us to maintain the complexity of two very different operational
+concerns. If our goal is that Tendermint should be as simple, efficient, and
+trustworthy as possible, there is not a strong case for making these options
+configurable: We should pick a side and commit to it.
+
+### Design Principles
+
+Although there is no unique "best" solution to the issues described above,
+there are some specific principles that a solution should include:
+
+1. **A custom indexer should not require integration into Tendermint core.** A
+   developer or node operator can create, build, deploy, and use a custom
+   indexer with a stock build of the Tendermint consensus node.
+
+2. **Custom indexers cannot stall consensus.** An indexer that is slow or
+   stalls cannot slow down or prevent core consensus from making progress.
+
+   The plugin interface must give node operators control over the tolerances
+   for acceptable indexer performance, and the means to detect when indexers
+   are falling outside those tolerances, but indexer failures should "fail
+   safe" with respect to consensus (even if that means the indexer may miss
+   some data, in sufficiently extreme circumstances).
+
+3. **Custom indexers control which event items they index.** A custom indexer
+   is not limited to only the current transaction and block events, but can
+   observe any event item published by the node.
+
+4. **Custom indexing is forward-compatible.** Adding new event item types or
+   metadata to the consensus node should not require existing custom indexers
+   to be rebuilt or modified, unless they want to take advantage of the new
+   data.
+
+5. **Indexers are responsible for answering queries.** An indexer plugin is not
+   required to support the legacy query filter language, nor to be compatible
+   with the legacy RPC endpoints for accessing them. Any APIs for clients to
+   query a custom index are the responsibility of the indexer, not the node.
+
+### Open Questions
+
+Given the constraints outlined above, there are important design questions we
+must answer to guide any specific changes:
+
+1. **What is an acceptable probability that, given sufficiently extreme
+   operational issues, an indexer might miss some number of events?**
+
+   There are two parts to this question: One is what constitutes an extreme
+   operational problem, the other is how likely we are to miss some number of
+   event items.
+
+   - If the consensus is that no event item must ever be missed, no matter how
+     bad the operational circumstances, then we _must_ accept that indexing can
+     slow or halt consensus arbitrarily. It is impossible to guarantee complete
+     index coverage without potentially unbounded delays.
+
+   - Otherwise, how much data can we afford to lose and how often? For example,
+     if we can ensure no event item will be lost unless the indexer halts for
+     at least five minutes, is that acceptable? What probabilities and time
+     ranges are reasonable for real production environments?
+
+2. **What level of operational overhead is acceptable to impose on node
+   operators to support indexing?**
+
+   Are node operators willing to configure and run custom indexers as
+   sidecar-type processes alongside a node? How much indexer setup, above and
+   beyond the work of setting up the underlying node in isolation, is tractable
+   in production networks?
+
+   The answer to this question also informs the question of whether we should
+   keep an "in-process" indexing option, and to what extent that option needs
+   to satisfy the suggested design principles.
+
+   Relatedly, to what extent do we need to be concerned about the cost of
+   encoding and sending event items to an external process (e.g., as JSON blobs
+   or protobuf wire messages)? Given that the node already encodes event items
+   as JSON for subscription purposes, the overhead would be negligible for the
+   node itself, but the indexer would have to decode the results before
+   processing them.
+
+3. **What (if any) query APIs does the consensus node need to export,
+   independent of the indexer implementation?**
+
+   One typical example is whether the node should be able to answer queries
+   like "is this transaction ID in a block?" Currently, a node cannot answer
+   this query _unless_ it runs the built-in KV indexer. Does the node need to
+   continue to support that query even for nodes that disable the KV indexer,
+   or which use a custom indexer?
+
+### Informal Design Intent
+
+The design principles described above implicate several components of the
+Tendermint node, beyond just the indexer. In the context of [ADR 075][adr075],
+we are re-working the RPC event subscription API to improve some of the UX
+issues discussed above for RPC clients. It is our expectation that a solution
+for pluggable custom indexing will take advantage of some of the same work.
+
+On that basis, the design approach I am considering for custom indexing looks
+something like this (subject to refinement):
+
+1. A custom indexer runs as a separate process from the node.
+
+2. The indexer subscribes to event items via the ADR 075 events API.
+
+   This means indexers would receive event payloads as JSON rather than
+   protobuf, but since we already have to support JSON encoding for the RPC
+   interface anyway, that should not increase complexity for the node.
+
+3. The existing PostgreSQL indexer gets reworked to have this form, and is no
+   longer built as part of the Tendermint core binary.
+
+   We can retain the code in the core repository as a proof-of-concept, or
+   perhaps create a separate repository with contributed indexers and move it
+   there.
+
+4. (Possibly) Deprecate and remove the legacy KV indexer, or disable it by
+   default. If we decide to remove it, we can also remove the legacy RPC
+   endpoints for querying the KV indexer.
+
+   If we plan to do this, we should also investigate providing a way for
+   clients to query whether a given transaction ID has landed in a block.
+   That serves a common need, and currently _only_ works if the KV indexer is
+   enabled, but could be addressed more simply using the other data a node
+   already has stored, without having to answer more general queries.
+
+
+## References
+
+- [ADR 065: Custom Event Indexing][adr065]
+- [ADR 075: RPC Event Subscription Interface][adr075]
+- [Cosmos SDK][sdk]
+- [Event subscription RPC][event-rpc]
+- [KV transaction indexer][kv-index]
+- [Pluggable custom event indexing][i7135] (#7135)
+- [PostgreSQL event sink][psql]
+  - [PostgreSQL database][postgres]
+- [Query filter language][query]
+- [Stream events to postgres for indexing][i1161] (#1161)
+- [Unbuffered event subscription slow down the consensus][i7247] (#7247)
+- [`EventSink` interface][esink]
+- [`tm-db` library][tmdb]
+
+[adr065]: https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-065-custom-event-indexing.md
+[adr075]: https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-075-rpc-subscription.md
+[esink]: https://pkg.go.dev/github.com/tendermint/tendermint/internal/state/indexer#EventSink
+[event-rpc]: https://docs.tendermint.com/master/rpc/#/Websocket/subscribe
+[i1161]: https://github.com/tendermint/tendermint/issues/1161
+[i7135]: https://github.com/tendermint/tendermint/issues/7135
+[i7247]: https://github.com/tendermint/tendermint/issues/7247
+[kv-index]: https://github.com/tendermint/tendermint/blob/master/internal/state/indexer/tx/kv
+[postgres]: https://postgresql.org/
+[psql]: https://github.com/tendermint/tendermint/blob/master/internal/state/indexer/sink/psql
+[query]: https://pkg.go.dev/github.com/tendermint/tendermint/internal/pubsub/query/syntax
+[sdk]: https://github.com/cosmos/cosmos-sdk
+[tmdb]: https://pkg.go.dev/github.com/tendermint/tm-db#DB
diff --git a/docs/rfc/rfc-013-abci++.md b/docs/rfc/rfc-013-abci++.md
new file mode 100644
index 0000000000..0289c187ec
--- /dev/null
+++ b/docs/rfc/rfc-013-abci++.md
@@ -0,0 +1,253 @@
+# RFC 013: ABCI++
+
+## Changelog
+
+- 2020-01-11: Initialized
+- 2021-02-11: Migrate RFC to tendermint repo (Originally [RFC 004](https://github.com/tendermint/spec/pull/254))
+
+## Author(s)
+
+- Dev (@valardragon)
+- Sunny (@sunnya97)
+
+## Context
+
+ABCI is the interface between the consensus engine and the application.
+It defines when the application can talk to consensus during the execution of a blockchain.
+At the moment, the application can only act at one phase in consensus, immediately after a block has been finalized.
+
+This restriction on the application prohibits numerous features for the application, including many scalability improvements that are now better understood than when ABCI was first written.
+For example, many of the scalability proposals can be boiled down to "Make the miner / block proposers / validators do work, so the network does not have to".
+This includes optimizations such as tx-level signature aggregation, state transition proofs, etc.
+Furthermore, many new security properties cannot be achieved in the current paradigm, as the application cannot enforce that validators do more than just finalize txs.
+This includes features such as threshold cryptography, and guaranteed IBC connection attempts.
+We propose introducing three new phases to ABCI to enable these new features, and renaming the existing methods for block execution.
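+
+To preview how the pieces fit together before the per-phase sections below, here is a loose Go-flavored sketch of the resulting application surface. Every name and type here is an illustrative stand-in; the Rust-style pseudocode in the Proposal section is the authoritative description.
+
+```go
+// A loose sketch only: every type below is a placeholder, and the
+// Rust-style pseudocode in the Proposal section is authoritative.
+package abcipp
+
+type (
+	Block                   struct{}
+	BlockData               struct{}
+	ResponseProcessProposal struct{}
+	VoteExtension           struct{}
+	ResponseFinalizeBlock   struct{}
+)
+
+// Application marks where each proposed phase sits alongside the renamed
+// block-execution call.
+type Application interface {
+	PrepareProposal(b Block) BlockData               // proposer only
+	ProcessProposal(b Block) ResponseProcessProposal // every full node
+	ExtendVote(height, round uint64) VoteExtension   // validators, at precommit time
+	FinalizeBlock() ResponseFinalizeBlock            // replaces BeginBlock, [DeliverTx], EndBlock
+}
+```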
+
+#### Prepare Proposal Phase
+
+This phase aims to allow the block proposer to perform more computation, to reduce load on all other full nodes and light clients in the network.
+It is intended to enable features such as batch optimizations on the transaction data (e.g. signature aggregation, zk rollup style validity proofs, etc.), enabling stateless blockchains with validator provided authentication paths, etc.
+
+This new phase will only be executed by the block proposer. The application will take in the block header and raw transaction data output by the consensus engine's mempool. It will then return block data that is prepared for gossip on the network, and additional fields to include in the block header.
+
+#### Process Proposal Phase
+
+This phase aims to allow applications to determine the validity of a new block proposal, and execute computation on the block data, prior to the block's finalization.
+It is intended to enable applications to reject block proposals with invalid data, and to enable alternate pipelined execution models. (Such as Ethereum-style immediate execution)
+
+This phase will be executed by all full nodes upon receiving a block, though on the application side it can do more work in the event that the current node is a validator.
+
+#### Vote Extension Phase
+
+This phase aims to allow applications to require that their validators do more than just validate blocks.
+Example use cases of this include validator determined price oracles, validator guaranteed IBC connection attempts, and validator based threshold crypto.
+
+This adds an app-determined data field that every validator must include with their vote, and these will thus appear in the header.
+
+#### Rename {BeginBlock, [DeliverTx], EndBlock} to FinalizeBlock
+
+The prior phases give the application more flexibility in its execution model for a block, and they obsolete the current methods for how the consensus engine relates the block data to the state machine. Thus we refactor the existing methods to better reflect what is happening in the new ABCI model.
+
+This rename doesn't on its own enable anything new, but instead improves naming to clarify the expectations from the application in this new communication model. The existing ABCI methods `BeginBlock, [DeliverTx], EndBlock` are renamed to a single method called `FinalizeBlock`.
+
+#### Summary
+
+We include, at the end of this document, a more detailed list of features / scaling improvements that are blocked, and which new phases resolve them.
+
+
+On the top is the existing definition of ABCI, and on the bottom is the proposed ABCI++.
+
+## Proposal
+
+Below we suggest an API to add these three new phases.
+In this document, sometimes the final round of voting is referred to as precommit for clarity in how it acts in the Tendermint case.
+
+### Prepare Proposal
+
+*Note: APIs in this section will change after Vote Extensions; we list the adjusted APIs further in the proposal.*
+
+The Prepare Proposal phase allows the block proposer to perform application-dependent work in a block, to lower the amount of work the rest of the network must do. This enables batch optimizations to a block, which has been empirically demonstrated to be a key component for scaling. This phase introduces the following ABCI method
+
+```rust
+fn PrepareProposal(Block) -> BlockData
+```
+
+where `BlockData` is a type alias for however data is internally stored within the consensus engine. In Tendermint Core today, this is `[]Tx`.
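+
+To make the data flow concrete, below is a minimal Go-flavored sketch of a proposer-side handler. The `batch/` prefix convention and the concatenation step are invented for illustration, standing in for a real batch optimization such as signature aggregation:
+
+```go
+package abciapp
+
+import "bytes"
+
+// Tx is a raw transaction; BlockData mirrors the consensus engine's
+// internal block data, which in Tendermint Core today is just []Tx.
+type (
+	Tx        = []byte
+	BlockData = []Tx
+)
+
+// prepareProposal collapses each run of transactions carrying a
+// hypothetical "batch/" marker into a single transaction, as a stand-in
+// for a real batch optimization (e.g. signature aggregation).
+func prepareProposal(txs BlockData) BlockData {
+	var out BlockData
+	var batch []Tx
+	flush := func() {
+		if len(batch) > 0 {
+			// Placeholder aggregation: a real application would combine
+			// the batched transactions cryptographically, not concatenate.
+			out = append(out, bytes.Join(batch, nil))
+			batch = nil
+		}
+	}
+	for _, tx := range txs {
+		if bytes.HasPrefix(tx, []byte("batch/")) {
+			batch = append(batch, tx)
+			continue
+		}
+		flush()
+		out = append(out, tx)
+	}
+	flush()
+	return out
+}
+```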
+
+The application may read the entire block proposal, and mutate the block data fields. Mutated transactions will still get removed from the mempool later on, as the mempool rechecks all transactions after a block is executed.
+
+The `PrepareProposal` API will be modified in the vote extensions section, to allow the application to modify the header.
+
+### Process Proposal
+
+The Process Proposal phase sends the block data to the state machine, prior to running the last round of votes on the state machine. This enables features such as allowing validators to reject a block according to whether the state machine deems it valid, and changing the block execution pipeline.
+
+We introduce three new methods,
+
+```rust
+fn VerifyHeader(header: Header, isValidator: bool) -> ResponseVerifyHeader {...}
+fn ProcessProposal(block: Block) -> ResponseProcessProposal {...}
+fn RevertProposal(height: usize, round: usize) {...}
+```
+
+where
+
+```rust
+struct ResponseVerifyHeader {
+    accept_header: bool,
+    evidence: Vec<Evidence>
+}
+struct ResponseProcessProposal {
+    accept_block: bool,
+    evidence: Vec<Evidence>
+}
+```
+
+Upon receiving a block header, every validator runs `VerifyHeader(header, isValidator)`. The reason why `VerifyHeader` is split from `ProcessProposal` is due to the later sections for Prepare Proposal and Vote Extensions, where there may be application dependent data in the header that must be verified before accepting the header.
+If the returned `ResponseVerifyHeader.accept_header` is false, then the validator must precommit nil on this block, and reject all other precommits on this block. `ResponseVerifyHeader.evidence` is appended to the validator's local `EvidencePool`.
+
+Upon receiving an entire block proposal (in the current implementation, all "block parts"), every validator runs `ProcessProposal(block)`. If the returned `ResponseProcessProposal.accept_block` is false, then the validator must precommit nil on this block, and reject all other precommits on this block. `ResponseProcessProposal.evidence` is appended to the validator's local `EvidencePool`.
+
+Once a validator knows that consensus has failed to be achieved for a given block, it must run `RevertProposal(block.height, block.round)`, in order to signal to the application to revert any potentially mutative state changes it may have made. In Tendermint, this occurs when incrementing rounds.
+
+**RFC**: How do we handle the scenario where honest node A finalized on round x, and honest node B finalized on round x + 1? (e.g., when 2f precommits are publicly known, and a validator precommits itself but doesn't broadcast its precommit, and rounds are then incremented) Is this a real concern? The state root derived could change if everyone finalizes on round x+1, not round x, as the state machine can depend non-uniformly on the timestamp.
+
+The application is expected to cache the block data for later execution.
+
+The `isValidator` flag is set according to whether the current node is a validator or a full node. This is intended to allow for beginning validator-dependent computation that will be included later in vote extensions. (An example of this is threshold decryptions of ciphertexts.)
+
+### DeliverTx rename to FinalizeBlock
+
+After implementing `ProcessProposal`, txs no longer need to be delivered during the block execution phase. Instead, they are already in the state machine. Thus `BeginBlock, DeliverTx, EndBlock` can all be replaced with a single ABCI method for `ExecuteBlock`.
+Internally the application may still structure its method for executing the block as `BeginBlock, DeliverTx, EndBlock`. However, it is overly restrictive to enforce that the block be executed only after it is finalized. There are multiple other, very reasonable pipelined execution models one can go for. So instead we suggest calling this succession of methods `FinalizeBlock`. We propose the following API
+
+Replace the `BeginBlock, DeliverTx, EndBlock` ABCI methods with the following method
+
+```rust
+fn FinalizeBlock() -> ResponseFinalizeBlock
+```
+
+where `ResponseFinalizeBlock` has the following API, in terms of what already exists
+
+```rust
+struct ResponseFinalizeBlock {
+    updates: ResponseEndBlock,
+    tx_results: Vec<ResponseDeliverTx>
+}
+```
+
+`ResponseEndBlock` should then be renamed to `ConsensusUpdates` and `ResponseDeliverTx` should be renamed to `ResponseTx`.
+
+### Vote Extensions
+
+The Vote Extensions phase allows applications to force their validators to do more than just validate within consensus. This is done by allowing the application to add more data to their votes, in the final round of voting. (Namely the precommit.)
+This additional application data will then appear in the block header.
+
+First we discuss the API changes to the vote struct directly
+
+```rust
+fn ExtendVote(height: u64, round: u64) -> (UnsignedAppVoteData, SelfAuthenticatingAppData)
+fn VerifyVoteExtension(signed_app_vote_data: Vec<u8>, self_authenticating_app_vote_data: Vec<u8>) -> bool
+```
+
+There are two types of data that the application can enforce validators to include with their vote.
+There is data that the app needs the validator to sign over in their vote, and there can be self-authenticating vote data. Self-authenticating here means that the application, upon seeing these bytes, knows they are valid, came from the validator, and are non-malleable. We give an example of each type of vote data here, to make their roles clearer.
+
+- Unsigned app vote data: A use case of this is if you wanted validator-backed oracles, where each validator independently signs some oracle data in their vote, and the median of these values is used on chain. Thus we leverage consensus' signing process for convenience, and use that same key to sign the oracle data.
+- Self-authenticating vote data: A use case of this is in threshold random beacons. Every validator produces a threshold beacon share. This threshold beacon share can be verified by any node in the network, given the share and the validator's public key (which is not the same as its consensus public key). However, this beacon share will not make it into the subsequent block's header. The shares will be aggregated by the subsequent block proposer to get a single random beacon value that will appear in the subsequent block's header. Everyone can then verify that this aggregated value came from the requisite threshold of the validator set, without increasing the bandwidth for full nodes or light clients. To achieve this goal, the self-authenticating vote data cannot be signed over by the consensus key along with the rest of the vote, as that would require all full nodes & light clients to know this data in order to verify the vote.
+
+The `CanonicalVote` struct will accommodate the `UnsignedAppVoteData` field by adding another string to its encoding, after the `chain-id`. This should not interfere with existing hardware signing integrations, as it does not affect the constant offset for the `height` and `round`, and the vote size does not have an explicit upper bound.
+(So, from the HSM's perspective, adding this unsigned app vote data field is equivalent to having a very long chain ID.)
+
+**RFC**: Please comment if you think it will be fine to elongate the message the HSM signs, or if we need to explore pre-hashing the app vote data.
+
+The flow of these methods is that when a validator has to precommit, Tendermint will first produce a precommit canonical vote without the application vote data. It will then pass it to the application, which will return unsigned application vote data, and self-authenticating application vote data. It will bundle the `unsigned_application_vote_data` into the canonical vote, and pass it to the HSM to sign. Finally it will package the self-authenticating app vote data, and the `signed_vote_data` together, into one final Vote struct to be passed around the network.
+
+#### Changes to Prepare Proposal Phase
+
+There are many use cases where the additional data from vote extensions can be batch optimized.
+This is mainly of interest when the votes include self-authenticating app vote data that can be batched together, or the unsigned app vote data is the same across all votes.
+To allow for this, we change the PrepareProposal API to the following
+
+```rust
+fn PrepareProposal(Block, UnbatchedHeader) -> (BlockData, Header)
+```
+
+where `UnbatchedHeader` essentially contains a "RawCommit", and the `Header` contains a batch-optimized `commit` and an additional "Application Data" field in its root. This will involve a number of changes to core data structures, which will be gone over in the ADR.
+The `Unbatched` header and `rawcommit` will never be broadcast; they will be completely internal to consensus.
+
+#### Inter-process communication (IPC) effects
+
+For brevity in the exposition above, we did not discuss the trade-offs that may occur in interprocess communication delays that these changes will introduce.
+These new ABCI methods add more locations where the application must communicate with the consensus engine.
+In most configurations, we expect that the consensus engine and the application will be either statically or dynamically linked, so all communication is a matter of at most adjusting the memory model the data is laid out within.
+This memory model conversion is typically considered negligible, as delay here is measured on the order of microseconds at most, whereas we face millisecond delays due to cryptography and network overheads.
+Thus we ignore the overhead in the case of linked libraries.
+
+In the case where the consensus engine and the application are run in separate processes, and thus communicate with a form of inter-process communication (IPC), the delays can easily become on the order of milliseconds based upon the data sent. Thus it's important to consider what's happening here.
+We go through this phase by phase.
+
+##### Prepare proposal IPC overhead
+
+This requires a round of IPC communication, where both directions are quite large. Namely, the proposer communicating an entire block to the application.
+However, this can be mitigated by splitting up `PrepareProposal` into two distinct, async methods, one for the block IPC communication, and one for the Header IPC communication.
+
+Then for chains where the block data does not depend on the header data, the block data IPC communication can proceed in parallel to the prior block's voting phase.
+(As a node can know whether or not it's the leader in the next round.)
+
+Furthermore, the cost of this IPC communication is expected to be quite low relative to the amount of p2p gossip time it takes to send the block data around the network, so this is perhaps a premature concern until more sophisticated block gossip protocols are implemented.
+
+##### Process Proposal IPC overhead
+
+This phase changes the amount of time available for the consensus engine to deliver a block's data to the state machine.
+Before, the block data for block N would be delivered to the state machine upon receiving a commit for block N and then be executed.
+The state machine would respond after executing the txs and before prevoting.
+After this change, the window for block delivery from the consensus engine to the state machine runs from the time of receiving block proposal N to the time of precommitting on proposal N.
+It is expected that this difference is unimportant in practice, as this time is in parallel to one round of p2p communication for prevoting, which is expected to be significantly less than the time for the consensus engine to deliver a block to the state machine.
+
+##### Vote Extension IPC overhead
+
+This has a small amount of data, but does incur an IPC round trip delay. This IPC round trip delay is pretty negligible as compared to the variance in vote gossip time. (The IPC delay is typically on the order of 10 microseconds.)
+
+## Status
+
+Proposed
+
+## Consequences
+
+### Positive
+
+- Enables a large number of new features for applications
+- Supports both immediate and delayed execution models
+- Allows application-specific data from each validator
+- Allows for batch optimizations across txs, and votes
+
+### Negative
+
+- This is a breaking change to all existing ABCI clients; however, the application should be able to have a thin wrapper to replicate existing ABCI behavior.
+  - PrepareProposal - can be a no-op
+  - Process Proposal - has to cache the block, but can otherwise be a no-op
+  - Vote Extensions - can be a no-op
+  - Finalize Block - Can black-box call BeginBlock, DeliverTx, EndBlock given the cached block data
+
+- Vote Extensions adds more complexity to core Tendermint Data Structures
+- Allowing alternate execution models will lead to a proliferation of new ways for applications to violate expected guarantees.
+
+### Neutral
+
+- IPC overhead considerations change, but mostly for the better
+
+## References
+
+Reference for IPC delay constants:
+
+### Short list of blocked features / scaling improvements with required ABCI++ Phases
+
+| Feature | PrepareProposal | ProcessProposal | Vote Extensions |
+| :--- | :---: | :---: | :---: |
+| Tx based signature aggregation | X | | |
+| SNARK proof of valid state transition | X | | |
+| Validator provided authentication paths in stateless blockchains | X | | |
+| Immediate Execution | | X | |
+| Simple soft forks | | X | |
+| Validator guaranteed IBC connection attempts | | | X |
+| Validator based price oracles | | | X |
+| Immediate Execution with increased time for block execution | X | X | X |
+| Threshold Encrypted txs | X | X | X |
diff --git a/docs/rfc/rfc-014-semantic-versioning.md b/docs/rfc/rfc-014-semantic-versioning.md
new file mode 100644
index 0000000000..0119901b13
--- /dev/null
+++ b/docs/rfc/rfc-014-semantic-versioning.md
@@ -0,0 +1,94 @@
+# RFC 014: Semantic Versioning
+
+## Changelog
+
+- 2021-11-19: Initial Draft
+- 2022-02-11: Migrate RFC to tendermint repo (Originally [RFC 006](https://github.com/tendermint/spec/pull/365))
+
+## Author(s)
+
+- Callum Waters @cmwaters
+
+## Context
+
+We use versioning as an instrument to hold a set of promises to users and signal when such a set changes and how. In the conventional sense of a Go library, major versions signal that the public Go APIs have changed in a breaking way and thus require the users of such libraries to change their usage accordingly. Tendermint is a bit different in that there are multiple users: application developers (both in-process and out-of-process), node operators, and external clients. More importantly, both how these users interact with Tendermint and what they find important differ from the users of a more conventional library.
+
+This document attempts to encapsulate the discussions around versioning in Tendermint and draws upon them to propose a guide to how Tendermint uses versioning to make promises to its users.
+
+For a versioning policy to make sense, we must also address the intended frequency of breaking changes. The strictest guarantees in the world will not help users if we plan to break them with every release.
+
+Finally, I would like to remark that this RFC only addresses the "what", as in what the rules for versioning are. The "how" of Tendermint implementing the versioning rules we choose will be addressed in a later RFC on Soft Upgrades.
+
+## Discussion
+
+We first begin with a roundup of the various users and a set of assumptions on what these users expect from Tendermint in regards to versioning:
+
+1. **Application Developers**, those that use the ABCI to build applications on top of Tendermint, are chiefly concerned with that API. Breaking changes will force developers to modify large portions of their codebase to accommodate the changes. Some ABCI changes, such as introducing priority for the mempool, don't require any effort and can be lazily adopted, whilst changes like ABCI++ may force applications to redesign their entire execution system. It's also worth considering that the APIs for Go developers differ from those for developers of other languages. The former can use the entire Tendermint library, most notably the local RPC methods, and so the team must be wary of all public Go APIs.
+2. **Node Operators**, those running node infrastructure, are predominantly concerned with downtime, the complexity and frequency of upgrading, and avoiding data loss. They may also be concerned about changes that may break the scripts and tooling they use to supervise their nodes.
+3. **External Clients** are those that perform any of the following:
+    - consume the RPC endpoints of nodes like `/block`
+    - subscribe to the event stream
+    - make queries to the indexer
+
+    This set is concerned with chain upgrades which will impact their ability to query state and block data as well as broadcast transactions. Examples include wallets and block explorers.
+
+4. **IBC module and relayers**. The developers of IBC and consumers of their software are concerned about changes that may affect a chain's ability to send arbitrary messages to another chain. Specifically, these users are affected by any breaking changes to the light client verification algorithm.
+
+Although we present them here as having different concerns, in a broader sense these user groups share a concern for the end users of applications. A crucial principle guiding this RFC is that **the ability for chains to provide continual service is more important than the actual upgrade burden put on the developers of these chains**. This means some extra burden for application developers is tolerable if it minimizes or substantially reduces downtime for the end user.
+
+### Modes of Interprocess Communication
+
+Tendermint has two primary mechanisms to communicate with other processes: RPC and P2P. The division marks the boundary between the internal and external components of the network:
+
+- The P2P layer is used in all cases that nodes (of any type) need to communicate with one another.
+- The RPC interface is for any outside process that wants to communicate with a node.
+
+The design principle here is that **communication via RPC is to a trusted source** and thus the RPC service prioritizes inspection rather than verification. The P2P interface is the primary medium for verification.
+
+As an example, an in-browser light client would verify headers (and perhaps application state) via the p2p layer, and then pass information along to the client via RPC (or potentially directly via a separate API).
+
+The main exceptions to this are the IBC module and relayers, which are external to the node but also require verifiable data. Breaking changes to the light client verification path mean that all neighbouring chains that are connected will no longer be able to verify state transitions and thus pass messages back and forth.
+
+## Proposal
+
+Tendermint version labels will follow the syntax of [Semantic Versioning 2.0.0](https://semver.org/) with a major, minor and patch version. The version components will be interpreted according to these rules:
+
+For the entire cycle of a **major version** in Tendermint:
+
+- All blocks and state data in a blockchain can be queried. All headers can be verified even across minor version changes. Nodes can both block sync and state sync from genesis to the head of the chain.
+- Nodes in a network are able to communicate and perform BFT state machine replication so long as the agreed network version is the lowest of all nodes in a network. For example, nodes using version 1.5.x and 1.2.x can operate together so long as the network version is 1.2 or lower (but still within the 1.x range). This rule essentially captures the concept of network backwards compatibility.
+- Node RPC endpoints will remain compatible with existing external clients:
+  - New endpoints may be added, but old endpoints may not be removed.
+  - Old endpoints may be extended to add new request and response fields, but requests not using those fields must function as before the change.
+- Migrations should be automatic. Upgrading of one node can happen asynchronously with respect to other nodes (although agreement of a network-wide upgrade must still occur synchronously via consensus).
+
+For the entire cycle of a **minor version** in Tendermint:
+
+- Public Go APIs, for example in the `node` or `abci` packages, will not change in a way that requires any consumer (not just application developers) to modify their code.
+- No breaking changes to the block protocol. This means that all block related data structures should not change in a way that breaks any of the hashes, the consensus engine, or light client verification.
+- Upgrades between minor versions must not result in any downtime (i.e., no migrations are required), nor require any changes to the config files to continue with the existing behavior. A minor version upgrade will require only stopping the existing process, swapping the binary, and starting the new process.
+
+A new **patch version** of Tendermint will only contain bug fixes and updates that impact the security and stability of Tendermint.
+
+These guarantees will come into effect at release 1.0.
+
+## Status
+
+Proposed
+
+## Consequences
+
+### Positive
+
+- Clearer communication of what versioning means to us and the effects these promises have on our users.
+
+### Negative
+
+- Can potentially incur greater engineering effort to uphold and follow these guarantees.
+
+### Neutral
+
+## References
+
+- [SemVer](https://semver.org/)
+- [Tendermint Tracking Issue](https://github.com/tendermint/tendermint/issues/5680)
diff --git a/docs/rfc/rfc-015-abci++-tx-mutation.md b/docs/rfc/rfc-015-abci++-tx-mutation.md
new file mode 100644
index 0000000000..3c7854ed35
--- /dev/null
+++ b/docs/rfc/rfc-015-abci++-tx-mutation.md
@@ -0,0 +1,261 @@
+# RFC 015: ABCI++ TX Mutation
+
+## Changelog
+
+- 23-Feb-2022: Initial draft (@williambanfield).
+- 28-Feb-2022: Revised draft (@williambanfield).
+
+## Abstract
+
+A previous version of the ABCI++ specification detailed a mechanism for proposers to replace transactions
+in the proposed block. This scheme required the proposer to construct new transactions
+and mark these new transactions as replacing other removed transactions. The specification
+was ambiguous as to how the replacement may be communicated to peer nodes.
+This RFC discusses issues with this mechanism and possible solutions.
+
+## Background
+
+### What is the proposed change?
+
+A previous version of the ABCI++ specification proposed mechanisms for adding, removing, and replacing
+transactions in a proposed block. To replace a transaction, the application running
+`ProcessProposal` could mark a transaction as replaced by other application-supplied
+transactions by returning a new transaction marked with the `ADDED` flag and setting
+the `new_hashes` field of the removed transaction to contain the list of transaction hashes
+that replace it. In that previous specification for ABCI++, the full use of the
+`new_hashes` field is left somewhat ambiguous. At present, these hashes are not
+gossiped and are not eventually included in the block to signal replacement to
+other nodes.
+The specification did indicate that the transactions specified in
+the `new_hashes` field will be removed from the mempool, but it's not clear how
+peer nodes will learn about them.
+
+### What systems would be affected by adding transaction replacement?
+
+The 'transaction' is a central building block of a Tendermint blockchain, so adding
+a mechanism for transaction replacement would require changes to many aspects of Tendermint.
+
+The following is a rough list of the functionality that this mechanism would affect:
+
+#### Transaction indexing
+
+Tendermint's indexer stores transactions and transaction results using the hash of the executed
+transaction [as the key][tx-result-index] and the ABCI results and transaction bytes as the value.
+
+To allow transaction replacement, the replaced transactions would need to be stored as well in the
+indexer, likely as a mapping of the original transaction to the list of transaction hashes that replaced
+the original transaction.
+
+#### Transaction inclusion proofs
+
+The result of a transaction query includes a Merkle proof of the existence of the
+transaction in the blockchain. This [proof is built][inclusion-proof] as a Merkle tree
+of the hashes of all of the transactions in the block where the queried transaction was executed.
+
+To allow transaction replacement, these proofs would need to be updated to prove
+that a replaced transaction was included by replacement in the block.
+
+#### RPC-based transaction query parameters and results
+
+Tendermint's RPC allows clients to retrieve information about transactions via the
+`/tx_search` and `/tx` RPC endpoints.
+
+RPC query results containing replaced transactions would need to be updated to include
+information on replaced transactions, either by returning results for all of the replaced
+transactions, or by including a response with just the hashes of the replaced transactions,
+which clients could proceed to query individually.
+
+#### Mempool transaction removal
+
+Additional logic would need to be added to the Tendermint mempool to clear out replaced
+transactions after each block is executed. Tendermint currently removes executed transactions
+from the mempool, so this would be a pretty straightforward change.
+
+## Discussion
+
+### What value may be added to Tendermint by introducing transaction replacement?
+
+Transaction replacement would enable applications to aggregate or disaggregate transactions.
+
+For aggregation, a set of transactions that all perform related work, such as transferring
+tokens between the same two accounts, could be replaced with a single transaction,
+i.e. one that transfers a single sum from one account to the other.
+Applications that make frequent use of aggregation may be able to achieve a higher throughput.
+Aggregation would decrease the space occupied by a single client-submitted transaction in the block, allowing
+more client-submitted transactions to be executed per block.
+
+For disaggregation, a very complex transaction could be split into multiple smaller transactions.
+This may be useful if an application wishes to perform more fine-grained indexing on intermediate parts
+of a multi-part transaction.
+
+### Drawbacks to transaction replacement
+
+Transaction replacement would require updating and shimming many of the places that
+Tendermint records and exposes information about executed transactions. While
+systems within Tendermint could be updated to account for transaction replacement,
+such a system would introduce new issues and rough edges.
+
+#### No way of guaranteeing correct replacement
+
+If a user issues a transaction to the network and the transaction is replaced, the
+user has no guarantee that the replacement was correct. For example, suppose a set of users issue
+transactions A, B, and C and they are all aggregated into a new transaction, D.
+There is nothing guaranteeing that D was constructed correctly from the inputs.
+The only way for users to ensure D is correct would be if D contained all of the
+information of its constituent transactions, in which case, nothing is really gained by the replacement.
+
+#### Replacement transactions not signed by submitter
+
+Abstractly, Tendermint simply views transactions as a ball of bytes and therefore
+should be fine with replacing one for another. However, many applications require
+that transactions submitted to the chain be signed by some private key to authenticate
+and authorize the transaction. Replaced transactions could not be signed by the
+submitter, only by the application node. Therefore, any use of transaction replacement
+could not contain authorization from the submitter and would need to grant
+application-submitted transactions the power to perform application logic on behalf
+of a user without their consent.
+
+Granting this power to application-submitted transactions would be very dangerous
+and therefore might not be of much value to application developers.
+Transaction replacement might only be really safe in the case of application-submitted
+transactions or for transactions that require no authorization. For such transactions,
+it's not quite clear what the utility of replacement is: the application can already
+generate any transactions that it wants. The fact that such a transaction was a replacement
+is not particularly relevant to participants in the chain since the application is
+merely replacing its own transactions.
+
+#### New vector for censorship
+
+Depending on the implementation, transaction replacement may allow a node to signal
+to the rest of the chain that some transaction should no longer be considered for execution.
+Honest nodes will use the replacement mechanism to signal that a transaction has been aggregated.
+Malicious nodes will be granted a new vector for censoring transactions.
+There is no guarantee that a replaced transaction is actually executed at all.
+A malicious node could censor a transaction by simply listing it as replaced.
+Honest nodes seeing the replacement would flush the transaction from their mempool
+and not execute or propose it in later blocks.
+
+### Transaction tracking implementations
+
+This section discusses possible ways to flesh out the implementation of transaction replacement.
+Specifically, this section proposes a few alternative ways that Tendermint blockchains could
+track and store transaction replacements.
+
+#### Include transaction replacements in the block
+
+One option to track transaction replacement is to include information on the
+transaction replacement within the block. An additional structure may be added
+to the block, of the following form:
+
+```proto
+message Block {
+...
+  repeated Replacement replacements = 5;
+}
+
+message Replacement {
+  bytes included_tx_key = 1;
+  repeated bytes replaced_txs_keys = 2;
+}
+```
+
+Applications executing `PrepareProposal` would return the list of replacements and
+Tendermint would include an encoding of these replacements in the block that is gossiped
+and committed.
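+
+For illustration, a proposer-side helper that assembles these records might look like the following Go sketch. The types mirror the protobuf message above, the helper names are invented, and keying by the SHA256 of the transaction bytes follows Tendermint's existing transaction-key convention:
+
+```go
+package replacement
+
+import "crypto/sha256"
+
+// Replacement mirrors the protobuf message sketched above.
+type Replacement struct {
+	IncludedTxKey   []byte
+	ReplacedTxsKeys [][]byte
+}
+
+// txKey stands in for Tendermint's transaction key: the SHA256 hash of
+// the raw transaction bytes.
+func txKey(tx []byte) []byte {
+	h := sha256.Sum256(tx)
+	return h[:]
+}
+
+// replacementFor records that the aggregated transaction replaced each
+// of the original client-submitted transactions.
+func replacementFor(aggregated []byte, originals [][]byte) Replacement {
+	r := Replacement{IncludedTxKey: txKey(aggregated)}
+	for _, tx := range originals {
+		r.ReplacedTxsKeys = append(r.ReplacedTxsKeys, txKey(tx))
+	}
+	return r
+}
+```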
+
+Tendermint's transaction indexing would include a new mapping for each replaced transaction
+key to the committed transaction.
+Transaction inclusion proofs would be updated to include these additional new transaction
+keys in the Merkle tree, and queries for transaction hashes that were replaced would return
+information indicating that the transaction was replaced along with the hash of the
+transaction that replaced it.
+
+Block validation of gossiped blocks would be updated to check that each
+`included_tx_key` matches the hash of some transaction in the proposed block.
+
+Implementing the changes described in this section would allow Tendermint to gossip
+and index transaction replacements as part of block propagation. These changes would
+still require the application to certify that the replacements were valid. This
+validation may be performed in one of two ways:
+
+1. **Applications optimistically trust that the proposer performed a legitimate replacement.**
+
+In this validation scheme, applications would not verify that the substitution
+is valid during consensus and instead simply trust that the proposer is correct.
+This would have the drawback of allowing a malicious proposer to remove transactions
+it did not want executed.
+
+2. **Applications completely validate transaction replacement.**
+
+In this validation scheme, applications that allow replacement would check that
+each listed replaced transaction was correctly reflected in the replacement transaction.
+In order to perform such validation, the node would need to have the replaced transactions
+locally. This could be accomplished in one of a few ways: by querying the mempool,
+by adding an additional p2p gossip channel for transaction replacements, or by including the replaced transactions
+in the block. Replacement validation via mempool querying would require the node
+to have received all of the replaced transactions in the mempool, which is far from
+guaranteed. Adding an additional gossip channel would make gossiping replaced transactions
+a requirement for consensus to proceed, since all nodes would need to receive all replacement
+messages before considering a block valid. Finally, including replaced transactions in
+the block seems to obviate any benefit gained from performing a transaction replacement,
+since the replaced transaction and the original transactions would now both appear in the block.
+
+#### Application defined transaction replacement
+
+An additional option for allowing transaction replacement is to leave it entirely as a responsibility
+of the application. The `PrepareProposal` ABCI++ call allows applications to add
+new transactions to a proposed block. Applications that wished to implement a transaction
+replacement mechanism would be free to do so without the newly defined `new_hashes` field.
+Applications wishing to implement transaction replacement would add the aggregated
+transactions in the `PrepareProposal` response, and include one additional bookkeeping
+transaction that listed all of the replacements, with a similar scheme to the `new_hashes`
+field described in ABCI++. This new bookkeeping transaction could be used by the
+application to determine which transactions to clear from the mempool in future calls
+to `CheckTx`.
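+
+A rough sketch of that bookkeeping, with an invented JSON encoding and invented names (nothing here is a real Tendermint or ABCI API), might look like:
+
+```go
+package app
+
+import (
+	"crypto/sha256"
+	"encoding/json"
+)
+
+// bookkeepingTx is a hypothetical application-defined transaction that a
+// proposer appends in PrepareProposal to announce which client-submitted
+// transactions were replaced by which aggregate.
+type bookkeepingTx struct {
+	IncludedTxKey [32]byte   `json:"included"`
+	ReplacedKeys  [][32]byte `json:"replaced"`
+}
+
+// replaced holds keys the application has seen declared replaced in
+// committed bookkeeping transactions.
+var replaced = map[[32]byte]bool{}
+
+// applyBookkeeping decodes a bookkeeping transaction from a committed
+// block and remembers every key it declares replaced.
+func applyBookkeeping(tx []byte) error {
+	var bk bookkeepingTx
+	if err := json.Unmarshal(tx, &bk); err != nil {
+		return err
+	}
+	for _, k := range bk.ReplacedKeys {
+		replaced[k] = true
+	}
+	return nil
+}
+
+// checkTx rejects transactions that have already been replaced, so the
+// mempool evicts them on recheck.
+func checkTx(tx []byte) bool {
+	return !replaced[sha256.Sum256(tx)]
+}
+```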
+
+The meaning of any transaction in the block is completely opaque to Tendermint,
+so applications performing this style of replacement would not be able to have the replacement
+reflected in most of Tendermint's transaction tracking mechanisms, such as transaction indexing
+and the `/tx` endpoint.
+
+#### Application defined Tx Keys
+
+Tendermint currently uses a cryptographic hash, SHA256, as the key for each transaction.
+As noted in the section on systems that would require changing, this key is used
+to identify the transaction in the mempool, in the indexer, and within the RPC system.
+
+An alternative approach to allowing `ProcessProposal` to specify a set of transaction
+replacements would instead be to allow the application to specify an additional key or set
+of keys for each transaction during `ProcessProposal`. This new `secondary_keys` set
+would be included in the block and therefore gossiped during block propagation.
+Additional RPC endpoints could be exposed to query by the application-defined keys.
+
+Applications wishing to implement replacement would leverage this new field by providing the
+replaced transaction hashes as the `secondary_keys` and checking their validity during
+`ProcessProposal`. During `RecheckTx` the application would then be responsible for
+clearing out transactions that matched the `secondary_keys`.
+
+It is worth noting that something like this would be possible without `secondary_keys`.
+An application wishing to implement a system like this one could define a replacement
+transaction, as discussed in the section on application-defined transaction replacement,
+and use a custom [ABCI event type][abci-event-type] to communicate that the replacement should
+be indexed within Tendermint's ABCI event indexing.
+
+### Complexity to value-add tradeoff
+
+It is worth remarking that adding a system like this may introduce a decent amount
+of new complexity into Tendermint. An approach that leaves much of the replacement
+logic to Tendermint would require altering the core transaction indexing and querying
+logic. In many of the cases listed, a system for transaction replacement is possible
+without explicitly defining it as part of `PrepareProposal`. Since applications
+can now add transactions during `PrepareProposal`, they can and should leverage this
+functionality to include additional bookkeeping transactions in the block. It may
+be worth encouraging applications to discover new and interesting ways to leverage this
+power instead of immediately solving the problem for them.
+
+### References
+
+[inclusion-proof]: https://github.com/tendermint/tendermint/blob/0fcfaa4568cb700e27c954389c1fcd0b9e786332/types/tx.go#L67
+[tx-search-result]: https://github.com/tendermint/tendermint/blob/0fcfaa4568cb700e27c954389c1fcd0b9e786332/rpc/coretypes/responses.go#L267
+[tx-rpc-func]: https://github.com/tendermint/tendermint/blob/0fcfaa4568cb700e27c954389c1fcd0b9e786332/internal/rpc/core/tx.go#L21
+[tx-result-index]: https://github.com/tendermint/tendermint/blob/0fcfaa4568cb700e27c954389c1fcd0b9e786332/internal/state/indexer/tx/kv/kv.go#L90
+[abci-event-type]: https://github.com/tendermint/tendermint/blob/0fcfaa4568cb700e27c954389c1fcd0b9e786332/abci/types/types.pb.go#L3168
diff --git a/docs/rfc/rfc-019-config-version.md b/docs/rfc/rfc-019-config-version.md
new file mode 100644
index 0000000000..3dfd7840b5
--- /dev/null
+++ b/docs/rfc/rfc-019-config-version.md
@@ -0,0 +1,400 @@
+# RFC 019: Configuration File Versioning
+
+## Changelog
+
+- 19-Apr-2022: Initial draft (@creachadair)
+- 20-Apr-2022: Updates from review feedback (@creachadair)
+
+## Abstract
+
+Updating configuration settings is an essential part of upgrading an existing
+node to a new version of the Tendermint software. Unfortunately, it is also
+currently a very manual process. This document discusses some of the history of
+changes to the config format, actions we've taken to improve the tooling for
+configuration upgrades, and additional steps we may want to consider.
+
+## Background
+
+A Tendermint node reads configuration settings at startup from a TOML formatted
+text file, typically named `config.toml`. The contents of this file are defined
+by the [`github.com/tendermint/tendermint/config`][config-pkg] package.
+
+Although many settings in this file remain valid from one version of Tendermint
+to the next, new versions of Tendermint often add, update, and remove settings.
+These changes often require manual intervention by operators who are upgrading
+their nodes.
+
+I propose that we provide better tools and documentation to help operators
+make configuration changes correctly during version upgrades. Ideally, as much
+as possible of any configuration file update should be automated, and where
+that is not possible or practical, we should provide clear, explicit directions
+for what steps need to be taken manually. Moreover, when the node discovers
+incorrect or invalid configuration, we should improve the diagnostics it emits
+so that the operator can quickly and easily find the relevant documentation,
+without having to grep through source code.
+
+## Discussion
+
+By convention, we are supposed to document required changes to the config file
+in the `UPGRADING.md` file for the release that introduces them. Although we
+have mostly done this, the level of detail in the upgrading instructions is
+often insufficient for an operator to correctly update their file.
+
+The updates vary widely in complexity: Operators may need to add new required
+settings, update obsolete values for existing settings, move or rename existing
+settings within the file, or remove obsolete settings (which are thus invalid).
+Here are a few examples of each of these cases:
+
+- **New required settings:** Tendermint v0.35 added a new top-level `mode`
+  setting that determines whether a node runs as a validator, a full node, or a
+  seed node.
+  The default value is `"full"`, which means the operator of a validator must
+  manually add `mode = "validator"` (or set the `--mode` flag on the command
+  line) for their node to come up in the correct mode.
+
+- **Updated obsolete values:** Tendermint v0.35 removed support for versions
+  `"v1"` and `"v2"` of the blocksync (formerly "fastsync") protocol, requiring
+  any node using either of those values to update to `"v0"`.
+
+- **Moved/renamed settings:** Version v0.34 moved the top-level `pprof_laddr`
+  setting under the `[rpc]` section.
+
+  Version v0.35 renamed every setting in the file from `snake_case` to
+  `kebab-case`, moved the top-level `fast_sync` setting into the `[blocksync]`
+  section (itself renamed from `[fastsync]`), and moved all the top-level
+  `priv-validator-*` settings under a new `[priv-validator]` section with their
+  prefix trimmed off.
+
+- **Removed obsolete settings:** Version v0.34 removed the `index_all_keys` and
+  `index_keys` settings from the `[tx_index]` section; version v0.35 removed
+  the `wal-dir` setting from the `[mempool]` section, and version v0.36 removed
+  the `[blocksync]` section entirely.
+
+While many of these changes are mentioned in the config section of the upgrade
+instructions, some are not mentioned at all, or are hidden in other parts of
+the doc. For instance, the v0.34 `pprof_laddr` change was documented only as an
+RPC flag change. (A savvy reader might realize that the flag `--rpc.pprof_laddr`
+implies a corresponding config setting, but that note omits the related detail
+that there was a top-level setting that's been renamed.) The lesson here is not
+that the docs are bad, but that prose is not the most efficient format for
+conveying detailed changes like this. The upgrading instructions are still
+valuable for the human reader to understand what to expect.
+
+### Concrete Steps
+
+As part of the v0.36 development cycle, we spent some time reverse-engineering
+the configuration changes since the v0.34 release and built an experimental
+command-line tool called [`confix`][confix], whose job it is to automatically
+update the settings in a `config.toml` file to the latest version. We also
+backported a version of this tool into the v0.35.x branch at release v0.35.4.
+
+This tool should work fine for configuration files created by Tendermint v0.34
+and later, but does not (yet) know how to handle changes from prior versions of
+Tendermint. Part of the difficulty for older versions is simply logistical: To
+figure out which changes to apply, we need to understand something about the
+version that made the file, as well as the version we're converting it to.
+
+> **Discussion point:** In the future we might want to consider incorporating
+> this into the node CLI directly, but we're keeping it separate for now until
+> we can get some feedback from operators.
+
+For the experiment, we handled this by carefully searching the history of
+config format changes for shibboleths to bound the version: For example, the
+`[fastsync]` section was added in Tendermint v0.32 and renamed `[blocksync]` in
+Tendermint v0.35. So if we see a `[fastsync]` section, we have some confidence
+that the file was created by v0.32, v0.33, or v0.34.
+
+But such signals are delicate: The `[blocksync]` section was removed in v0.36,
+so if we find neither `[fastsync]` nor `[blocksync]`, we cannot conclude from
+that alone that the file is from v0.31 or earlier -- we have to look for
+corroborating details.
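+
+To illustrate the flavor of this heuristic (this is not the actual `confix` logic, and the function name is invented), a small Go sketch might look like:
+
+```go
+package sniff
+
+import "strings"
+
+// guessEra makes a coarse guess at which Tendermint era produced a
+// config.toml, using the section shibboleths described above. A real
+// tool would parse the TOML and corroborate with other markers.
+func guessEra(configTOML string) string {
+	hasSection := func(name string) bool {
+		return strings.Contains(configTOML, "["+name+"]")
+	}
+	switch {
+	case hasSection("fastsync"):
+		return "v0.32-v0.34" // section added in v0.32, renamed in v0.35
+	case hasSection("blocksync"):
+		return "v0.35" // renamed in v0.35, removed again in v0.36
+	default:
+		return "unknown" // v0.31 or earlier, or v0.36 or later
+	}
+}
+```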
+
+While such "sniffing" tactics are fine for an experiment, they aren't as robust
+as we might like.
+
+This is especially relevant for configuration files that may have already been
+manually upgraded across several versions by the time we are asked to update
+them again. Another related concern is that we'd like to make sure conversion
+is idempotent, so that it would be safe to rerun the tool over an
+already-converted file without breaking anything.
+
+### Config Versioning
+
+One obvious tactic we could use for future releases is to add a version marker
+to the config file. This would give tools like `confix` (and the node itself) a
+way to calibrate their expectations. Rather than being a version for the file
+itself, however, this version marker would indicate which version of Tendermint
+is needed to read the file.
+
+Provisionally, this might look something like:
+
+```toml
+# The minimum version of Tendermint compatible with the contents of
+# this configuration file.
+config-version = 'v0.35'
+```
+
+When initializing a new node, Tendermint would populate this field with its own
+version (e.g., `v0.36`). When conducting an upgrade, tools like `confix` can
+then use this to decide which conversions are valid, and update the value
+accordingly. After converting a file marked `'v0.35'` to `'v0.37'`, the
+conversion tool sets the file's `config-version` to reflect its compatibility.
+
+> **Discussion point:** This example presumes we would keep config files
+> compatible within a given release cycle, e.g., all of v0.36.x. We could also
+> use patch numbers here, if we think there's some reason to permit changes
+> that would require config file edits at that granularity. I don't think we
+> should, but that's a design question to consider.
+
+Upon seeing an up-to-date version marker, the conversion tool can simply exit
+with a diagnostic like "this file is already up-to-date", rather than sniffing
+the keyspace and potentially introducing errors. In addition, this would let a
+tool detect config files that are _newer_ than the one it understands, and
+issue a safe diagnostic rather than doing something wrong. Plus, besides
+avoiding potentially unsafe conversions, this would also serve as
+human-readable documentation that the file is up-to-date for a given version.
+
+Adding a config version would not address the problem of how to convert files
+created by older versions of Tendermint, but it would at least help us build
+more robust config tooling going forward.
+
+### Stability and Change
+
+In light of the discussion so far, it is natural to examine why we make so many
+changes to the configuration file from one version to the next, and whether we
+could reduce friction by being more conservative about what we make
+configurable, what config changes we make over time, and how we roll them out.
+
+Some changes, like renaming everything from snake case to kebab case, are
+entirely gratuitous. We could safely agree not to make those kinds of changes.
+Apart from that obvious case, however, many other configuration settings
+provide value to node operators in cases where there is no simple, universal
+setting that matches every application.
+
+Taking a high-level view, there are several broad reasons why we might want to
+make changes to configuration settings:
+
+- **Lessons learned:** Configuration settings are a good way to try things out
+  in production, before making more invasive changes to the consensus protocol.
+
+  For example, up until Tendermint v0.35, consensus timeouts were specified as
+  per-node configuration settings (e.g., `timeout-precommit` et al.). This
+  allowed operators to tune these values for the needs of their network, but
+  had the downside that individually-misconfigured nodes could stall consensus.
+
+  Based on that experience, these timeouts have been deprecated in Tendermint
+  v0.36 and converted to consensus parameters, to be consistent across all
+  nodes in the network.
+
+- **Migration & experimentation:** Introducing new features and updating old
+  features can complicate migration for existing users of the software.
+  Temporary or "experimental" configuration settings can be a valuable way to
+  mitigate that friction.
+
+  For example, Tendermint v0.36 introduces a new RPC event subscription
+  endpoint (see [ADR 075][adr075]) that will eventually replace the existing
+  websocket-based interface. To give users time to migrate, v0.36 adds an
+  `experimental-disable-websocket` setting, defaulted to `false`, that allows
+  operators to selectively disable the websocket API for testing purposes
+  during the conversion. This setting is designed to be removed in v0.37, when
+  the old interface is no longer supported.
+
+- **Ongoing maintenance:** Sometimes configuration settings become obsolete,
+  and the cost of removing them trades off against the potential risks of
+  leaving a non-functional or deprecated knob hooked up indefinitely.
+
+  For example, Tendermint v0.35 deprecated two alternate implementations of the
+  blocksync protocol, one of which was deleted entirely (`v1`) and one of which
+  was scheduled for removal (`v2`). The `blocksync.version` setting, which had
+  been added as a migration aid, became obsolete and needed to be updated.
+
+  Despite our best intentions, sometimes engineering designs do not work out.
+  It's just as important to leave room to back out of changes we have since
+  reconsidered, as it is to support migrations forward onto new and improved
+  code.
+
+- **Clarity and legibility:** Besides configuring the software, another
+  important purpose of a config file is to document intent for the humans who
+  operate and maintain the software. Operators need to adjust settings to keep
+  the node running, and developers need to know what options were in use when
+  something goes wrong so they can diagnose and fix bugs. The legibility of a
+  config file as a _human_ artifact is thus also important.
+
+  For example, Tendermint v0.35 moved settings related to validator private
+  keys from the top-level section of the configuration file to their own
+  designated `[priv-validator]` section. Although this change did not make any
+  difference to the meaning of those settings, it made the organization of the
+  file easier to understand, and allowed the names of the individual settings
+  to be simplified (e.g., `priv-validator-key-file` became simply `key-file` in
+  the new section).
+
+  Although such changes are "gratuitous" with respect to the software, there is
+  often value in making things more legible for the humans. While there is no
+  simple rule to define the line, the Potter Stewart principle can be used with
+  due care.
+
+Keeping these examples in mind, we can and should take reasonable steps to
+avoid churn in the configuration file across versions where we can. However, we
+must also accept that part of the reason for _having_ a config file is to allow
+us flexibility elsewhere in the design.
+On that basis, we should not attempt to be too dogmatic about config changes
+either. Unlike changes in the block protocol, for example, which affect every
+user of every network that adopts them, config changes are relatively
+self-contained.
+
+There are a few guiding principles I think we can use to strike a sensible
+balance:
+
+1. **No gratuitous changes.** Aesthetic changes that do not enhance legibility,
+   avert confusion, or clarify documentation, should be entirely avoided.
+
+2. **Prefer mechanical changes.** Whenever it is practical, change settings in
+   a way that can be updated by a tool without operator judgement. This implies
+   finding safe, universal defaults for new settings, and not changing the
+   default values of existing settings.
+
+   Even if that means we have to make multiple changes (e.g., add a new setting
+   in the current version, deprecate the old one, and remove the old one in the
+   next version) it's preferable if we can mechanize each step.
+
+3. **Clearly signal intent.** When adding temporary or experimental settings,
+   they should be clearly named and documented as such. Use long names and
+   suggestive prefixes (e.g., `experimental-*`) so that they stand out when
+   read in the config file or printed in logs.
+
+   Relatedly, using temporary or experimental settings should cause the
+   software to emit diagnostic logs at runtime. These log messages should be
+   easy to grep for, and should contain pointers to more complete documentation
+   (say, issue numbers or URLs) that the operator can read, as well as a hint
+   about when the setting is expected to become invalid. For example:
+
+   ```
+   WARNING: Websocket RPC access is deprecated and will be removed in
+   Tendermint v0.37. See https://tinyurl.com/adr075 for more information.
+   ```
+
+4. **Consider both directions.** When adding a configuration setting, take some
+   time during the implementation process to think about how the setting could
+   be removed, as well as how it will be rolled out. This applies even for
+   settings we imagine should be permanent. Experience may cause us to rethink
+   our original design intent more broadly than we expected.
+
+   This does not mean we have to spend a long time picking nits over the design
+   of every setting; merely that we should convince ourselves we _could_ undo
+   it without making too big a mess later. Even a little extra effort up front
+   can sometimes save a lot.
+
+## References
+
+- [Tendermint `config` package][config-pkg]
+- [`confix` command-line tool][confix]
+- [`condiff` command-line tool][condiff]
+- [Configuration update plan][plan]
+- [ADR 075: RPC Event Subscription Interface][adr075]
+
+[config-pkg]: https://godoc.org/github.com/tendermint/tendermint/config
+[confix]: https://github.com/tendermint/tendermint/blob/master/scripts/confix
+[condiff]: https://github.com/tendermint/tendermint/blob/master/scripts/confix/condiff
+[plan]: https://github.com/tendermint/tendermint/blob/master/scripts/confix/plan.go
+[testdata]: https://github.com/tendermint/tendermint/blob/master/scripts/confix/testdata
+[adr075]: https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-075-rpc-subscription.md
+
+## Appendix: Research Notes
+
+Discovering when various configuration settings were added, updated, and
+removed turns out to be surprisingly tedious. To solve this puzzle, we had to
+answer the following questions:
+
+1. What changes were made between v0.x and v0.y?
This is further complicated by + cases where we have backported config changes into the middle of an earlier + release cycle (e.g., `psql-conn` from v0.35.x into v0.34.13). + +2. When during the development cycle were those changes made? This allows us to + recognize features that were backported into a previous release. + +3. What were the default values of the changed settings, and did they change at + all during or across the release boundary? + +Each step of the [configuration update plan][plan] is commented with a link to +one or more PRs where that change was made. The sections below discuss how we +found these references. + +### Tracking Changes Across Releases + +To figure out what changed between two releases, we built a tool called +[`condiff`][condiff], which performs a "keyspace" diff of two TOML documents. +This diff respects the structure of the TOML file, but ignores comments, blank +lines, and configuration values, so that we can see what was added and removed. + +To use it, run: + +```shell +go run ./scripts/confix/condiff old.toml new.toml +``` + +This tool works on any TOML documents, but for our purposes we needed +Tendermint `config.toml` files. The easiest way to get these is to build the +node binary for your version of interest, run `tendermint init` on a clean home +directory, and copy the generated config file out. The [`testdata`][testdata] +directory for the `confix` tool has configs generated from the heads of each +release branch from v0.31 through v0.35. + +If you want to reproduce this yourself, it looks something like this: + +```shell +# Example for Tendermint v0.32. +git checkout --track origin/v0.32.x +go get golang.org/x/sys/unix +go mod tidy +make build +rm -fr -- tmhome +./build/tendermint --home=tmhome init +cp tmhome/config/config.toml config-v32.toml +``` + +Be advised that the further back you go, the more idiosyncrasies you will +encounter. For example, Tendermint v0.31 and earlier predate Go modules (v0.31 +used dep), and lack backport branches. And you may need to do some editing of +Makefile rules once you get back into the 20s. + +Note that when diffing config files across the v0.34/v0.35 gap, the swap from +`snake_case` to `kebab-case` makes it look like everything changed. The +`condiff` tool has a `-desnake` flag that normalizes all the keys to kebab case +in both inputs before comparison. + +### Locating Additions and Deletions + +To figure out when a configuration setting was added or removed, your tool of +choice is `git bisect`. The only tricky part is finding the endpoints for the +search. If the transition happened within a release, you can use that +release's backport branch as the endpoint (if it has one, e.g., `v0.35.x`). + +However, the start point can be more problematic. The backport branches are not +ancestors of `master` or of each other, which means you need to find some point +in history _prior_ to the change but still attached to the mainline. For recent +releases there is a dev root (e.g., `v0.35.0-dev`, `v0.34.0-dev1`, etc.). These +are not named consistently, but you can usually grep the output of `git tag` to +find them. + +In the worst case you could try starting from the root commit of the repo, but +that turns out not to work in all cases. We've done some branching shenanigans +over the years that mean the root is not a direct ancestor of all our release +branches. When you find this you will probably swear a lot. I did. 
+ +Once you have a start and end point (say, `v0.35.0-dev` and `master`), you can +bisect in the usual way. I use `git grep` on the `config` directory to check +whether the case I am looking for is present. For example, to find when the +`[fastsync]` section was removed: + +```shell +# Setup: +git checkout master +git bisect start +git bisect bad # it's not present on tip of master. +git bisect good v0.34.0-dev1 # it was present at the start of v0.34. +``` + +```shell +# Now repeat this until it gives you a specific commit: +if git grep -q '\[fastsync\]' config ; then git bisect good ; else git bisect bad ; fi +``` + +The above example finds where a config was removed: To find where a setting was +added, do the same thing except reverse the sense of the test (`if ! git grep -q +...`). diff --git a/docs/roadmap/roadmap.md b/docs/roadmap/roadmap.md index 19d9d89fbf..90274ca1d9 100644 --- a/docs/roadmap/roadmap.md +++ b/docs/roadmap/roadmap.md @@ -4,11 +4,11 @@ order: 1 # Tendermint Roadmap -*Last Updated: Friday 8 October 2021* +*Last Updated: Friday 4 February 2022* This document endeavours to inform the wider Tendermint community about development plans and priorities for Tendermint Core, and when we expect features to be delivered. It is intended to broadly inform all users of Tendermint, including application developers, node operators, integrators, and the engineering and research teams. -Anyone wishing to propose work to be a part of this roadmap should do so by opening an [issue](https://github.com/tendermint/spec/issues/new/choose) in the spec. Bug reports and other implementation concerns should be brought up in the [core repository](https://github.com/tendermint/tendermint). +Anyone wishing to propose work to be a part of this roadmap should do so by opening an [issue](https://github.com/tendermint/tendermint/issues/new/choose). Bug reports and other implementation concerns should be brought up in the [core repository](https://github.com/tendermint/tendermint). This roadmap should be read as a high-level guide to plans and priorities, rather than a commitment to schedules and deliverables. Features earlier on the roadmap will generally be more specific and detailed than those later on. We will update this document periodically to reflect the current status. @@ -43,38 +43,38 @@ Added a new `EventSink` interface to allow alternatives to Tendermint's propriet ### ABCI++ -An overhaul of the existing interface between the application and consensus, to give the application more control over block construction. ABCI++ adds new hooks allowing modification of transactions before they get into a block, verification of a block before voting, injection of signed information into votes, and more compact delivery of blocks after agreement (to allow for concurrent execution). [More](https://github.com/tendermint/spec/blob/master/rfc/004-abci%2B%2B.md) +An overhaul of the existing interface between the application and consensus, to give the application more control over block construction. ABCI++ adds new hooks allowing modification of transactions before they get into a block, verification of a block before voting, and complete delivery of blocks after agreement (to allow for concurrent execution). It enables both immediate and delayed agreement. 
[More](https://github.com/tendermint/tendermint/blob/master/spec/abci++/README.md) ### Proposer-Based Timestamps -Proposer-based timestamps are a replacement of [BFT time](https://docs.tendermint.com/master/spec/consensus/bft-time.html), whereby the proposer chooses a timestamp and validators vote on the block only if the timestamp is considered *timely*. This increases reliance on an accurate local clock, but in exchange makes block time more reliable and resistant to faults. This has important use cases in light clients, IBC relayers, CosmosHub inflation and enabling signature aggregation. [More](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-071-proposer-based-timestamps.md) +Proposer-based timestamps are a replacement of [BFT time](https://github.com/tendermint/tendermint/blob/master/spec/consensus/bft-time.md), whereby the proposer chooses a timestamp and validators vote on the block only if the timestamp is considered *timely*. This increases reliance on an accurate local clock, but in exchange makes block time more reliable and resistant to faults. This has important use cases in light clients, IBC relayers, CosmosHub inflation and enabling signature aggregation. [More](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-071-proposer-based-timestamps.md) -### Soft Upgrades +### RPC Event Subscription -We are working on a suite of tools and patterns to make it easier for both node operators and application developers to quickly and safely upgrade to newer versions of Tendermint. [More](https://github.com/tendermint/spec/pull/222) +The websocket-based RPC event subscription API has been an ongoing pain point for users and operators of Tendermint. In this release, we are adding a new API for event subscription that will be more predictable and reliable for clients, easier to use, and reduce resource pressure for the consensus node. The existing API based on websockets will be kept as-is but deprecated, and we plan to remove it entirely in the following release. [More](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-075-rpc-subscription.md) ### Minor Works - Remove the "legacy" P2P framework, and clean up of P2P package. [More](https://github.com/tendermint/tendermint/issues/5670) - Remove the global mutex from the local ABCI client to enable application-controlled concurrency. [More](https://github.com/tendermint/tendermint/issues/7073) -- Enable P2P support for light clients -- Node orchestration of services + Node initialization and composibility +- Improve life cycle management of a node and its reactors. - Remove redundancy in several data structures. Remove unused components such as the block sync v2 reactor, gRPC in the RPC layer, and the socket-based remote signer. -- Improve node visibility by introducing more metrics +- Improve node visibility through the introduction of more metrics +- Migrating locally configured consensus timeouts to global consensus parameters. [More](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-074-timeout-params.md) ## V0.37 (expected Q3 2022) -### Complete P2P Refactor +### LibP2P Implementation -Finish the final phase of the P2P system. Ongoing research and planning is taking place to decide whether to adopt [libp2p](https://libp2p.io/), alternative transports to `MConn` such as [QUIC](https://en.wikipedia.org/wiki/QUIC) and handshake/authentication protocols such as [Noise](https://noiseprotocol.org/). 
Research into more advanced gossiping techniques. +Implement LibP2P to replace `mconnection` in sending and receiving messages across `Channel`s. Use LibP2P also for peer life cycle management and discovery. This aims to reduce the occurence of network thrashing and overall network traffic to provide a more stable networking layer. [More](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-073-libp2p.md). -### Streamline Storage Engine +### Soft Upgrades -Tendermint currently has an abstraction to allow support for multiple database backends. This generality incurs maintenance overhead and interferes with application-specific optimizations that Tendermint could use (ACID guarantees, etc.). We plan to converge on a single database and streamline the Tendermint storage engine. [More](https://github.com/tendermint/tendermint/pull/6897) +We are working on a suite of tools and patterns to make it easier for both node operators and application developers to quickly and safely upgrade to newer versions of Tendermint. [More](https://github.com/tendermint/spec/pull/222) -### Evaluate Interprocess Communication +### Streamline Storage Engine -Tendermint nodes currently have multiple areas of communication with other processes (ABCI, remote-signer, P2P, JSONRPC, websockets, events as examples). Many of these have multiple implementations in which a single suffices. Consolidate and clean up IPC. [More](https://github.com/tendermint/tendermint/blob/master/docs/rfc/rfc-002-ipc-ecosystem.md) +Tendermint currently has an abstraction to allow support for multiple database backends. This generality incurs maintenance overhead and interferes with application-specific optimizations that Tendermint could use (ACID guarantees, etc.). We plan to converge on a single database and streamline the Tendermint storage engine. [More](https://github.com/tendermint/tendermint/pull/6897) ### Minor Works @@ -82,6 +82,7 @@ Tendermint nodes currently have multiple areas of communication with other proce - Remove / Update Consensus WAL. [More](https://github.com/tendermint/tendermint/issues/6397) - Signature Aggregation. [More](https://github.com/tendermint/tendermint/issues/1319) - Remove gogoproto dependency. [More](https://github.com/tendermint/tendermint/issues/5446) +- Enable P2P support for light clients. [More](https://github.com/tendermint/tendermint/blob/master/docs/rfc/rfc-010-p2p-light-client.rst) ## V1.0 (expected Q4 2022) @@ -91,6 +92,7 @@ Has the same feature set as V0.37 but with a focus towards testing, protocol cor - Improved block propagation with erasure coding and/or compact blocks. [More](https://github.com/tendermint/spec/issues/347) - Consensus engine refactor +- Fork accountability protocol - Bidirectional ABCI - Randomized Leader Election - ZK proofs / other cryptographic primitives diff --git a/docs/tendermint-core/README.md b/docs/tendermint-core/README.md index 2ac57d0154..d8af4a3d1c 100644 --- a/docs/tendermint-core/README.md +++ b/docs/tendermint-core/README.md @@ -1,7 +1,7 @@ --- order: 1 parent: - title: System + title: Understanding Tendermint order: 5 --- @@ -10,7 +10,6 @@ parent: This section dives into the internals of Go-Tendermint. - [Using Tendermint](./using-tendermint.md) -- [Running in Production](./running-in-production.md) - [Subscribing to events](./subscription.md) - [Block Structure](./block-structure.md) - [RPC](./rpc.md) @@ -19,7 +18,7 @@ This section dives into the internals of Go-Tendermint. 
- [Mempool](./mempool/README.md) - [Light Client](./light-client.md) - [Consensus](./consensus/README.md) -- [Peer Exachange (PEX)](./pex/README.md) +- [Peer Exchange (PEX)](./pex/README.md) - [Evidence](./evidence/README.md) For full specifications refer to the [spec repo](https://github.com/tendermint/spec). diff --git a/docs/tendermint-core/block-structure.md b/docs/tendermint-core/block-structure.md index 63c4862e1f..c30dbdc074 100644 --- a/docs/tendermint-core/block-structure.md +++ b/docs/tendermint-core/block-structure.md @@ -11,7 +11,7 @@ nodes. This blockchain is accessible via various RPC endpoints, mainly `/blockchain?minHeight=_&maxHeight=_` to get a list of headers. But what exactly is stored in these blocks? -The [specification](https://github.com/tendermint/spec/blob/8dd2ed4c6fe12459edeb9b783bdaaaeb590ec15c/spec/core/data_structures.md) contains a detailed description of each component - that's the best place to get started. +The [specification](https://github.com/tendermint/tendermint/tree/master/spec/core/data_structures.md) contains a detailed description of each component - that's the best place to get started. To dig deeper, check out the [types package documentation](https://godoc.org/github.com/tendermint/tendermint/types). diff --git a/docs/tendermint-core/block-sync/img/block-retention.png b/docs/tendermint-core/block-sync/img/block-retention.png new file mode 100644 index 0000000000..e013e1ab38 Binary files /dev/null and b/docs/tendermint-core/block-sync/img/block-retention.png differ diff --git a/docs/tendermint-core/consensus/README.md b/docs/tendermint-core/consensus/README.md index bd7def551f..1bf9662df2 100644 --- a/docs/tendermint-core/consensus/README.md +++ b/docs/tendermint-core/consensus/README.md @@ -23,7 +23,7 @@ explained in a forthcoming document. For efficiency reasons, validators in Tendermint consensus protocol do not agree directly on the block as the block size is big, i.e., they don't embed the block inside `Proposal` and `VoteMessage`. Instead, they reach agreement on the `BlockID` (see `BlockID` definition in -[Blockchain](https://github.com/tendermint/spec/blob/master/spec/core/data_structures.md#blockid) section) +[Blockchain](https://github.com/tendermint/tendermint/blob/master/spec/core/data_structures.md#blockid) section) that uniquely identifies each block. The block itself is disseminated to validator processes using peer-to-peer gossiping protocol. It starts by having a proposer first splitting a block into a number of block parts, that are then gossiped between diff --git a/docs/tendermint-core/consensus/proposer-based-timestamps.md b/docs/tendermint-core/consensus/proposer-based-timestamps.md new file mode 100644 index 0000000000..7f98f10d6b --- /dev/null +++ b/docs/tendermint-core/consensus/proposer-based-timestamps.md @@ -0,0 +1,95 @@ +--- +order: 3 +--- + +# PBTS + + This document provides an overview of the Proposer-Based Timestamp (PBTS) + algorithm added to Tendermint in the v0.36 release. It outlines the core + functionality as well as the parameters and constraints of the this algorithm. + +## Algorithm Overview + +The PBTS algorithm defines a way for a Tendermint blockchain to create block +timestamps that are within a reasonable bound of the clocks of the validators on +the network. This replaces the original BFTTime algorithm for timestamp +assignment that relied on the timestamps included in precommit messages. 
+ +## Algorithm Parameters + +The functionality of the PBTS algorithm is governed by two parameters within +Tendermint. These two parameters are [consensus +parameters](https://github.com/tendermint/tendermint/blob/master/spec/abci/apps.md#L291), +meaning they are configured by the ABCI application and are expected to be the +same across all nodes on the network. + +### `Precision` + +The `Precision` parameter configures the acceptable upper-bound of clock drift +among all of the nodes on a Tendermint network. Any two nodes on a Tendermint +network are expected to have clocks that differ by at most `Precision` +milliseconds any given instant. + +### `MessageDelay` + +The `MessageDelay` parameter configures the acceptable upper-bound for +transmitting a `Proposal` message from the proposer to _all_ of the validators +on the network. + +Networks should choose as small a value for `MessageDelay` as is practical, +provided it is large enough that messages can reach all participants with high +probability given the number of participants and latency of their connections. + +## Algorithm Concepts + +### Block timestamps + +Each block produced by the Tendermint consensus engine contains a timestamp. +The timestamp produced in each block is a meaningful representation of time that is +useful for the protocols and applications built on top of Tendermint. + +The following protocols and application features require a reliable source of time: + +* Tendermint Light Clients [rely on correspondence between their known time](https://github.com/tendermint/tendermint/blob/master/spec/light-client/verification/README.md#definitions-1) and the block time for block verification. +* Tendermint Evidence validity is determined [either in terms of heights or in terms of time](https://github.com/tendermint/tendermint/blob/master/spec/consensus/evidence.md#verification). +* Unbonding of staked assets in the Cosmos Hub [occurs after a period of 21 + days](https://github.com/cosmos/governance/blob/master/params-change/Staking.md#unbondingtime). +* IBC packets can use either a [timestamp or a height to timeout packet + delivery](https://docs.cosmos.network/v0.44/ibc/overview.html#acknowledgements) + +### Proposer Selects a Block Timestamp + +When the proposer node creates a new block proposal, the node reads the time +from its local clock and uses this reading as the timestamp for the proposed +block. + +### Timeliness + +When each validator on a Tendermint network receives a proposed block, it +performs a series of checks to ensure that the block can be considered valid as +a candidate to be the next block in the chain. + +The PBTS algorithm performs a validity check on the timestamp of proposed +blocks. When a validator receives a proposal it ensures that the timestamp in +the proposal is within a bound of the validator's local clock. Specifically, the +algorithm checks that the timestamp is no more than `Precision` greater than the +node's local clock and no less than `Precision` + `MessageDelay` behind than the +node's local clock. This creates range of acceptable timestamps around the +node's local time. If the timestamp is within this range, the PBTS algorithm +considers the block **timely**. If a block is not **timely**, the node will +issue a `nil` `prevote` for this block, signaling to the rest of the network +that the node does not consider the block to be valid. 
+ +### Clock Synchronization + +The PBTS algorithm requires the clocks of the validators on a Tendermint network +are within `Precision` of each other. In practice, this means that validators +should periodically synchronize to a reliable NTP server. Validators that drift +too far away from the rest of the network will no longer propose blocks with +valid timestamps. Additionally they will not view the timestamps of blocks +proposed by their peers to be valid either. + +## See Also + +* [The PBTS specification](https://github.com/tendermint/tendermint/blob/master/spec/consensus/proposer-based-timestamp/README.md) + contains all of the details of the algorithm. diff --git a/docs/tendermint-core/light-client.md b/docs/tendermint-core/light-client.md index eb20bd684b..daa1017dba 100644 --- a/docs/tendermint-core/light-client.md +++ b/docs/tendermint-core/light-client.md @@ -17,7 +17,7 @@ The light client protocol verifies headers by retrieving a chain of headers, commits and validator sets from a trusted height to the target height, verifying the signatures of each of these intermediary signed headers till it reaches the target height. From there, all the application state is verifiable with -[merkle proofs](https://github.com/tendermint/spec/blob/953523c3cb99fdb8c8f7a2d21e3a99094279e9de/spec/blockchain/encoding.md#iavl-tree). +[merkle proofs](https://github.com/tendermint/tendermint/blob/953523c3cb99fdb8c8f7a2d21e3a99094279e9de/spec/blockchain/encoding.md#iavl-tree). ## Properties @@ -38,7 +38,7 @@ a provider and a set of witnesses. This sets the trust period: the period that full nodes should be accountable for faulty behavior and a trust level: the fraction of validators in a validator set with which we trust that at least one is correct. As Tendermint consensus can withstand 1/3 byzantine faults, this is -the default trust level, however, for greater security you can increase it (max: +the default trust level, however, for greater security you can increase it (max: 1). Similar to a full node, light clients can also be subject to byzantine attacks. diff --git a/docs/tendermint-core/rpc.md b/docs/tendermint-core/rpc.md index 31371cb780..54f60ea04f 100644 --- a/docs/tendermint-core/rpc.md +++ b/docs/tendermint-core/rpc.md @@ -8,7 +8,7 @@ The RPC documentation is hosted here: - [https://docs.tendermint.com/master/rpc/](https://docs.tendermint.com/master/rpc/) -To update the documentation, edit the relevant `godoc` comments in the [rpc directory](https://github.com/tendermint/tendermint/blob/v0.35.x/rpc). +To update the documentation, edit the relevant `godoc` comments in the [rpc directory](https://github.com/tendermint/tendermint/tree/master/rpc). If you are using Tendermint in-process, you will need to set the version to be displayed in the RPC. diff --git a/docs/tendermint-core/subscription.md b/docs/tendermint-core/subscription.md index 1ab6828b6a..0f452c5633 100644 --- a/docs/tendermint-core/subscription.md +++ b/docs/tendermint-core/subscription.md @@ -22,12 +22,12 @@ method via Websocket along with a valid query. ```json { - "jsonrpc": "2.0", - "method": "subscribe", - "id": 0, - "params": { - "query": "tm.event='NewBlock'" - } + "jsonrpc": "2.0", + "method": "subscribe", + "id": 0, + "params": { + "query": "tm.event='NewBlock'" + } } ``` @@ -43,33 +43,33 @@ transactions](../app-dev/indexing-transactions.md) for details. When validator set changes, ValidatorSetUpdates event is published. The event carries a list of pubkey/power pairs. 
The list is the same Tendermint receives from ABCI application (see [EndBlock -section](https://github.com/tendermint/spec/blob/master/spec/abci/abci.md#endblock) in +section](https://github.com/tendermint/tendermint/blob/master/spec/abci/abci.md#endblock) in the ABCI spec). Response: ```json { - "jsonrpc": "2.0", - "id": 0, - "result": { - "query": "tm.event='ValidatorSetUpdates'", - "data": { - "type": "tendermint/event/ValidatorSetUpdates", - "value": { - "validator_updates": [ - { - "address": "09EAD022FD25DE3A02E64B0FE9610B1417183EE4", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "ww0z4WaZ0Xg+YI10w43wTWbBmM3dpVza4mmSQYsd0ck=" - }, - "voting_power": "10", - "proposer_priority": "0" - } - ] - } - } + "jsonrpc": "2.0", + "id": 0, + "result": { + "query": "tm.event='ValidatorSetUpdates'", + "data": { + "type": "tendermint/event/ValidatorSetUpdates", + "value": { + "validator_updates": [ + { + "address": "09EAD022FD25DE3A02E64B0FE9610B1417183EE4", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "ww0z4WaZ0Xg+YI10w43wTWbBmM3dpVza4mmSQYsd0ck=" + }, + "voting_power": "10", + "proposer_priority": "0" + } + ] + } } + } } ``` diff --git a/docs/tendermint-core/using-tendermint.md b/docs/tendermint-core/using-tendermint.md index 466cf59ba3..dce6ae05cb 100644 --- a/docs/tendermint-core/using-tendermint.md +++ b/docs/tendermint-core/using-tendermint.md @@ -47,9 +47,9 @@ definition](https://github.com/tenderdash/tenderdash/blob/master/types/genesis.g - `chain_id`: ID of the blockchain. **This must be unique for every blockchain.** If your testnet blockchains do not have unique chain IDs, you will have a bad time. The ChainID must be less than 50 symbols. -- `initial_height`: Height at which Tenderdash should begin at. If a blockchain is conducting a network upgrade, +- `initial_height`: Height at which Tenderdash should begin. If a blockchain is conducting a network upgrade, starting from the stopped height brings uniqueness to previous heights. -- `consensus_params` [spec](https://github.com/tenderdash/spec/blob/master/spec/core/state.md#consensusparams) +- `consensus_params` [spec](https://github.com/tendermint/tendermint/blob/master/spec/core/state.md#consensusparams) - `block` - `max_bytes`: Max block size, in bytes. - `max_gas`: Max gas per block. diff --git a/docs/tools/README.md b/docs/tools/README.md index 3e87a2ea1f..b6e08162b5 100644 --- a/docs/tools/README.md +++ b/docs/tools/README.md @@ -12,7 +12,6 @@ Tendermint has some tools that are associated with it for: - [Debugging](./debugging/pro.md) - [Benchmarking](#benchmarking) - [Testnets](#testnets) -- [Validation of remote signers](./remote-signer-validation.md) ## Benchmarking @@ -27,3 +26,11 @@ testing Tendermint networks. This repository contains various different configurations of test networks for, and relating to, Tendermint. + +Use [Docker Compose](./docker-compose.md) to spin up Tendermint testnets on your +local machine. + +Use [Terraform and Ansible](./terraform-and-ansible.md) to deploy Tendermint +testnets to the cloud. + +See the `tendermint testnet --help` command for more help initializing testnets. 
diff --git a/docs/tools/debugging/proposer-based-timestamps-runbook.md b/docs/tools/debugging/proposer-based-timestamps-runbook.md new file mode 100644 index 0000000000..a817bd29eb --- /dev/null +++ b/docs/tools/debugging/proposer-based-timestamps-runbook.md @@ -0,0 +1,216 @@ +--- +order: 3 +--- + +# Proposer-Based Timestamps Runbook + +Version v0.36 of Tendermint added new constraints for the timestamps included in +each block created by Tendermint. The new constraints mean that validators may +fail to produce valid blocks or may issue `nil` `prevotes` for proposed blocks +depending on the configuration of the validator's local clock. + +## What is this document for? + +This document provides a set of actionable steps for application developers and +node operators to diagnose and fix issues related to clock synchronization and +configuration of the Proposer-Based Timestamps [SynchronyParams](https://github.com/tendermint/tendermint/blob/master/spec/core/data_structures.md#synchronyparams). + +Use this runbook if you observe that validators are frequently voting `nil` for a block that the rest +of the network votes for or if validators are frequently producing block proposals +that are not voted for by the rest of the network. + +## Requirements + +To use this runbook, you must be running a node that has the [Prometheus metrics endpoint enabled](https://github.com/tendermint/tendermint/blob/master/docs/nodes/metrics.md) +and the Tendermint RPC endpoint enabled and accessible. + +It is strongly recommended to also run a Prometheus metrics collector to gather and +analyze metrics from the Tendermint node. + +## Debugging a Single Node + +If you observe that a single validator is frequently failing to produce blocks or +voting nil for proposals that other validators vote for and suspect it may be +related to clock synchronization, use the following steps to debug and correct the issue. + +### Check Timely Metric + +Tendermint exposes a histogram metric for the difference between the timestamp in the proposal +the and the time read from the node's local clock when the proposal is received. + +The histogram exposes multiple metrics on the Prometheus `/metrics` endpoint called +* `tendermint_consensus_proposal_timestamp_difference_bucket`. +* `tendermint_consensus_proposal_timestamp_difference_sum`. +* `tendermint_consensus_proposal_timestamp_difference_count`. + +Each metric is also labeled with the key `is_timely`, which can have a value of +`true` or `false`. + +#### From the Prometheus Collector UI + +If you are running a Prometheus collector, navigate to the query web interface and select the 'Graph' tab. + +Issue a query for the following: + +``` +tendermint_consensus_proposal_timestamp_difference_count{is_timely="false"} / +tendermint_consensus_proposal_timestamp_difference_count{is_timely="true"} +``` + +This query will graph the ratio of proposals the node considered timely to those it +considered untimely. If the ratio is increasing, it means that your node is consistently +seeing more proposals that are far from its local clock. If this is the case, you should +check to make sure your local clock is properly synchronized to NTP. + +#### From the `/metrics` url + +If you are not running a Prometheus collector, navigate to the `/metrics` endpoint +exposed on the Prometheus metrics port with `curl` or a browser. + +Search for the `tendermint_consensus_proposal_timestamp_difference_count` metrics. +This metric is labeled with `is_timely`. 
Investigate the value of +`tendermint_consensus_proposal_timestamp_difference_count` where `is_timely="false"` +and where `is_timely="true"`. Refresh the endpoint and observe if the value of `is_timely="false"` +is growing. + +If you observe that `is_timely="false"` is growing, it means that your node is consistently +seeing proposals that are far from its local clock. If this is the case, you should check +to make sure your local clock is properly synchronized to NTP. + +### Checking Clock Sync + +NTP configuration and tooling is very specific to the operating system and distribution +that your validator node is running. This guide assumes you have `timedatectl` installed with +[chrony](https://chrony.tuxfamily.org/), a popular tool for interacting with time +synchronization on Linux distributions. If you are using an operating system or +distribution with a different time synchronization mechanism, please consult the +documentation for your operating system to check the status and re-synchronize the daemon. + +#### Check if NTP is Enabled + +```shell +$ timedatectl +``` + +From the output, ensure that `NTP service` is `active`. If `NTP service` is `inactive`, run: + +```shell +$ timedatectl set-ntp true +``` + +Re-run the `timedatectl` command and verify that the change has taken effect. + +#### Check if Your NTP Daemon is Synchronized + +Check the status of your local `chrony` NTP daemon using by running the following: + +```shell +$ chronyc tracking +``` + +If the `chrony` daemon is running, you will see output that indicates its current status. +If the `chrony` daemon is not running, restart it and re-run `chronyc tracking`. + +The `System time` field of the response should show a value that is much smaller than 100 +milliseconds. + +If the value is very large, restart the `chronyd` daemon. + +## Debugging a Network + +If you observe that a network is frequently failing to produce blocks and suspect +it may be related to clock synchronization, use the following steps to debug and correct the issue. + +### Check Prevote Message Delay + +Tendermint exposes metrics that help determine how synchronized the clocks on a network are. + +These metrics are visible on the Prometheus `/metrics` endpoint and are called: +* `tendermint_consensus_quorum_prevote_delay` +* `tendermint_consensus_full_prevote_delay` + +These metrics calculate the difference between the timestamp in the proposal message and +the timestamp of a prevote that was issued during consensus. + +The `tendermint_consensus_quorum_prevote_delay` metric is the interval in seconds +between the proposal timestamp and the timestamp of the earliest prevote that +achieved a quorum during the prevote step. + +The `tendermint_consensus_full_prevote_delay` metric is the interval in seconds +between the proposal timestamp and the timestamp of the latest prevote in a round +where 100% of the validators voted. + +#### From the Prometheus Collector UI + +If you are running a Prometheus collector, navigate to the query web interface and select the 'Graph' tab. + +Issue a query for the following: + +``` +sum(tendermint_consensus_quorum_prevote_delay) by (proposer_address) +``` + +This query will graph the difference in seconds for each proposer on the network. + +If the value is much larger for some proposers, then the issue is likely related to the clock +synchronization of their nodes. Contact those proposers and ensure that their nodes +are properly connected to NTP using the steps for [Debugging a Single Node](#debugging-a-single-node). 
+ +If the value is relatively similar for all proposers you should next compare this +value to the `SynchronyParams` values for the network. Continue to the [Checking +Sychrony](#checking-synchrony) steps. + +#### From the `/metrics` url + +If you are not running a Prometheus collector, navigate to the `/metrics` endpoint +exposed on the Prometheus metrics port. + +Search for the `tendermint_consensus_quorum_prevote_delay` metric. There will be one +entry of this metric for each `proposer_address`. If the value of this metric is +much larger for some proposers, then the issue is likely related to synchronization of their +nodes with NTP. Contact those proposers and ensure that their nodes are properly connected +to NTP using the steps for [Debugging a Single Node](#debugging-a-single-node). + +If the values are relatively similar for all proposers you should next compare, +you'll need to compare this value to the `SynchronyParams` for the network. Continue +to the [Checking Sychrony](#checking-synchrony) steps. + +### Checking Synchrony + +To determine the currently configured `SynchronyParams` for your network, issue a +request to your node's RPC endpoint. For a node running locally with the RPC server +exposed on port `26657`, run the following command: + +```shell +$ curl localhost:26657/consensus_params +``` + +The json output will contain a field named `synchrony`, with the following structure: + +```json +{ + "precision": "500000000", + "message_delay": "3000000000" +} +``` + +The `precision` and `message_delay` values returned are listed in nanoseconds: +In the examples above, the precision is 500ms and the message delay is 3s. +Remember, `tendermint_consensus_quorum_prevote_delay` is listed in seconds. +If the `tendermint_consensus_quorum_prevote_delay` value approaches the sum of `precision` and `message_delay`, +then the value selected for these parameters is too small. Your application will +need to be modified to update the `SynchronyParams` to have larger values. + +### Updating SynchronyParams + +The `SynchronyParams` are `ConsensusParameters` which means they are set and updated +by the application running alongside Tendermint. Updates to these parameters must +be passed to the application during the `FinalizeBlock` ABCI method call. + +If the application was built using the CosmosSDK, then these parameters can be updated +programatically using a governance proposal. For more information, see the [CosmosSDK +documentation](https://hub.cosmos.network/main/governance/submitting.html#sending-the-transaction-that-submits-your-governance-proposal). + +If the application does not implement a way to update the consensus parameters +programatically, then the application itself must be updated to do so. More information on updating +the consensus parameters via ABCI can be found in the [FinalizeBlock documentation](https://github.com/tendermint/tendermint/blob/master/spec/abci++/abci++_methods_002_draft.md#finalizeblock). diff --git a/docs/networks/docker-compose.md b/docs/tools/docker-compose.md similarity index 100% rename from docs/networks/docker-compose.md rename to docs/tools/docker-compose.md diff --git a/docs/tools/remote-signer-validation.md b/docs/tools/remote-signer-validation.md deleted file mode 100644 index ab55008602..0000000000 --- a/docs/tools/remote-signer-validation.md +++ /dev/null @@ -1,156 +0,0 @@ -# Remote Signer - -Located under the `tools/tm-signer-harness` folder in the [Tendermint -repository](https://github.com/tendermint/tendermint). 
- -The Tendermint remote signer test harness facilitates integration testing -between Tendermint and remote signers such as -[tkkms](https://github.com/iqlusioninc/tmkms). Such remote signers allow for signing -of important Tendermint messages using -[HSMs](https://en.wikipedia.org/wiki/Hardware_security_module), providing -additional security. - -When executed, `tm-signer-harness`: - -1. Runs a listener (either TCP or Unix sockets). -2. Waits for a connection from the remote signer. -3. Upon connection from the remote signer, executes a number of automated tests - to ensure compatibility. -4. Upon successful validation, the harness process exits with a 0 exit code. - Upon validation failure, it exits with a particular exit code related to the - error. - -## Prerequisites - -Requires the same prerequisites as for building -[Tendermint](https://github.com/tendermint/tendermint). - -## Building - -From the `tools/tm-signer-harness` directory in your Tendermint source -repository, simply run: - -```bash -make - -# To have global access to this executable -make install -``` - -## Docker Image - -To build a Docker image containing the `tm-signer-harness`, also from the -`tools/tm-signer-harness` directory of your Tendermint source repo, simply run: - -```bash -make docker-image -``` - -## Running against KMS - -As an example of how to use `tm-signer-harness`, the following instructions show -you how to execute its tests against [tkkms](https://github.com/iqlusioninc/tmkms). -For this example, we will make use of the **software signing module in KMS**, as -the hardware signing module requires a physical -[YubiHSM](https://www.yubico.com/products/yubihsm/) device. - -### Step 1: Install KMS on your local machine - -See the [tkkms repo](https://github.com/iqlusioninc/tmkms) for details on how to set -KMS up on your local machine. - -If you have [Rust](https://www.rust-lang.org/) installed on your local machine, -you can simply install KMS by: - -```bash -cargo install tmkms -``` - -### Step 2: Make keys for KMS - -The KMS software signing module needs a key with which to sign messages. In our -example, we will simply export a signing key from our local Tendermint instance. - -```bash -# Will generate all necessary Tendermint configuration files, including: -# - ~/.tendermint/config/priv_validator_key.json -# - ~/.tendermint/data/priv_validator_state.json -tendermint init validator - -# Extract the signing key from our local Tendermint instance -tm-signer-harness extract_key \ # Use the "extract_key" command - -tmhome ~/.tendermint \ # Where to find the Tendermint home directory - -output ./signing.key # Where to write the key -``` - -Also, because we want KMS to connect to `tm-signer-harness`, we will need to -provide a secret connection key from KMS' side: - -```bash -tmkms keygen secret_connection.key -``` - -### Step 3: Configure and run KMS - -KMS needs some configuration to tell it to use the softer signing module as well -as the `signing.key` file we just generated. Save the following to a file called -`tmkms.toml`: - -```toml -[[validator]] -addr = "tcp://127.0.0.1:61219" # This is where we will find tm-signer-harness. -chain_id = "test-chain-0XwP5E" # The Tendermint chain ID for which KMS will be signing (found in ~/.tendermint/config/genesis.json). -reconnect = true # true is the default -secret_key = "./secret_connection.key" # Where to find our secret connection key. 
- -[[providers.softsign]] -id = "test-chain-0XwP5E" # The Tendermint chain ID for which KMS will be signing (same as validator.chain_id above). -path = "./signing.key" # The signing key we extracted earlier. -``` - -Then run KMS with this configuration: - -```bash -tmkms start -c tmkms.toml -``` - -This will start KMS, which will repeatedly try to connect to -`tcp://127.0.0.1:61219` until it is successful. - -### Step 4: Run tm-signer-harness - -Now we get to run the signer test harness: - -```bash -tm-signer-harness run \ # The "run" command executes the tests - -addr tcp://127.0.0.1:61219 \ # The address we promised KMS earlier - -tmhome ~/.tendermint # Where to find our Tendermint configuration/data files. -``` - -If the current version of Tendermint and KMS are compatible, `tm-signer-harness` -should now exit with a 0 exit code. If they are somehow not compatible, it -should exit with a meaningful non-zero exit code (see the exit codes below). - -### Step 5: Shut down KMS - -Simply hit Ctrl+Break on your KMS instance (or use the `kill` command in Linux) -to terminate it gracefully. - -## Exit Code Meanings - -The following list shows the various exit codes from `tm-signer-harness` and -their meanings: - -| Exit Code | Description | -| --- | --- | -| 0 | Success! | -| 1 | Invalid command line parameters supplied to `tm-signer-harness` | -| 2 | Maximum number of accept retries reached (the `-accept-retries` parameter) | -| 3 | Failed to load `${TMHOME}/config/genesis.json` | -| 4 | Failed to create listener specified by `-addr` parameter | -| 5 | Failed to start listener | -| 6 | Interrupted by `SIGINT` (e.g. when hitting Ctrl+Break or Ctrl+C) | -| 7 | Other unknown error | -| 8 | Test 1 failed: public key mismatch | -| 9 | Test 2 failed: signing of proposals failed | -| 10 | Test 3 failed: signing of votes failed | diff --git a/docs/networks/terraform-and-ansible.md b/docs/tools/terraform-and-ansible.md similarity index 99% rename from docs/networks/terraform-and-ansible.md rename to docs/tools/terraform-and-ansible.md index 6efbf3751e..78e45652c2 100644 --- a/docs/networks/terraform-and-ansible.md +++ b/docs/tools/terraform-and-ansible.md @@ -164,7 +164,7 @@ page](https://app.logz.io/#/dashboard/data-sources/Filebeat), then: yum install systemd-devel || echo "This will only work on RHEL-based systems." apt-get install libsystemd-dev || echo "This will only work on Debian-based systems." -go get github.com/mheese/journalbeat +go install github.com/mheese/journalbeat@latest ansible-playbook -i inventory/digital_ocean.py -l sentrynet logzio.yml -e LOGZIO_TOKEN=ABCDEFGHIJKLMNOPQRSTUVWXYZ012345 ``` diff --git a/docs/tutorials/go-built-in.md b/docs/tutorials/go-built-in.md index 48da74b0cc..be018b7605 100644 --- a/docs/tutorials/go-built-in.md +++ b/docs/tutorials/go-built-in.md @@ -11,14 +11,14 @@ Core application from scratch. It does not assume that you have any prior experience with Tendermint Core. Tendermint Core is a service that provides a Byzantine fault tolerant consensus engine -for state-machine replication. The replicated state-machine, or "application", can be written +for state-machine replication. The replicated state-machine, or "application", can be written in any language that can send and receive protocol buffer messages. This tutorial is written for Go and uses Tendermint as a library, but applications not written in Go can use Tendermint to drive state-machine replication in a client-server model. -This tutorial expects some understanding of the Go programming language. 
+This tutorial expects some understanding of the Go programming language. If you have never written Go, you may want to go through [Learn X in Y minutes Where X=Go](https://learnxinyminutes.com/docs/go/) first to familiarize yourself with the syntax. @@ -26,9 +26,15 @@ yourself with the syntax. By following along with this guide, you'll create a Tendermint Core application called kvstore, a (very) simple distributed BFT key-value store. -> Note: please use a released version of Tendermint with this guide. The guides will work with the latest released version. -> Be aware that they may not apply to unreleased changes on master. -> We strongly advise against using unreleased commits for your development. +> Note: please use a released version of Tendermint with this guide. The guides will work with the latest version. Please, do not use master. + +## Built-in app vs external app + +Running your application inside the same process as Tendermint Core will give +you the best possible performance. + +For other languages, your application have to communicate with Tendermint Core +through a TCP, Unix domain socket or gRPC. ## 1.1 Installing Go @@ -79,11 +85,11 @@ Hello, Tendermint Core ## 1.3 Writing a Tendermint Core application Tendermint Core communicates with an application through the Application -BlockChain Interface (ABCI) protocol. All of the message types Tendermint uses for +BlockChain Interface (ABCI) protocol. All of the message types Tendermint uses for communicating with the application can be found in the ABCI [protobuf file](https://github.com/tendermint/spec/blob/b695d30aae69933bc0e630da14949207d18ae02c/proto/tendermint/abci/types.proto). -We will begin by creating the basic scaffolding for an ABCI application in +We will begin by creating the basic scaffolding for an ABCI application in a new `app.go` file. The first step is to create a new type, `KVStoreApplication` with methods that implement the abci `Application` interface. @@ -158,17 +164,17 @@ func (app *KVStoreApplication) ApplySnapshotChunk(abcitypes.RequestApplySnapshot Our application will need to write its state out to persistent storage so that it can stop and start without losing all of its data. -For this tutorial, we will use [BadgerDB](https://github.com/dgraph-io/badger). -Badger is a fast embedded key-value store. +For this tutorial, we will use [BadgerDB](https://github.com/dgraph-io/badger). +Badger is a fast embedded key-value store. First, add Badger as a dependency of your go module using the `go get` command: `go get github.com/dgraph-io/badger/v3` -Next, let's update the application and its constructor to receive a handle to the -database. +Next, let's update the application and its constructor to receive a handle to the +database. -Update the application struct as follows: +Update the application struct as follows: ```go type KVStoreApplication struct { @@ -237,10 +243,15 @@ information on why the transaction was rejected. Note that `CheckTx` _does not execute_ the transaction, it only verifies that that the transaction _could_ be executed. We do not know yet if the rest of the network has -agreed to accept this transaction into a block. +agreed to accept this transaction into a block. + +Valid transactions will eventually be committed given they are not too big and +have enough gas. To learn more about gas, check out ["the +specification"](https://github.com/tendermint/tendermint/blob/master/spec/abci/apps.md#gas). 
-Finally, make sure to add the `bytes` package to the your import stanza -at the top of `app.go`: +For the underlying key-value store we'll use +[badger](https://github.com/dgraph-io/badger), which is an embeddable, +persistent and fast key-value (KV) database. ```go import( @@ -268,7 +279,7 @@ receive a block. `EndBlock` is called once to indicate to the application that no more transactions will be delivered to the application. -To implement these calls in our application we're going to make use of Badger's +To implement these calls in our application we're going to make use of Badger's transaction mechanism. Bagder uses the term _transaction_ in the context of databases, be careful not to confuse it with _blockchain transactions_. @@ -374,23 +385,15 @@ func (app *KVStoreApplication) Query(req abcitypes.RequestQuery) abcitypes.Respo } ``` -## 1.3.4 Additional Methods - -You'll notice that we left several methods unchanged. Specifically, we have yet -to implement the `Info` and `InitChain` methods and we did not implement -any of the `*Snapthot` methods. These methods are all important for running Tendermint -applications in production but are not required for getting a very simple application -up and running. - -To better understand these methods and why they are useful, check out the Tendermint -[specification on ABCI](https://github.com/tendermint/spec/tree/20b2abb5f9a83c2d9d97b53e555e4ea5a6bd7dc4/spec/abci). +The complete specification can be found +[here](https://github.com/tendermint/tendermint/tree/master/spec/abci/). ## 1.4 Starting an application and a Tendermint Core instance in the same process Now that we have the basic functionality of our application in place, let's put it all together inside of our `main.go` file. -Add the following code to your `main.go` file: +Add the following code to your `main.go` file: ```go package main @@ -554,7 +557,7 @@ Finally, we start the node: The additional logic at the end of the file allows the program to catch `SIGTERM`. This means that the node can shutdown gracefully when an operator tries to kill the program: -```go +```go ... c := make(chan os.Signal, 1) signal.Notify(c, os.Interrupt, syscall.SIGTERM) @@ -583,14 +586,14 @@ From the root of your project, run: go run github.com/tendermint/tendermint/cmd/tendermint@v0.35.0 init validator --home ./tendermint-home ``` -Next, build the application: +Next, build the application: ```bash go build -mod=mod -o my-app # use -mod=mod to automatically update go.sum ``` Everything is now in place to run your application. -Run: +Run: ```bash $ rm -rf /tmp/example $ TMHOME="/tmp/example" tenderdash init validator @@ -617,9 +620,18 @@ Open another terminal window and run the following curl command: ```bash $ curl -s 'localhost:26657/broadcast_tx_commit?tx="tendermint=rocks"' +{ + "check_tx": { + "gasWanted": "1", + ... + }, + "deliver_tx": { ... }, + "hash": "1B3C5A1093DB952C331B1749A21DCCBB0F6C7F4E0055CD04D16346472FC60EC6", + "height": "128" +} ``` -If everything went well, you should see a response indicating which height the +If everything went well, you should see a response indicating which height the transaction was included in the blockchain. Finally, let's make sure that transaction really was persisted by the application. @@ -628,20 +640,23 @@ Run the following command: ```bash $ curl -s 'localhost:26657/abci_query?data="tendermint"' -``` - -Let's examine the response object that this request returns. -The request returns a `json` object with a `key` and `value` field set. - -```json -... 
- "key": "dGVuZGVybWludA==", - "value": "cm9ja3M=", -... +{ + "response": { + "code": 0, + "log": "exists", + "info": "", + "index": "0", + "key": "dGVuZGVybWludA==", + "value": "cm9ja3M=", + "proofOps": null, + "height": "6", + "codespace": "" + } +} ``` Those values don't look like the `key` and `value` we sent to Tendermint, -what's going on here? +what's going on here? The response contain a `base64` encoded representation of the data we submitted. To get the original value out of this data, we can use the `base64` command line utility. diff --git a/docs/tutorials/go.md b/docs/tutorials/go.md index 56fc0dc1c1..aa47918c77 100644 --- a/docs/tutorials/go.md +++ b/docs/tutorials/go.md @@ -210,7 +210,7 @@ etc.) by Tendermint Core. Valid transactions will eventually be committed given they are not too big and have enough gas. To learn more about gas, check out ["the -specification"](https://docs.tendermint.com/master/spec/abci/apps.html#gas). +specification"](https://github.com/tendermint/tendermint/blob/master/spec/abci/apps.md#gas). For the underlying key-value store we'll use [badger](https://github.com/dgraph-io/badger), which is an embeddable, @@ -328,7 +328,7 @@ func (app *KVStoreApplication) Query(reqQuery abcitypes.RequestQuery) (resQuery ``` The complete specification can be found -[here](https://docs.tendermint.com/master/spec/abci/). +[here](https://github.com/tendermint/tendermint/tree/master/spec/abci/). ## 1.4 Starting an application and a Tendermint Core instances @@ -367,7 +367,11 @@ func main() { flag.Parse() - logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false) + logger, err := log.NewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false) + if err != nil { + fmt.Fprintf(os.Stderr, "failed to configure logger: %v", err) + os.Exit(1) + } server := abciserver.NewSocketServer(socketAddr, app) server.SetLogger(logger) @@ -380,7 +384,6 @@ func main() { c := make(chan os.Signal, 1) signal.Notify(c, os.Interrupt, syscall.SIGTERM) <-c - os.Exit(0) } ``` @@ -421,7 +424,6 @@ defer server.Stop() c := make(chan os.Signal, 1) signal.Notify(c, os.Interrupt, syscall.SIGTERM) <-c -os.Exit(0) ``` ## 1.5 Getting Up and Running @@ -521,17 +523,15 @@ I[2019-07-16|18:26:20.330] Accepted a new connection Now open another tab in your terminal and try sending a transaction: ```json -curl -s 'localhost:26657/broadcast_tx_commit?tx="tendermint=rocks"' +$ curl -s 'localhost:26657/broadcast_tx_commit?tx="tendermint=rocks"' { - "jsonrpc": "2.0", - "id": "", - "result": { - "check_tx": { - "gasWanted": "1" - }, - "deliver_tx": {}, - "hash": "CDD3C6DFA0A08CAEDF546F9938A2EEC232209C24AA0E4201194E0AFB78A2C2BB", - "height": "33" + "check_tx": { + "gasWanted": "1", + ... + }, + "deliver_tx": { ... }, + "hash": "CDD3C6DFA0A08CAEDF546F9938A2EEC232209C24AA0E4201194E0AFB78A2C2BB", + "height": "33" } ``` @@ -540,16 +540,18 @@ Response should contain the height where this transaction was committed. 
Now let's check if the given key now exists and its value: ```json -curl -s 'localhost:26657/abci_query?data="tendermint"' +$ curl -s 'localhost:26657/abci_query?data="tendermint"' { - "jsonrpc": "2.0", - "id": "", - "result": { - "response": { - "log": "exists", - "key": "dGVuZGVybWludA==", - "value": "cm9ja3My" - } + "response": { + "code": 0, + "log": "exists", + "info": "", + "index": "0", + "key": "dGVuZGVybWludA==", + "value": "cm9ja3M=", + "proofOps": null, + "height": "6", + "codespace": "" } } ``` diff --git a/docs/tutorials/readme.md b/docs/tutorials/readme.md index a60fba3492..0216df800d 100644 --- a/docs/tutorials/readme.md +++ b/docs/tutorials/readme.md @@ -4,4 +4,4 @@ parent: order: 2 --- -# Guides +# Tutorials diff --git a/go.mod b/go.mod index 946f1f03d9..92981cf04d 100644 --- a/go.mod +++ b/go.mod @@ -3,122 +3,116 @@ module github.com/tendermint/tendermint go 1.17 require ( - github.com/BurntSushi/toml v1.0.0 + github.com/BurntSushi/toml v1.1.0 github.com/Microsoft/go-winio v0.5.2 // indirect - github.com/Workiva/go-datastructures v1.0.53 github.com/adlio/schema v1.3.0 - github.com/btcsuite/btcd v0.22.0-beta + github.com/btcsuite/btcd v0.22.1 github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce github.com/containerd/continuity v0.2.2 // indirect github.com/dashevo/dashd-go v0.23.4 github.com/dashevo/dashd-go/btcec/v2 v2.0.6 // indirect github.com/dashpay/bls-signatures/go-bindings v0.0.0-20201127091120-745324b80143 - github.com/dgraph-io/badger/v2 v2.2007.4 // indirect - github.com/dgraph-io/ristretto v0.1.0 // indirect - github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect - github.com/didip/tollbooth/v6 v6.1.2 // indirect - github.com/didip/tollbooth_chi v0.0.0-20200828173446-a7173453ea21 // indirect - github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c // indirect - github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect - github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 // indirect github.com/fortytw2/leaktest v1.3.0 - github.com/go-chi/chi v4.1.2+incompatible // indirect github.com/go-kit/kit v0.12.0 - github.com/go-pkgz/jrpc v0.2.0 - github.com/go-pkgz/rest v1.13.0 // indirect github.com/gogo/protobuf v1.3.2 - github.com/golang/glog v1.0.0 // indirect github.com/golang/protobuf v1.5.2 - github.com/golang/snappy v0.0.4 // indirect - github.com/golangci/golangci-lint v1.44.2 - github.com/google/btree v1.0.1 // indirect github.com/google/orderedcode v0.0.1 github.com/google/uuid v1.3.0 github.com/gorilla/websocket v1.5.0 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 - github.com/hashicorp/errwrap v1.1.0 // indirect - github.com/hashicorp/go-multierror v1.1.1 - github.com/klauspost/compress v1.15.1 // indirect - github.com/lib/pq v1.10.4 + github.com/lib/pq v1.10.5 github.com/libp2p/go-buffer-pool v0.0.2 - github.com/magiconair/properties v1.8.6 // indirect - github.com/minio/highwayhash v1.0.2 github.com/mroth/weightedrand v0.4.1 - github.com/oasisprotocol/curve25519-voi v0.0.0-20220328075252-7dd334e3daae - github.com/opencontainers/runc v1.1.1 // indirect + github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b github.com/ory/dockertest v3.3.5+incompatible - github.com/petermattis/goid v0.0.0-20220302125637-5f11c28912df // indirect github.com/prometheus/client_golang v1.12.1 - github.com/prometheus/common v0.33.0 // indirect - github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 github.com/rs/cors 
v1.8.2 github.com/rs/zerolog v1.26.1 - github.com/sasha-s/go-deadlock v0.3.1 github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa - github.com/spf13/afero v1.8.2 // indirect github.com/spf13/cobra v1.4.0 - github.com/spf13/viper v1.10.1 - github.com/stretchr/objx v0.3.0 // indirect + github.com/spf13/viper v1.11.0 github.com/stretchr/testify v1.7.1 github.com/tendermint/tm-db v0.6.6 - github.com/vektra/mockery/v2 v2.10.0 - golang.org/x/crypto v0.0.0-20220321153916-2c7772ba3064 - golang.org/x/net v0.0.0-20220325170049-de3da57026de + golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 + golang.org/x/net v0.0.0-20220412020605-290c469a71a5 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/sys v0.0.0-20220330033206-e17cdc41300f // indirect - golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 // indirect - google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7 // indirect - google.golang.org/grpc v1.45.0 + google.golang.org/grpc v1.46.0 gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect - gopkg.in/ini.v1 v1.66.4 // indirect pgregory.net/rapid v0.4.7 ) +require ( + github.com/creachadair/atomicfile v0.2.5 + github.com/creachadair/taskgroup v0.3.2 + github.com/go-pkgz/jrpc v0.2.0 + github.com/golangci/golangci-lint v1.45.2 + github.com/google/go-cmp v0.5.8 + github.com/vektra/mockery/v2 v2.12.1 + gotest.tools v2.2.0+incompatible +) + +require ( + github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f // indirect + github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd // indirect + github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 // indirect + github.com/dashevo/dashd-go/btcutil v1.1.1 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect + github.com/didip/tollbooth/v6 v6.0.1 // indirect + github.com/didip/tollbooth_chi v0.0.0-20200524181329-8b84cd7183d9 // indirect + github.com/go-chi/chi v4.1.1+incompatible // indirect + github.com/go-chi/render v1.0.1 // indirect + github.com/go-pkgz/expirable-cache v0.0.3 // indirect + github.com/go-pkgz/rest v1.5.0 // indirect + github.com/pelletier/go-toml/v2 v2.0.0-beta.8 // indirect + golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect +) + require ( 4d63.com/gochecknoglobals v0.1.0 // indirect github.com/Antonboom/errname v0.1.5 // indirect github.com/Antonboom/nilnil v0.1.0 // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + github.com/DataDog/zstd v1.4.1 // indirect github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect github.com/Masterminds/semver v1.5.0 // indirect github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect github.com/OpenPeeDeeP/depguard v1.1.0 // indirect github.com/alexkohler/prealloc v1.0.0 // indirect github.com/ashanbrown/forbidigo v1.3.0 // indirect - github.com/ashanbrown/makezero v1.1.0 // indirect + github.com/ashanbrown/makezero v1.1.1 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bkielbasa/cyclop v1.2.0 // indirect - github.com/blizzy78/varnamelen v0.6.0 // indirect + github.com/blizzy78/varnamelen v0.6.1 // indirect github.com/bombsimon/wsl/v3 v3.3.0 // indirect github.com/breml/bidichk v0.2.2 // indirect github.com/breml/errchkjson v0.2.3 // indirect - github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f // indirect - github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd // indirect - github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 // indirect github.com/butuzov/ireturn v0.1.1 // 
indirect github.com/cenkalti/backoff v2.2.1+incompatible // indirect github.com/cespare/xxhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/charithe/durationcheck v0.0.9 // indirect github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af // indirect - github.com/daixiang0/gci v0.3.1-0.20220208004058-76d765e3ab48 // indirect - github.com/dashevo/dashd-go/btcutil v1.1.1 // indirect + github.com/creachadair/tomledit v0.0.19 + github.com/daixiang0/gci v0.3.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect - github.com/denis-tingajkin/go-header v0.4.2 // indirect + github.com/denis-tingaikin/go-header v0.4.3 // indirect + github.com/dgraph-io/badger/v2 v2.2007.2 // indirect + github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de // indirect + github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 // indirect github.com/docker/go-connections v0.4.0 // indirect github.com/docker/go-units v0.4.0 // indirect github.com/dustin/go-humanize v1.0.0 // indirect github.com/esimonov/ifshort v1.0.4 // indirect github.com/ettle/strcase v0.1.1 // indirect + github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 // indirect + github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect + github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 // indirect github.com/fatih/color v1.13.0 // indirect github.com/fatih/structtag v1.2.0 // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/fzipp/gocyclo v0.4.0 // indirect - github.com/go-chi/render v1.0.1 // indirect github.com/go-critic/go-critic v0.6.2 // indirect - github.com/go-pkgz/expirable-cache v0.0.3 // indirect github.com/go-toolsmith/astcast v1.0.0 // indirect github.com/go-toolsmith/astcopy v1.0.0 // indirect github.com/go-toolsmith/astequal v1.0.1 // indirect @@ -129,6 +123,7 @@ require ( github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b // indirect github.com/gobwas/glob v0.2.3 // indirect github.com/gofrs/flock v0.8.1 // indirect + github.com/golang/snappy v0.0.3 // indirect github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 // indirect github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613 // indirect @@ -138,12 +133,15 @@ require ( github.com/golangci/misspell v0.3.5 // indirect github.com/golangci/revgrep v0.0.0-20210930125155-c22e5001d4f2 // indirect github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 // indirect - github.com/google/go-cmp v0.5.7 // indirect + github.com/google/btree v1.0.0 // indirect github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8 // indirect github.com/gostaticanalysis/analysisutil v0.7.1 // indirect github.com/gostaticanalysis/comment v1.4.2 // indirect github.com/gostaticanalysis/forcetypeassert v0.1.0 // indirect github.com/gostaticanalysis/nilerr v0.1.1 // indirect + github.com/hashicorp/errwrap v1.0.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 + github.com/hashicorp/go-version v1.4.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/hexops/gotextdiff v1.0.3 // indirect github.com/inconshreveable/mousetrap v1.0.0 // indirect @@ -160,6 +158,7 @@ require ( github.com/ldez/gomoddirectives v0.2.2 // indirect github.com/ldez/tagliatelle v0.3.1 // indirect github.com/leonklingele/grouper v1.1.0 // indirect + github.com/magiconair/properties v1.8.6 // indirect 
github.com/maratori/testpackage v1.0.1 // indirect github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 // indirect github.com/mattn/go-colorable v0.1.12 // indirect @@ -167,7 +166,6 @@ require ( github.com/mattn/go-runewidth v0.0.9 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect github.com/mbilski/exhaustivestruct v1.2.0 // indirect - github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517 // indirect github.com/mgechev/revive v1.1.4 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/mapstructure v1.4.3 // indirect @@ -179,12 +177,14 @@ require ( github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.0.2 // indirect + github.com/opencontainers/runc v1.0.3 // indirect github.com/pelletier/go-toml v1.9.4 // indirect github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/polyfloyd/go-errorlint v0.0.0-20211125173453-6d6d39c5bb8b // indirect github.com/prometheus/client_model v0.2.0 // indirect + github.com/prometheus/common v0.32.1 // indirect github.com/prometheus/procfs v0.7.3 // indirect github.com/quasilyte/go-ruleguard v0.3.15 // indirect github.com/quasilyte/gogrep v0.0.0-20220103110004-ffaa07af02e3 // indirect @@ -192,17 +192,19 @@ require ( github.com/ryancurrah/gomodguard v1.2.3 // indirect github.com/ryanrolds/sqlclosecheck v0.3.0 // indirect github.com/sanposhiho/wastedassign/v2 v2.0.6 // indirect - github.com/securego/gosec/v2 v2.9.6 // indirect + github.com/securego/gosec/v2 v2.10.0 // indirect github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect github.com/sirupsen/logrus v1.8.1 // indirect - github.com/sivchari/containedctx v1.0.1 // indirect + github.com/sivchari/containedctx v1.0.2 // indirect github.com/sivchari/tenv v1.4.7 // indirect github.com/sonatard/noctx v0.0.1 // indirect github.com/sourcegraph/go-diff v0.6.1 // indirect + github.com/spf13/afero v1.8.2 // indirect github.com/spf13/cast v1.4.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/ssgreg/nlreturn/v2 v2.2.1 // indirect + github.com/stretchr/objx v0.1.1 // indirect github.com/subosito/gotenv v1.2.0 // indirect github.com/sylvia7788/contextcheck v1.0.4 // indirect github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca // indirect @@ -210,7 +212,7 @@ require ( github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c // indirect github.com/tetafro/godot v1.4.11 // indirect github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144 // indirect - github.com/tomarrell/wrapcheck/v2 v2.4.0 // indirect + github.com/tomarrell/wrapcheck/v2 v2.5.0 // indirect github.com/tommy-muehle/go-mnd/v2 v2.5.0 // indirect github.com/ultraware/funlen v0.0.3 // indirect github.com/ultraware/whitespace v0.0.5 // indirect @@ -219,16 +221,19 @@ require ( github.com/yeya24/promlinter v0.1.1-0.20210918184747-d757024714a1 // indirect gitlab.com/bosi/decorder v0.2.1 // indirect go.etcd.io/bbolt v1.3.6 // indirect - golang.org/x/mod v0.5.1 // indirect + golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect + golang.org/x/sys v0.0.0-20220412211240-33da011f77ad // indirect golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect golang.org/x/text v0.3.7 // indirect - golang.org/x/tools v0.1.9 // indirect - golang.org/x/xerrors 
v0.0.0-20200804184101-5ec99f83aff1 // indirect + golang.org/x/tools v0.1.10 // indirect + golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f // indirect + google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac // indirect google.golang.org/protobuf v1.28.0 // indirect + gopkg.in/ini.v1 v1.66.4 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect honnef.co/go/tools v0.2.2 // indirect - mvdan.cc/gofumpt v0.2.1 // indirect + mvdan.cc/gofumpt v0.3.0 // indirect mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed // indirect mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b // indirect mvdan.cc/unparam v0.0.0-20211214103731-d0ef000c54e5 // indirect diff --git a/go.sum b/go.sum index 5c8c32076d..379c8612b5 100644 --- a/go.sum +++ b/go.sum @@ -34,12 +34,16 @@ cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM= cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.6.0/go.mod h1:afJwI0vaXwAG54kI7A//lP/lSPDkQORQuMkv56TxEPU= @@ -68,12 +72,14 @@ github.com/Azure/azure-sdk-for-go/sdk/internal v0.7.0/go.mod h1:yqy467j36fJxcRV2 github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.0.0 h1:dtDWrepsVPfW9H/4y7dDgFc2MBUSeJhlaDtK13CxFlU= github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.1.0 h1:ksErzDEI1khOiGPgpwuI7x2ebx/uXQNw7xJpn9Eq1+I= +github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/zstd v1.4.1 
h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= @@ -99,8 +105,6 @@ github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWX github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/Workiva/go-datastructures v1.0.53 h1:J6Y/52yX10Xc5JjXmGtWoSSxs3mZnGSaq37xZZh7Yig= -github.com/Workiva/go-datastructures v1.0.53/go.mod h1:1yZL+zfsztete+ePzZz/Zb1/t5BnDuE2Ya2MMGhzP6A= github.com/adlio/schema v1.3.0 h1:eSVYLxYWbm/6ReZBCkLw4Fz7uqC+ZNoPvA39bOwi52A= github.com/adlio/schema v1.3.0/go.mod h1:51QzxkpeFs6lRY11kPye26IaFPOV+HqEj01t5aXXKfs= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= @@ -127,8 +131,8 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/ashanbrown/forbidigo v1.3.0 h1:VkYIwb/xxdireGAdJNZoo24O4lmnEWkactplBlWTShc= github.com/ashanbrown/forbidigo v1.3.0/go.mod h1:vVW7PEdqEFqapJe95xHkTfB1+XvZXBFg8t0sG2FIxmI= -github.com/ashanbrown/makezero v1.1.0 h1:b2FVq4dTlBpy9f6qxhbyWH+6zy56IETE9cFbBGtDqs8= -github.com/ashanbrown/makezero v1.1.0/go.mod h1:oG9Dnez7/ESBqc4EdrdNlryeo7d0KcW1ftXHm7nU/UU= +github.com/ashanbrown/makezero v1.1.1 h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5FcB28s= +github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI= github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.36.30/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= @@ -145,8 +149,8 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/bkielbasa/cyclop v1.2.0 h1:7Jmnh0yL2DjKfw28p86YTd/B4lRGcNuu12sKE35sM7A= github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI= -github.com/blizzy78/varnamelen v0.6.0 h1:TOIDk9qRIMspALZKX8x+5hQfAjuvAFogppnxtvuNmBo= -github.com/blizzy78/varnamelen v0.6.0/go.mod h1:zy2Eic4qWqjrxa60jG34cfL0VXcSwzUrIx68eJPb4Q8= +github.com/blizzy78/varnamelen v0.6.1 h1:kttPCLzXFa+0nt++Cw9fb7GrSSM4KkyIAoX/vXsbuqA= +github.com/blizzy78/varnamelen v0.6.1/go.mod h1:zy2Eic4qWqjrxa60jG34cfL0VXcSwzUrIx68eJPb4Q8= github.com/bombsimon/wsl/v3 v3.3.0 h1:Mka/+kRLoQJq7g2rggtgQsjuI/K5Efd87WX96EWFxjM= github.com/bombsimon/wsl/v3 v3.3.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc= github.com/breml/bidichk v0.2.2 h1:w7QXnpH0eCBJm55zGCTJveZEkQBt6Fs5zThIdA6qQ9Y= @@ -154,8 +158,10 @@ github.com/breml/bidichk v0.2.2/go.mod h1:zbfeitpevDUGI7V91Uzzuwrn4Vls8MoBMrwtt7 github.com/breml/errchkjson v0.2.3 h1:97eGTmR/w0paL2SwfRPI1jaAZHaH/fXnxWTw2eEIqE0= github.com/breml/errchkjson v0.2.3/go.mod h1:jZEATw/jF69cL1iy7//Yih8yp/mXp2CBoBr9GJwCAsY= github.com/btcsuite/btcd v0.20.1-beta/go.mod 
h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= -github.com/btcsuite/btcd v0.22.0-beta h1:LTDpDKUM5EeOFBPM8IXpinEcmZ6FWfNZbE3lfrfdnWo= -github.com/btcsuite/btcd v0.22.0-beta/go.mod h1:9n5ntfhhHQBIhUvlhDvD3Qg6fRUj4jkN0VB8L8svzOA= +github.com/btcsuite/btcd v0.22.1 h1:CnwP9LM/M9xuRrGSCGeMVs9iv09uMqwsVX7EeIpgV2c= +github.com/btcsuite/btcd v0.22.1/go.mod h1:wqgTSL29+50LRkmOVknEdmt8ZojIzhuWvgu/iptuN7Y= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= @@ -188,12 +194,10 @@ github.com/charithe/durationcheck v0.0.9/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6pr github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af h1:spmv8nSH9h5oCQf40jt/ufBCt9j0/58u4G+rkeMqXGI= github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af/go.mod h1:Qjyv4H3//PWVzTeCezG2b9IRn6myJxJSr4TD/xo6ojU= github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= -github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= -github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng= @@ -210,7 +214,6 @@ github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= -github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= github.com/containerd/continuity v0.2.1/go.mod h1:wCYX+dRqZdImhGucXOqTQn05AhX6EUDaGEMUzTFFpLg= github.com/containerd/continuity v0.2.2 h1:QSqfxcn8c+12slxwu00AtzXrsami0MJb/MQs9lOLHLA= github.com/containerd/continuity v0.2.2/go.mod h1:pWygW9u7LtS1o4N/Tn0FoCFDIXZ7rxcMX7HX1Dmibvk= @@ -229,12 +232,17 @@ github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwc github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creachadair/atomicfile v0.2.5 
h1:wkOlpsjyJOvJ3Hd8juHKdirJnCSIPacvtY21/3nYjAo= +github.com/creachadair/atomicfile v0.2.5/go.mod h1:BRq8Une6ckFneYXZQ+kO7p1ZZP3I2fzVzf28JxrIkBc= +github.com/creachadair/taskgroup v0.3.2 h1:zlfutDS+5XG40AOxcHDSThxKzns8Tnr9jnr6VqkYlkM= +github.com/creachadair/taskgroup v0.3.2/go.mod h1:wieWwecHVzsidg2CsUnFinW1faVN4+kq+TDlRJQ0Wbk= +github.com/creachadair/tomledit v0.0.19 h1:zbpfUtYFYFdpRjwJY9HJlto1iZ4M5YwYB6qqc37F6UM= +github.com/creachadair/tomledit v0.0.19/go.mod h1:gvtfnSZLa+YNQD28vaPq0Nk12bRxEhmUdBzAWn+EGF4= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= -github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/daixiang0/gci v0.3.1-0.20220208004058-76d765e3ab48 h1:9rJGqaC5do9zkvKrtRdx0HJoxj7Jd6vDa0O2eBU0AbU= -github.com/daixiang0/gci v0.3.1-0.20220208004058-76d765e3ab48/go.mod h1:jaASoJmv/ykO9dAAPy31iJnreV19248qKDdVWf3QgC4= +github.com/daixiang0/gci v0.3.3 h1:55xJKH7Gl9Vk6oQ1cMkwrDWjAkT1D+D1G9kNmRcAIY4= +github.com/daixiang0/gci v0.3.3/go.mod h1:1Xr2bxnQbDxCqqulUOv8qpGqkgRw9RSCGGjEC2LjF8o= github.com/dashevo/dashd-go v0.23.0-test.7/go.mod h1:vc45V5rc4L4uA8ccXwyj6uuAcXQ0sdrZZiqKCkAhpps= github.com/dashevo/dashd-go v0.23.0-test.8/go.mod h1:OLSRGjMkJbTVHVDDaAYOJ0ronCLDBe7AV02BzHo40VE= github.com/dashevo/dashd-go v0.23.2/go.mod h1:GaTY1dpsl+KkfQwW6APnMim9YUx78XiyDIwn3aVN4Rk= @@ -260,27 +268,22 @@ github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs= github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= -github.com/denis-tingajkin/go-header v0.4.2 h1:jEeSF4sdv8/3cT/WY8AgDHUoItNSoEZ7qg9dX7pc218= -github.com/denis-tingajkin/go-header v0.4.2/go.mod h1:eLRHAVXzE5atsKAnNRDB90WHCFFnBUn4RN0nRcs1LJA= +github.com/denis-tingaikin/go-header v0.4.3 h1:tEaZKAlqql6SKCY++utLmkPLd6K8IBM20Ha7UVm+mtU= +github.com/denis-tingaikin/go-header v0.4.3/go.mod h1:0wOCWuN71D5qIgE2nz9KrKmuYBAC2Mra5RassOIQ2/c= github.com/denisenkom/go-mssqldb v0.12.0 h1:VtrkII767ttSPNRfFekePK3sctr+joXgO58stqQbtUA= github.com/denisenkom/go-mssqldb v0.12.0/go.mod h1:iiK0YP1ZeepvmBQk/QpLEhhTNJgfzrpArPY/aFvc9yU= +github.com/dgraph-io/badger/v2 v2.2007.2 h1:EjjK0KqwaFMlPin1ajhP943VPENHJdEz1KLIegjaI3k= github.com/dgraph-io/badger/v2 v2.2007.2/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= -github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o= -github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk= +github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de h1:t0UHb5vdojIDUqktM6+xJAfScFBsVpXZmqC9dsgJmeA= github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= -github.com/dgraph-io/ristretto v0.1.0 h1:Jv3CGQHp9OjuMBSne1485aDpUkTKEcUqF+jm/LuerPI= -github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= 
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= -github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/didip/tollbooth/v6 v6.0.1 h1:QvLvRpB1G2bzKvkRze0muMUBlGN9H1z7tJ4DH4ypWOU= github.com/didip/tollbooth/v6 v6.0.1/go.mod h1:j2pKs+JQ5PvU/K4jFnrnwntrmfUbYLJE5oSdxR37FD0= -github.com/didip/tollbooth/v6 v6.1.2 h1:Kdqxmqw9YTv0uKajBUiWQg+GURL/k4vy9gmLCL01PjQ= -github.com/didip/tollbooth/v6 v6.1.2/go.mod h1:xjcse6CTHCLuOkzsWrEgdy9WPJFv+p/x6v+MyfP+O9s= +github.com/didip/tollbooth_chi v0.0.0-20200524181329-8b84cd7183d9 h1:gTh8fKuI/yLqQtZEPlDX3ZGsiTPZIe0ADHsxXSbwO1I= github.com/didip/tollbooth_chi v0.0.0-20200524181329-8b84cd7183d9/go.mod h1:YWyIfq3y4ArRfWZ9XksmuusP+7Mad+T0iFZ0kv0XG/M= -github.com/didip/tollbooth_chi v0.0.0-20200828173446-a7173453ea21 h1:x7YpwKSBIBcKe9I3aTNOqgSyJ6QKDdtOxnEkxBTsi9w= -github.com/didip/tollbooth_chi v0.0.0-20200828173446-a7173453ea21/go.mod h1:0ZVa6kSzS011nfTC1rELyxK4tjVf6vqBnOv7oY2KlsA= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= @@ -302,6 +305,7 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.0.14/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws= @@ -309,12 +313,12 @@ github.com/esimonov/ifshort v1.0.4 h1:6SID4yGWfRae/M7hkVDVVyppy8q/v9OuxNdmjLQStB github.com/esimonov/ifshort v1.0.4/go.mod h1:Pe8zjlRrJ80+q2CxHLfEOfTwxCZ4O+MuhcHcfgNWTk0= github.com/ettle/strcase v0.1.1 h1:htFueZyVeE1XNnMEfbqp5r67qAN/4r6ya1ysq8Q+Zcw= github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY= -github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0= -github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= +github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 h1:0JZ+dUmQeA8IIVUMzysrX4/AKuQwWhV2dYQuPZdvdSQ= +github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= 
-github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk= -github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= +github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 h1:E2s37DuLxFhQDg5gKsWoLBOB0n+ZW8s599zru8FJ2/Y= +github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= @@ -329,8 +333,8 @@ github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHqu github.com/franela/goblin v0.0.0-20210519012713-85d372ac71e2/go.mod h1:VzmDKDJVZI3aJmnRI9VjAn9nJ8qPPsN1fqzr9dqInIo= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= -github.com/frankban/quicktest v1.14.0 h1:+cqqvzZV87b4adx/5ayVOaYZ2CrvM4ejQvUdBzPPUss= -github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= +github.com/frankban/quicktest v1.14.2 h1:SPb1KFFmM+ybpEjPUhCCkZOM5xlovT5UbrMvWnXyBns= +github.com/frankban/quicktest v1.14.2/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= @@ -339,9 +343,8 @@ github.com/fullstorydev/grpcurl v1.6.0/go.mod h1:ZQ+ayqbKMJNhzLmbpCiurTVlaK2M/3n github.com/fzipp/gocyclo v0.4.0 h1:IykTnjwh2YLyYkGa0y92iTTEQcnyAz0r9zOo15EbJ7k= github.com/fzipp/gocyclo v0.4.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-chi/chi v4.1.1+incompatible h1:MmTgB0R8Bt/jccxp+t6S/1VGIKdJw5J74CK/c9tTfA4= github.com/go-chi/chi v4.1.1+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= -github.com/go-chi/chi v4.1.2+incompatible h1:fGFk2Gmi/YKXk0OmGfBh0WgmN3XB8lVnEyNz34tQRec= -github.com/go-chi/chi v4.1.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= github.com/go-chi/render v1.0.1 h1:4/5tis2cKaNdnv9zFLfXzcquC9HbeZgCnxGnKrltBS8= github.com/go-chi/render v1.0.1/go.mod h1:pq4Rr7HbnsdaeHagklXub+p6Wd16Af5l9koip1OvJns= github.com/go-critic/go-critic v0.6.2 h1:L5SDut1N4ZfsWZY0sH4DCrsHLHnhuuWak2wa165t9gs= @@ -364,9 +367,8 @@ github.com/go-pkgz/expirable-cache v0.0.3 h1:rTh6qNPp78z0bQE6HDhXBHUwqnV9i09Vm6d github.com/go-pkgz/expirable-cache v0.0.3/go.mod h1:+IauqN00R2FqNRLCLA+X5YljQJrwB179PfiAoMPlTlQ= github.com/go-pkgz/jrpc v0.2.0 h1:CLy/eZyekjraVrxZV18N2R1mYLMJ/nWrgdfyIOGPY/E= github.com/go-pkgz/jrpc v0.2.0/go.mod h1:wd8vtQ4CgtCnuqua6x2b1SKIgv0VSOh5Dn0uUITbiUE= +github.com/go-pkgz/rest v1.5.0 h1:C8SxXcXza4GiUUAn/95iCkvoIrGbS30qpwK19iqlrWQ= github.com/go-pkgz/rest v1.5.0/go.mod h1:nQaM3RhSTUAmbBZWY4hfe4buyeC9VckvhoCktiQXJxI= -github.com/go-pkgz/rest v1.13.0 h1:1larroBzcOYC5ySdrCLaXIEG1SGNnPborxm6uK+2RLQ= -github.com/go-pkgz/rest v1.13.0/go.mod h1:KUWAqbDteYGS/CiXftomQsKjtEOifXsJ36Ka0skYbmk= github.com/go-redis/redis v6.15.8+incompatible/go.mod 
h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= @@ -397,7 +399,6 @@ github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -413,8 +414,6 @@ github.com/golang-sql/sqlexp v0.0.0-20170517235910-f1bb20e5a188 h1:+eHOFJl1BaXrQ github.com/golang-sql/sqlexp v0.0.0-20170517235910-f1bb20e5a188/go.mod h1:vXjM/+wXQnTPR4KqTKDgJukSZ6amVRtWMPEjE6sQoK8= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -451,9 +450,8 @@ github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 h1:23T5iq8rbUYlhpt5DB4XJkc6BU31uODLD1o1gKvZmD0= github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM= @@ -462,8 +460,8 @@ github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613 h1:9kfjN3AdxcbsZB github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8= github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a h1:iR3fYXUjHCR97qWS8ch1y9zPNsgXThGwjKPrYfqMPks= github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= -github.com/golangci/golangci-lint v1.44.2 h1:MzvkDt1j1OHkv42/feNJVNNXRFACPp7aAWBWDo5aYQw= -github.com/golangci/golangci-lint v1.44.2/go.mod 
h1:KjBgkLvsTWDkhfu12iCrv0gwL1kON5KNhbyjQ6qN7Jo= +github.com/golangci/golangci-lint v1.45.2 h1:9I3PzkvscJkFAQpTQi5Ga0V4qWdJERajX1UZ7QqkW+I= +github.com/golangci/golangci-lint v1.45.2/go.mod h1:f20dpzMmUTRp+oYnX0OGjV1Au3Jm2JeI9yLqHq1/xsI= github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA= github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA= @@ -475,9 +473,8 @@ github.com/golangci/revgrep v0.0.0-20210930125155-c22e5001d4f2/go.mod h1:LK+zW4M github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 h1:zwtduBRr5SSWhqsYNgcuWO2kFlpdOZbP0+yRjmvPGys= github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= github.com/google/certificate-transparency-go v1.1.1/go.mod h1:FDKqPvSXawb2ecErVRrD+nfy23RCzyl7eqVCEmlT1Zs= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -492,8 +489,9 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= @@ -530,6 +528,8 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gookit/color v1.5.0/go.mod h1:43aQb+Zerm/BWh2GnrgOQm7ffz7tvQXEKV6BFMl7wAo= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= 
@@ -580,15 +580,15 @@ github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/ github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0= github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= -github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.2.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= @@ -602,8 +602,9 @@ github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerX github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.2.1 h1:zEfKbn2+PDgroKdiOzqiE8rsmLqU2uwi5PB5pBJ3TkI= github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.4.0 h1:aAQzgqIrRKRa7w75CKpbBxYsmUoPjzVm1W59ca1L0J4= +github.com/hashicorp/go-version v1.4.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= @@ -616,6 +617,7 @@ github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOn github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= +github.com/hashicorp/serf v0.9.7/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= @@ -675,12 +677,9 @@ github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= 
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/kkdai/bstream v1.0.0/go.mod h1:FDnDOHt5Yx4p3FaHcioFT0QjDOtgUpvjeZqAs+NVZZA= -github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= -github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -712,8 +711,9 @@ github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJ github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.10.4 h1:SO9z7FRPzA03QhHKJrH5BXA6HU1rS4V2nIVrrNC1iYk= github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.5 h1:J+gdV2cUmX7ZqL2B0lFcW0m+egaHC2V3lpO8nWxyYiQ= +github.com/lib/pq v1.10.5/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/libp2p/go-buffer-pool v0.0.2 h1:QNK2iAFa8gjAe1SPz6mHSMuCcjs+X1wlHzeOSqcmlfs= github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= @@ -756,7 +756,6 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0j github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/mbilski/exhaustivestruct v1.2.0 h1:wCBmUnSYufAHO6J4AVWY6ff+oxWxsVFrwgOdMUQePUo= github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc= -github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517 h1:zpIH83+oKzcpryru8ceC6BxnoG8TBrhgAvRg8obzup0= github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= github.com/mgechev/revive v1.1.4 h1:sZOjY6GU35Kr9jKa/wsKSHgrFz8eASIB5i3tqWZMp0A= github.com/mgechev/revive v1.1.4/go.mod h1:ZZq2bmyssGh8MSPz3VVziqRNIMYTJXzP8MUKG90vZ9A= @@ -768,7 +767,6 @@ github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0 github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= -github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g= github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= github.com/mitchellh/cli v1.1.0/go.mod 
h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= @@ -784,7 +782,6 @@ github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= -github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= @@ -824,8 +821,8 @@ github.com/nishanths/predeclared v0.2.1/go.mod h1:HvkGJcA3naj4lOwnFXFDkFxVtSqQMB github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/oasisprotocol/curve25519-voi v0.0.0-20220328075252-7dd334e3daae h1:7smdlrfdcZic4VfsGKD2ulWL804a4GVphr4s7WZxGiY= -github.com/oasisprotocol/curve25519-voi v0.0.0-20220328075252-7dd334e3daae/go.mod h1:hVoHR2EVESiICEMbg137etN/Lx+lSrHPTD39Z/uE+2s= +github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b h1:MKwruh+HeCSKWphkxuzvRzU4QzDkg7yiPkDVV0cDFgI= +github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b/go.mod h1:TLJifjWF6eotcfzDjKZsDqWJ+73Uvj/N85MvVyrvynM= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= @@ -840,26 +837,26 @@ github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9k github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= -github.com/onsi/ginkgo/v2 v2.0.0 h1:CcuG/HvWNkkaqCUpJifQY8z7qEMBJya6aLPx6ftGyjQ= github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/ginkgo/v2 v2.1.3 h1:e/3Cwtogj0HA+25nMP1jCMDIf8RtRYbGwGGuBIFztkc= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= -github.com/onsi/gomega v1.17.0 h1:9Luw4uT5HTjHTN8+aNcSThgH1vdXnmdJ8xIfZ4wyTRE= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= +github.com/onsi/gomega v1.18.1/go.mod 
h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v1.0.3 h1:1hbqejyQWCJBvtKAfdO0b1FmaEf2z/bxnjqbARass5k= github.com/opencontainers/runc v1.0.3/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= -github.com/opencontainers/runc v1.1.1 h1:PJ9DSs2sVwE0iVr++pAHE6QkS9tzcVWozlPifdwMgrU= -github.com/opencontainers/runc v1.1.1/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= -github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa4YDFlwRYAMyE= @@ -876,14 +873,12 @@ github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144T github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM= github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml/v2 v2.0.0-beta.8 h1:dy81yyLYJDwMTifq24Oi/IslOslRrDSb3jwDggjz3Z0= +github.com/pelletier/go-toml/v2 v2.0.0-beta.8/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= github.com/performancecopilot/speed/v4 v4.0.0/go.mod h1:qxrSyuDGrTOWfV+uKRFhfxw6h/4HXRGUiZiufxo49BM= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= -github.com/petermattis/goid v0.0.0-20220302125637-5f11c28912df h1:/B1Q9E4W1cmiwPQfC2vymWL7FXHCEsUzg8Rywl5avtQ= -github.com/petermattis/goid v0.0.0-20220302125637-5f11c28912df/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d h1:CdDQnGF8Nq9ocOS/xlSptM1N3BbrA6/kmaep5ggwaIA= github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw= -github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= @@ -922,9 +917,8 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod 
h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.33.0 h1:rHgav/0a6+uYgGdNt3jwz8FNSesO/Hsang3O0T9A5SE= -github.com/prometheus/common v0.33.0/go.mod h1:gB3sOl7P0TvJabZpLY5uQMpUqRCPPCyRLCZYc7JZTNE= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -950,8 +944,6 @@ github.com/quasilyte/gogrep v0.0.0-20220103110004-ffaa07af02e3/go.mod h1:wSEyW6O github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 h1:L8QM9bvf68pVdQ3bCFZMDmnt9yqcMBro1pC7F+IPYMY= github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= -github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -975,19 +967,17 @@ github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb github.com/sagikazarmark/crypt v0.1.0/go.mod h1:B/mN0msZuINBtQ1zZLEQcegFJJf9vnYIR88KRMEuODE= github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig= github.com/sagikazarmark/crypt v0.4.0/go.mod h1:ALv2SRj7GxYV4HO9elxH9nS6M9gW+xDNxqmyJ6RfDFM= +github.com/sagikazarmark/crypt v0.5.0/go.mod h1:l+nzl7KWh51rpzp2h7t4MZWyiEWdhNpOAnclKvg+mdA= github.com/sanposhiho/wastedassign/v2 v2.0.6 h1:+6/hQIHKNJAUixEj6EmOngGIisyeI+T3335lYTyxRoA= github.com/sanposhiho/wastedassign/v2 v2.0.6/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= -github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0= -github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= -github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= -github.com/securego/gosec/v2 v2.9.6 h1:ysfvgQBp2zmTgXQl65UkqEkYlQGbnVSRUGpCrJiiR4c= -github.com/securego/gosec/v2 v2.9.6/go.mod h1:EESY9Ywxo/Zc5NyF/qIj6Cop+4PSWM0F0OfGD7FdIXc= +github.com/securego/gosec/v2 v2.10.0 h1:l6BET4EzWtyUXCpY2v7N92v0DDCas0L7ngg3bpqbr8g= +github.com/securego/gosec/v2 v2.10.0/go.mod h1:PVq8Ewh/nCN8l/kKC6zrGXSr7m2NmEK6ITIAWMtIaA0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c 
h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= -github.com/shirou/gopsutil/v3 v3.22.1/go.mod h1:WapW1AOOPlHyXr+yOyw3uYx36enocrtSoSBy0L5vUHY= +github.com/shirou/gopsutil/v3 v3.22.2/go.mod h1:WapW1AOOPlHyXr+yOyw3uYx36enocrtSoSBy0L5vUHY= github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= @@ -997,8 +987,8 @@ github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrf github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sivchari/containedctx v1.0.1 h1:fJq44cX+tD+uT5xGrsg25GwiaY61NGybQk9WWKij3Uo= -github.com/sivchari/containedctx v1.0.1/go.mod h1:PwZOeqm4/DLoJOqMSIJs3aKqXRX4YO+uXww087KZ7Bw= +github.com/sivchari/containedctx v1.0.2 h1:0hLQKpgC53OVF1VT7CeoFHk9YKstur1XOgfYIc1yrHI= +github.com/sivchari/containedctx v1.0.2/go.mod h1:PwZOeqm4/DLoJOqMSIJs3aKqXRX4YO+uXww087KZ7Bw= github.com/sivchari/tenv v1.4.7 h1:FdTpgRlTue5eb5nXIYgS/lyVXSjugU8UUVDwhP1NLU8= github.com/sivchari/tenv v1.4.7/go.mod h1:5nF+bITvkebQVanjU6IuMbvIot/7ReNsUV7I5NbprB0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -1040,17 +1030,17 @@ github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DM github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.9.0/go.mod h1:+i6ajR7OX2XaiBkrcZJFK21htRk7eDeLg7+O6bhUPP4= github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM= -github.com/spf13/viper v1.10.1 h1:nuJZuYpG7gTj/XqiUwg8bA0cp1+M2mC3J4g5luUYBKk= github.com/spf13/viper v1.10.1/go.mod h1:IGlFPqhNAPKRxohIzWpI5QEy4kuI7tcl5WvR+8qy1rU= +github.com/spf13/viper v1.11.0 h1:7OX/1FS6n7jHD1zGrZTM7WtY13ZELRyosK4k93oPr44= +github.com/spf13/viper v1.11.0/go.mod h1:djo0X/bA5+tYVoCn+C7cAYJGcVn/qYLFTG8gdUsX7Zk= github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0= github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v1.0.0/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20200128134331-0f66f006fb2e/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.3.0 h1:NGXK3lHquSN08v5vWalVI/L8XU9hdzE/G6xsrze47As= -github.com/stretchr/objx v0.3.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v0.0.0-20170130113145-4d4bfba8f1d1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= @@ -1082,18 +1072,16 @@ github.com/tetafro/godot v1.4.11 h1:BVoBIqAf/2QdbFmSwAWnaIqDivZdOV0ZRwEm6jivLKw= github.com/tetafro/godot v1.4.11/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8= github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144 h1:kl4KhGNsJIbDHS9/4U9yQo1UcPQM0kOMJHn29EoH/Ro= github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= -github.com/tinylib/msgp v1.1.5/go.mod h1:eQsjooMTnV42mHu917E26IogZ2930nFyBQdofk10Udg= github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs= github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tomarrell/wrapcheck/v2 v2.4.0 h1:mU4H9KsqqPZUALOUbVOpjy8qNQbWLoLI9fV68/1tq30= -github.com/tomarrell/wrapcheck/v2 v2.4.0/go.mod h1:68bQ/eJg55BROaRTbMjC7vuhL2OgfoG8bLp9ZyoBfyY= +github.com/tomarrell/wrapcheck/v2 v2.5.0 h1:g27SGGHNoQdvHz4KZA9o4v09RcWzylR+b1yueE5ECiw= +github.com/tomarrell/wrapcheck/v2 v2.5.0/go.mod h1:68bQ/eJg55BROaRTbMjC7vuhL2OgfoG8bLp9ZyoBfyY= github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4= github.com/tommy-muehle/go-mnd/v2 v2.5.0 h1:iAj0a8e6+dXSL7Liq0aXPox36FiN1dBbjA6lt9fl65s= github.com/tommy-muehle/go-mnd/v2 v2.5.0/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= -github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+lC5RZ8IT9rBXDaEDnpnw/Cl+HFiw+v/7Q= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= @@ -1110,8 +1098,8 @@ github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyC github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus= github.com/valyala/quicktemplate v1.7.0/go.mod h1:sqKJnoaOF88V07vkO+9FL8fb9uZg/VPSJnLYn+LmLk8= github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc= -github.com/vektra/mockery/v2 v2.10.0 h1:MiiQWxwdq7/ET6dCXLaJzSGEN17k758H7JHS9kOdiks= -github.com/vektra/mockery/v2 v2.10.0/go.mod h1:m/WO2UzWzqgVX3nvqpRQq70I4Z7jbSCRhdmkgtp+Ab4= +github.com/vektra/mockery/v2 v2.12.1 h1:BAJk2fGjVg/P9Fi+BxZD1/ZeKTOclpeAb/SKCc12zXc= +github.com/vektra/mockery/v2 v2.12.1/go.mod h1:8vf4KDDUptfkyypzdHLuE7OE2xA7Gdt60WgIS8PgD+U= github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= @@ -1143,10 +1131,13 @@ go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.etcd.io/etcd v0.0.0-20200513171258-e048e166ab9c/go.mod h1:xCI7ZzBfRuGgBXyXO6yfWfDmlWd35khcWpUa4L0xI/k= 
go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/api/v3 v3.5.2/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/pkg/v3 v3.5.2/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= go.etcd.io/etcd/client/v2 v2.305.1/go.mod h1:pMEacxZW7o8pg4CrFE7pquyCJJzZvkvdD2RibOCCCGs= +go.etcd.io/etcd/client/v2 v2.305.2/go.mod h1:2D7ZejHVMIfog1221iLSYlQRzrtECw3kz4I4VAQm3qI= go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= go.mozilla.org/mozlog v0.0.0-20170222151521-4bb13139d403/go.mod h1:jHoPAGnDrCy6kaI2tAze5Prf0Nr0w/oNkROt2lw3n3o= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -1196,12 +1187,13 @@ golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5 golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210915214749-c084706c2272/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211215165025-cf75a172585e/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220321153916-2c7772ba3064 h1:S25/rfnfsMVgORT4/J61MJ7rdyseOZOyvLIrZEZ7s6s= -golang.org/x/crypto v0.0.0-20220321153916-2c7772ba3064/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 h1:kUhD7nTDoI3fVd9G4ORWrbV5NY0liEs/Jg2pv5f+bBA= +golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1243,8 +1235,9 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.5.1 h1:OJxoQ/rynoF0dcCdI7cLPktw/hR2cueqYfjm43oqK38= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o= +golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/net 
v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1308,8 +1301,9 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211208012354-db4efeb81f4b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220325170049-de3da57026de h1:pZB1TWnKi+o4bENlbzAgLrEbY4RMYmUIRobMcSmfeYc= golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5 h1:bRb386wvrE+oBNdF1d/Xh9mQrfQ4ecYhW5qJ5GvTGT4= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1328,6 +1322,8 @@ golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1431,16 +1427,13 @@ golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210915083310-ed5796bab164/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1448,9 +1441,12 @@ golang.org/x/sys v0.0.0-20211213223007-03aa0b5f6827/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220325203850-36772127a21f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220330033206-e17cdc41300f h1:rlezHXNlxYWvBCzNses9Dlc7nGFaNMJeqLolcmQSSZY= -golang.org/x/sys v0.0.0-20220330033206-e17cdc41300f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad h1:ntjMns5wyP/fN65tdBD4g8J5w8n015+iIIs9rtjXkY0= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= @@ -1470,9 +1466,8 @@ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220224211638-0e9765cccd65 h1:M73Iuj3xbbb9Uk1DYhzydthsj6oOd6l9bpuFcNoUvTs= -golang.org/x/time v0.0.0-20220224211638-0e9765cccd65/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools 
v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1550,7 +1545,6 @@ golang.org/x/tools v0.0.0-20200831203904-5a2aa26beb65/go.mod h1:Cj7w3i3Rnn0Xh82u golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201002184944-ecd9fd270d5d/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201028025901-8cd080b735b3/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -1573,13 +1567,15 @@ golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.9-0.20211228192929-ee1ca4ffc4da/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= -golang.org/x/tools v0.1.9 h1:j9KsMiaP1c3B0OTQGth0/k+miLGTgLsAFUCrF2vLcF8= golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.1.10 h1:QjFRCZxdOhBJ/UNgnBZLbNV13DlbnK0quyivTnXJM20= +golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f h1:GGU+dLjvlC3qDwqYgL6UgRmHXhOOgns0bZu2Ty5mm6U= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= @@ -1618,6 +1614,10 @@ google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUb google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw= google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod 
h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1698,8 +1698,16 @@ google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ6 google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7 h1:HOL66YCI20JvN2hVk6o2YIp9i/3RvzVUz82PqNr7fXw= -google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac h1:qSNTkEN+L2mvWcLgJOR+8bdHX9rN/IdU3A1Ghpfb1Rg= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -1733,8 +1741,10 @@ google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9K google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0 h1:oCjezcn6g6A75TGoKYBPgKmVBLexhYLM6MebdrPApP8= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -1797,8 +1807,8 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod 
h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.2.2 h1:MNh1AVMyVX23VUHE2O27jm6lNj3vjO5DexS4A1xvnzk= honnef.co/go/tools v0.2.2/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= -mvdan.cc/gofumpt v0.2.1 h1:7jakRGkQcLAJdT+C8Bwc9d0BANkVPSkHZkzNv07pJAs= -mvdan.cc/gofumpt v0.2.1/go.mod h1:a/rvZPhsNaedOJBzqRD9omnwVwHZsBdJirXHa9Gh9Ig= +mvdan.cc/gofumpt v0.3.0 h1:kTojdZo9AcEYbQYhGuLf/zszYthRdhDNDUi2JKTxas4= +mvdan.cc/gofumpt v0.3.0/go.mod h1:0+VyGZWleeIj5oostkOex+nDBA0eyavuDnDusAJ8ylo= mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I= mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b h1:DxJ5nJdkhDlLok9K6qO+5290kphDJbHOQO1DFFFTeBo= mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= diff --git a/internal/blocksync/doc.go b/internal/blocksync/doc.go index 3111130e46..5f84b1261c 100644 --- a/internal/blocksync/doc.go +++ b/internal/blocksync/doc.go @@ -13,14 +13,9 @@ will no longer blocksync and thus no longer run the blocksync process. Note, the blocksync reactor Service gossips the entire block and relevant data such that each receiving peer may construct the entire view of the blocksync state. -There are currently two versions of the blocksync reactor Service: - -- v0: The initial implementation that is battle-tested, but whose test coverage - is lacking and is not formally verifiable. -- v2: The latest implementation that has much higher test coverage and is formally - verified. However, the current implementation of v2 is not as battle-tested and - is known to have various bugs that could make it unreliable in production - environments. +There is currently only one version of the blocksync reactor Service. +It is battle-tested, but its test coverage is lacking and it has not +been formally verified. The v0 blocksync reactor Service has one p2p channel, BlockchainChannel. This channel is responsible for handling messages that both request blocks and respond diff --git a/internal/blocksync/v0/pool.go b/internal/blocksync/pool.go similarity index 84% rename from internal/blocksync/v0/pool.go rename to internal/blocksync/pool.go index b3704f3333..f00a2fab5b 100644 --- a/internal/blocksync/v0/pool.go +++ b/internal/blocksync/pool.go @@ -1,14 +1,15 @@ -package v0 +package blocksync import ( + "context" "errors" "fmt" "math" + "sync" "sync/atomic" "time" - flow "github.com/tendermint/tendermint/internal/libs/flowrate" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/internal/libs/flowrate" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" "github.com/tendermint/tendermint/types" ) @@ -27,7 +28,7 @@ eg, L = latency = 0.1s */ const ( - requestIntervalMS = 2 + requestInterval = 2 * time.Millisecond maxTotalRequesters = 600 maxPeerErrBuffer = 1000 maxPendingRequests = maxTotalRequesters @@ -68,9 +69,11 @@ type BlockRequest struct { // BlockPool keeps track of the block sync peers, block requests and block responses. type BlockPool struct { service.BaseService + logger log.Logger + lastAdvance time.Time - mtx tmsync.RWMutex + mtx sync.RWMutex // block requests requesters map[int64]*bpRequester height int64 // the lowest key in requesters. @@ -91,55 +94,59 @@ type BlockPool struct { // NewBlockPool returns a new BlockPool with the height equal to start. Block // requests and errors will be sent to requestsCh and errorsCh accordingly.
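// For example, a minimal sketch of the wiring (mirroring what the reactor's
// OnStart does later in this diff; logger, store, and ctx are assumed to be
// in scope):
//
//	requestsCh := make(chan BlockRequest, maxTotalRequesters)
//	errorsCh := make(chan peerError, maxPeerErrBuffer)
//	pool := NewBlockPool(logger, store.Height()+1, requestsCh, errorsCh)
//	if err := pool.Start(ctx); err != nil {
//		return err
//	}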
-func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- peerError) *BlockPool { - bp := &BlockPool{ - peers: make(map[types.NodeID]*bpPeer), - - requesters: make(map[int64]*bpRequester), - height: start, - startHeight: start, - numPending: 0, +func NewBlockPool( + logger log.Logger, + start int64, + requestsCh chan<- BlockRequest, + errorsCh chan<- peerError, +) *BlockPool { + bp := &BlockPool{ + logger: logger, + peers: make(map[types.NodeID]*bpPeer), + requesters: make(map[int64]*bpRequester), + height: start, + startHeight: start, + numPending: 0, requestsCh: requestsCh, errorsCh: errorsCh, lastSyncRate: 0, } - bp.BaseService = *service.NewBaseService(nil, "BlockPool", bp) + bp.BaseService = *service.NewBaseService(logger, "BlockPool", bp) return bp } // OnStart implements service.Service by spawning the requesters routine and recording // the pool's start time. -func (pool *BlockPool) OnStart() error { +func (pool *BlockPool) OnStart(ctx context.Context) error { pool.lastAdvance = time.Now() pool.lastHundredBlockTimeStamp = pool.lastAdvance - go pool.makeRequestersRoutine() + go pool.makeRequestersRoutine(ctx) + return nil } +func (*BlockPool) OnStop() {} + // spawns requesters as needed -func (pool *BlockPool) makeRequestersRoutine() { - for { - if !pool.IsRunning() { - break +func (pool *BlockPool) makeRequestersRoutine(ctx context.Context) { + for pool.IsRunning() { + if ctx.Err() != nil { + return } _, numPending, lenRequesters := pool.GetStatus() - switch { - case numPending >= maxPendingRequests: - // sleep for a bit. - time.Sleep(requestIntervalMS * time.Millisecond) - // check for timed out peers - pool.removeTimedoutPeers() - case lenRequesters >= maxTotalRequesters: - // sleep for a bit. - time.Sleep(requestIntervalMS * time.Millisecond) - // check for timed out peers + if numPending >= maxPendingRequests || lenRequesters >= maxTotalRequesters { + // This is preferable to using a timer because the request interval + // is so small. Larger request intervals may necessitate using a + // timer/ticker. + time.Sleep(requestInterval) pool.removeTimedoutPeers() - default: - // request for more blocks. - pool.makeNextRequester() + continue } + + // request more blocks.
+ pool.makeNextRequester(ctx) } } @@ -150,12 +157,12 @@ func (pool *BlockPool) removeTimedoutPeers() { for _, peer := range pool.peers { // check if peer timed out if !peer.didTimeout && peer.numPending > 0 { - curRate := peer.recvMonitor.Status().CurRate + curRate := peer.recvMonitor.CurrentTransferRate() // curRate can be 0 on start if curRate != 0 && curRate < minRecvRate { err := errors.New("peer is not sending us data fast enough") pool.sendError(err, peer.id) - pool.Logger.Error("SendTimeout", "peer", peer.id, + pool.logger.Error("SendTimeout", "peer", peer.id, "reason", err, "curRate", fmt.Sprintf("%d KB/s", curRate/1024), "minRate", fmt.Sprintf("%d KB/s", minRecvRate/1024)) @@ -217,9 +224,7 @@ func (pool *BlockPool) PopRequest() { defer pool.mtx.Unlock() if r := pool.requesters[pool.height]; r != nil { - if err := r.Stop(); err != nil { - pool.Logger.Error("Error stopping requester", "err", err) - } + r.Stop() delete(pool.requesters, pool.height) pool.height++ pool.lastAdvance = time.Now() @@ -265,7 +270,7 @@ func (pool *BlockPool) AddBlock(peerID types.NodeID, block *types.Block, blockSi requester := pool.requesters[block.Height] if requester == nil { - pool.Logger.Error("peer sent us a block we didn't expect", + pool.logger.Error("peer sent us a block we didn't expect", "peer", peerID, "curHeight", pool.height, "blockHeight", block.Height) diff := pool.height - block.Height if diff < 0 { @@ -285,7 +290,7 @@ func (pool *BlockPool) AddBlock(peerID types.NodeID, block *types.Block, blockSi } } else { err := errors.New("requester is different or block already exists") - pool.Logger.Error(err.Error(), "peer", peerID, "requester", requester.getPeerID(), "blockHeight", block.Height) + pool.logger.Error(err.Error(), "peer", peerID, "requester", requester.getPeerID(), "blockHeight", block.Height) pool.sendError(err, peerID) } } @@ -315,8 +320,16 @@ func (pool *BlockPool) SetPeerRange(peerID types.NodeID, base int64, height int6 peer.base = base peer.height = height } else { - peer = newBPPeer(pool, peerID, base, height) - peer.setLogger(pool.Logger.With("peer", peerID)) + peer = &bpPeer{ + pool: pool, + id: peerID, + base: base, + height: height, + numPending: 0, + logger: pool.logger.With("peer", peerID), + startAt: time.Now(), + } + pool.peers[peerID] = peer } @@ -391,7 +404,7 @@ func (pool *BlockPool) pickIncrAvailablePeer(height int64) *bpPeer { return nil } -func (pool *BlockPool) makeNextRequester() { +func (pool *BlockPool) makeNextRequester(ctx context.Context) { pool.mtx.Lock() defer pool.mtx.Unlock() @@ -400,14 +413,14 @@ func (pool *BlockPool) makeNextRequester() { return } - request := newBPRequester(pool, nextHeight) + request := newBPRequester(pool.logger, pool, nextHeight) pool.requesters[nextHeight] = request atomic.AddInt32(&pool.numPending, 1) - err := request.Start() + err := request.Start(ctx) if err != nil { - request.Logger.Error("Error starting request", "err", err) + request.logger.Error("error starting request", "err", err) } } @@ -471,31 +484,16 @@ type bpPeer struct { base int64 pool *BlockPool id types.NodeID - recvMonitor *flow.Monitor + recvMonitor *flowrate.Monitor timeout *time.Timer + startAt time.Time logger log.Logger } -func newBPPeer(pool *BlockPool, peerID types.NodeID, base int64, height int64) *bpPeer { - peer := &bpPeer{ - pool: pool, - id: peerID, - base: base, - height: height, - numPending: 0, - logger: log.NewNopLogger(), - } - return peer -} - -func (peer *bpPeer) setLogger(l log.Logger) { - peer.logger = l -} - func (peer *bpPeer) 
resetMonitor() { - peer.recvMonitor = flow.New(time.Second, time.Second*40) + peer.recvMonitor = flowrate.New(peer.startAt, time.Second, time.Second*40) initialValue := float64(minRecvRate) * math.E peer.recvMonitor.SetREMA(initialValue) } @@ -540,18 +538,20 @@ func (peer *bpPeer) onTimeout() { type bpRequester struct { service.BaseService + logger log.Logger pool *BlockPool height int64 gotBlockCh chan struct{} redoCh chan types.NodeID // redo may send multiple times; add peerID to identify repeats - mtx tmsync.Mutex + mtx sync.Mutex peerID types.NodeID block *types.Block } -func newBPRequester(pool *BlockPool, height int64) *bpRequester { +func newBPRequester(logger log.Logger, pool *BlockPool, height int64) *bpRequester { bpr := &bpRequester{ + logger: logger, pool: pool, height: height, gotBlockCh: make(chan struct{}, 1), @@ -560,15 +560,17 @@ func newBPRequester(pool *BlockPool, height int64) *bpRequester { peerID: "", block: nil, } - bpr.BaseService = *service.NewBaseService(nil, "bpRequester", bpr) + bpr.BaseService = *service.NewBaseService(logger, "bpRequester", bpr) return bpr } -func (bpr *bpRequester) OnStart() error { - go bpr.requestRoutine() +func (bpr *bpRequester) OnStart(ctx context.Context) error { + go bpr.requestRoutine(ctx) return nil } +func (*bpRequester) OnStop() {} + // Returns true if the peer matches and block doesn't already exist. func (bpr *bpRequester) setBlock(block *types.Block, peerID types.NodeID) bool { bpr.mtx.Lock() @@ -623,7 +625,7 @@ func (bpr *bpRequester) redo(peerID types.NodeID) { // Responsible for making more requests as necessary // Returns only when a block is found (e.g. AddBlock() is called) -func (bpr *bpRequester) requestRoutine() { +func (bpr *bpRequester) requestRoutine(ctx context.Context) { OUTER_LOOP: for { // Pick a peer to send request to. @@ -633,9 +635,16 @@ OUTER_LOOP: if !bpr.IsRunning() || !bpr.pool.IsRunning() { return } + if ctx.Err() != nil { + return + } + peer = bpr.pool.pickIncrAvailablePeer(bpr.height) if peer == nil { - time.Sleep(requestIntervalMS * time.Millisecond) + // This is preferable to using a timer because the request + // interval is so small. Larger request intervals may + // necessitate using a timer/ticker.
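+ // For contrast, a sketch of the ticker-based shape this refers to
+ // (illustrative only; ctx and requestInterval as in this routine):
+ //
+ //	ticker := time.NewTicker(requestInterval)
+ //	defer ticker.Stop()
+ //	for {
+ //		select {
+ //		case <-ctx.Done():
+ //			return
+ //		case <-ticker.C:
+ //			// retry picking a peer
+ //		}
+ //	}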
+ time.Sleep(requestInterval) continue PICK_PEER_LOOP } break PICK_PEER_LOOP @@ -649,12 +658,7 @@ OUTER_LOOP: WAIT_LOOP: for { select { - case <-bpr.pool.Quit(): - if err := bpr.Stop(); err != nil { - bpr.Logger.Error("Error stopped requester", "err", err) - } - return - case <-bpr.Quit(): + case <-ctx.Done(): return case peerID := <-bpr.redoCh: if peerID == bpr.peerID { diff --git a/internal/blocksync/v0/pool_test.go b/internal/blocksync/pool_test.go similarity index 85% rename from internal/blocksync/v0/pool_test.go rename to internal/blocksync/pool_test.go index 67617d2b75..1cb8cca40c 100644 --- a/internal/blocksync/v0/pool_test.go +++ b/internal/blocksync/pool_test.go @@ -1,6 +1,7 @@ -package v0 +package blocksync import ( + "context" "fmt" mrand "math/rand" "testing" @@ -78,23 +79,20 @@ func makePeers(numPeers int, minHeight, maxHeight int64) testPeers { } func TestBlockPoolBasic(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + start := int64(42) peers := makePeers(10, start+1, 1000) errorsCh := make(chan peerError, 1000) requestsCh := make(chan BlockRequest, 1000) - pool := NewBlockPool(start, requestsCh, errorsCh) - pool.SetLogger(log.TestingLogger()) + pool := NewBlockPool(log.NewNopLogger(), start, requestsCh, errorsCh) - err := pool.Start() - if err != nil { + if err := pool.Start(ctx); err != nil { t.Error(err) } - t.Cleanup(func() { - if err := pool.Stop(); err != nil { - t.Error(err) - } - }) + t.Cleanup(func() { cancel(); pool.Wait() }) peers.start() defer peers.stop() @@ -127,7 +125,6 @@ func TestBlockPoolBasic(t *testing.T) { case err := <-errorsCh: t.Error(err) case request := <-requestsCh: - t.Logf("Pulled new BlockRequest %v", request) if request.Height == 300 { return // Done! } @@ -138,25 +135,21 @@ func TestBlockPoolBasic(t *testing.T) { } func TestBlockPoolTimeout(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + start := int64(42) peers := makePeers(10, start+1, 1000) errorsCh := make(chan peerError, 1000) requestsCh := make(chan BlockRequest, 1000) - pool := NewBlockPool(start, requestsCh, errorsCh) - pool.SetLogger(log.TestingLogger()) - err := pool.Start() + pool := NewBlockPool(logger, start, requestsCh, errorsCh) + err := pool.Start(ctx) if err != nil { t.Error(err) } - t.Cleanup(func() { - if err := pool.Stop(); err != nil { - t.Error(err) - } - }) - - for _, peer := range peers { - t.Logf("Peer %v", peer.id) - } + t.Cleanup(func() { cancel(); pool.Wait() }) // Introduce each peer. 
go func() { @@ -186,7 +179,6 @@ func TestBlockPoolTimeout(t *testing.T) { for { select { case err := <-errorsCh: - t.Log(err) // consider error to be always timeout here if _, ok := timedOut[err.peerID]; !ok { counter++ @@ -195,12 +187,17 @@ func TestBlockPoolTimeout(t *testing.T) { } } case request := <-requestsCh: - t.Logf("Pulled new BlockRequest %+v", request) + logger.Debug("received request", + "counter", counter, + "request", request) } } } func TestBlockPoolRemovePeer(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + peers := make(testPeers, 10) for i := 0; i < 10; i++ { peerID := types.NodeID(fmt.Sprintf("%d", i+1)) @@ -210,15 +207,10 @@ func TestBlockPoolRemovePeer(t *testing.T) { requestsCh := make(chan BlockRequest) errorsCh := make(chan peerError) - pool := NewBlockPool(1, requestsCh, errorsCh) - pool.SetLogger(log.TestingLogger()) - err := pool.Start() + pool := NewBlockPool(log.NewNopLogger(), 1, requestsCh, errorsCh) + err := pool.Start(ctx) require.NoError(t, err) - t.Cleanup(func() { - if err := pool.Stop(); err != nil { - t.Error(err) - } - }) + t.Cleanup(func() { cancel(); pool.Wait() }) // add peers for peerID, peer := range peers { diff --git a/internal/blocksync/v0/reactor.go b/internal/blocksync/reactor.go similarity index 56% rename from internal/blocksync/v0/reactor.go rename to internal/blocksync/reactor.go index 53c2724670..39b23ba9b0 100644 --- a/internal/blocksync/v0/reactor.go +++ b/internal/blocksync/reactor.go @@ -1,48 +1,27 @@ -package v0 +package blocksync import ( + "context" + "errors" "fmt" "runtime/debug" - "sync" + "sync/atomic" "time" "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/internal/blocksync" "github.com/tendermint/tendermint/internal/consensus" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/p2p" + "github.com/tendermint/tendermint/internal/p2p/conn" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" - tmsync "github.com/tendermint/tendermint/libs/sync" bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync" "github.com/tendermint/tendermint/types" ) -var ( - _ service.Service = (*Reactor)(nil) - - // ChannelShims contains a map of ChannelDescriptorShim objects, where each - // object wraps a reference to a legacy p2p ChannelDescriptor and the corresponding - // p2p proto.Message the new p2p Channel is responsible for handling. - // - // - // TODO: Remove once p2p refactor is complete. 
- // ref: https://github.com/tendermint/tendermint/issues/5670 - ChannelShims = map[p2p.ChannelID]*p2p.ChannelDescriptorShim{ - BlockSyncChannel: { - MsgType: new(bcproto.Message), - Descriptor: &p2p.ChannelDescriptor{ - ID: byte(BlockSyncChannel), - Priority: 5, - SendQueueCapacity: 1000, - RecvBufferCapacity: 1024, - RecvMessageCapacity: blocksync.MaxMsgSize, - MaxSendBytes: 100, - }, - }, - } -) +var _ service.Service = (*Reactor)(nil) const ( // BlockSyncChannel is a channel for blocks and status updates @@ -60,10 +39,22 @@ const ( syncTimeout = 60 * time.Second ) +func GetChannelDescriptor() *p2p.ChannelDescriptor { + return &p2p.ChannelDescriptor{ + ID: BlockSyncChannel, + MessageType: new(bcproto.Message), + Priority: 5, + SendQueueCapacity: 1000, + RecvBufferCapacity: 1024, + RecvMessageCapacity: MaxMsgSize, + Name: "blockSync", + } +} + type consensusReactor interface { // For when we switch from block sync reactor to the consensus // machine. - SwitchToConsensus(state sm.State, skipWAL bool) + SwitchToConsensus(ctx context.Context, state sm.State, skipWAL bool) } type peerError struct { @@ -78,37 +69,27 @@ func (e peerError) Error() string { // Reactor handles long-term catchup syncing. type Reactor struct { service.BaseService + logger log.Logger // immutable initialState sm.State + // store + stateStore sm.Store blockExec *sm.BlockExecutor store *store.BlockStore pool *BlockPool consReactor consensusReactor - blockSync *tmsync.AtomicBool - - blockSyncCh *p2p.Channel - // blockSyncOutBridgeCh defines a channel that acts as a bridge between sending Envelope - // messages that the reactor will consume in processBlockSyncCh and receiving messages - // from the peer updates channel and other goroutines. We do this instead of directly - // sending on blockSyncCh.Out to avoid race conditions in the case where other goroutines - // send Envelopes directly to the to blockSyncCh.Out channel, since processBlockSyncCh - // may close the blockSyncCh.Out channel at the same time that other goroutines send to - // blockSyncCh.Out. - blockSyncOutBridgeCh chan p2p.Envelope - peerUpdates *p2p.PeerUpdates - closeCh chan struct{} + blockSync *atomicBool + + chCreator p2p.ChannelCreator + peerEvents p2p.PeerEventSubscriber requestsCh <-chan BlockRequest errorsCh <-chan peerError - // poolWG is used to synchronize the graceful shutdown of the poolRoutine and - // requestRoutine spawned goroutines when stopping the reactor and before - // stopping the p2p Channel(s). - poolWG sync.WaitGroup - - metrics *consensus.Metrics + metrics *consensus.Metrics + eventBus *eventbus.EventBus syncStartTime time.Time @@ -118,48 +99,33 @@ type Reactor struct { // NewReactor returns new reactor instance. 
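// For example, a hypothetical construction site (argument names are
// illustrative and follow the signature below):
//
//	r := NewReactor(logger, stateStore, blockExec, blockStore, proTxHash,
//		consReactor, chCreator, peerEvents, true, metrics, eventBus)
//	if err := r.Start(ctx); err != nil {
//		return err
//	}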
func NewReactor( logger log.Logger, - state sm.State, + stateStore sm.Store, blockExec *sm.BlockExecutor, store *store.BlockStore, nodeProTxHash crypto.ProTxHash, consReactor consensusReactor, - blockSyncCh *p2p.Channel, - peerUpdates *p2p.PeerUpdates, + channelCreator p2p.ChannelCreator, + peerEvents p2p.PeerEventSubscriber, blockSync bool, metrics *consensus.Metrics, -) (*Reactor, error) { - if state.LastBlockHeight != store.Height() { - return nil, fmt.Errorf("state (%v) and store (%v) height mismatch", state.LastBlockHeight, store.Height()) - } - - startHeight := store.Height() + 1 - if startHeight == 1 { - startHeight = state.InitialHeight - } - - requestsCh := make(chan BlockRequest, maxTotalRequesters) - errorsCh := make(chan peerError, maxPeerErrBuffer) // NOTE: The capacity should be larger than the peer count. - + eventBus *eventbus.EventBus, +) *Reactor { r := &Reactor{ - initialState: state, - blockExec: blockExec, - store: store, - pool: NewBlockPool(startHeight, requestsCh, errorsCh), - consReactor: consReactor, - blockSync: tmsync.NewBool(blockSync), - requestsCh: requestsCh, - errorsCh: errorsCh, - blockSyncCh: blockSyncCh, - blockSyncOutBridgeCh: make(chan p2p.Envelope), - peerUpdates: peerUpdates, - closeCh: make(chan struct{}), - metrics: metrics, - syncStartTime: time.Time{}, - nodeProTxHash: nodeProTxHash, + logger: logger, + stateStore: stateStore, + blockExec: blockExec, + store: store, + consReactor: consReactor, + blockSync: newAtomicBool(blockSync), + chCreator: channelCreator, + peerEvents: peerEvents, + metrics: metrics, + eventBus: eventBus, + nodeProTxHash: nodeProTxHash, } r.BaseService = *service.NewBaseService(logger, "BlockSync", r) - return r, nil + return r } // OnStart starts separate go routines for each p2p Channel and listens for @@ -169,20 +135,45 @@ func NewReactor( // // If blockSync is enabled, we also start the pool and the pool processing // goroutine. If the pool fails to start, an error is returned. -func (r *Reactor) OnStart() error { +func (r *Reactor) OnStart(ctx context.Context) error { + blockSyncCh, err := r.chCreator(ctx, GetChannelDescriptor()) + if err != nil { + return err + } + r.chCreator = func(context.Context, *conn.ChannelDescriptor) (*p2p.Channel, error) { return blockSyncCh, nil } + + state, err := r.stateStore.Load() + if err != nil { + return err + } + r.initialState = state + + if state.LastBlockHeight != r.store.Height() { + return fmt.Errorf("state (%v) and store (%v) height mismatch", state.LastBlockHeight, r.store.Height()) + } + + startHeight := r.store.Height() + 1 + if startHeight == 1 { + startHeight = state.InitialHeight + } + + requestsCh := make(chan BlockRequest, maxTotalRequesters) + errorsCh := make(chan peerError, maxPeerErrBuffer) // NOTE: The capacity should be larger than the peer count. + r.pool = NewBlockPool(r.logger, startHeight, requestsCh, errorsCh) + r.requestsCh = requestsCh + r.errorsCh = errorsCh + if r.blockSync.IsSet() { - if err := r.pool.Start(); err != nil { + if err := r.pool.Start(ctx); err != nil { return err } - r.poolWG.Add(1) - go r.requestRoutine() + go r.requestRoutine(ctx, blockSyncCh) - r.poolWG.Add(1) - go r.poolRoutine(false) + go r.poolRoutine(ctx, false, blockSyncCh) } - go r.processBlockSyncCh() - go r.processPeerUpdates() + go r.processBlockSyncCh(ctx, blockSyncCh) + go r.processPeerUpdates(ctx, r.peerEvents(ctx), blockSyncCh) return nil } @@ -191,100 +182,43 @@ func (r *Reactor) OnStart() error { // blocking until they all exit. 
func (r *Reactor) OnStop() { if r.blockSync.IsSet() { - if err := r.pool.Stop(); err != nil { - r.Logger.Error("failed to stop pool", "err", err) - } + r.pool.Stop() } - - // wait for the poolRoutine and requestRoutine goroutines to gracefully exit - r.poolWG.Wait() - - // Close closeCh to signal to all spawned goroutines to gracefully exit. All - // p2p Channels should execute Close(). - close(r.closeCh) - - // Wait for all p2p Channels to be closed before returning. This ensures we - // can easily reason about synchronization of all p2p Channels and ensure no - // panics will occur. - <-r.blockSyncCh.Done() - <-r.peerUpdates.Done() } // respondToPeer loads a block and sends it to the requesting peer, if we have it. // Otherwise, we'll respond saying we do not have it. -func (r *Reactor) respondToPeer(msg *bcproto.BlockRequest, peerID types.NodeID) { +func (r *Reactor) respondToPeer(ctx context.Context, msg *bcproto.BlockRequest, peerID types.NodeID, blockSyncCh *p2p.Channel) error { block := r.store.LoadBlock(msg.Height) if block != nil { blockProto, err := block.ToProto() if err != nil { - r.Logger.Error("failed to convert msg to protobuf", "err", err) - return + r.logger.Error("failed to convert msg to protobuf", "err", err) + return err } - r.blockSyncCh.Out <- p2p.Envelope{ + return blockSyncCh.Send(ctx, p2p.Envelope{ To: peerID, Message: &bcproto.BlockResponse{Block: blockProto}, - } - - return + }) } - r.Logger.Info("peer requesting a block we do not have", "peer", peerID, "height", msg.Height) - r.blockSyncCh.Out <- p2p.Envelope{ + r.logger.Info("peer requesting a block we do not have", "peer", peerID, "height", msg.Height) + + return blockSyncCh.Send(ctx, p2p.Envelope{ To: peerID, Message: &bcproto.NoBlockResponse{Height: msg.Height}, - } -} - -// handleBlockSyncMessage handles envelopes sent from peers on the -// BlockSyncChannel. It returns an error only if the Envelope.Message is unknown -// for this channel. This should never be called outside of handleMessage. -func (r *Reactor) handleBlockSyncMessage(envelope p2p.Envelope) error { - logger := r.Logger.With("peer", envelope.From) - - switch msg := envelope.Message.(type) { - case *bcproto.BlockRequest: - r.respondToPeer(msg, envelope.From) - - case *bcproto.BlockResponse: - block, err := types.BlockFromProto(msg.Block) - if err != nil { - logger.Error("failed to convert block from proto", "err", err) - return err - } - - r.pool.AddBlock(envelope.From, block, block.Size()) - - case *bcproto.StatusRequest: - r.blockSyncCh.Out <- p2p.Envelope{ - To: envelope.From, - Message: &bcproto.StatusResponse{ - Height: r.store.Height(), - Base: r.store.Base(), - }, - } - - case *bcproto.StatusResponse: - r.pool.SetPeerRange(envelope.From, msg.Base, msg.Height) - - case *bcproto.NoBlockResponse: - logger.Debug("peer does not have the requested block", "height", msg.Height) - - default: - return fmt.Errorf("received unknown message: %T", msg) - } - - return nil + }) } // handleMessage handles an Envelope sent from a peer on a specific p2p Channel. // It will handle errors and any possible panics gracefully. A caller can handle // any error returned by sending a PeerError on the respective channel. 
-func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) { +func (r *Reactor) handleMessage(ctx context.Context, envelope *p2p.Envelope, blockSyncCh *p2p.Channel) (err error) { defer func() { if e := recover(); e != nil { err = fmt.Errorf("panic in processing message: %v", e) - r.Logger.Error( + r.logger.Error( "recovering from processing message panic", "err", err, "stack", string(debug.Stack()), @@ -292,14 +226,46 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err } }() - // r.Logger.Debug("received message", "msg", envelope.Message, "peer", envelope.From) + //r.logger.Debug("received message", "message", envelope.Message, "peer", envelope.From) - switch chID { + switch envelope.ChannelID { case BlockSyncChannel: - err = r.handleBlockSyncMessage(envelope) + switch msg := envelope.Message.(type) { + case *bcproto.BlockRequest: + return r.respondToPeer(ctx, msg, envelope.From, blockSyncCh) + case *bcproto.BlockResponse: + block, err := types.BlockFromProto(msg.Block) + if err != nil { + r.logger.Error("failed to convert block from proto", + "peer", envelope.From, + "err", err) + return err + } + + r.pool.AddBlock(envelope.From, block, block.Size()) + + case *bcproto.StatusRequest: + return blockSyncCh.Send(ctx, p2p.Envelope{ + To: envelope.From, + Message: &bcproto.StatusResponse{ + Height: r.store.Height(), + Base: r.store.Base(), + }, + }) + case *bcproto.StatusResponse: + r.pool.SetPeerRange(envelope.From, msg.Base, msg.Height) + + case *bcproto.NoBlockResponse: + r.logger.Debug("peer does not have the requested block", + "peer", envelope.From, + "height", msg.Height) + + default: + return fmt.Errorf("received unknown message: %T", msg) + } default: - err = fmt.Errorf("unknown channel ID (%d) for envelope (%v)", chID, envelope) + err = fmt.Errorf("unknown channel ID (%d) for envelope (%v)", envelope.ChannelID, envelope) } return err @@ -310,34 +276,29 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err // message execution will result in a PeerError being sent on the BlockSyncChannel. // When the reactor is stopped, we will catch the signal and close the p2p Channel // gracefully. -func (r *Reactor) processBlockSyncCh() { - defer r.blockSyncCh.Close() - - for { - select { - case envelope := <-r.blockSyncCh.In: - if err := r.handleMessage(r.blockSyncCh.ID, envelope); err != nil { - r.Logger.Error("failed to process message", "ch_id", r.blockSyncCh.ID, "envelope", envelope, "err", err) - r.blockSyncCh.Error <- p2p.PeerError{ - NodeID: envelope.From, - Err: err, - } +func (r *Reactor) processBlockSyncCh(ctx context.Context, blockSyncCh *p2p.Channel) { + iter := blockSyncCh.Receive(ctx) + for iter.Next(ctx) { + envelope := iter.Envelope() + if err := r.handleMessage(ctx, envelope, blockSyncCh); err != nil { + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return } - case envelope := <-r.blockSyncOutBridgeCh: - r.blockSyncCh.Out <- envelope - - case <-r.closeCh: - r.Logger.Debug("stopped listening on block sync channel; closing...") - return - + r.logger.Error("failed to process message", "ch_id", envelope.ChannelID, "envelope", envelope, "err", err) + if serr := blockSyncCh.SendError(ctx, p2p.PeerError{ + NodeID: envelope.From, + Err: err, + }); serr != nil { + return + } } } } // processPeerUpdate processes a PeerUpdate. 
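// A PeerUpdate pairs a peer's node ID with its new status; within this
// reactor only PeerStatusUp and PeerStatusDown are acted on, e.g.
//
//	p2p.PeerUpdate{NodeID: peerID, Status: p2p.PeerStatusUp}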
-func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { - r.Logger.Debug("received peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status) +func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpdate, blockSyncCh *p2p.Channel) { + r.logger.Debug("received peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status) // XXX: Pool#RedoRequest can sometimes give us an empty peer. if len(peerUpdate.NodeID) == 0 { @@ -347,12 +308,20 @@ switch peerUpdate.Status { case p2p.PeerStatusUp: // send a status update to the newly added peer - r.blockSyncOutBridgeCh <- p2p.Envelope{ + if err := blockSyncCh.Send(ctx, p2p.Envelope{ To: peerUpdate.NodeID, Message: &bcproto.StatusResponse{ Base: r.store.Base(), Height: r.store.Height(), }, + }); err != nil { + r.pool.RemovePeer(peerUpdate.NodeID) + if err := blockSyncCh.SendError(ctx, p2p.PeerError{ + NodeID: peerUpdate.NodeID, + Err: err, + }); err != nil { + return + } } case p2p.PeerStatusDown: @@ -363,80 +332,82 @@ // processPeerUpdates initiates a blocking process where we listen for and handle // PeerUpdate messages. When the reactor is stopped, we will catch the signal and // close the p2p PeerUpdatesCh gracefully. -func (r *Reactor) processPeerUpdates() { - defer r.peerUpdates.Close() - +func (r *Reactor) processPeerUpdates(ctx context.Context, peerUpdates *p2p.PeerUpdates, blockSyncCh *p2p.Channel) { for { select { - case peerUpdate := <-r.peerUpdates.Updates(): - r.processPeerUpdate(peerUpdate) - - case <-r.closeCh: - r.Logger.Debug("stopped listening on peer updates channel; closing...") + case <-ctx.Done(): return + case peerUpdate := <-peerUpdates.Updates(): + r.processPeerUpdate(ctx, peerUpdate, blockSyncCh) } } } // SwitchToBlockSync is called by the state sync reactor when switching to block // sync.
-func (r *Reactor) SwitchToBlockSync(state sm.State) error { +func (r *Reactor) SwitchToBlockSync(ctx context.Context, state sm.State) error { r.blockSync.Set() r.initialState = state r.pool.height = state.LastBlockHeight + 1 - if err := r.pool.Start(); err != nil { + if err := r.pool.Start(ctx); err != nil { return err } r.syncStartTime = time.Now() - r.poolWG.Add(1) - go r.requestRoutine() + bsCh, err := r.chCreator(ctx, GetChannelDescriptor()) + if err != nil { + return err + } + + go r.requestRoutine(ctx, bsCh) + go r.poolRoutine(ctx, true, bsCh) - r.poolWG.Add(1) - go r.poolRoutine(true) + if err := r.PublishStatus(types.EventDataBlockSyncStatus{ + Complete: false, + Height: state.LastBlockHeight, + }); err != nil { + return err + } return nil } -func (r *Reactor) requestRoutine() { +func (r *Reactor) requestRoutine(ctx context.Context, blockSyncCh *p2p.Channel) { statusUpdateTicker := time.NewTicker(statusUpdateIntervalSeconds * time.Second) defer statusUpdateTicker.Stop() - defer r.poolWG.Done() - for { select { - case <-r.closeCh: - return - - case <-r.pool.Quit(): + case <-ctx.Done(): return - case request := <-r.requestsCh: - r.blockSyncOutBridgeCh <- p2p.Envelope{ + if err := blockSyncCh.Send(ctx, p2p.Envelope{ To: request.PeerID, Message: &bcproto.BlockRequest{Height: request.Height}, + }); err != nil { + if err := blockSyncCh.SendError(ctx, p2p.PeerError{ + NodeID: request.PeerID, + Err: err, + }); err != nil { + return + } } - case pErr := <-r.errorsCh: - r.blockSyncCh.Error <- p2p.PeerError{ + if err := blockSyncCh.SendError(ctx, p2p.PeerError{ NodeID: pErr.peerID, Err: pErr.err, + }); err != nil { + return } - case <-statusUpdateTicker.C: - r.poolWG.Add(1) - - go func() { - defer r.poolWG.Done() - - r.blockSyncOutBridgeCh <- p2p.Envelope{ - Broadcast: true, - Message: &bcproto.StatusRequest{}, - } - }() + if err := blockSyncCh.Send(ctx, p2p.Envelope{ + Broadcast: true, + Message: &bcproto.StatusRequest{}, + }); err != nil { + return + } } } } @@ -445,7 +416,7 @@ func (r *Reactor) requestRoutine() { // do. // // NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down! 
-func (r *Reactor) poolRoutine(stateSynced bool) { +func (r *Reactor) poolRoutine(ctx context.Context, stateSynced bool, blockSyncCh *p2p.Channel) { var ( trySyncTicker = time.NewTicker(trySyncIntervalMS * time.Millisecond) switchToConsensusTicker = time.NewTicker(switchToConsensusIntervalSeconds * time.Second) @@ -464,18 +435,17 @@ defer trySyncTicker.Stop() defer switchToConsensusTicker.Stop() - defer r.poolWG.Done() - -FOR_LOOP: for { select { + case <-ctx.Done(): + return case <-switchToConsensusTicker.C: var ( height, numPending, lenRequesters = r.pool.GetStatus() lastAdvance = r.pool.LastAdvance() ) - r.Logger.Debug( + r.logger.Debug( "consensus ticker", "num_pending", numPending, "total", lenRequesters, @@ -484,13 +454,13 @@ switch { case r.pool.IsCaughtUp(): - r.Logger.Info("switching to consensus reactor", "height", height) + r.logger.Info("switching to consensus reactor", "height", height) case time.Since(lastAdvance) > syncTimeout: - r.Logger.Error("no progress since last advance", "last_advance", lastAdvance) + r.logger.Error("no progress since last advance", "last_advance", lastAdvance) default: - r.Logger.Info( + r.logger.Info( "not caught up yet", "height", height, "max_peer_height", r.pool.MaxPeerHeight(), @@ -499,24 +469,21 @@ continue } - if err := r.pool.Stop(); err != nil { - r.Logger.Error("failed to stop pool", "err", err) - } + r.pool.Stop() r.blockSync.UnSet() if r.consReactor != nil { - r.consReactor.SwitchToConsensus(state, blocksSynced > 0 || stateSynced) + r.consReactor.SwitchToConsensus(ctx, state, blocksSynced > 0 || stateSynced) } - break FOR_LOOP + return case <-trySyncTicker.C: select { case didProcessCh <- struct{}{}: default: } - case <-didProcessCh: // NOTE: It is a subtle mistake to process more than a single block at a // time (e.g. 10) here, because we only send one BlockRequest per loop @@ -531,14 +498,21 @@ first, second := r.pool.PeekTwoBlocks() if first == nil || second == nil { // we need both to sync the first block - continue FOR_LOOP + continue } else { // try again quickly next loop didProcessCh <- struct{}{} } + firstParts, err := first.MakePartSet(types.BlockPartSizeBytes) + if err != nil { + r.logger.Error("failed to make block part set", + "height", first.Height, + "err", err.Error()) + return + } + var ( - firstParts = first.MakePartSet(types.BlockPartSizeBytes) firstPartSetHeader = firstParts.Header() firstID = types.BlockID{Hash: first.Hash(), PartSetHeader: firstPartSetHeader} stateID = types.StateID{ @@ -552,10 +526,10 @@ // NOTE: We can probably make this more efficient, but note that calling // first.Hash() doesn't verify the tx contents, so MakePartSet() is // currently necessary. - err := state.Validators.VerifyCommit(chainID, firstID, stateID, first.Height, second.LastCommit) + err = state.Validators.VerifyCommit(chainID, firstID, stateID, first.Height, second.LastCommit) if err != nil { err = fmt.Errorf("invalid last commit: %w", err) - r.Logger.Error( + r.logger.Error( err.Error(), "last_commit", second.LastCommit, "block_id", firstID, @@ -565,20 +539,22 @@ // NOTE: We've already removed the peer's request, but we still need // to clean up the rest.
peerID := r.pool.RedoRequest(first.Height) - r.blockSyncCh.Error <- p2p.PeerError{ + if serr := blockSyncCh.SendError(ctx, p2p.PeerError{ NodeID: peerID, Err: err, + }); serr != nil { + return } peerID2 := r.pool.RedoRequest(second.Height) if peerID2 != peerID { - r.blockSyncCh.Error <- p2p.PeerError{ + if serr := blockSyncCh.SendError(ctx, p2p.PeerError{ NodeID: peerID2, Err: err, + }); serr != nil { + return } } - - continue FOR_LOOP } else { r.pool.PopRequest() @@ -589,7 +565,7 @@ FOR_LOOP: // TODO: Same thing for app - but we would need a way to get the hash // without persisting the state. - state, err = r.blockExec.ApplyBlock(state, r.nodeProTxHash, firstID, first) + state, err = r.blockExec.ApplyBlock(ctx, state, r.nodeProTxHash, firstID, first) if err != nil { // TODO: This is bad, are we zombie? panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err)) @@ -601,7 +577,7 @@ FOR_LOOP: if blocksSynced%100 == 0 { lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds()) - r.Logger.Info( + r.logger.Info( "block sync rate", "height", r.pool.height, "max_peer_height", r.pool.MaxPeerHeight(), @@ -611,13 +587,6 @@ FOR_LOOP: lastHundred = time.Now() } } - - continue FOR_LOOP - - case <-r.closeCh: - break FOR_LOOP - case <-r.pool.Quit(): - break FOR_LOOP } } } @@ -649,3 +618,36 @@ func (r *Reactor) GetRemainingSyncTime() time.Duration { return time.Duration(int64(remain * float64(time.Second))) } + +func (r *Reactor) PublishStatus(event types.EventDataBlockSyncStatus) error { + if r.eventBus == nil { + return errors.New("event bus is not configured") + } + return r.eventBus.PublishEventBlockSyncStatus(event) +} + +// atomicBool is an atomic Boolean, safe for concurrent use by multiple +// goroutines. +type atomicBool int32 + +// newAtomicBool creates an atomicBool with given initial value. +func newAtomicBool(ok bool) *atomicBool { + ab := new(atomicBool) + if ok { + ab.Set() + } + return ab +} + +// Set sets the Boolean to true. +func (ab *atomicBool) Set() { + atomic.StoreInt32((*int32)(ab), 1) +} + +// UnSet sets the Boolean to false. +func (ab *atomicBool) UnSet() { + atomic.StoreInt32((*int32)(ab), 0) +} + +// IsSet returns whether the Boolean is true. 
+func (ab *atomicBool) IsSet() bool { return atomic.LoadInt32((*int32)(ab))&1 == 1 } diff --git a/internal/blocksync/v0/reactor_test.go b/internal/blocksync/reactor_test.go similarity index 66% rename from internal/blocksync/v0/reactor_test.go rename to internal/blocksync/reactor_test.go index b65f93767b..ade7abb5f0 100644 --- a/internal/blocksync/v0/reactor_test.go +++ b/internal/blocksync/reactor_test.go @@ -1,10 +1,13 @@ -package v0 +package blocksync import ( + "context" "os" "testing" "time" + "github.com/fortytw2/leaktest" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" @@ -12,7 +15,8 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/consensus" - "github.com/tendermint/tendermint/internal/mempool/mock" + "github.com/tendermint/tendermint/internal/eventbus" + mpmocks "github.com/tendermint/tendermint/internal/mempool/mocks" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/p2ptest" "github.com/tendermint/tendermint/internal/proxy" @@ -31,7 +35,7 @@ type reactorTestSuite struct { nodes []types.NodeID reactors map[types.NodeID]*Reactor - app map[types.NodeID]proxy.AppConns + app map[types.NodeID]abciclient.Client blockSyncChannels map[types.NodeID]*p2p.Channel peerChans map[types.NodeID]chan p2p.PeerUpdate @@ -41,55 +45,60 @@ type reactorTestSuite struct { } func setup( + ctx context.Context, t *testing.T, genDoc *types.GenesisDoc, privVal types.PrivValidator, maxBlockHeights []int64, - chBuf uint, ) *reactorTestSuite { t.Helper() + var cancel context.CancelFunc + ctx, cancel = context.WithCancel(ctx) + numNodes := len(maxBlockHeights) - require.True(t, numNodes >= 1, - "must specify at least one block height (nodes)") + require.True(t, numNodes >= 1, "must specify at least one block height (nodes)") rts := &reactorTestSuite{ - logger: log.TestingLogger().With("module", "block_sync", "testCase", t.Name()), - network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}), + logger: log.NewNopLogger().With("module", "block_sync", "testCase", t.Name()), + network: p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: numNodes}), nodes: make([]types.NodeID, 0, numNodes), reactors: make(map[types.NodeID]*Reactor, numNodes), - app: make(map[types.NodeID]proxy.AppConns, numNodes), + app: make(map[types.NodeID]abciclient.Client, numNodes), blockSyncChannels: make(map[types.NodeID]*p2p.Channel, numNodes), peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numNodes), peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numNodes), blockSync: true, } - chDesc := p2p.ChannelDescriptor{ID: byte(BlockSyncChannel)} - rts.blockSyncChannels = rts.network.MakeChannelsNoCleanup(t, chDesc, new(bcproto.Message), int(chBuf)) + chDesc := &p2p.ChannelDescriptor{ID: BlockSyncChannel, MessageType: new(bcproto.Message)} + rts.blockSyncChannels = rts.network.MakeChannelsNoCleanup(ctx, t, chDesc) i := 0 for nodeID := range rts.network.Nodes { - rts.addNode(t, nodeID, genDoc, privVal, maxBlockHeights[i]) + rts.addNode(ctx, t, nodeID, genDoc, privVal, maxBlockHeights[i]) i++ } t.Cleanup(func() { + cancel() for _, nodeID := range rts.nodes { - rts.peerUpdates[nodeID].Close() - if rts.reactors[nodeID].IsRunning() { - require.NoError(t, rts.reactors[nodeID].Stop()) - require.NoError(t, rts.app[nodeID].Stop()) + rts.reactors[nodeID].Wait() + rts.app[nodeID].Wait() + require.False(t, 
rts.reactors[nodeID].IsRunning()) } } }) + t.Cleanup(leaktest.Check(t)) return rts } -func (rts *reactorTestSuite) addNode(t *testing.T, +func (rts *reactorTestSuite) addNode( + ctx context.Context, + t *testing.T, nodeID types.NodeID, genDoc *types.GenesisDoc, privVal types.PrivValidator, @@ -97,9 +106,11 @@ func (rts *reactorTestSuite) addNode(t *testing.T, ) { t.Helper() + logger := log.NewNopLogger() + rts.nodes = append(rts.nodes, nodeID) - rts.app[nodeID] = proxy.NewAppConns(abciclient.NewLocalCreator(&abci.BaseApplication{}), proxy.NopMetrics()) - require.NoError(t, rts.app[nodeID].Start()) + rts.app[nodeID] = proxy.New(abciclient.NewLocalClient(logger, &abci.BaseApplication{}), logger, proxy.NopMetrics()) + require.NoError(t, rts.app[nodeID].Start(ctx)) blockDB := dbm.NewMemDB() stateDB := dbm.NewMemDB() @@ -110,16 +121,30 @@ func (rts *reactorTestSuite) addNode(t *testing.T, state, err := sm.MakeGenesisState(genDoc) require.NoError(t, err) require.NoError(t, stateStore.Save(state)) + mp := &mpmocks.Mempool{} + mp.On("Lock").Return() + mp.On("Unlock").Return() + mp.On("FlushAppConn", mock.Anything).Return(nil) + mp.On("Update", + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything).Return(nil) + + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) blockExec := sm.NewBlockExecutor( stateStore, - log.TestingLogger(), - rts.app[nodeID].Consensus(), - rts.app[nodeID].Query(), - mock.Mempool{}, + logger, + rts.app[nodeID], + mp, sm.EmptyEvidencePool{}, blockStore, - nil, + eventBus, + sm.NopMetrics(), ) for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ { @@ -130,6 +155,7 @@ func (rts *reactorTestSuite) addNode(t *testing.T, lastBlock := blockStore.LoadBlock(blockHeight - 1) vote, err := factory.MakeVote( + ctx, privVal, state.Validators, lastBlock.Header.ChainID, 0, @@ -138,7 +164,6 @@ func (rts *reactorTestSuite) addNode(t *testing.T, state.LastStateID, ) require.NoError(t, err) - lastCommit = types.NewCommit( vote.Height, vote.Round, @@ -152,10 +177,11 @@ func (rts *reactorTestSuite) addNode(t *testing.T, thisBlock, err := sf.MakeBlock(state, blockHeight, lastCommit, nil, 0) require.NoError(t, err) - thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes) + thisParts, err := thisBlock.MakePartSet(types.BlockPartSizeBytes) + require.NoError(t, err) blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()} - state, err = blockExec.ApplyBlock(state, proTxHash, blockID, thisBlock) + state, err = blockExec.ApplyBlock(ctx, state, proTxHash, blockID, thisBlock) require.NoError(t, err) blockStore.SaveBlock(thisBlock, thisParts, lastCommit) @@ -163,27 +189,32 @@ func (rts *reactorTestSuite) addNode(t *testing.T, rts.peerChans[nodeID] = make(chan p2p.PeerUpdate) rts.peerUpdates[nodeID] = p2p.NewPeerUpdates(rts.peerChans[nodeID], 1) - rts.network.Nodes[nodeID].PeerManager.Register(rts.peerUpdates[nodeID]) - rts.reactors[nodeID], err = NewReactor( + rts.network.Nodes[nodeID].PeerManager.Register(ctx, rts.peerUpdates[nodeID]) + + chCreator := func(ctx context.Context, chdesc *p2p.ChannelDescriptor) (*p2p.Channel, error) { + return rts.blockSyncChannels[nodeID], nil + } + rts.reactors[nodeID] = NewReactor( rts.logger.With("nodeID", nodeID), - state.Copy(), + stateStore, blockExec, blockStore, proTxHash, nil, - rts.blockSyncChannels[nodeID], - rts.peerUpdates[nodeID], + chCreator, + func(ctx context.Context) *p2p.PeerUpdates { return rts.peerUpdates[nodeID] }, 
rts.blockSync, - consensus.NopMetrics()) - require.NoError(t, err) + consensus.NopMetrics(), + nil, // eventbus, can be nil + ) - require.NoError(t, rts.reactors[nodeID].Start()) + require.NoError(t, rts.reactors[nodeID].Start(ctx)) require.True(t, rts.reactors[nodeID].IsRunning()) } -func (rts *reactorTestSuite) start(t *testing.T) { +func (rts *reactorTestSuite) start(ctx context.Context, t *testing.T) { t.Helper() - rts.network.Start(t) + rts.network.Start(ctx, t) require.Len(t, rts.network.RandomNode().PeerManager.Peers(), len(rts.nodes)-1, @@ -191,18 +222,21 @@ func (rts *reactorTestSuite) start(t *testing.T) { } func TestReactor_AbruptDisconnect(t *testing.T) { - cfg, err := config.ResetTestRoot("block_sync_reactor_test") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cfg, err := config.ResetTestRoot(t.TempDir(), "block_sync_reactor_test") require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) - genDoc, privVals := factory.RandGenesisDoc(cfg, 1, 1) + genDoc, privVals := factory.RandGenesisDoc(cfg, 1, 1, factory.ConsensusParams()) maxBlockHeight := int64(64) - rts := setup(t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0) + rts := setup(ctx, t, genDoc, privVals[0], []int64{maxBlockHeight, 0}) require.Equal(t, maxBlockHeight, rts.reactors[rts.nodes[0]].store.Height()) - rts.start(t) + rts.start(ctx, t) secondaryPool := rts.reactors[rts.nodes[1]].pool @@ -223,20 +257,23 @@ func TestReactor_AbruptDisconnect(t *testing.T) { Status: p2p.PeerStatusDown, NodeID: rts.nodes[0], } - rts.network.Nodes[rts.nodes[1]].PeerManager.Disconnected(rts.nodes[0]) + rts.network.Nodes[rts.nodes[1]].PeerManager.Disconnected(ctx, rts.nodes[0]) } func TestReactor_SyncTime(t *testing.T) { - cfg, err := config.ResetTestRoot("block_sync_reactor_test") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cfg, err := config.ResetTestRoot(t.TempDir(), "block_sync_reactor_test") require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) - genDoc, privVals := factory.RandGenesisDoc(cfg, 1, 1) + genDoc, privVals := factory.RandGenesisDoc(cfg, 1, 1, factory.ConsensusParams()) maxBlockHeight := int64(101) - rts := setup(t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0) + rts := setup(ctx, t, genDoc, privVals[0], []int64{maxBlockHeight, 0}) require.Equal(t, maxBlockHeight, rts.reactors[rts.nodes[0]].store.Height()) - rts.start(t) + rts.start(ctx, t) require.Eventually( t, @@ -251,19 +288,21 @@ func TestReactor_SyncTime(t *testing.T) { } func TestReactor_NoBlockResponse(t *testing.T) { - cfg, err := config.ResetTestRoot("block_sync_reactor_test") - require.NoError(t, err) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cfg, err := config.ResetTestRoot(t.TempDir(), "block_sync_reactor_test") + require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) - genDoc, privVals := factory.RandGenesisDoc(cfg, 1, 1) + genDoc, privVals := factory.RandGenesisDoc(cfg, 1, 1, factory.ConsensusParams()) maxBlockHeight := int64(65) - rts := setup(t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0) + rts := setup(ctx, t, genDoc, privVals[0], []int64{maxBlockHeight, 0}) require.Equal(t, maxBlockHeight, rts.reactors[rts.nodes[0]].store.Height()) - rts.start(t) + rts.start(ctx, t) testCases := []struct { height int64 @@ -300,18 +339,21 @@ func TestReactor_BadBlockStopsPeer(t *testing.T) { // See: https://github.com/tendermint/tendermint/issues/6005 t.SkipNow() - cfg, err := config.ResetTestRoot("block_sync_reactor_test") + ctx, cancel := 
context.WithCancel(context.Background()) + defer cancel() + + cfg, err := config.ResetTestRoot(t.TempDir(), "block_sync_reactor_test") require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) maxBlockHeight := int64(48) - genDoc, privVals := factory.RandGenesisDoc(cfg, 1, 1) + genDoc, privVals := factory.RandGenesisDoc(cfg, 1, 1, factory.ConsensusParams()) - rts := setup(t, genDoc, privVals[0], []int64{maxBlockHeight, 0, 0, 0, 0}, 1000) + rts := setup(ctx, t, genDoc, privVals[0], []int64{maxBlockHeight, 0, 0, 0, 0}) require.Equal(t, maxBlockHeight, rts.reactors[rts.nodes[0]].store.Height()) - rts.start(t) + rts.start(ctx, t) require.Eventually( t, @@ -339,12 +381,12 @@ func TestReactor_BadBlockStopsPeer(t *testing.T) { // // XXX: This causes a potential race condition. // See: https://github.com/tendermint/tendermint/issues/6005 - otherGenDoc, otherPrivVals := factory.RandGenesisDoc(cfg, 1, 1) - newNode := rts.network.MakeNode(t, nil, p2ptest.NodeOptions{ + otherGenDoc, otherPrivVals := factory.RandGenesisDoc(cfg, 1, 1, factory.ConsensusParams()) + newNode := rts.network.MakeNode(ctx, t, nil, p2ptest.NodeOptions{ MaxPeers: uint16(len(rts.nodes) + 1), MaxConnected: uint16(len(rts.nodes) + 1), - }, log.TestingLogger()) - rts.addNode(t, newNode.NodeID, otherGenDoc, otherPrivVals[0], maxBlockHeight) + }) + rts.addNode(ctx, t, newNode.NodeID, otherGenDoc, otherPrivVals[0], maxBlockHeight) // add a fake peer just so we do not wait for the consensus ticker to timeout rts.reactors[newNode.NodeID].pool.SetPeerRange("00ff", 10, 10) diff --git a/internal/blocksync/v2/internal/behavior/doc.go b/internal/blocksync/v2/internal/behavior/doc.go deleted file mode 100644 index c4bd06ccee..0000000000 --- a/internal/blocksync/v2/internal/behavior/doc.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Package Behavior provides a mechanism for reactors to report behavior of peers. - -Instead of a reactor calling the switch directly it will call the behavior module which will -handle the stoping and marking peer as good on behalf of the reactor. - -There are four different behaviors a reactor can report. - -1. bad message - -type badMessage struct { - explanation string -} - -This message will request the peer be stopped for an error - -2. message out of order - -type messageOutOfOrder struct { - explanation string -} - -This message will request the peer be stopped for an error - -3. consesnsus Vote - -type consensusVote struct { - explanation string -} - -This message will request the peer be marked as good - -4. block part - -type blockPart struct { - explanation string -} - -This message will request the peer be marked as good - -*/ -package behavior diff --git a/internal/blocksync/v2/internal/behavior/peer_behaviour.go b/internal/blocksync/v2/internal/behavior/peer_behaviour.go deleted file mode 100644 index 90948d888c..0000000000 --- a/internal/blocksync/v2/internal/behavior/peer_behaviour.go +++ /dev/null @@ -1,47 +0,0 @@ -package behavior - -import "github.com/tendermint/tendermint/types" - -// PeerBehavior is a struct describing a behavior a peer performed. -// `peerID` identifies the peer and reason characterizes the specific -// behavior performed by the peer. -type PeerBehavior struct { - peerID types.NodeID - reason interface{} -} - -type badMessage struct { - explanation string -} - -// BadMessage returns a badMessage PeerBehavior. 
-func BadMessage(peerID types.NodeID, explanation string) PeerBehavior { - return PeerBehavior{peerID: peerID, reason: badMessage{explanation}} -} - -type messageOutOfOrder struct { - explanation string -} - -// MessageOutOfOrder returns a messagOutOfOrder PeerBehavior. -func MessageOutOfOrder(peerID types.NodeID, explanation string) PeerBehavior { - return PeerBehavior{peerID: peerID, reason: messageOutOfOrder{explanation}} -} - -type consensusVote struct { - explanation string -} - -// ConsensusVote returns a consensusVote PeerBehavior. -func ConsensusVote(peerID types.NodeID, explanation string) PeerBehavior { - return PeerBehavior{peerID: peerID, reason: consensusVote{explanation}} -} - -type blockPart struct { - explanation string -} - -// BlockPart returns blockPart PeerBehavior. -func BlockPart(peerID types.NodeID, explanation string) PeerBehavior { - return PeerBehavior{peerID: peerID, reason: blockPart{explanation}} -} diff --git a/internal/blocksync/v2/internal/behavior/reporter.go b/internal/blocksync/v2/internal/behavior/reporter.go deleted file mode 100644 index c150a98d53..0000000000 --- a/internal/blocksync/v2/internal/behavior/reporter.go +++ /dev/null @@ -1,87 +0,0 @@ -package behavior - -import ( - "errors" - - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/types" -) - -// Reporter provides an interface for reactors to report the behavior -// of peers synchronously to other components. -type Reporter interface { - Report(behavior PeerBehavior) error -} - -// SwitchReporter reports peer behavior to an internal Switch. -type SwitchReporter struct { - sw *p2p.Switch -} - -// NewSwitchReporter return a new SwitchReporter instance which wraps the Switch. -func NewSwitchReporter(sw *p2p.Switch) *SwitchReporter { - return &SwitchReporter{ - sw: sw, - } -} - -// Report reports the behavior of a peer to the Switch. -func (spbr *SwitchReporter) Report(behavior PeerBehavior) error { - peer := spbr.sw.Peers().Get(behavior.peerID) - if peer == nil { - return errors.New("peer not found") - } - - switch reason := behavior.reason.(type) { - case consensusVote, blockPart: - spbr.sw.MarkPeerAsGood(peer) - case badMessage: - spbr.sw.StopPeerForError(peer, reason.explanation) - case messageOutOfOrder: - spbr.sw.StopPeerForError(peer, reason.explanation) - default: - return errors.New("unknown reason reported") - } - - return nil -} - -// MockReporter is a concrete implementation of the Reporter -// interface used in reactor tests to ensure reactors report the correct -// behavior in manufactured scenarios. -type MockReporter struct { - mtx tmsync.RWMutex - pb map[types.NodeID][]PeerBehavior -} - -// NewMockReporter returns a Reporter which records all reported -// behaviors in memory. -func NewMockReporter() *MockReporter { - return &MockReporter{ - pb: map[types.NodeID][]PeerBehavior{}, - } -} - -// Report stores the PeerBehavior produced by the peer identified by peerID. -func (mpbr *MockReporter) Report(behavior PeerBehavior) error { - mpbr.mtx.Lock() - defer mpbr.mtx.Unlock() - mpbr.pb[behavior.peerID] = append(mpbr.pb[behavior.peerID], behavior) - - return nil -} - -// GetBehaviors returns all behaviors reported on the peer identified by peerID. 
-func (mpbr *MockReporter) GetBehaviors(peerID types.NodeID) []PeerBehavior { - mpbr.mtx.RLock() - defer mpbr.mtx.RUnlock() - if items, ok := mpbr.pb[peerID]; ok { - result := make([]PeerBehavior, len(items)) - copy(result, items) - - return result - } - - return []PeerBehavior{} -} diff --git a/internal/blocksync/v2/internal/behavior/reporter_test.go b/internal/blocksync/v2/internal/behavior/reporter_test.go deleted file mode 100644 index 861a63df0c..0000000000 --- a/internal/blocksync/v2/internal/behavior/reporter_test.go +++ /dev/null @@ -1,205 +0,0 @@ -package behavior_test - -import ( - "sync" - "testing" - - bh "github.com/tendermint/tendermint/internal/blocksync/v2/internal/behavior" - "github.com/tendermint/tendermint/types" -) - -// TestMockReporter tests the MockReporter's ability to store reported -// peer behavior in memory indexed by the peerID. -func TestMockReporter(t *testing.T) { - var peerID types.NodeID = "MockPeer" - pr := bh.NewMockReporter() - - behaviors := pr.GetBehaviors(peerID) - if len(behaviors) != 0 { - t.Error("Expected to have no behaviors reported") - } - - badMessage := bh.BadMessage(peerID, "bad message") - if err := pr.Report(badMessage); err != nil { - t.Error(err) - } - behaviors = pr.GetBehaviors(peerID) - if len(behaviors) != 1 { - t.Error("Expected the peer have one reported behavior") - } - - if behaviors[0] != badMessage { - t.Error("Expected Bad Message to have been reported") - } -} - -type scriptItem struct { - peerID types.NodeID - behavior bh.PeerBehavior -} - -// equalBehaviors returns true if a and b contain the same PeerBehaviors with -// the same freequencies and otherwise false. -func equalBehaviors(a []bh.PeerBehavior, b []bh.PeerBehavior) bool { - aHistogram := map[bh.PeerBehavior]int{} - bHistogram := map[bh.PeerBehavior]int{} - - for _, behavior := range a { - aHistogram[behavior]++ - } - - for _, behavior := range b { - bHistogram[behavior]++ - } - - if len(aHistogram) != len(bHistogram) { - return false - } - - for _, behavior := range a { - if aHistogram[behavior] != bHistogram[behavior] { - return false - } - } - - for _, behavior := range b { - if bHistogram[behavior] != aHistogram[behavior] { - return false - } - } - - return true -} - -// TestEqualPeerBehaviors tests that equalBehaviors can tell that two slices -// of peer behaviors can be compared for the behaviors they contain and the -// freequencies that those behaviors occur. 
-func TestEqualPeerBehaviors(t *testing.T) { - var ( - peerID types.NodeID = "MockPeer" - consensusVote = bh.ConsensusVote(peerID, "voted") - blockPart = bh.BlockPart(peerID, "blocked") - equals = []struct { - left []bh.PeerBehavior - right []bh.PeerBehavior - }{ - // Empty sets - {[]bh.PeerBehavior{}, []bh.PeerBehavior{}}, - // Single behaviors - {[]bh.PeerBehavior{consensusVote}, []bh.PeerBehavior{consensusVote}}, - // Equal Frequencies - {[]bh.PeerBehavior{consensusVote, consensusVote}, - []bh.PeerBehavior{consensusVote, consensusVote}}, - // Equal frequencies different orders - {[]bh.PeerBehavior{consensusVote, blockPart}, - []bh.PeerBehavior{blockPart, consensusVote}}, - } - unequals = []struct { - left []bh.PeerBehavior - right []bh.PeerBehavior - }{ - // Comparing empty sets to non empty sets - {[]bh.PeerBehavior{}, []bh.PeerBehavior{consensusVote}}, - // Different behaviors - {[]bh.PeerBehavior{consensusVote}, []bh.PeerBehavior{blockPart}}, - // Same behavior with different frequencies - {[]bh.PeerBehavior{consensusVote}, - []bh.PeerBehavior{consensusVote, consensusVote}}, - } - ) - - for _, test := range equals { - if !equalBehaviors(test.left, test.right) { - t.Errorf("expected %#v and %#v to be equal", test.left, test.right) - } - } - - for _, test := range unequals { - if equalBehaviors(test.left, test.right) { - t.Errorf("expected %#v and %#v to be unequal", test.left, test.right) - } - } -} - -// TestPeerBehaviorConcurrency constructs a scenario in which -// multiple goroutines are using the same MockReporter instance. -// This test reproduces the conditions in which MockReporter will -// be used within a Reactor `Receive` method tests to ensure thread safety. -func TestMockPeerBehaviorReporterConcurrency(t *testing.T) { - var ( - behaviorScript = []struct { - peerID types.NodeID - behaviors []bh.PeerBehavior - }{ - {"1", []bh.PeerBehavior{bh.ConsensusVote("1", "")}}, - {"2", []bh.PeerBehavior{bh.ConsensusVote("2", ""), bh.ConsensusVote("2", ""), bh.ConsensusVote("2", "")}}, - { - "3", - []bh.PeerBehavior{bh.BlockPart("3", ""), - bh.ConsensusVote("3", ""), - bh.BlockPart("3", ""), - bh.ConsensusVote("3", "")}}, - { - "4", - []bh.PeerBehavior{bh.ConsensusVote("4", ""), - bh.ConsensusVote("4", ""), - bh.ConsensusVote("4", ""), - bh.ConsensusVote("4", "")}}, - { - "5", - []bh.PeerBehavior{bh.BlockPart("5", ""), - bh.ConsensusVote("5", ""), - bh.BlockPart("5", ""), - bh.ConsensusVote("5", "")}}, - } - ) - - var receiveWg sync.WaitGroup - pr := bh.NewMockReporter() - scriptItems := make(chan scriptItem) - done := make(chan int) - numConsumers := 3 - for i := 0; i < numConsumers; i++ { - receiveWg.Add(1) - go func() { - defer receiveWg.Done() - for { - select { - case pb := <-scriptItems: - if err := pr.Report(pb.behavior); err != nil { - t.Error(err) - } - case <-done: - return - } - } - }() - } - - var sendingWg sync.WaitGroup - sendingWg.Add(1) - go func() { - defer sendingWg.Done() - for _, item := range behaviorScript { - for _, reason := range item.behaviors { - scriptItems <- scriptItem{item.peerID, reason} - } - } - }() - - sendingWg.Wait() - - for i := 0; i < numConsumers; i++ { - done <- 1 - } - - receiveWg.Wait() - - for _, items := range behaviorScript { - reported := pr.GetBehaviors(items.peerID) - if !equalBehaviors(reported, items.behaviors) { - t.Errorf("expected peer %s to have behaved \nExpected: %#v \nGot %#v \n", - items.peerID, items.behaviors, reported) - } - } -} diff --git a/internal/blocksync/v2/io.go b/internal/blocksync/v2/io.go deleted file mode 100644 
index b272db3ae5..0000000000 --- a/internal/blocksync/v2/io.go +++ /dev/null @@ -1,187 +0,0 @@ -package v2 - -import ( - "errors" - - "github.com/gogo/protobuf/proto" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/state" - bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync" - "github.com/tendermint/tendermint/types" -) - -var ( - errPeerQueueFull = errors.New("peer queue full") -) - -type iIO interface { - sendBlockRequest(peer p2p.Peer, height int64) error - sendBlockToPeer(block *types.Block, peer p2p.Peer) error - sendBlockNotFound(height int64, peer p2p.Peer) error - sendStatusResponse(base, height int64, peer p2p.Peer) error - - sendStatusRequest(peer p2p.Peer) error - broadcastStatusRequest() error - - trySwitchToConsensus(state state.State, skipWAL bool) bool -} - -type switchIO struct { - sw *p2p.Switch -} - -func newSwitchIo(sw *p2p.Switch) *switchIO { - return &switchIO{ - sw: sw, - } -} - -const ( - // BlockchainChannel is a channel for blocks and status updates (`BlockStore` height) - BlockchainChannel = byte(0x40) -) - -type consensusReactor interface { - // for when we switch from blockchain reactor and block sync to - // the consensus machine - SwitchToConsensus(state state.State, skipWAL bool) -} - -func (sio *switchIO) sendBlockRequest(peer p2p.Peer, height int64) error { - msgProto := &bcproto.Message{ - Sum: &bcproto.Message_BlockRequest{ - BlockRequest: &bcproto.BlockRequest{ - Height: height, - }, - }, - } - - msgBytes, err := proto.Marshal(msgProto) - if err != nil { - return err - } - - queued := peer.TrySend(BlockchainChannel, msgBytes) - if !queued { - return errPeerQueueFull - } - return nil -} - -func (sio *switchIO) sendStatusResponse(base int64, height int64, peer p2p.Peer) error { - msgProto := &bcproto.Message{ - Sum: &bcproto.Message_StatusResponse{ - StatusResponse: &bcproto.StatusResponse{ - Height: height, - Base: base, - }, - }, - } - - msgBytes, err := proto.Marshal(msgProto) - if err != nil { - return err - } - - if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued { - return errPeerQueueFull - } - - return nil -} - -func (sio *switchIO) sendBlockToPeer(block *types.Block, peer p2p.Peer) error { - if block == nil { - panic("trying to send nil block") - } - - bpb, err := block.ToProto() - if err != nil { - return err - } - - msgProto := &bcproto.Message{ - Sum: &bcproto.Message_BlockResponse{ - BlockResponse: &bcproto.BlockResponse{ - Block: bpb, - }, - }, - } - - msgBytes, err := proto.Marshal(msgProto) - if err != nil { - return err - } - - if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued { - return errPeerQueueFull - } - - return nil -} - -func (sio *switchIO) sendBlockNotFound(height int64, peer p2p.Peer) error { - msgProto := &bcproto.Message{ - Sum: &bcproto.Message_NoBlockResponse{ - NoBlockResponse: &bcproto.NoBlockResponse{ - Height: height, - }, - }, - } - - msgBytes, err := proto.Marshal(msgProto) - if err != nil { - return err - } - - if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued { - return errPeerQueueFull - } - - return nil -} - -func (sio *switchIO) trySwitchToConsensus(state state.State, skipWAL bool) bool { - conR, ok := sio.sw.Reactor("CONSENSUS").(consensusReactor) - if ok { - conR.SwitchToConsensus(state, skipWAL) - } - return ok -} - -func (sio *switchIO) sendStatusRequest(peer p2p.Peer) error { - msgProto := &bcproto.Message{ - Sum: &bcproto.Message_StatusRequest{ - StatusRequest: &bcproto.StatusRequest{}, - }, - } - - 
msgBytes, err := proto.Marshal(msgProto) - if err != nil { - return err - } - - if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued { - return errPeerQueueFull - } - - return nil -} - -func (sio *switchIO) broadcastStatusRequest() error { - msgProto := &bcproto.Message{ - Sum: &bcproto.Message_StatusRequest{ - StatusRequest: &bcproto.StatusRequest{}, - }, - } - - msgBytes, err := proto.Marshal(msgProto) - if err != nil { - return err - } - - // XXX: maybe we should use an io specific peer list here - sio.sw.Broadcast(BlockchainChannel, msgBytes) - - return nil -} diff --git a/internal/blocksync/v2/metrics.go b/internal/blocksync/v2/metrics.go deleted file mode 100644 index c68ec64476..0000000000 --- a/internal/blocksync/v2/metrics.go +++ /dev/null @@ -1,125 +0,0 @@ -package v2 - -import ( - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/discard" - "github.com/go-kit/kit/metrics/prometheus" - stdprometheus "github.com/prometheus/client_golang/prometheus" -) - -const ( - // MetricsSubsystem is a subsystem shared by all metrics exposed by this - // package. - MetricsSubsystem = "blockchain" -) - -// Metrics contains metrics exposed by this package. -type Metrics struct { - // events_in - EventsIn metrics.Counter - // events_in - EventsHandled metrics.Counter - // events_out - EventsOut metrics.Counter - // errors_in - ErrorsIn metrics.Counter - // errors_handled - ErrorsHandled metrics.Counter - // errors_out - ErrorsOut metrics.Counter - // events_shed - EventsShed metrics.Counter - // events_sent - EventsSent metrics.Counter - // errors_sent - ErrorsSent metrics.Counter - // errors_shed - ErrorsShed metrics.Counter -} - -// PrometheusMetrics returns metrics for in and out events, errors, etc. handled by routines. -// Can we burn in the routine name here? 
-func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { - labels := []string{} - for i := 0; i < len(labelsAndValues); i += 2 { - labels = append(labels, labelsAndValues[i]) - } - return &Metrics{ - EventsIn: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "events_in", - Help: "Events read from the channel.", - }, labels).With(labelsAndValues...), - EventsHandled: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "events_handled", - Help: "Events handled", - }, labels).With(labelsAndValues...), - EventsOut: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "events_out", - Help: "Events output from routine.", - }, labels).With(labelsAndValues...), - ErrorsIn: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "errors_in", - Help: "Errors read from the channel.", - }, labels).With(labelsAndValues...), - ErrorsHandled: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "errors_handled", - Help: "Errors handled.", - }, labels).With(labelsAndValues...), - ErrorsOut: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "errors_out", - Help: "Errors output from routine.", - }, labels).With(labelsAndValues...), - ErrorsSent: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "errors_sent", - Help: "Errors sent to routine.", - }, labels).With(labelsAndValues...), - ErrorsShed: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "errors_shed", - Help: "Errors dropped from sending.", - }, labels).With(labelsAndValues...), - EventsSent: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "events_sent", - Help: "Events sent to routine.", - }, labels).With(labelsAndValues...), - EventsShed: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "events_shed", - Help: "Events dropped from sending.", - }, labels).With(labelsAndValues...), - } -} - -// NopMetrics returns no-op Metrics. 
-func NopMetrics() *Metrics { - return &Metrics{ - EventsIn: discard.NewCounter(), - EventsHandled: discard.NewCounter(), - EventsOut: discard.NewCounter(), - ErrorsIn: discard.NewCounter(), - ErrorsHandled: discard.NewCounter(), - ErrorsOut: discard.NewCounter(), - EventsShed: discard.NewCounter(), - EventsSent: discard.NewCounter(), - ErrorsSent: discard.NewCounter(), - ErrorsShed: discard.NewCounter(), - } -} diff --git a/internal/blocksync/v2/processor.go b/internal/blocksync/v2/processor.go deleted file mode 100644 index 39a06afd17..0000000000 --- a/internal/blocksync/v2/processor.go +++ /dev/null @@ -1,194 +0,0 @@ -package v2 - -import ( - "fmt" - - tmstate "github.com/tendermint/tendermint/internal/state" - "github.com/tendermint/tendermint/types" -) - -// Events generated by the processor: -// block execution failure, event will indicate the peer(s) that caused the error -type pcBlockVerificationFailure struct { - priorityNormal - height int64 - firstPeerID types.NodeID - secondPeerID types.NodeID -} - -func (e pcBlockVerificationFailure) String() string { - return fmt.Sprintf("pcBlockVerificationFailure{%d 1st peer: %v, 2nd peer: %v}", - e.height, e.firstPeerID, e.secondPeerID) -} - -// successful block execution -type pcBlockProcessed struct { - priorityNormal - height int64 - peerID types.NodeID -} - -func (e pcBlockProcessed) String() string { - return fmt.Sprintf("pcBlockProcessed{%d peer: %v}", e.height, e.peerID) -} - -// processor has finished -type pcFinished struct { - priorityNormal - blocksSynced int - tmState tmstate.State -} - -func (p pcFinished) Error() string { - return "finished" -} - -type queueItem struct { - block *types.Block - peerID types.NodeID -} - -type blockQueue map[int64]queueItem - -type pcState struct { - // blocks waiting to be processed - queue blockQueue - - // draining indicates that the next rProcessBlock event with a queue miss constitutes completion - draining bool - - // the number of blocks successfully synced by the processor - blocksSynced int - - // the processorContext which contains the processor dependencies - context processorContext -} - -func (state *pcState) String() string { - return fmt.Sprintf("height: %d queue length: %d draining: %v blocks synced: %d", - state.height(), len(state.queue), state.draining, state.blocksSynced) -} - -// newPcState returns a pcState initialized with the last verified block enqueued -func newPcState(context processorContext) *pcState { - return &pcState{ - queue: blockQueue{}, - draining: false, - blocksSynced: 0, - context: context, - } -} - -// nextTwo returns the next two unverified blocks -func (state *pcState) nextTwo() (queueItem, queueItem, error) { - if first, ok := state.queue[state.height()+1]; ok { - if second, ok := state.queue[state.height()+2]; ok { - return first, second, nil - } - } - return queueItem{}, queueItem{}, fmt.Errorf("not found") -} - -// synced returns true when at most the last verified block remains in the queue -func (state *pcState) synced() bool { - return len(state.queue) <= 1 -} - -func (state *pcState) enqueue(peerID types.NodeID, block *types.Block, height int64) { - if item, ok := state.queue[height]; ok { - panic(fmt.Sprintf( - "duplicate block %d (%X) enqueued by processor (sent by %v; existing block %X from %v)", - height, block.Hash(), peerID, item.block.Hash(), item.peerID)) - } - - state.queue[height] = queueItem{block: block, peerID: peerID} -} - -func (state *pcState) height() int64 { - return state.context.tmState().LastBlockHeight -} - -// purgePeer 
moves all unprocessed blocks from the queue -func (state *pcState) purgePeer(peerID types.NodeID) { - // what if height is less than state.height? - for height, item := range state.queue { - if item.peerID == peerID { - delete(state.queue, height) - } - } -} - -// handle processes FSM events -func (state *pcState) handle(event Event) (Event, error) { - switch event := event.(type) { - case bcResetState: - state.context.setState(event.state) - return noOp, nil - - case scFinishedEv: - if state.synced() { - return pcFinished{tmState: state.context.tmState(), blocksSynced: state.blocksSynced}, nil - } - state.draining = true - return noOp, nil - - case scPeerError: - state.purgePeer(event.peerID) - return noOp, nil - - case scBlockReceived: - if event.block == nil { - return noOp, nil - } - - // enqueue block if height is higher than state height, else ignore it - if event.block.Height > state.height() { - state.enqueue(event.peerID, event.block, event.block.Height) - } - return noOp, nil - - case rProcessBlock: - tmstate := state.context.tmState() - firstItem, secondItem, err := state.nextTwo() - if err != nil { - if state.draining { - return pcFinished{tmState: tmstate, blocksSynced: state.blocksSynced}, nil - } - return noOp, nil - } - - var ( - first, second = firstItem.block, secondItem.block - firstParts = first.MakePartSet(types.BlockPartSizeBytes) - firstID = types.BlockID{Hash: first.Hash(), PartSetHeader: firstParts.Header()} - firstStateID = types.StateID{Height: first.Height - 1, LastAppHash: first.AppHash} - ) - - // verify if +second+ last commit "confirms" +first+ block - err = state.context.verifyCommit(tmstate.ChainID, firstID, firstStateID, first.Height, second.LastCommit) - if err != nil { - state.purgePeer(firstItem.peerID) - if firstItem.peerID != secondItem.peerID { - state.purgePeer(secondItem.peerID) - } - return pcBlockVerificationFailure{ - height: first.Height, firstPeerID: firstItem.peerID, secondPeerID: secondItem.peerID}, - nil - } - - state.context.saveBlock(first, firstParts, second.LastCommit) - - if err := state.context.applyBlock(firstID, first); err != nil { - panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err)) - } - - state.context.recordConsMetrics(first) - - delete(state.queue, first.Height) - state.blocksSynced++ - - return pcBlockProcessed{height: first.Height, peerID: firstItem.peerID}, nil - } - - return noOp, nil -} diff --git a/internal/blocksync/v2/processor_context.go b/internal/blocksync/v2/processor_context.go deleted file mode 100644 index d6f6e58d01..0000000000 --- a/internal/blocksync/v2/processor_context.go +++ /dev/null @@ -1,117 +0,0 @@ -package v2 - -import ( - "fmt" - - "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/internal/consensus" - "github.com/tendermint/tendermint/internal/state" - "github.com/tendermint/tendermint/types" -) - -type processorContext interface { - applyBlock(blockID types.BlockID, block *types.Block) error - verifyCommit(chainID string, blockID types.BlockID, stateID types.StateID, height int64, commit *types.Commit) error - saveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) - tmState() state.State - setState(state.State) - recordConsMetrics(block *types.Block) -} - -type pContext struct { - store blockStore - nodeProTxHash crypto.ProTxHash - applier blockApplier - state state.State - metrics *consensus.Metrics -} - -func newProcessorContext(st blockStore, nodeProTxHash crypto.ProTxHash, ex blockApplier, 
s state.State, m *consensus.Metrics) *pContext { - return &pContext{ - store: st, - nodeProTxHash: nodeProTxHash, - applier: ex, - state: s, - metrics: m, - } -} - -func (pc *pContext) applyBlock(blockID types.BlockID, block *types.Block) error { - newState, err := pc.applier.ApplyBlock(pc.state, pc.nodeProTxHash, blockID, block) - pc.state = newState - return err -} - -func (pc pContext) tmState() state.State { - return pc.state -} - -func (pc *pContext) setState(state state.State) { - pc.state = state -} - -func (pc pContext) verifyCommit(chainID string, blockID types.BlockID, stateID types.StateID, - height int64, commit *types.Commit) error { - return pc.state.Validators.VerifyCommit(chainID, blockID, stateID, height, commit) -} - -func (pc *pContext) saveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { - pc.store.SaveBlock(block, blockParts, seenCommit) -} - -func (pc *pContext) recordConsMetrics(block *types.Block) { - pc.metrics.RecordConsMetrics(block) -} - -type mockPContext struct { - applicationBL []int64 - verificationBL []int64 - state state.State -} - -func newMockProcessorContext( - state state.State, - verificationBlackList []int64, - applicationBlackList []int64) *mockPContext { - return &mockPContext{ - applicationBL: applicationBlackList, - verificationBL: verificationBlackList, - state: state, - } -} - -func (mpc *mockPContext) applyBlock(blockID types.BlockID, block *types.Block) error { - for _, h := range mpc.applicationBL { - if h == block.Height { - return fmt.Errorf("generic application error") - } - } - mpc.state.LastBlockHeight = block.Height - return nil -} - -func (mpc *mockPContext) verifyCommit(chainID string, blockID types.BlockID, stateID types.StateID, - height int64, commit *types.Commit) error { - for _, h := range mpc.verificationBL { - if h == height { - return fmt.Errorf("generic verification error") - } - } - return nil -} - -func (mpc *mockPContext) saveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { - -} - -func (mpc *mockPContext) setState(state state.State) { - mpc.state = state -} - -func (mpc *mockPContext) tmState() state.State { - return mpc.state -} - -func (mpc *mockPContext) recordConsMetrics(block *types.Block) { - -} diff --git a/internal/blocksync/v2/processor_test.go b/internal/blocksync/v2/processor_test.go deleted file mode 100644 index 7c12b36108..0000000000 --- a/internal/blocksync/v2/processor_test.go +++ /dev/null @@ -1,305 +0,0 @@ -package v2 - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - tmstate "github.com/tendermint/tendermint/internal/state" - "github.com/tendermint/tendermint/types" -) - -// pcBlock is a test helper structure with simple types. Its purpose is to help with test readability. -type pcBlock struct { - pid string - height int64 -} - -// params is a test structure used to create processor state. -type params struct { - height int64 - items []pcBlock - blocksSynced int - verBL []int64 - appBL []int64 - draining bool -} - -// makePcBlock makes an empty block. -func makePcBlock(height int64) *types.Block { - return &types.Block{Header: types.Header{Height: height}} -} - -// makeState takes test parameters and creates a specific processor state. 
-func makeState(p *params) *pcState { - var ( - tmState = tmstate.State{LastBlockHeight: p.height} - context = newMockProcessorContext(tmState, p.verBL, p.appBL) - ) - state := newPcState(context) - - for _, item := range p.items { - state.enqueue(types.NodeID(item.pid), makePcBlock(item.height), item.height) - } - - state.blocksSynced = p.blocksSynced - state.draining = p.draining - return state -} - -func mBlockResponse(peerID types.NodeID, height int64) scBlockReceived { - return scBlockReceived{ - peerID: peerID, - block: makePcBlock(height), - } -} - -type pcFsmMakeStateValues struct { - currentState *params - event Event - wantState *params - wantNextEvent Event - wantErr error - wantPanic bool -} - -type testFields struct { - name string - steps []pcFsmMakeStateValues -} - -func executeProcessorTests(t *testing.T, tests []testFields) { - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - var state *pcState - for _, step := range tt.steps { - defer func() { - r := recover() - if (r != nil) != step.wantPanic { - t.Errorf("recover = %v, wantPanic = %v", r, step.wantPanic) - } - }() - - // First step must always initialize the currentState as state. - if step.currentState != nil { - state = makeState(step.currentState) - } - if state == nil { - panic("Bad (initial?) step") - } - - nextEvent, err := state.handle(step.event) - t.Log(state) - assert.Equal(t, step.wantErr, err) - assert.Equal(t, makeState(step.wantState), state) - assert.Equal(t, step.wantNextEvent, nextEvent) - // Next step may use the wantedState as their currentState. - state = makeState(step.wantState) - } - }) - } -} - -func TestRProcessPeerError(t *testing.T) { - tests := []testFields{ - { - name: "error for existing peer", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}}}, - event: scPeerError{peerID: "P2"}, - wantState: ¶ms{items: []pcBlock{{"P1", 1}}}, - wantNextEvent: noOp, - }, - }, - }, - { - name: "error for unknown peer", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}}}, - event: scPeerError{peerID: "P3"}, - wantState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}}}, - wantNextEvent: noOp, - }, - }, - }, - } - - executeProcessorTests(t, tests) -} - -func TestPcBlockResponse(t *testing.T) { - tests := []testFields{ - { - name: "add one block", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{}, event: mBlockResponse("P1", 1), - wantState: ¶ms{items: []pcBlock{{"P1", 1}}}, wantNextEvent: noOp, - }, - }, - }, - - { - name: "add two blocks", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{}, event: mBlockResponse("P1", 3), - wantState: ¶ms{items: []pcBlock{{"P1", 3}}}, wantNextEvent: noOp, - }, - { // use previous wantState as currentState, - event: mBlockResponse("P1", 4), - wantState: ¶ms{items: []pcBlock{{"P1", 3}, {"P1", 4}}}, wantNextEvent: noOp, - }, - }, - }, - } - - executeProcessorTests(t, tests) -} - -func TestRProcessBlockSuccess(t *testing.T) { - tests := []testFields{ - { - name: "noop - no blocks over current height", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{}, event: rProcessBlock{}, - wantState: ¶ms{}, wantNextEvent: noOp, - }, - }, - }, - { - name: "noop - high new blocks", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{height: 5, items: []pcBlock{{"P1", 30}, {"P2", 31}}}, event: rProcessBlock{}, - wantState: ¶ms{height: 5, items: []pcBlock{{"P1", 30}, {"P2", 31}}}, wantNextEvent: noOp, - }, - }, - }, - { - name: "blocks H+1 and H+2 
present", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}}}, event: rProcessBlock{}, - wantState: ¶ms{height: 1, items: []pcBlock{{"P2", 2}}, blocksSynced: 1}, - wantNextEvent: pcBlockProcessed{height: 1, peerID: "P1"}, - }, - }, - }, - { - name: "blocks H+1 and H+2 present after draining", - steps: []pcFsmMakeStateValues{ - { // some contiguous blocks - on stop check draining is set - currentState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}, {"P1", 4}}}, - event: scFinishedEv{}, - wantState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}, {"P1", 4}}, draining: true}, - wantNextEvent: noOp, - }, - { - event: rProcessBlock{}, - wantState: ¶ms{height: 1, items: []pcBlock{{"P2", 2}, {"P1", 4}}, blocksSynced: 1, draining: true}, - wantNextEvent: pcBlockProcessed{height: 1, peerID: "P1"}, - }, - { // finish when H+1 or/and H+2 are missing - event: rProcessBlock{}, - wantState: ¶ms{height: 1, items: []pcBlock{{"P2", 2}, {"P1", 4}}, blocksSynced: 1, draining: true}, - wantNextEvent: pcFinished{tmState: tmstate.State{LastBlockHeight: 1}, blocksSynced: 1}, - }, - }, - }, - } - - executeProcessorTests(t, tests) -} - -func TestRProcessBlockFailures(t *testing.T) { - tests := []testFields{ - { - name: "blocks H+1 and H+2 present from different peers - H+1 verification fails ", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}}, verBL: []int64{1}}, event: rProcessBlock{}, - wantState: ¶ms{items: []pcBlock{}, verBL: []int64{1}}, - wantNextEvent: pcBlockVerificationFailure{height: 1, firstPeerID: "P1", secondPeerID: "P2"}, - }, - }, - }, - { - name: "blocks H+1 and H+2 present from same peer - H+1 applyBlock fails ", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}}, appBL: []int64{1}}, event: rProcessBlock{}, - wantState: ¶ms{items: []pcBlock{}, appBL: []int64{1}}, wantPanic: true, - }, - }, - }, - { - name: "blocks H+1 and H+2 present from same peers - H+1 verification fails ", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{height: 0, items: []pcBlock{{"P1", 1}, {"P1", 2}, {"P2", 3}}, - verBL: []int64{1}}, event: rProcessBlock{}, - wantState: ¶ms{height: 0, items: []pcBlock{{"P2", 3}}, verBL: []int64{1}}, - wantNextEvent: pcBlockVerificationFailure{height: 1, firstPeerID: "P1", secondPeerID: "P1"}, - }, - }, - }, - { - name: "blocks H+1 and H+2 present from different peers - H+1 applyBlock fails ", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}, {"P2", 3}}, appBL: []int64{1}}, - event: rProcessBlock{}, - wantState: ¶ms{items: []pcBlock{{"P2", 3}}, appBL: []int64{1}}, wantPanic: true, - }, - }, - }, - } - - executeProcessorTests(t, tests) -} - -func TestScFinishedEv(t *testing.T) { - tests := []testFields{ - { - name: "no blocks", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{height: 100, items: []pcBlock{}, blocksSynced: 100}, event: scFinishedEv{}, - wantState: ¶ms{height: 100, items: []pcBlock{}, blocksSynced: 100}, - wantNextEvent: pcFinished{tmState: tmstate.State{LastBlockHeight: 100}, blocksSynced: 100}, - }, - }, - }, - { - name: "maxHeight+1 block present", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{height: 100, items: []pcBlock{ - {"P1", 101}}, blocksSynced: 100}, event: scFinishedEv{}, - wantState: ¶ms{height: 100, items: []pcBlock{{"P1", 101}}, blocksSynced: 100}, - wantNextEvent: pcFinished{tmState: tmstate.State{LastBlockHeight: 100}, blocksSynced: 100}, - }, - }, - }, - { - 
name: "more blocks present", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{height: 100, items: []pcBlock{ - {"P1", 101}, {"P1", 102}}, blocksSynced: 100}, event: scFinishedEv{}, - wantState: ¶ms{height: 100, items: []pcBlock{ - {"P1", 101}, {"P1", 102}}, blocksSynced: 100, draining: true}, - wantNextEvent: noOp, - wantErr: nil, - }, - }, - }, - } - - executeProcessorTests(t, tests) -} diff --git a/internal/blocksync/v2/reactor.go b/internal/blocksync/v2/reactor.go deleted file mode 100644 index bc2a317952..0000000000 --- a/internal/blocksync/v2/reactor.go +++ /dev/null @@ -1,650 +0,0 @@ -package v2 - -import ( - "errors" - "fmt" - "time" - - "github.com/gogo/protobuf/proto" - - "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/internal/blocksync" - "github.com/tendermint/tendermint/internal/blocksync/v2/internal/behavior" - "github.com/tendermint/tendermint/internal/consensus" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/state" - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/sync" - bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync" - "github.com/tendermint/tendermint/types" -) - -const ( - // chBufferSize is the buffer size of all event channels. - chBufferSize int = 1000 -) - -type blockStore interface { - LoadBlock(height int64) *types.Block - SaveBlock(*types.Block, *types.PartSet, *types.Commit) - Base() int64 - Height() int64 -} - -// BlockchainReactor handles block sync protocol. -type BlockchainReactor struct { - p2p.BaseReactor - - blockSync *sync.AtomicBool // enable block sync on start when it's been Set - stateSynced bool // set to true when SwitchToBlockSync is called by state sync - scheduler *Routine - processor *Routine - logger log.Logger - - mtx tmsync.RWMutex - maxPeerHeight int64 - syncHeight int64 - events chan Event // non-nil during a block sync - - reporter behavior.Reporter - io iIO - store blockStore - - syncStartTime time.Time - syncStartHeight int64 - lastSyncRate float64 // # blocks sync per sec base on the last 100 blocks -} - -type blockApplier interface { - ApplyBlock( - state state.State, - nodeProTxHash crypto.ProTxHash, - blockID types.BlockID, - block *types.Block, - ) (state.State, error) -} - -// XXX: unify naming in this package around tmState -func newReactor(state state.State, nodeProTxHash crypto.ProTxHash, store blockStore, reporter behavior.Reporter, - blockApplier blockApplier, blockSync bool, metrics *consensus.Metrics) *BlockchainReactor { - initHeight := state.LastBlockHeight + 1 - if initHeight == 1 { - initHeight = state.InitialHeight - } - scheduler := newScheduler(initHeight, time.Now()) - pContext := newProcessorContext(store, nodeProTxHash, blockApplier, state, metrics) - // TODO: Fix naming to just newProcesssor - // newPcState requires a processorContext - processor := newPcState(pContext) - - return &BlockchainReactor{ - scheduler: newRoutine("scheduler", scheduler.handle, chBufferSize), - processor: newRoutine("processor", processor.handle, chBufferSize), - store: store, - reporter: reporter, - logger: log.NewNopLogger(), - blockSync: sync.NewBool(blockSync), - syncStartHeight: initHeight, - syncStartTime: time.Time{}, - lastSyncRate: 0, - } -} - -// NewBlockchainReactor creates a new reactor instance. 
-func NewBlockchainReactor( - state state.State, - blockApplier blockApplier, - store blockStore, - nodeProTxHash crypto.ProTxHash, - blockSync bool, - metrics *consensus.Metrics) *BlockchainReactor { - reporter := behavior.NewMockReporter() - return newReactor(state, nodeProTxHash, store, reporter, blockApplier, blockSync, metrics) -} - -// SetSwitch implements Reactor interface. -func (r *BlockchainReactor) SetSwitch(sw *p2p.Switch) { - r.Switch = sw - if sw != nil { - r.io = newSwitchIo(sw) - } else { - r.io = nil - } -} - -func (r *BlockchainReactor) setMaxPeerHeight(height int64) { - r.mtx.Lock() - defer r.mtx.Unlock() - if height > r.maxPeerHeight { - r.maxPeerHeight = height - } -} - -func (r *BlockchainReactor) setSyncHeight(height int64) { - r.mtx.Lock() - defer r.mtx.Unlock() - r.syncHeight = height -} - -// SyncHeight returns the height to which the BlockchainReactor has synced. -func (r *BlockchainReactor) SyncHeight() int64 { - r.mtx.RLock() - defer r.mtx.RUnlock() - return r.syncHeight -} - -// SetLogger sets the logger of the reactor. -func (r *BlockchainReactor) SetLogger(logger log.Logger) { - r.logger = logger - r.scheduler.setLogger(logger) - r.processor.setLogger(logger) -} - -// Start implements cmn.Service interface -func (r *BlockchainReactor) Start() error { - r.reporter = behavior.NewSwitchReporter(r.BaseReactor.Switch) - if r.blockSync.IsSet() { - err := r.startSync(nil) - if err != nil { - return fmt.Errorf("failed to start block sync: %w", err) - } - } - return nil -} - -// startSync begins a block sync, signaled by r.events being non-nil. If state is non-nil, -// the scheduler and processor is updated with this state on startup. -func (r *BlockchainReactor) startSync(state *state.State) error { - r.mtx.Lock() - defer r.mtx.Unlock() - if r.events != nil { - return errors.New("block sync already in progress") - } - r.events = make(chan Event, chBufferSize) - go r.scheduler.start() - go r.processor.start() - if state != nil { - <-r.scheduler.ready() - <-r.processor.ready() - r.scheduler.send(bcResetState{state: *state}) - r.processor.send(bcResetState{state: *state}) - } - go r.demux(r.events) - return nil -} - -// endSync ends a block sync -func (r *BlockchainReactor) endSync() { - r.mtx.Lock() - defer r.mtx.Unlock() - if r.events != nil { - close(r.events) - } - r.events = nil - r.scheduler.stop() - r.processor.stop() -} - -// SwitchToBlockSync is called by the state sync reactor when switching to block sync. 
-func (r *BlockchainReactor) SwitchToBlockSync(state state.State) error { - r.stateSynced = true - state = state.Copy() - - err := r.startSync(&state) - if err == nil { - r.syncStartTime = time.Now() - } - - return err -} - -// reactor generated ticker events: -// ticker for cleaning peers -type rTryPrunePeer struct { - priorityHigh - time time.Time -} - -func (e rTryPrunePeer) String() string { - return fmt.Sprintf("rTryPrunePeer{%v}", e.time) -} - -// ticker event for scheduling block requests -type rTrySchedule struct { - priorityHigh - time time.Time -} - -func (e rTrySchedule) String() string { - return fmt.Sprintf("rTrySchedule{%v}", e.time) -} - -// ticker for block processing -type rProcessBlock struct { - priorityNormal -} - -func (e rProcessBlock) String() string { - return "rProcessBlock" -} - -// reactor generated events based on blockchain related messages from peers: -// blockResponse message received from a peer -type bcBlockResponse struct { - priorityNormal - time time.Time - peerID types.NodeID - size int64 - block *types.Block -} - -func (resp bcBlockResponse) String() string { - return fmt.Sprintf("bcBlockResponse{%d#%X (size: %d bytes) from %v at %v}", - resp.block.Height, resp.block.Hash(), resp.size, resp.peerID, resp.time) -} - -// blockNoResponse message received from a peer -type bcNoBlockResponse struct { - priorityNormal - time time.Time - peerID types.NodeID - height int64 -} - -func (resp bcNoBlockResponse) String() string { - return fmt.Sprintf("bcNoBlockResponse{%v has no block at height %d at %v}", - resp.peerID, resp.height, resp.time) -} - -// statusResponse message received from a peer -type bcStatusResponse struct { - priorityNormal - time time.Time - peerID types.NodeID - base int64 - height int64 -} - -func (resp bcStatusResponse) String() string { - return fmt.Sprintf("bcStatusResponse{%v is at height %d (base: %d) at %v}", - resp.peerID, resp.height, resp.base, resp.time) -} - -// new peer is connected -type bcAddNewPeer struct { - priorityNormal - peerID types.NodeID -} - -func (resp bcAddNewPeer) String() string { - return fmt.Sprintf("bcAddNewPeer{%v}", resp.peerID) -} - -// existing peer is removed -type bcRemovePeer struct { - priorityHigh - peerID types.NodeID - reason interface{} -} - -func (resp bcRemovePeer) String() string { - return fmt.Sprintf("bcRemovePeer{%v due to %v}", resp.peerID, resp.reason) -} - -// resets the scheduler and processor state, e.g. following a switch from state syncing -type bcResetState struct { - priorityHigh - state state.State -} - -func (e bcResetState) String() string { - return fmt.Sprintf("bcResetState{%v}", e.state) -} - -// Takes the channel as a parameter to avoid race conditions on r.events. 
-func (r *BlockchainReactor) demux(events <-chan Event) { - var lastHundred = time.Now() - - var ( - processBlockFreq = 20 * time.Millisecond - doProcessBlockCh = make(chan struct{}, 1) - doProcessBlockTk = time.NewTicker(processBlockFreq) - ) - defer doProcessBlockTk.Stop() - - var ( - prunePeerFreq = 1 * time.Second - doPrunePeerCh = make(chan struct{}, 1) - doPrunePeerTk = time.NewTicker(prunePeerFreq) - ) - defer doPrunePeerTk.Stop() - - var ( - scheduleFreq = 20 * time.Millisecond - doScheduleCh = make(chan struct{}, 1) - doScheduleTk = time.NewTicker(scheduleFreq) - ) - defer doScheduleTk.Stop() - - var ( - statusFreq = 10 * time.Second - doStatusCh = make(chan struct{}, 1) - doStatusTk = time.NewTicker(statusFreq) - ) - defer doStatusTk.Stop() - doStatusCh <- struct{}{} // immediately broadcast to get status of existing peers - - // Memoize the scSchedulerFail error to avoid printing it every scheduleFreq. - var scSchedulerFailErr error - - // XXX: Extract timers to make testing atemporal - for { - select { - // Pacers: send at most per frequency but don't saturate - case <-doProcessBlockTk.C: - select { - case doProcessBlockCh <- struct{}{}: - default: - } - case <-doPrunePeerTk.C: - select { - case doPrunePeerCh <- struct{}{}: - default: - } - case <-doScheduleTk.C: - select { - case doScheduleCh <- struct{}{}: - default: - } - case <-doStatusTk.C: - select { - case doStatusCh <- struct{}{}: - default: - } - - // Tickers: perform tasks periodically - case <-doScheduleCh: - r.scheduler.send(rTrySchedule{time: time.Now()}) - case <-doPrunePeerCh: - r.scheduler.send(rTryPrunePeer{time: time.Now()}) - case <-doProcessBlockCh: - r.processor.send(rProcessBlock{}) - case <-doStatusCh: - if err := r.io.broadcastStatusRequest(); err != nil { - r.logger.Error("Error broadcasting status request", "err", err) - } - - // Events from peers. Closing the channel signals event loop termination. - case event, ok := <-events: - if !ok { - r.logger.Info("Stopping event processing") - return - } - switch event := event.(type) { - case bcStatusResponse: - r.setMaxPeerHeight(event.height) - r.scheduler.send(event) - case bcAddNewPeer, bcRemovePeer, bcBlockResponse, bcNoBlockResponse: - r.scheduler.send(event) - default: - r.logger.Error("Received unexpected event", "event", fmt.Sprintf("%T", event)) - } - - // Incremental events from scheduler - case event := <-r.scheduler.next(): - switch event := event.(type) { - case scBlockReceived: - r.processor.send(event) - case scPeerError: - r.processor.send(event) - if err := r.reporter.Report(behavior.BadMessage(event.peerID, "scPeerError")); err != nil { - r.logger.Error("Error reporting peer", "err", err) - } - case scBlockRequest: - peer := r.Switch.Peers().Get(event.peerID) - if peer == nil { - r.logger.Error("Wanted to send block request, but no such peer", "peerID", event.peerID) - continue - } - if err := r.io.sendBlockRequest(peer, event.height); err != nil { - r.logger.Error("Error sending block request", "err", err) - } - case scFinishedEv: - r.processor.send(event) - r.scheduler.stop() - case scSchedulerFail: - if scSchedulerFailErr != event.reason { - r.logger.Error("Scheduler failure", "err", event.reason.Error()) - scSchedulerFailErr = event.reason - } - case scPeersPruned: - // Remove peers from the processor. 
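The pacer construction at the top of demux is worth pausing on: each ticker drains into a channel of capacity 1 via a non-blocking send, so when the loop is busy, ticks coalesce into at most one pending task instead of queuing up. A stripped-down, runnable sketch of that pattern:

package main

import (
	"fmt"
	"time"
)

func main() {
	tick := time.NewTicker(20 * time.Millisecond)
	defer tick.Stop()

	// Capacity 1: at most one "do the task" request can be pending.
	do := make(chan struct{}, 1)
	deadline := time.After(300 * time.Millisecond)

	for {
		select {
		case <-tick.C:
			select {
			case do <- struct{}{}: // arm the task
			default: // already armed: coalesce this tick
			}
		case <-do:
			fmt.Println("task ran at", time.Now().Format("15:04:05.000"))
			time.Sleep(60 * time.Millisecond) // a slow task; ticks coalesce meanwhile
		case <-deadline:
			return
		}
	}
}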
- for _, peerID := range event.peers { - r.processor.send(scPeerError{peerID: peerID, reason: errors.New("peer was pruned")}) - } - r.logger.Debug("Pruned peers", "count", len(event.peers)) - case noOpEvent: - default: - r.logger.Error("Received unexpected scheduler event", "event", fmt.Sprintf("%T", event)) - } - - // Incremental events from processor - case event := <-r.processor.next(): - switch event := event.(type) { - case pcBlockProcessed: - r.setSyncHeight(event.height) - if (r.syncHeight-r.syncStartHeight)%100 == 0 { - newSyncRate := 100 / time.Since(lastHundred).Seconds() - if r.lastSyncRate == 0 { - r.lastSyncRate = newSyncRate - } else { - r.lastSyncRate = 0.9*r.lastSyncRate + 0.1*newSyncRate - } - r.logger.Info("block sync Rate", "height", r.syncHeight, - "max_peer_height", r.maxPeerHeight, "blocks/s", r.lastSyncRate) - lastHundred = time.Now() - } - r.scheduler.send(event) - case pcBlockVerificationFailure: - r.scheduler.send(event) - case pcFinished: - r.logger.Info("block sync complete, switching to consensus") - if !r.io.trySwitchToConsensus(event.tmState, event.blocksSynced > 0 || r.stateSynced) { - r.logger.Error("Failed to switch to consensus reactor") - } - r.endSync() - r.blockSync.UnSet() - return - case noOpEvent: - default: - r.logger.Error("Received unexpected processor event", "event", fmt.Sprintf("%T", event)) - } - - // Terminal event from scheduler - case err := <-r.scheduler.final(): - switch err { - case nil: - r.logger.Info("Scheduler stopped") - default: - r.logger.Error("Scheduler aborted with error", "err", err) - } - - // Terminal event from processor - case err := <-r.processor.final(): - switch err { - case nil: - r.logger.Info("Processor stopped") - default: - r.logger.Error("Processor aborted with error", "err", err) - } - } - } -} - -// Stop implements cmn.Service interface. -func (r *BlockchainReactor) Stop() error { - r.logger.Info("reactor stopping") - r.endSync() - r.logger.Info("reactor stopped") - return nil -} - -// Receive implements Reactor by handling different message types. -// XXX: do not call any methods that can block or incur heavy processing. 
-// https://github.com/tendermint/tendermint/issues/2888 -func (r *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { - logger := r.logger.With("src", src.ID(), "chID", chID) - - msgProto := new(bcproto.Message) - - if err := proto.Unmarshal(msgBytes, msgProto); err != nil { - logger.Error("error decoding message", "err", err) - _ = r.reporter.Report(behavior.BadMessage(src.ID(), err.Error())) - return - } - - if err := msgProto.Validate(); err != nil { - logger.Error("peer sent us an invalid msg", "msg", msgProto, "err", err) - _ = r.reporter.Report(behavior.BadMessage(src.ID(), err.Error())) - return - } - - // r.logger.Debug("received", "msg", msgProto) - - switch msg := msgProto.Sum.(type) { - case *bcproto.Message_StatusRequest: - if err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), src); err != nil { - logger.Error("Could not send status message to src peer", "err", err) - } - - case *bcproto.Message_BlockRequest: - block := r.store.LoadBlock(msg.BlockRequest.Height) - if block != nil { - if err := r.io.sendBlockToPeer(block, src); err != nil { - logger.Error("Could not send block message to src peer", "err", err) - } - } else { - logger.Info("peer asking for a block we don't have", "height", msg.BlockRequest.Height) - if err := r.io.sendBlockNotFound(msg.BlockRequest.Height, src); err != nil { - logger.Error("Couldn't send block not found msg", "err", err) - } - } - - case *bcproto.Message_StatusResponse: - r.mtx.RLock() - if r.events != nil { - r.events <- bcStatusResponse{ - peerID: src.ID(), - base: msg.StatusResponse.Base, - height: msg.StatusResponse.Height, - } - } - r.mtx.RUnlock() - - case *bcproto.Message_BlockResponse: - bi, err := types.BlockFromProto(msg.BlockResponse.Block) - if err != nil { - logger.Error("error converting block from protobuf", "err", err) - _ = r.reporter.Report(behavior.BadMessage(src.ID(), err.Error())) - return - } - r.mtx.RLock() - if r.events != nil { - r.events <- bcBlockResponse{ - peerID: src.ID(), - block: bi, - size: int64(len(msgBytes)), - time: time.Now(), - } - } - r.mtx.RUnlock() - - case *bcproto.Message_NoBlockResponse: - r.mtx.RLock() - if r.events != nil { - r.events <- bcNoBlockResponse{ - peerID: src.ID(), - height: msg.NoBlockResponse.Height, - time: time.Now(), - } - } - r.mtx.RUnlock() - } -} - -// AddPeer implements Reactor interface -func (r *BlockchainReactor) AddPeer(peer p2p.Peer) { - err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), peer) - if err != nil { - r.logger.Error("could not send our status to the new peer", "peer", peer.ID(), "err", err) - } - - err = r.io.sendStatusRequest(peer) - if err != nil { - r.logger.Error("could not send status request to the new peer", "peer", peer.ID(), "err", err) - } - - r.mtx.RLock() - defer r.mtx.RUnlock() - if r.events != nil { - r.events <- bcAddNewPeer{peerID: peer.ID()} - } -} - -// RemovePeer implements Reactor interface.
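Note the locking discipline in Receive and AddPeer above: senders hold the read lock while pushing onto r.events, and endSync takes the write lock before closing and nil-ing the channel, so a send can never hit a closed channel. A minimal sketch of that shutdown-safe send, with illustrative names:

package main

import (
	"fmt"
	"sync"
)

type eventSink struct {
	mtx sync.RWMutex
	ch  chan int
}

// send drops the event if the sink was already shut down. Holding the
// read lock excludes shutdown (which needs the write lock), so the
// channel cannot be closed out from under us mid-send.
func (s *eventSink) send(v int) {
	s.mtx.RLock()
	defer s.mtx.RUnlock()
	if s.ch != nil {
		s.ch <- v
	}
}

func (s *eventSink) shutdown() {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	close(s.ch)
	s.ch = nil
}

func main() {
	s := &eventSink{ch: make(chan int, 1)}
	s.send(1)
	fmt.Println(<-s.ch) // 1
	s.shutdown()
	s.send(2) // safely dropped after shutdown
	fmt.Println("done")
}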
-func (r *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) { - r.mtx.RLock() - defer r.mtx.RUnlock() - if r.events != nil { - r.events <- bcRemovePeer{ - peerID: peer.ID(), - reason: reason, - } - } -} - -// GetChannels implements Reactor -func (r *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor { - return []*p2p.ChannelDescriptor{ - { - ID: BlockchainChannel, - Priority: 5, - SendQueueCapacity: 2000, - RecvBufferCapacity: 1024, - RecvMessageCapacity: blocksync.MaxMsgSize, - }, - } -} - -func (r *BlockchainReactor) GetMaxPeerBlockHeight() int64 { - r.mtx.RLock() - defer r.mtx.RUnlock() - return r.maxPeerHeight -} - -func (r *BlockchainReactor) GetTotalSyncedTime() time.Duration { - if !r.blockSync.IsSet() || r.syncStartTime.IsZero() { - return time.Duration(0) - } - return time.Since(r.syncStartTime) -} - -func (r *BlockchainReactor) GetRemainingSyncTime() time.Duration { - if !r.blockSync.IsSet() { - return time.Duration(0) - } - - r.mtx.RLock() - defer r.mtx.RUnlock() - - targetSyncs := r.maxPeerHeight - r.syncStartHeight - currentSyncs := r.syncHeight - r.syncStartHeight + 1 - if currentSyncs < 0 || r.lastSyncRate < 0.001 { - return time.Duration(0) - } - - remain := float64(targetSyncs-currentSyncs) / r.lastSyncRate - - return time.Duration(int64(remain * float64(time.Second))) -} diff --git a/internal/blocksync/v2/reactor_test.go b/internal/blocksync/v2/reactor_test.go deleted file mode 100644 index 5fe6d6d47f..0000000000 --- a/internal/blocksync/v2/reactor_test.go +++ /dev/null @@ -1,540 +0,0 @@ -package v2 - -import ( - "context" - "fmt" - "net" - "os" - "sync" - "testing" - - "github.com/gogo/protobuf/proto" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - abciclient "github.com/tendermint/tendermint/abci/client" - abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/internal/blocksync/v2/internal/behavior" - "github.com/tendermint/tendermint/internal/consensus" - "github.com/tendermint/tendermint/internal/mempool/mock" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/p2p/conn" - "github.com/tendermint/tendermint/internal/proxy" - sm "github.com/tendermint/tendermint/internal/state" - sf "github.com/tendermint/tendermint/internal/state/test/factory" - tmstore "github.com/tendermint/tendermint/internal/store" - "github.com/tendermint/tendermint/internal/test/factory" - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/service" - bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync" - "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" -) - -type mockPeer struct { - service.Service - id types.NodeID -} - -func (mp mockPeer) FlushStop() {} -func (mp mockPeer) ID() types.NodeID { return mp.id } -func (mp mockPeer) RemoteIP() net.IP { return net.IP{} } -func (mp mockPeer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.RemoteIP(), Port: 8800} } - -func (mp mockPeer) IsOutbound() bool { return true } -func (mp mockPeer) IsPersistent() bool { return true } -func (mp mockPeer) CloseConn() error { return nil } - -func (mp mockPeer) NodeInfo() types.NodeInfo { - return types.NodeInfo{ - NodeID: "", - ListenAddr: "", - } -} -func (mp mockPeer) Status() conn.ConnectionStatus { return conn.ConnectionStatus{} } -func (mp mockPeer) SocketAddr() *p2p.NetAddress { return &p2p.NetAddress{} } - -func 
(mp mockPeer) Send(byte, []byte) bool { return true } -func (mp mockPeer) TrySend(byte, []byte) bool { return true } - -func (mp mockPeer) Set(string, interface{}) {} -func (mp mockPeer) Get(string) interface{} { return struct{}{} } - -//nolint:unused -type mockBlockStore struct { - blocks map[int64]*types.Block -} - -//nolint:unused -func (ml *mockBlockStore) Height() int64 { - return int64(len(ml.blocks)) -} - -//nolint:unused -func (ml *mockBlockStore) LoadBlock(height int64) *types.Block { - return ml.blocks[height] -} - -//nolint:unused -func (ml *mockBlockStore) SaveBlock(block *types.Block, part *types.PartSet, commit *types.Commit) { - ml.blocks[block.Height] = block -} - -type mockBlockApplier struct { -} - -// XXX: Add whitelist/blacklist? -func (mba *mockBlockApplier) ApplyBlock( - state sm.State, nodeProTxHash crypto.ProTxHash, blockID types.BlockID, block *types.Block, -) (sm.State, error) { - state.LastBlockHeight++ - return state, nil -} - -type mockSwitchIo struct { - mtx sync.Mutex - switchedToConsensus bool - numStatusResponse int - numBlockResponse int - numNoBlockResponse int - numStatusRequest int -} - -var _ iIO = (*mockSwitchIo)(nil) - -func (sio *mockSwitchIo) sendBlockRequest(_ p2p.Peer, _ int64) error { - return nil -} - -func (sio *mockSwitchIo) sendStatusResponse(_, _ int64, _ p2p.Peer) error { - sio.mtx.Lock() - defer sio.mtx.Unlock() - sio.numStatusResponse++ - return nil -} - -func (sio *mockSwitchIo) sendBlockToPeer(_ *types.Block, _ p2p.Peer) error { - sio.mtx.Lock() - defer sio.mtx.Unlock() - sio.numBlockResponse++ - return nil -} - -func (sio *mockSwitchIo) sendBlockNotFound(_ int64, _ p2p.Peer) error { - sio.mtx.Lock() - defer sio.mtx.Unlock() - sio.numNoBlockResponse++ - return nil -} - -func (sio *mockSwitchIo) trySwitchToConsensus(_ sm.State, _ bool) bool { - sio.mtx.Lock() - defer sio.mtx.Unlock() - sio.switchedToConsensus = true - return true -} - -func (sio *mockSwitchIo) broadcastStatusRequest() error { - return nil -} - -func (sio *mockSwitchIo) sendStatusRequest(_ p2p.Peer) error { - sio.mtx.Lock() - defer sio.mtx.Unlock() - sio.numStatusRequest++ - return nil -} - -type testReactorParams struct { - logger log.Logger - genDoc *types.GenesisDoc - privVals []types.PrivValidator - startHeight int64 - mockA bool -} - -func newTestReactor(t *testing.T, p testReactorParams) *BlockchainReactor { - store, state, _ := newReactorStore(t, p.genDoc, p.privVals, p.startHeight) - reporter := behavior.NewMockReporter() - - var appl blockApplier - - if p.mockA { - appl = &mockBlockApplier{} - } else { - app := &testApp{} - cc := abciclient.NewLocalCreator(app) - proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) - err := proxyApp.Start() - require.NoError(t, err) - db := dbm.NewMemDB() - stateStore := sm.NewStore(db) - blockStore := tmstore.NewBlockStore(dbm.NewMemDB()) - appl = sm.NewBlockExecutor(stateStore, p.logger, proxyApp.Consensus(), proxyApp.Query(), mock.Mempool{}, - sm.EmptyEvidencePool{}, blockStore, nil) - err = stateStore.Save(state) - require.NoError(t, err) - } - proTxHash := crypto.RandProTxHash() - r := newReactor(state, proTxHash, store, reporter, appl, true, consensus.NopMetrics()) - logger := log.TestingLogger() - r.SetLogger(logger.With("module", "blockchain")) - - return r -} - -// This test is left here and not deleted to retain the termination cases for -// future improvement in [#4482](https://github.com/tendermint/tendermint/issues/4482). 
-// func TestReactorTerminationScenarios(t *testing.T) { - -// config := cfg.ResetTestRoot("blockchain_reactor_v2_test") -// defer os.RemoveAll(config.RootDir) -// genDoc, privVals := randGenesisDoc(config.ChainID(), 1, false, 30) -// refStore, _, _ := newReactorStore(genDoc, privVals, 20) - -// params := testReactorParams{ -// logger: log.TestingLogger(), -// genDoc: genDoc, -// privVals: privVals, -// startHeight: 10, -// bufferSize: 100, -// mockA: true, -// } - -// type testEvent struct { -// evType string -// peer string -// height int64 -// } - -// tests := []struct { -// name string -// params testReactorParams -// msgs []testEvent -// }{ -// { -// name: "simple termination on max peer height - one peer", -// params: params, -// msgs: []testEvent{ -// {evType: "AddPeer", peer: "P1"}, -// {evType: "ReceiveS", peer: "P1", height: 13}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P1", height: 11}, -// {evType: "BlockReq"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P1", height: 12}, -// {evType: "Process"}, -// {evType: "ReceiveB", peer: "P1", height: 13}, -// {evType: "Process"}, -// }, -// }, -// { -// name: "simple termination on max peer height - two peers", -// params: params, -// msgs: []testEvent{ -// {evType: "AddPeer", peer: "P1"}, -// {evType: "AddPeer", peer: "P2"}, -// {evType: "ReceiveS", peer: "P1", height: 13}, -// {evType: "ReceiveS", peer: "P2", height: 15}, -// {evType: "BlockReq"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P1", height: 11}, -// {evType: "ReceiveB", peer: "P2", height: 12}, -// {evType: "Process"}, -// {evType: "BlockReq"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P1", height: 13}, -// {evType: "Process"}, -// {evType: "ReceiveB", peer: "P2", height: 14}, -// {evType: "Process"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P2", height: 15}, -// {evType: "Process"}, -// }, -// }, -// { -// name: "termination on max peer height - two peers, noBlock error", -// params: params, -// msgs: []testEvent{ -// {evType: "AddPeer", peer: "P1"}, -// {evType: "AddPeer", peer: "P2"}, -// {evType: "ReceiveS", peer: "P1", height: 13}, -// {evType: "ReceiveS", peer: "P2", height: 15}, -// {evType: "BlockReq"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveNB", peer: "P1", height: 11}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P2", height: 12}, -// {evType: "ReceiveB", peer: "P2", height: 11}, -// {evType: "Process"}, -// {evType: "BlockReq"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P2", height: 13}, -// {evType: "Process"}, -// {evType: "ReceiveB", peer: "P2", height: 14}, -// {evType: "Process"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P2", height: 15}, -// {evType: "Process"}, -// }, -// }, -// { -// name: "termination on max peer height - two peers, remove one peer", -// params: params, -// msgs: []testEvent{ -// {evType: "AddPeer", peer: "P1"}, -// {evType: "AddPeer", peer: "P2"}, -// {evType: "ReceiveS", peer: "P1", height: 13}, -// {evType: "ReceiveS", peer: "P2", height: 15}, -// {evType: "BlockReq"}, -// {evType: "BlockReq"}, -// {evType: "RemovePeer", peer: "P1"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P2", height: 12}, -// {evType: "ReceiveB", peer: "P2", height: 11}, -// {evType: "Process"}, -// {evType: "BlockReq"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P2", height: 13}, -// {evType: "Process"}, -// {evType: "ReceiveB", peer: "P2", height: 14}, -// {evType: "Process"}, -// {evType: 
"BlockReq"}, -// {evType: "ReceiveB", peer: "P2", height: 15}, -// {evType: "Process"}, -// }, -// }, -// } - -// for _, tt := range tests { -// tt := tt -// t.Run(tt.name, func(t *testing.T) { -// reactor := newTestReactor(params) -// reactor.Start() -// reactor.reporter = behavior.NewMockReporter() -// mockSwitch := &mockSwitchIo{switchedToConsensus: false} -// reactor.io = mockSwitch -// // time for go routines to start -// time.Sleep(time.Millisecond) - -// for _, step := range tt.msgs { -// switch step.evType { -// case "AddPeer": -// reactor.scheduler.send(bcAddNewPeer{peerID: types.NodeID(step.peer)}) -// case "RemovePeer": -// reactor.scheduler.send(bcRemovePeer{peerID: types.NodeID(step.peer)}) -// case "ReceiveS": -// reactor.scheduler.send(bcStatusResponse{ -// peerID: types.NodeID(step.peer), -// height: step.height, -// time: time.Now(), -// }) -// case "ReceiveB": -// reactor.scheduler.send(bcBlockResponse{ -// peerID: types.NodeID(step.peer), -// block: refStore.LoadBlock(step.height), -// size: 10, -// time: time.Now(), -// }) -// case "ReceiveNB": -// reactor.scheduler.send(bcNoBlockResponse{ -// peerID: types.NodeID(step.peer), -// height: step.height, -// time: time.Now(), -// }) -// case "BlockReq": -// reactor.scheduler.send(rTrySchedule{time: time.Now()}) -// case "Process": -// reactor.processor.send(rProcessBlock{}) -// } -// // give time for messages to propagate between routines -// time.Sleep(time.Millisecond) -// } - -// // time for processor to finish and reactor to switch to consensus -// time.Sleep(20 * time.Millisecond) -// assert.True(t, mockSwitch.hasSwitchedToConsensus()) -// reactor.Stop() -// }) -// } -// } - -func TestReactorHelperMode(t *testing.T) { - var ( - channelID = byte(0x40) - ) - - cfg, err := config.ResetTestRoot("blockchain_reactor_v2_test") - require.NoError(t, err) - defer os.RemoveAll(cfg.RootDir) - genDoc, privVals := factory.RandGenesisDoc(cfg, 1, 1) - - params := testReactorParams{ - logger: log.TestingLogger(), - genDoc: genDoc, - privVals: privVals, - startHeight: 20, - mockA: true, - } - - type testEvent struct { - peer string - event interface{} - } - - tests := []struct { - name string - params testReactorParams - msgs []testEvent - }{ - { - name: "status request", - params: params, - msgs: []testEvent{ - {"P1", bcproto.StatusRequest{}}, - {"P1", bcproto.BlockRequest{Height: 13}}, - {"P1", bcproto.BlockRequest{Height: 20}}, - {"P1", bcproto.BlockRequest{Height: 22}}, - }, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - reactor := newTestReactor(t, params) - mockSwitch := &mockSwitchIo{switchedToConsensus: false} - reactor.io = mockSwitch - err := reactor.Start() - require.NoError(t, err) - - for i := 0; i < len(tt.msgs); i++ { - step := tt.msgs[i] - switch ev := step.event.(type) { - case bcproto.StatusRequest: - old := mockSwitch.numStatusResponse - - msgProto := new(bcproto.Message) - require.NoError(t, msgProto.Wrap(&ev)) - - msgBz, err := proto.Marshal(msgProto) - require.NoError(t, err) - - reactor.Receive(channelID, mockPeer{id: types.NodeID(step.peer)}, msgBz) - assert.Equal(t, old+1, mockSwitch.numStatusResponse) - case bcproto.BlockRequest: - if ev.Height > params.startHeight { - old := mockSwitch.numNoBlockResponse - - msgProto := new(bcproto.Message) - require.NoError(t, msgProto.Wrap(&ev)) - - msgBz, err := proto.Marshal(msgProto) - require.NoError(t, err) - - reactor.Receive(channelID, mockPeer{id: types.NodeID(step.peer)}, msgBz) - assert.Equal(t, old+1, 
mockSwitch.numNoBlockResponse) - } else { - old := mockSwitch.numBlockResponse - - msgProto := new(bcproto.Message) - require.NoError(t, msgProto.Wrap(&ev)) - - msgBz, err := proto.Marshal(msgProto) - require.NoError(t, err) - - reactor.Receive(channelID, mockPeer{id: types.NodeID(step.peer)}, msgBz) - assert.Equal(t, old+1, mockSwitch.numBlockResponse) - } - } - } - err = reactor.Stop() - require.NoError(t, err) - }) - } -} - -func TestReactorSetSwitchNil(t *testing.T) { - cfg, err := config.ResetTestRoot("blockchain_reactor_v2_test") - require.NoError(t, err) - defer os.RemoveAll(cfg.RootDir) - genDoc, privVals := factory.RandGenesisDoc(cfg, 1, 1) - - reactor := newTestReactor(t, testReactorParams{ - logger: log.TestingLogger(), - genDoc: genDoc, - privVals: privVals, - }) - reactor.SetSwitch(nil) - - assert.Nil(t, reactor.Switch) - assert.Nil(t, reactor.io) -} - -type testApp struct { - abci.BaseApplication -} - -func newReactorStore( - t *testing.T, - genDoc *types.GenesisDoc, - privVals []types.PrivValidator, - maxBlockHeight int64) (*tmstore.BlockStore, sm.State, *sm.BlockExecutor) { - t.Helper() - - require.Len(t, privVals, 1) - app := &testApp{} - cc := abciclient.NewLocalCreator(app) - proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) - err := proxyApp.Start() - if err != nil { - panic(fmt.Errorf("error start app: %w", err)) - } - - stateDB := dbm.NewMemDB() - blockStore := tmstore.NewBlockStore(dbm.NewMemDB()) - stateStore := sm.NewStore(stateDB) - state, err := sm.MakeGenesisState(genDoc) - require.NoError(t, err) - - blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), proxyApp.Query(), - mock.Mempool{}, sm.EmptyEvidencePool{}, blockStore, nil) - err = stateStore.Save(state) - require.NoError(t, err) - - // add blocks in - for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ { - lastCommit := types.NewCommit(blockHeight-1, 0, types.BlockID{}, types.StateID{}, state.Validators.QuorumHash, nil, nil) - if blockHeight > 1 { - lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1) - lastBlock := blockStore.LoadBlock(blockHeight - 1) - vote, err := factory.MakeVote( - privVals[0], - state.Validators, - lastBlock.Header.ChainID, 0, - lastBlock.Header.Height, 0, 2, - lastBlockMeta.BlockID, - state.LastStateID, // todo: figure out using state.StateID() instead - ) - require.NoError(t, err) - lastCommit = types.NewCommit(vote.Height, vote.Round, - lastBlockMeta.BlockID, state.LastStateID, state.Validators.QuorumHash, vote.BlockSignature, vote.StateSignature) - } - - thisBlock, err := sf.MakeBlock(state, blockHeight, lastCommit, nil, 0) - require.NoError(t, err) - - thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes) - blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()} - - proTxHash, err := privVals[0].GetProTxHash(context.Background()) - require.NoError(t, err) - - state, err = blockExec.ApplyBlock(state, proTxHash, blockID, thisBlock) - require.NoError(t, err) - - blockStore.SaveBlock(thisBlock, thisParts, lastCommit) - } - return blockStore, state, blockExec -} diff --git a/internal/blocksync/v2/routine.go b/internal/blocksync/v2/routine.go deleted file mode 100644 index e4ca52add7..0000000000 --- a/internal/blocksync/v2/routine.go +++ /dev/null @@ -1,166 +0,0 @@ -package v2 - -import ( - "fmt" - "strings" - "sync/atomic" - - "github.com/Workiva/go-datastructures/queue" - - "github.com/tendermint/tendermint/libs/log" -) - -type handleFunc = func(event Event) (Event, error) - -const 
historySize = 25 - -// Routine is a structure that models a finite state machine as a serialized -// stream of events processed by a handle function. This Routine structure -// handles the concurrency and messaging guarantees. Events sent via -// `send` are handled by the `handle` function to produce an iterator -// `next()`. Calling `stop()` on a routine will conclude processing of all -// sent events and produce a `final()` event representing the terminal state. -type Routine struct { - name string - handle handleFunc - queue *queue.PriorityQueue - history []Event - out chan Event - fin chan error - rdy chan struct{} - running *uint32 - logger log.Logger - metrics *Metrics -} - -func newRoutine(name string, handleFunc handleFunc, bufferSize int) *Routine { - return &Routine{ - name: name, - handle: handleFunc, - queue: queue.NewPriorityQueue(bufferSize, true), - history: make([]Event, 0, historySize), - out: make(chan Event, bufferSize), - rdy: make(chan struct{}, 1), - fin: make(chan error, 1), - running: new(uint32), - logger: log.NewNopLogger(), - metrics: NopMetrics(), - } -} - -func (rt *Routine) setLogger(logger log.Logger) { - rt.logger = logger -} - -// nolint:unused -func (rt *Routine) setMetrics(metrics *Metrics) { - rt.metrics = metrics -} - -func (rt *Routine) start() { - rt.logger.Info(fmt.Sprintf("%s: run", rt.name)) - running := atomic.CompareAndSwapUint32(rt.running, uint32(0), uint32(1)) - if !running { - panic(fmt.Sprintf("%s is already running", rt.name)) - } - close(rt.rdy) - defer func() { - if r := recover(); r != nil { - var ( - b strings.Builder - j int - ) - for i := len(rt.history) - 1; i >= 0; i-- { - fmt.Fprintf(&b, "%d: %+v\n", j, rt.history[i]) - j++ - } - panic(fmt.Sprintf("%v\nlast events:\n%v", r, b.String())) - } - stopped := atomic.CompareAndSwapUint32(rt.running, uint32(1), uint32(0)) - if !stopped { - panic(fmt.Sprintf("%s failed to stop", rt.name)) - } - }() - - for { - events, err := rt.queue.Get(1) - if err == queue.ErrDisposed { - rt.terminate(nil) - return - } else if err != nil { - rt.terminate(err) - return - } - oEvent, err := rt.handle(events[0].(Event)) - rt.metrics.EventsHandled.With("routine", rt.name).Add(1) - if err != nil { - rt.terminate(err) - return - } - rt.metrics.EventsOut.With("routine", rt.name).Add(1) - rt.logger.Debug(fmt.Sprintf("%s: produced %T %+v", rt.name, oEvent, oEvent)) - - // Skip rTrySchedule and rProcessBlock events as they clutter the history - // due to their frequency.
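The contract described in the Routine comment above (events go in via send, outputs come out of next(), and final() yields the terminal error) is easiest to see in a tiny driver. This usage sketch reuses this package's own names (Event, noOp, newRoutine, priorityNormal) and mirrors the tests in routine_test.go further down; it is a hypothetical in-package example, not part of the original file:

// A hypothetical in-package example; assumes the package's errors and fmt
// imports are available.
func ExampleRoutine() {
	errDone := errors.New("done")
	handler := func(event Event) (Event, error) {
		return noOp, errDone // terminate on the first event
	}
	rt := newRoutine("example", handler, 10)

	go rt.start()
	<-rt.ready() // wait until the routine accepts events

	rt.send(struct{ priorityNormal }{}) // any event type with an embedded priority
	fmt.Println(<-rt.final() == errDone)
	// Output: true
}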
- switch events[0].(type) { - case rTrySchedule: - case rProcessBlock: - default: - rt.history = append(rt.history, events[0].(Event)) - if len(rt.history) > historySize { - rt.history = rt.history[1:] - } - } - - rt.out <- oEvent - } -} - -// XXX: look into returning OpError in the net package -func (rt *Routine) send(event Event) bool { - rt.logger.Debug(fmt.Sprintf("%s: received %T %+v", rt.name, event, event)) - if !rt.isRunning() { - return false - } - err := rt.queue.Put(event) - if err != nil { - rt.metrics.EventsShed.With("routine", rt.name).Add(1) - rt.logger.Error(fmt.Sprintf("%s: send failed, queue was full/stopped", rt.name)) - return false - } - - rt.metrics.EventsSent.With("routine", rt.name).Add(1) - return true -} - -func (rt *Routine) isRunning() bool { - return atomic.LoadUint32(rt.running) == 1 -} - -func (rt *Routine) next() chan Event { - return rt.out -} - -func (rt *Routine) ready() chan struct{} { - return rt.rdy -} - -func (rt *Routine) stop() { - if !rt.isRunning() { // XXX: this should check rt.queue.Disposed() - return - } - - rt.logger.Info(fmt.Sprintf("%s: stop", rt.name)) - rt.queue.Dispose() // this should block until all queue items are free? -} - -func (rt *Routine) final() chan error { - return rt.fin -} - -// XXX: Maybe get rid of this -func (rt *Routine) terminate(reason error) { - // We don't close the rt.out channel here, to avoid spinning on the closed channel - // in the event loop. - rt.fin <- reason -} diff --git a/internal/blocksync/v2/routine_test.go b/internal/blocksync/v2/routine_test.go deleted file mode 100644 index 8f92bee3ee..0000000000 --- a/internal/blocksync/v2/routine_test.go +++ /dev/null @@ -1,163 +0,0 @@ -package v2 - -import ( - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -type eventA struct { - priorityNormal -} - -var errDone = fmt.Errorf("done") - -func simpleHandler(event Event) (Event, error) { - if _, ok := event.(eventA); ok { - return noOp, errDone - } - return noOp, nil -} - -func TestRoutineFinal(t *testing.T) { - var ( - bufferSize = 10 - routine = newRoutine("simpleRoutine", simpleHandler, bufferSize) - ) - - assert.False(t, routine.isRunning(), - "expected an initialized routine to not be running") - go routine.start() - <-routine.ready() - assert.True(t, routine.isRunning(), - "expected a started routine") - - assert.True(t, routine.send(eventA{}), - "expected sending to a ready routine to succeed") - - assert.Equal(t, errDone, <-routine.final(), - "expected the final event to be done") - - assert.False(t, routine.isRunning(), - "expected a completed routine to no longer be running") -} - -func TestRoutineStop(t *testing.T) { - var ( - bufferSize = 10 - routine = newRoutine("simpleRoutine", simpleHandler, bufferSize) - ) - - assert.False(t, routine.send(eventA{}), - "expected sending to an unstarted routine to fail") - - go routine.start() - <-routine.ready() - - assert.True(t, routine.send(eventA{}), - "expected sending to a running routine to succeed") - - routine.stop() - - assert.False(t, routine.send(eventA{}), - "expected sending to a stopped routine to fail") -} - -type finalCount struct { - count int -} - -func (f finalCount) Error() string { - return "end" -} - -func genStatefulHandler(maxCount int) handleFunc { - counter := 0 - return func(event Event) (Event, error) { - if _, ok := event.(eventA); ok { - counter++ - if counter >= maxCount { - return noOp, finalCount{counter} - } - - return eventA{}, nil - } - return noOp, nil - } -} - -func feedback(r *Routine) { - for event 
:= range r.next() { - r.send(event) - } -} - -func TestStatefulRoutine(t *testing.T) { - var ( - count = 10 - handler = genStatefulHandler(count) - bufferSize = 20 - routine = newRoutine("statefulRoutine", handler, bufferSize) - ) - - go routine.start() - go feedback(routine) - <-routine.ready() - - assert.True(t, routine.send(eventA{}), - "expected sending to a started routine to succeed") - - final := <-routine.final() - if fnl, ok := final.(finalCount); ok { - assert.Equal(t, count, fnl.count, - "expected the routine to count to 10") - } else { - t.Fail() - } -} - -type lowPriorityEvent struct { - priorityLow -} - -type highPriorityEvent struct { - priorityHigh -} - -func handleWithPriority(event Event) (Event, error) { - switch event.(type) { - case lowPriorityEvent: - return noOp, nil - case highPriorityEvent: - return noOp, errDone - } - return noOp, nil -} - -func TestPriority(t *testing.T) { - var ( - bufferSize = 20 - routine = newRoutine("priorityRoutine", handleWithPriority, bufferSize) - ) - - go routine.start() - <-routine.ready() - go func() { - for { - routine.send(lowPriorityEvent{}) - time.Sleep(1 * time.Millisecond) - } - }() - time.Sleep(10 * time.Millisecond) - - assert.True(t, routine.isRunning(), - "expected a started routine") - assert.True(t, routine.send(highPriorityEvent{}), - "expected send to succeed even when saturated") - - assert.Equal(t, errDone, <-routine.final()) - assert.False(t, routine.isRunning(), - "expected a stopped routine") -} diff --git a/internal/blocksync/v2/scheduler.go b/internal/blocksync/v2/scheduler.go deleted file mode 100644 index b731d96a4d..0000000000 --- a/internal/blocksync/v2/scheduler.go +++ /dev/null @@ -1,711 +0,0 @@ -package v2 - -import ( - "bytes" - "errors" - "fmt" - "math" - "sort" - "time" - - "github.com/tendermint/tendermint/types" -) - -// Events generated by the scheduler: -// all blocks have been processed -type scFinishedEv struct { - priorityNormal - reason string -} - -func (e scFinishedEv) String() string { - return fmt.Sprintf("scFinishedEv{%v}", e.reason) -} - -// send a blockRequest message -type scBlockRequest struct { - priorityNormal - peerID types.NodeID - height int64 -} - -func (e scBlockRequest) String() string { - return fmt.Sprintf("scBlockRequest{%d from %v}", e.height, e.peerID) -} - -// a block has been received and validated by the scheduler -type scBlockReceived struct { - priorityNormal - peerID types.NodeID - block *types.Block -} - -func (e scBlockReceived) String() string { - return fmt.Sprintf("scBlockReceived{%d#%X from %v}", e.block.Height, e.block.Hash(), e.peerID) -} - -// scheduler detected a peer error -type scPeerError struct { - priorityHigh - peerID types.NodeID - reason error -} - -func (e scPeerError) String() string { - return fmt.Sprintf("scPeerError{%v errored with %v}", e.peerID, e.reason) -} - -// scheduler removed a set of peers (timed out or slow peer) -type scPeersPruned struct { - priorityHigh - peers []types.NodeID -} - -func (e scPeersPruned) String() string { - return fmt.Sprintf("scPeersPruned{%v}", e.peers) -} - -// XXX: make this fatal? 
-// scheduler encountered a fatal error -type scSchedulerFail struct { - priorityHigh - reason error -} - -func (e scSchedulerFail) String() string { - return fmt.Sprintf("scSchedulerFail{%v}", e.reason) -} - -type blockState int - -const ( - blockStateUnknown blockState = iota + 1 // no known peer has this block - blockStateNew // indicates that a peer has reported having this block - blockStatePending // indicates that this block has been requested from a peer - blockStateReceived // indicates that this block has been received from a peer - blockStateProcessed // indicates that this block has been applied -) - -func (e blockState) String() string { - switch e { - case blockStateUnknown: - return "Unknown" - case blockStateNew: - return "New" - case blockStatePending: - return "Pending" - case blockStateReceived: - return "Received" - case blockStateProcessed: - return "Processed" - default: - return fmt.Sprintf("invalid blockState: %d", e) - } -} - -type peerState int - -const ( - peerStateNew = iota + 1 - peerStateReady - peerStateRemoved -) - -func (e peerState) String() string { - switch e { - case peerStateNew: - return "New" - case peerStateReady: - return "Ready" - case peerStateRemoved: - return "Removed" - default: - panic(fmt.Sprintf("unknown peerState: %d", e)) - } -} - -type scPeer struct { - peerID types.NodeID - - // initialized as New when peer is added, updated to Ready when statusUpdate is received, - // updated to Removed when peer is removed - state peerState - - base int64 // updated when statusResponse is received - height int64 // updated when statusResponse is received - lastTouched time.Time - lastRate int64 // last receive rate in bytes -} - -func (p scPeer) String() string { - return fmt.Sprintf("{state %v, base %d, height %d, lastTouched %v, lastRate %d, id %v}", - p.state, p.base, p.height, p.lastTouched, p.lastRate, p.peerID) -} - -func newScPeer(peerID types.NodeID) *scPeer { - return &scPeer{ - peerID: peerID, - state: peerStateNew, - base: -1, - height: -1, - lastTouched: time.Time{}, - } -} - -// The scheduler keeps track of the state of each block and each peer. The -// scheduler will attempt to schedule new block requests with `trySchedule` -// events and remove slow peers with `tryPrune` events. -type scheduler struct { - initHeight int64 - - // next block that needs to be processed. All blocks with smaller height are - // in Processed state. - height int64 - - // lastAdvance tracks the last time a block execution happened. - // syncTimeout is the maximum time the scheduler waits to advance in the block sync process before finishing. - // This covers the cases where there are no peers or all peers have a lower height. - lastAdvance time.Time - syncTimeout time.Duration - - // a map of peerID to scheduler specific peer struct `scPeer` used to keep - // track of peer specific state - peers map[types.NodeID]*scPeer - peerTimeout time.Duration // maximum response time from a peer otherwise prune - minRecvRate int64 // minimum receive rate from peer otherwise prune - - // the maximum number of blocks that should be New, Received or Pending at any point - // in time. This is used to enforce a limit on the blockStates map. - targetPending int - // a list of blocks to be scheduled (New), Pending or Received. Its length should be - // smaller than targetPending.
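The comments on the blockState constants above define the lifecycle the scheduler walks every height through: Unknown, then New (a peer reported it), then Pending (requested), then Received, then Processed. A tiny self-contained walkthrough of those transitions, using the same constant layout; the driver code is illustrative only:

package main

import "fmt"

type blockState int

const (
	blockStateUnknown blockState = iota + 1
	blockStateNew
	blockStatePending
	blockStateReceived
	blockStateProcessed
)

func main() {
	states := map[int64]blockState{}
	const h = int64(7)

	states[h] = blockStateNew      // a peer reports having block 7
	states[h] = blockStatePending  // a request for block 7 goes out
	states[h] = blockStateReceived // the block arrives and goes to the processor
	delete(states, h)              // processed blocks are evicted from the map

	fmt.Println("tracked heights after processing:", len(states)) // 0
}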
- blockStates map[int64]blockState - - // a map of heights to the peer we are waiting for a response from - pendingBlocks map[int64]types.NodeID - - // the time at which a block was put in blockStatePending - pendingTime map[int64]time.Time - - // a map of heights to the peers that put the block in blockStateReceived - receivedBlocks map[int64]types.NodeID -} - -func (sc scheduler) String() string { - return fmt.Sprintf("ih: %d, bst: %v, peers: %v, pblks: %v, ptm: %v, rblks: %v", - sc.initHeight, sc.blockStates, sc.peers, sc.pendingBlocks, sc.pendingTime, sc.receivedBlocks) -} - -func newScheduler(initHeight int64, startTime time.Time) *scheduler { - sc := scheduler{ - initHeight: initHeight, - lastAdvance: startTime, - syncTimeout: 60 * time.Second, - height: initHeight, - blockStates: make(map[int64]blockState), - peers: make(map[types.NodeID]*scPeer), - pendingBlocks: make(map[int64]types.NodeID), - pendingTime: make(map[int64]time.Time), - receivedBlocks: make(map[int64]types.NodeID), - targetPending: 10, // TODO - pass as param - peerTimeout: 15 * time.Second, // TODO - pass as param - minRecvRate: 0, // int64(7680), TODO - pass as param - } - - return &sc -} - -func (sc *scheduler) ensurePeer(peerID types.NodeID) *scPeer { - if _, ok := sc.peers[peerID]; !ok { - sc.peers[peerID] = newScPeer(peerID) - } - return sc.peers[peerID] -} - -func (sc *scheduler) touchPeer(peerID types.NodeID, time time.Time) error { - peer, ok := sc.peers[peerID] - if !ok { - return fmt.Errorf("couldn't find peer %s", peerID) - } - - if peer.state != peerStateReady { - return fmt.Errorf("tried to touch peer in state %s, must be Ready", peer.state) - } - - peer.lastTouched = time - - return nil -} - -func (sc *scheduler) removePeer(peerID types.NodeID) { - peer, ok := sc.peers[peerID] - if !ok { - return - } - if peer.state == peerStateRemoved { - return - } - - for height, pendingPeerID := range sc.pendingBlocks { - if pendingPeerID == peerID { - sc.setStateAtHeight(height, blockStateNew) - delete(sc.pendingTime, height) - delete(sc.pendingBlocks, height) - } - } - - for height, rcvPeerID := range sc.receivedBlocks { - if rcvPeerID == peerID { - sc.setStateAtHeight(height, blockStateNew) - delete(sc.receivedBlocks, height) - } - } - - // remove the blocks from blockStates if the peer removal causes the max peer height to be lower. - peer.state = peerStateRemoved - maxPeerHeight := int64(0) - for _, otherPeer := range sc.peers { - if otherPeer.state != peerStateReady { - continue - } - if otherPeer.peerID != peer.peerID && otherPeer.height > maxPeerHeight { - maxPeerHeight = otherPeer.height - } - } - for h := range sc.blockStates { - if h > maxPeerHeight { - delete(sc.blockStates, h) - } - } -} - -// check if the blockPool is running low and add new blocks in New state to be requested. -// This function is called when there is an increase in the maximum peer height or when -// blocks are processed. -func (sc *scheduler) addNewBlocks() { - if len(sc.blockStates) >= sc.targetPending { - return - } - - for i := sc.height; i < int64(sc.targetPending)+sc.height; i++ { - if i > sc.maxHeight() { - break - } - if sc.getStateAtHeight(i) == blockStateUnknown { - sc.setStateAtHeight(i, blockStateNew) - } - } -} - -func (sc *scheduler) setPeerRange(peerID types.NodeID, base int64, height int64) error { - peer := sc.ensurePeer(peerID) - - if peer.state == peerStateRemoved { - return nil // noop - } - - if height < peer.height { - sc.removePeer(peerID) - return fmt.Errorf("cannot move peer height lower 
from %d to %d", peer.height, height) - } - - if base > height { - sc.removePeer(peerID) - return fmt.Errorf("cannot set peer base higher than its height") - } - - peer.base = base - peer.height = height - peer.state = peerStateReady - - sc.addNewBlocks() - return nil -} - -func (sc *scheduler) getStateAtHeight(height int64) blockState { - if height < sc.height { - return blockStateProcessed - } else if state, ok := sc.blockStates[height]; ok { - return state - } else { - return blockStateUnknown - } -} - -func (sc *scheduler) getPeersWithHeight(height int64) []types.NodeID { - peers := make([]types.NodeID, 0) - for _, peer := range sc.peers { - if peer.state != peerStateReady { - continue - } - if peer.base <= height && peer.height >= height { - peers = append(peers, peer.peerID) - } - } - return peers -} - -func (sc *scheduler) prunablePeers(peerTimout time.Duration, minRecvRate int64, now time.Time) []types.NodeID { - prunable := make([]types.NodeID, 0) - for peerID, peer := range sc.peers { - if peer.state != peerStateReady { - continue - } - if now.Sub(peer.lastTouched) > peerTimout || peer.lastRate < minRecvRate { - prunable = append(prunable, peerID) - } - } - // Tests for handleTryPrunePeer() may fail without sort due to range non-determinism - sort.Sort(PeerByID(prunable)) - return prunable -} - -func (sc *scheduler) setStateAtHeight(height int64, state blockState) { - sc.blockStates[height] = state -} - -// CONTRACT: peer exists and in Ready state. -func (sc *scheduler) markReceived(peerID types.NodeID, height int64, size int64, now time.Time) error { - peer := sc.peers[peerID] - - if state := sc.getStateAtHeight(height); state != blockStatePending || sc.pendingBlocks[height] != peerID { - return fmt.Errorf("received block %d from peer %s without being requested", height, peerID) - } - - pendingTime, ok := sc.pendingTime[height] - if !ok || now.Sub(pendingTime) <= 0 { - return fmt.Errorf("clock error: block %d received at %s but requested at %s", - height, pendingTime, now) - } - - peer.lastRate = size / now.Sub(pendingTime).Nanoseconds() - - sc.setStateAtHeight(height, blockStateReceived) - delete(sc.pendingBlocks, height) - delete(sc.pendingTime, height) - - sc.receivedBlocks[height] = peerID - - return nil -} - -func (sc *scheduler) markPending(peerID types.NodeID, height int64, time time.Time) error { - state := sc.getStateAtHeight(height) - if state != blockStateNew { - return fmt.Errorf("block %d should be in blockStateNew but is %s", height, state) - } - - peer, ok := sc.peers[peerID] - if !ok { - return fmt.Errorf("cannot find peer %s", peerID) - } - - if peer.state != peerStateReady { - return fmt.Errorf("cannot schedule %d from %s in %s", height, peerID, peer.state) - } - - if height > peer.height { - return fmt.Errorf("cannot request height %d from peer %s that is at height %d", - height, peerID, peer.height) - } - - if height < peer.base { - return fmt.Errorf("cannot request height %d for peer %s with base %d", - height, peerID, peer.base) - } - - sc.setStateAtHeight(height, blockStatePending) - sc.pendingBlocks[height] = peerID - sc.pendingTime[height] = time - - return nil -} - -func (sc *scheduler) markProcessed(height int64) error { - // It is possible that a peer error or timeout is handled after the processor - // has processed the block but before the scheduler received this event, so - // when pcBlockProcessed event is received, the block had been requested - // again => don't check the block state. 
- sc.lastAdvance = time.Now() - sc.height = height + 1 - delete(sc.pendingBlocks, height) - delete(sc.pendingTime, height) - delete(sc.receivedBlocks, height) - delete(sc.blockStates, height) - sc.addNewBlocks() - return nil -} - -func (sc *scheduler) allBlocksProcessed() bool { - if len(sc.peers) == 0 { - return false - } - return sc.height >= sc.maxHeight() -} - -// returns max peer height or the last processed block, i.e. sc.height -func (sc *scheduler) maxHeight() int64 { - max := sc.height - 1 - for _, peer := range sc.peers { - if peer.state != peerStateReady { - continue - } - if max < peer.height { - max = peer.height - } - } - return max -} - -// lowest block in sc.blockStates with state == blockStateNew or -1 if no new blocks -func (sc *scheduler) nextHeightToSchedule() int64 { - var min int64 = math.MaxInt64 - for height, state := range sc.blockStates { - if state == blockStateNew && height < min { - min = height - } - } - if min == math.MaxInt64 { - min = -1 - } - return min -} - -func (sc *scheduler) pendingFrom(peerID types.NodeID) []int64 { - var heights []int64 - for height, pendingPeerID := range sc.pendingBlocks { - if pendingPeerID == peerID { - heights = append(heights, height) - } - } - return heights -} - -func (sc *scheduler) selectPeer(height int64) (types.NodeID, error) { - peers := sc.getPeersWithHeight(height) - if len(peers) == 0 { - return "", fmt.Errorf("cannot find peer for height %d", height) - } - - // create a map from number of pending requests to a list - // of peers having that number of pending requests. - pendingFrom := make(map[int][]types.NodeID) - for _, peerID := range peers { - numPending := len(sc.pendingFrom(peerID)) - pendingFrom[numPending] = append(pendingFrom[numPending], peerID) - } - - // find the set of peers with minimum number of pending requests. - var minPending int64 = math.MaxInt64 - for mp := range pendingFrom { - if int64(mp) < minPending { - minPending = int64(mp) - } - } - - sort.Sort(PeerByID(pendingFrom[int(minPending)])) - return pendingFrom[int(minPending)][0], nil -} - -// PeerByID is a list of peers sorted by peerID. -type PeerByID []types.NodeID - -func (peers PeerByID) Len() int { - return len(peers) -} -func (peers PeerByID) Less(i, j int) bool { - return bytes.Compare([]byte(peers[i]), []byte(peers[j])) == -1 -} - -func (peers PeerByID) Swap(i, j int) { - peers[i], peers[j] = peers[j], peers[i] -} - -// Handlers - -// This handler gets the block, performs some validation and then passes it on to the processor. -func (sc *scheduler) handleBlockResponse(event bcBlockResponse) (Event, error) { - err := sc.touchPeer(event.peerID, event.time) - if err != nil { - // peer does not exist OR not ready - return noOp, nil - } - - err = sc.markReceived(event.peerID, event.block.Height, event.size, event.time) - if err != nil { - sc.removePeer(event.peerID) - return scPeerError{peerID: event.peerID, reason: err}, nil - } - - return scBlockReceived{peerID: event.peerID, block: event.block}, nil -} - -func (sc *scheduler) handleNoBlockResponse(event bcNoBlockResponse) (Event, error) { - // No such peer or peer was removed. - peer, ok := sc.peers[event.peerID] - if !ok || peer.state == peerStateRemoved { - return noOp, nil - } - - // The peer may have been just removed due to errors, low speed or timeouts. 
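selectPeer above implements a simple load-balancing rule: among the peers that can serve the height, take one with the fewest outstanding requests, breaking ties by sorted peer ID so scheduling stays deterministic. A self-contained sketch of the same rule, with hypothetical names:

package main

import (
	"fmt"
	"math"
	"sort"
)

// pickPeer groups candidates by outstanding-request count, keeps the
// least-loaded group, and returns its lexicographically smallest ID.
func pickPeer(outstanding map[string]int, candidates []string) (string, error) {
	if len(candidates) == 0 {
		return "", fmt.Errorf("no candidate peers")
	}
	byLoad := make(map[int][]string)
	minLoad := math.MaxInt
	for _, p := range candidates {
		n := outstanding[p]
		byLoad[n] = append(byLoad[n], p)
		if n < minLoad {
			minLoad = n
		}
	}
	group := byLoad[minLoad]
	sort.Strings(group)
	return group[0], nil
}

func main() {
	outstanding := map[string]int{"P1": 3, "P2": 1, "P3": 1}
	peer, _ := pickPeer(outstanding, []string{"P1", "P2", "P3"})
	fmt.Println(peer) // P2: least loaded, and sorts before P3
}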
- sc.removePeer(event.peerID) - - return scPeerError{peerID: event.peerID, - reason: fmt.Errorf("peer %v with base %d height %d claims no block for %d", - event.peerID, peer.base, peer.height, event.height)}, nil -} - -func (sc *scheduler) handleBlockProcessed(event pcBlockProcessed) (Event, error) { - if event.height != sc.height { - panic(fmt.Sprintf("processed height %d, but expected height %d", event.height, sc.height)) - } - - err := sc.markProcessed(event.height) - if err != nil { - return scSchedulerFail{reason: err}, nil - } - - if sc.allBlocksProcessed() { - return scFinishedEv{reason: "processed all blocks"}, nil - } - - return noOp, nil -} - -// Handles an error from the processor. The processor had already cleaned the blocks from -// the peers included in this event. Just attempt to remove the peers. -func (sc *scheduler) handleBlockProcessError(event pcBlockVerificationFailure) (Event, error) { - // The peers may have been just removed due to errors, low speed or timeouts. - sc.removePeer(event.firstPeerID) - if event.firstPeerID != event.secondPeerID { - sc.removePeer(event.secondPeerID) - } - - if sc.allBlocksProcessed() { - return scFinishedEv{reason: "error on last block"}, nil - } - - return noOp, nil -} - -func (sc *scheduler) handleAddNewPeer(event bcAddNewPeer) (Event, error) { - sc.ensurePeer(event.peerID) - return noOp, nil -} - -func (sc *scheduler) handleRemovePeer(event bcRemovePeer) (Event, error) { - sc.removePeer(event.peerID) - - if sc.allBlocksProcessed() { - return scFinishedEv{reason: "removed peer"}, nil - } - - // Return scPeerError so the peer (and all associated blocks) is removed from - // the processor. - return scPeerError{peerID: event.peerID, reason: errors.New("peer was stopped")}, nil -} - -func (sc *scheduler) handleTryPrunePeer(event rTryPrunePeer) (Event, error) { - // Check behavior of peer responsible to deliver block at sc.height. - timeHeightAsked, ok := sc.pendingTime[sc.height] - if ok && time.Since(timeHeightAsked) > sc.peerTimeout { - // A request was sent to a peer for block at sc.height but a response was not received - // from that peer within sc.peerTimeout. Remove the peer. This is to ensure that a peer - // will be timed out even if it sends blocks at higher heights but prevents progress by - // not sending the block at current height. - sc.removePeer(sc.pendingBlocks[sc.height]) - } - - prunablePeers := sc.prunablePeers(sc.peerTimeout, sc.minRecvRate, event.time) - if len(prunablePeers) == 0 { - return noOp, nil - } - for _, peerID := range prunablePeers { - sc.removePeer(peerID) - } - - // If all blocks are processed we should finish. 
- if sc.allBlocksProcessed() { - return scFinishedEv{reason: "after try prune"}, nil - } - - return scPeersPruned{peers: prunablePeers}, nil -} - -func (sc *scheduler) handleResetState(event bcResetState) (Event, error) { - initHeight := event.state.LastBlockHeight + 1 - if initHeight == 1 { - initHeight = event.state.InitialHeight - } - sc.initHeight = initHeight - sc.height = initHeight - sc.lastAdvance = time.Now() - sc.addNewBlocks() - return noOp, nil -} - -func (sc *scheduler) handleTrySchedule(event rTrySchedule) (Event, error) { - if time.Since(sc.lastAdvance) > sc.syncTimeout { - return scFinishedEv{reason: "timeout, no advance"}, nil - } - - nextHeight := sc.nextHeightToSchedule() - if nextHeight == -1 { - return noOp, nil - } - - bestPeerID, err := sc.selectPeer(nextHeight) - if err != nil { - return scSchedulerFail{reason: err}, nil - } - if err := sc.markPending(bestPeerID, nextHeight, event.time); err != nil { - return scSchedulerFail{reason: err}, nil // XXX: peerError might be more appropriate - } - return scBlockRequest{peerID: bestPeerID, height: nextHeight}, nil - -} - -func (sc *scheduler) handleStatusResponse(event bcStatusResponse) (Event, error) { - err := sc.setPeerRange(event.peerID, event.base, event.height) - if err != nil { - return scPeerError{peerID: event.peerID, reason: err}, nil - } - return noOp, nil -} - -func (sc *scheduler) handle(event Event) (Event, error) { - switch event := event.(type) { - case bcResetState: - nextEvent, err := sc.handleResetState(event) - return nextEvent, err - case bcStatusResponse: - nextEvent, err := sc.handleStatusResponse(event) - return nextEvent, err - case bcBlockResponse: - nextEvent, err := sc.handleBlockResponse(event) - return nextEvent, err - case bcNoBlockResponse: - nextEvent, err := sc.handleNoBlockResponse(event) - return nextEvent, err - case rTrySchedule: - nextEvent, err := sc.handleTrySchedule(event) - return nextEvent, err - case bcAddNewPeer: - nextEvent, err := sc.handleAddNewPeer(event) - return nextEvent, err - case bcRemovePeer: - nextEvent, err := sc.handleRemovePeer(event) - return nextEvent, err - case rTryPrunePeer: - nextEvent, err := sc.handleTryPrunePeer(event) - return nextEvent, err - case pcBlockProcessed: - nextEvent, err := sc.handleBlockProcessed(event) - return nextEvent, err - case pcBlockVerificationFailure: - nextEvent, err := sc.handleBlockProcessError(event) - return nextEvent, err - default: - return scSchedulerFail{reason: fmt.Errorf("unknown event %v", event)}, nil - } -} diff --git a/internal/blocksync/v2/scheduler_test.go b/internal/blocksync/v2/scheduler_test.go deleted file mode 100644 index d2c4aab03c..0000000000 --- a/internal/blocksync/v2/scheduler_test.go +++ /dev/null @@ -1,2253 +0,0 @@ -package v2 - -import ( - "fmt" - "math" - "sort" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/internal/state" - "github.com/tendermint/tendermint/types" -) - -type scTestParams struct { - peers map[string]*scPeer - initHeight int64 - height int64 - allB []int64 - pending map[int64]types.NodeID - pendingTime map[int64]time.Time - received map[int64]types.NodeID - peerTimeout time.Duration - minRecvRate int64 - targetPending int - startTime time.Time - syncTimeout time.Duration -} - -func verifyScheduler(sc *scheduler) { - missing := 0 - if sc.maxHeight() >= sc.height { - missing = int(math.Min(float64(sc.targetPending), float64(sc.maxHeight()-sc.height+1))) - } - if len(sc.blockStates) != missing 
{ - panic(fmt.Sprintf("scheduler block length %d different than target %d", len(sc.blockStates), missing)) - } -} - -func newTestScheduler(params scTestParams) *scheduler { - peers := make(map[types.NodeID]*scPeer) - var maxHeight int64 - - initHeight := params.initHeight - if initHeight == 0 { - initHeight = 1 - } - sc := newScheduler(initHeight, params.startTime) - if params.height != 0 { - sc.height = params.height - } - - for id, peer := range params.peers { - peer.peerID = types.NodeID(id) - peers[types.NodeID(id)] = peer - if maxHeight < peer.height { - maxHeight = peer.height - } - } - for _, h := range params.allB { - sc.blockStates[h] = blockStateNew - } - for h, pid := range params.pending { - sc.blockStates[h] = blockStatePending - sc.pendingBlocks[h] = pid - } - for h, tm := range params.pendingTime { - sc.pendingTime[h] = tm - } - for h, pid := range params.received { - sc.blockStates[h] = blockStateReceived - sc.receivedBlocks[h] = pid - } - - sc.peers = peers - sc.peerTimeout = params.peerTimeout - if params.syncTimeout == 0 { - sc.syncTimeout = 10 * time.Second - } else { - sc.syncTimeout = params.syncTimeout - } - - if params.targetPending == 0 { - sc.targetPending = 10 - } else { - sc.targetPending = params.targetPending - } - - sc.minRecvRate = params.minRecvRate - - verifyScheduler(sc) - - return sc -} - -func TestScInit(t *testing.T) { - var ( - initHeight int64 = 5 - sc = newScheduler(initHeight, time.Now()) - ) - assert.Equal(t, blockStateProcessed, sc.getStateAtHeight(initHeight-1)) - assert.Equal(t, blockStateUnknown, sc.getStateAtHeight(initHeight)) - assert.Equal(t, blockStateUnknown, sc.getStateAtHeight(initHeight+1)) -} - -func TestScMaxHeights(t *testing.T) { - - tests := []struct { - name string - sc scheduler - wantMax int64 - }{ - { - name: "no peers", - sc: scheduler{height: 11}, - wantMax: 10, - }, - { - name: "one ready peer", - sc: scheduler{ - height: 3, - peers: map[types.NodeID]*scPeer{"P1": {height: 6, state: peerStateReady}}, - }, - wantMax: 6, - }, - { - name: "ready and removed peers", - sc: scheduler{ - height: 1, - peers: map[types.NodeID]*scPeer{ - "P1": {height: 4, state: peerStateReady}, - "P2": {height: 10, state: peerStateRemoved}}, - }, - wantMax: 4, - }, - { - name: "removed peers", - sc: scheduler{ - height: 1, - peers: map[types.NodeID]*scPeer{ - "P1": {height: 4, state: peerStateRemoved}, - "P2": {height: 10, state: peerStateRemoved}}, - }, - wantMax: 0, - }, - { - name: "new peers", - sc: scheduler{ - height: 1, - peers: map[types.NodeID]*scPeer{ - "P1": {base: -1, height: -1, state: peerStateNew}, - "P2": {base: -1, height: -1, state: peerStateNew}}, - }, - wantMax: 0, - }, - { - name: "mixed peers", - sc: scheduler{ - height: 1, - peers: map[types.NodeID]*scPeer{ - "P1": {height: -1, state: peerStateNew}, - "P2": {height: 10, state: peerStateReady}, - "P3": {height: 20, state: peerStateRemoved}, - "P4": {height: 22, state: peerStateReady}, - }, - }, - wantMax: 22, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - // maxHeight() should not mutate the scheduler - wantSc := tt.sc - - resMax := tt.sc.maxHeight() - assert.Equal(t, tt.wantMax, resMax) - assert.Equal(t, wantSc, tt.sc) - }) - } -} - -func TestScEnsurePeer(t *testing.T) { - - type args struct { - peerID types.NodeID - } - tests := []struct { - name string - fields scTestParams - args args - wantFields scTestParams - }{ - { - name: "add first peer", - fields: scTestParams{}, - args: args{peerID: "P1"}, - wantFields: 
scTestParams{peers: map[string]*scPeer{"P1": {base: -1, height: -1, state: peerStateNew}}}, - }, - { - name: "add second peer", - fields: scTestParams{peers: map[string]*scPeer{"P1": {base: -1, height: -1, state: peerStateNew}}}, - args: args{peerID: "P2"}, - wantFields: scTestParams{peers: map[string]*scPeer{ - "P1": {base: -1, height: -1, state: peerStateNew}, - "P2": {base: -1, height: -1, state: peerStateNew}}}, - }, - { - name: "add duplicate peer is fine", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1}}}, - args: args{peerID: "P1"}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1}}}, - }, - { - name: "add duplicate peer with existing peer in Ready state is noop", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {state: peerStateReady, height: 3}}, - allB: []int64{1, 2, 3}, - }, - args: args{peerID: "P1"}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {state: peerStateReady, height: 3}}, - allB: []int64{1, 2, 3}, - }, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - sc.ensurePeer(tt.args.peerID) - wantSc := newTestScheduler(tt.wantFields) - assert.Equal(t, wantSc, sc, "wanted peers %v, got %v", wantSc.peers, sc.peers) - }) - } -} - -func TestScTouchPeer(t *testing.T) { - now := time.Now() - - type args struct { - peerID types.NodeID - time time.Time - } - - tests := []struct { - name string - fields scTestParams - args args - wantFields scTestParams - wantErr bool - }{ - { - name: "attempt to touch non existing peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {state: peerStateReady, height: 5}}, - allB: []int64{1, 2, 3, 4, 5}, - }, - args: args{peerID: "P2", time: now}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {state: peerStateReady, height: 5}}, - allB: []int64{1, 2, 3, 4, 5}, - }, - wantErr: true, - }, - { - name: "attempt to touch peer in state New", - fields: scTestParams{peers: map[string]*scPeer{"P1": {}}}, - args: args{peerID: "P1", time: now}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {}}}, - wantErr: true, - }, - { - name: "attempt to touch peer in state Removed", - fields: scTestParams{peers: map[string]*scPeer{"P1": {state: peerStateRemoved}, "P2": {state: peerStateReady}}}, - args: args{peerID: "P1", time: now}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {state: peerStateRemoved}, "P2": {state: peerStateReady}}}, - wantErr: true, - }, - { - name: "touch peer in state Ready", - fields: scTestParams{peers: map[string]*scPeer{"P1": {state: peerStateReady, lastTouched: now}}}, - args: args{peerID: "P1", time: now.Add(3 * time.Second)}, - wantFields: scTestParams{peers: map[string]*scPeer{ - "P1": {state: peerStateReady, lastTouched: now.Add(3 * time.Second)}}}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - if err := sc.touchPeer(tt.args.peerID, tt.args.time); (err != nil) != tt.wantErr { - t.Errorf("touchPeer() wantErr %v, error = %v", tt.wantErr, err) - } - wantSc := newTestScheduler(tt.wantFields) - assert.Equal(t, wantSc, sc, "wanted peers %v, got %v", wantSc.peers, sc.peers) - }) - } -} - -func TestScPrunablePeers(t *testing.T) { - now := time.Now() - - type args struct { - threshold time.Duration - time time.Time - minSpeed int64 - } - - tests := []struct { - name string - fields scTestParams - args args - wantResult []types.NodeID - }{ - { - name: "no peers", - fields: 
scTestParams{peers: map[string]*scPeer{}}, - args: args{threshold: time.Second, time: now.Add(time.Second + time.Millisecond), minSpeed: 100}, - wantResult: []types.NodeID{}, - }, - { - name: "mixed peers", - fields: scTestParams{peers: map[string]*scPeer{ - // X - removed, active, fast - "P1": {state: peerStateRemoved, lastTouched: now.Add(time.Second), lastRate: 101}, - // X - ready, active, fast - "P2": {state: peerStateReady, lastTouched: now.Add(time.Second), lastRate: 101}, - // X - removed, active, equal - "P3": {state: peerStateRemoved, lastTouched: now.Add(time.Second), lastRate: 100}, - // V - ready, inactive, equal - "P4": {state: peerStateReady, lastTouched: now, lastRate: 100}, - // V - ready, inactive, slow - "P5": {state: peerStateReady, lastTouched: now, lastRate: 99}, - // V - ready, active, slow - "P6": {state: peerStateReady, lastTouched: now.Add(time.Second), lastRate: 90}, - }}, - args: args{threshold: time.Second, time: now.Add(time.Second + time.Millisecond), minSpeed: 100}, - wantResult: []types.NodeID{"P4", "P5", "P6"}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - // peersSlowerThan should not mutate the scheduler - wantSc := sc - res := sc.prunablePeers(tt.args.threshold, tt.args.minSpeed, tt.args.time) - assert.Equal(t, tt.wantResult, res) - assert.Equal(t, wantSc, sc) - }) - } -} - -func TestScRemovePeer(t *testing.T) { - - type args struct { - peerID types.NodeID - } - tests := []struct { - name string - fields scTestParams - args args - wantFields scTestParams - wantErr bool - }{ - { - name: "remove non existing peer", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1}}}, - args: args{peerID: "P2"}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1}}}, - }, - { - name: "remove single New peer", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1}}}, - args: args{peerID: "P1"}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1, state: peerStateRemoved}}}, - }, - { - name: "remove one of two New peers", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1}, "P2": {height: -1}}}, - args: args{peerID: "P1"}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1, state: peerStateRemoved}, "P2": {height: -1}}}, - }, - { - name: "remove one Ready peer, all peers removed", - fields: scTestParams{ - peers: map[string]*scPeer{ - "P1": {height: 10, state: peerStateRemoved}, - "P2": {height: 5, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5}, - }, - args: args{peerID: "P2"}, - wantFields: scTestParams{peers: map[string]*scPeer{ - "P1": {height: 10, state: peerStateRemoved}, - "P2": {height: 5, state: peerStateRemoved}}, - }, - }, - { - name: "attempt to remove already removed peer", - fields: scTestParams{ - height: 8, - peers: map[string]*scPeer{ - "P1": {height: 10, state: peerStateRemoved}, - "P2": {height: 11, state: peerStateReady}}, - allB: []int64{8, 9, 10, 11}, - }, - args: args{peerID: "P1"}, - wantFields: scTestParams{ - height: 8, - peers: map[string]*scPeer{ - "P1": {height: 10, state: peerStateRemoved}, - "P2": {height: 11, state: peerStateReady}}, - allB: []int64{8, 9, 10, 11}}, - }, - { - name: "remove Ready peer with blocks requested", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}}, - allB: []int64{1, 2, 3}, - pending: map[int64]types.NodeID{1: "P1"}, - }, - args: args{peerID: "P1"}, - wantFields: scTestParams{ - 
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateRemoved}}, - allB: []int64{}, - pending: map[int64]types.NodeID{}, - }, - }, - { - name: "remove Ready peer with blocks received", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}}, - allB: []int64{1, 2, 3}, - received: map[int64]types.NodeID{1: "P1"}, - }, - args: args{peerID: "P1"}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateRemoved}}, - allB: []int64{}, - received: map[int64]types.NodeID{}, - }, - }, - { - name: "remove Ready peer with blocks received and requested (not yet received)", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - pending: map[int64]types.NodeID{1: "P1", 3: "P1"}, - received: map[int64]types.NodeID{2: "P1", 4: "P1"}, - }, - args: args{peerID: "P1"}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}}, - allB: []int64{}, - pending: map[int64]types.NodeID{}, - received: map[int64]types.NodeID{}, - }, - }, - { - name: "remove Ready peer from multiple peers set, with blocks received and requested (not yet received)", - fields: scTestParams{ - peers: map[string]*scPeer{ - "P1": {height: 6, state: peerStateReady}, - "P2": {height: 6, state: peerStateReady}, - }, - allB: []int64{1, 2, 3, 4, 5, 6}, - pending: map[int64]types.NodeID{1: "P1", 3: "P2", 6: "P1"}, - received: map[int64]types.NodeID{2: "P1", 4: "P2", 5: "P2"}, - }, - args: args{peerID: "P1"}, - wantFields: scTestParams{ - peers: map[string]*scPeer{ - "P1": {height: 6, state: peerStateRemoved}, - "P2": {height: 6, state: peerStateReady}, - }, - allB: []int64{1, 2, 3, 4, 5, 6}, - pending: map[int64]types.NodeID{3: "P2"}, - received: map[int64]types.NodeID{4: "P2", 5: "P2"}, - }, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - sc.removePeer(tt.args.peerID) - wantSc := newTestScheduler(tt.wantFields) - assert.Equal(t, wantSc, sc, "wanted peers %v, got %v", wantSc.peers, sc.peers) - }) - } -} - -func TestScSetPeerRange(t *testing.T) { - - type args struct { - peerID types.NodeID - base int64 - height int64 - } - tests := []struct { - name string - fields scTestParams - args args - wantFields scTestParams - wantErr bool - }{ - { - name: "change height of non existing peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}}, - args: args{peerID: "P2", height: 4}, - wantFields: scTestParams{ - peers: map[string]*scPeer{ - "P1": {height: 2, state: peerStateReady}, - "P2": {height: 4, state: peerStateReady}, - }, - allB: []int64{1, 2, 3, 4}}, - }, - { - name: "increase height of removed peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateRemoved}}}, - args: args{peerID: "P1", height: 4}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: 2, state: peerStateRemoved}}}, - }, - { - name: "decrease height of single peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}}, - args: args{peerID: "P1", height: 2}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}}, - allB: []int64{}}, - wantErr: true, - }, - { - name: "increase height of single peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: 
peerStateReady}}, - allB: []int64{1, 2}}, - args: args{peerID: "P1", height: 4}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}}, - }, - { - name: "noop height change of single peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}}, - args: args{peerID: "P1", height: 4}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}}, - }, - { - name: "add peer with huge height 10**10 ", - fields: scTestParams{ - peers: map[string]*scPeer{"P2": {height: -1, state: peerStateNew}}, - targetPending: 4, - }, - args: args{peerID: "P2", height: 10000000000}, - wantFields: scTestParams{ - targetPending: 4, - peers: map[string]*scPeer{"P2": {height: 10000000000, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}}, - }, - { - name: "add peer with base > height should error", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}}, - args: args{peerID: "P1", base: 6, height: 5}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}}}, - wantErr: true, - }, - { - name: "add peer with base == height is fine", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateNew}}, - targetPending: 4, - }, - args: args{peerID: "P1", base: 6, height: 6}, - wantFields: scTestParams{ - targetPending: 4, - peers: map[string]*scPeer{"P1": {base: 6, height: 6, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - err := sc.setPeerRange(tt.args.peerID, tt.args.base, tt.args.height) - if (err != nil) != tt.wantErr { - t.Errorf("setPeerHeight() wantErr %v, error = %v", tt.wantErr, err) - } - wantSc := newTestScheduler(tt.wantFields) - assert.Equal(t, wantSc, sc, "wanted peers %v, got %v", wantSc.peers, sc.peers) - }) - } -} - -func TestScGetPeersWithHeight(t *testing.T) { - - type args struct { - height int64 - } - tests := []struct { - name string - fields scTestParams - args args - wantResult []types.NodeID - }{ - { - name: "no peers", - fields: scTestParams{peers: map[string]*scPeer{}}, - args: args{height: 10}, - wantResult: []types.NodeID{}, - }, - { - name: "only new peers", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1, state: peerStateNew}}}, - args: args{height: 10}, - wantResult: []types.NodeID{}, - }, - { - name: "only Removed peers", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}}}, - args: args{height: 2}, - wantResult: []types.NodeID{}, - }, - { - name: "one Ready shorter peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - }, - args: args{height: 5}, - wantResult: []types.NodeID{}, - }, - { - name: "one Ready equal peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - }, - args: args{height: 4}, - wantResult: []types.NodeID{"P1"}, - }, - { - name: "one Ready higher peer", - fields: scTestParams{ - targetPending: 4, - peers: map[string]*scPeer{"P1": {height: 20, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - }, - args: args{height: 4}, - wantResult: []types.NodeID{"P1"}, - }, - { - name: "one 
Ready higher peer at base", - fields: scTestParams{ - targetPending: 4, - peers: map[string]*scPeer{"P1": {base: 4, height: 20, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - }, - args: args{height: 4}, - wantResult: []types.NodeID{"P1"}, - }, - { - name: "one Ready higher peer with higher base", - fields: scTestParams{ - targetPending: 4, - peers: map[string]*scPeer{"P1": {base: 10, height: 20, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - }, - args: args{height: 4}, - wantResult: []types.NodeID{}, - }, - { - name: "multiple mixed peers", - fields: scTestParams{ - height: 8, - peers: map[string]*scPeer{ - "P1": {height: -1, state: peerStateNew}, - "P2": {height: 10, state: peerStateReady}, - "P3": {height: 5, state: peerStateReady}, - "P4": {height: 20, state: peerStateRemoved}, - "P5": {height: 11, state: peerStateReady}}, - allB: []int64{8, 9, 10, 11}, - }, - args: args{height: 8}, - wantResult: []types.NodeID{"P2", "P5"}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - // getPeersWithHeight should not mutate the scheduler - wantSc := sc - res := sc.getPeersWithHeight(tt.args.height) - sort.Sort(PeerByID(res)) - assert.Equal(t, tt.wantResult, res) - assert.Equal(t, wantSc, sc) - }) - } -} - -func TestScMarkPending(t *testing.T) { - now := time.Now() - - type args struct { - peerID types.NodeID - height int64 - tm time.Time - } - tests := []struct { - name string - fields scTestParams - args args - wantFields scTestParams - wantErr bool - }{ - { - name: "attempt mark pending an unknown block above height", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}}, - args: args{peerID: "P1", height: 3, tm: now}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}}, - wantErr: true, - }, - { - name: "attempt mark pending an unknown block below base", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {base: 4, height: 6, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6}}, - args: args{peerID: "P1", height: 3, tm: now}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {base: 4, height: 6, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6}}, - wantErr: true, - }, - { - name: "attempt mark pending from non existing peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}}, - args: args{peerID: "P2", height: 1, tm: now}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}}, - wantErr: true, - }, - { - name: "mark pending from Removed peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateRemoved}}}, - args: args{peerID: "P1", height: 1, tm: now}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateRemoved}}}, - wantErr: true, - }, - { - name: "mark pending from New peer", - fields: scTestParams{ - peers: map[string]*scPeer{ - "P1": {height: 4, state: peerStateReady}, - "P2": {height: 4, state: peerStateNew}, - }, - allB: []int64{1, 2, 3, 4}, - }, - args: args{peerID: "P2", height: 2, tm: now}, - wantFields: scTestParams{ - peers: map[string]*scPeer{ - "P1": {height: 4, state: peerStateReady}, - "P2": {height: 4, state: peerStateNew}, - }, - allB: []int64{1, 2, 3, 4}, - }, - wantErr: true, - }, - { - name: "mark pending from 
short peer", - fields: scTestParams{ - peers: map[string]*scPeer{ - "P1": {height: 4, state: peerStateReady}, - "P2": {height: 2, state: peerStateReady}, - }, - allB: []int64{1, 2, 3, 4}, - }, - args: args{peerID: "P2", height: 3, tm: now}, - wantFields: scTestParams{ - peers: map[string]*scPeer{ - "P1": {height: 4, state: peerStateReady}, - "P2": {height: 2, state: peerStateReady}, - }, - allB: []int64{1, 2, 3, 4}, - }, - wantErr: true, - }, - { - name: "mark pending all good", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}, - pending: map[int64]types.NodeID{1: "P1"}, - pendingTime: map[int64]time.Time{1: now}, - }, - args: args{peerID: "P1", height: 2, tm: now.Add(time.Millisecond)}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}, - pending: map[int64]types.NodeID{1: "P1", 2: "P1"}, - pendingTime: map[int64]time.Time{1: now, 2: now.Add(time.Millisecond)}, - }, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - if err := sc.markPending(tt.args.peerID, tt.args.height, tt.args.tm); (err != nil) != tt.wantErr { - t.Errorf("markPending() wantErr %v, error = %v", tt.wantErr, err) - } - wantSc := newTestScheduler(tt.wantFields) - assert.Equal(t, wantSc, sc) - }) - } -} - -func TestScMarkReceived(t *testing.T) { - now := time.Now() - - type args struct { - peerID types.NodeID - height int64 - size int64 - tm time.Time - } - tests := []struct { - name string - fields scTestParams - args args - wantFields scTestParams - wantErr bool - }{ - { - name: "received from non existing peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}}, - args: args{peerID: "P2", height: 1, size: 1000, tm: now}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}}, - wantErr: true, - }, - { - name: "received from removed peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateRemoved}}}, - args: args{peerID: "P1", height: 1, size: 1000, tm: now}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateRemoved}}}, - wantErr: true, - }, - { - name: "received from unsolicited peer", - fields: scTestParams{ - peers: map[string]*scPeer{ - "P1": {height: 4, state: peerStateReady}, - "P2": {height: 4, state: peerStateReady}, - }, - allB: []int64{1, 2, 3, 4}, - pending: map[int64]types.NodeID{1: "P1", 2: "P2", 3: "P2", 4: "P1"}, - }, - args: args{peerID: "P1", height: 2, size: 1000, tm: now}, - wantFields: scTestParams{ - peers: map[string]*scPeer{ - "P1": {height: 4, state: peerStateReady}, - "P2": {height: 4, state: peerStateReady}, - }, - allB: []int64{1, 2, 3, 4}, - pending: map[int64]types.NodeID{1: "P1", 2: "P2", 3: "P2", 4: "P1"}, - }, - wantErr: true, - }, - { - name: "received but blockRequest not sent", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - pending: map[int64]types.NodeID{}, - }, - args: args{peerID: "P1", height: 2, size: 1000, tm: now}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - pending: map[int64]types.NodeID{}, - }, - wantErr: true, - }, - { - name: "received with bad timestamp", - fields: scTestParams{ - peers: 
map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}, - pending: map[int64]types.NodeID{1: "P1", 2: "P1"}, - pendingTime: map[int64]time.Time{1: now, 2: now.Add(time.Second)}, - }, - args: args{peerID: "P1", height: 2, size: 1000, tm: now}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}, - pending: map[int64]types.NodeID{1: "P1", 2: "P1"}, - pendingTime: map[int64]time.Time{1: now, 2: now.Add(time.Second)}, - }, - wantErr: true, - }, - { - name: "received all good", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}, - pending: map[int64]types.NodeID{1: "P1", 2: "P1"}, - pendingTime: map[int64]time.Time{1: now, 2: now}, - }, - args: args{peerID: "P1", height: 2, size: 1000, tm: now.Add(time.Millisecond)}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}, - pending: map[int64]types.NodeID{1: "P1"}, - pendingTime: map[int64]time.Time{1: now}, - received: map[int64]types.NodeID{2: "P1"}, - }, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - if err := sc.markReceived( - tt.args.peerID, - tt.args.height, - tt.args.size, - now.Add(time.Second)); (err != nil) != tt.wantErr { - t.Errorf("markReceived() wantErr %v, error = %v", tt.wantErr, err) - } - wantSc := newTestScheduler(tt.wantFields) - assert.Equal(t, wantSc, sc) - }) - } -} - -func TestScMarkProcessed(t *testing.T) { - now := time.Now() - - type args struct { - height int64 - } - tests := []struct { - name string - fields scTestParams - args args - wantFields scTestParams - wantErr bool - }{ - { - name: "processed an unreceived block", - fields: scTestParams{ - height: 2, - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{2}, - pending: map[int64]types.NodeID{2: "P1"}, - pendingTime: map[int64]time.Time{2: now}, - targetPending: 1, - }, - args: args{height: 2}, - wantFields: scTestParams{ - height: 3, - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{3}, - targetPending: 1, - }, - }, - { - name: "mark processed success", - fields: scTestParams{ - height: 1, - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}, - pending: map[int64]types.NodeID{2: "P1"}, - pendingTime: map[int64]time.Time{2: now}, - received: map[int64]types.NodeID{1: "P1"}}, - args: args{height: 1}, - wantFields: scTestParams{ - height: 2, - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{2}, - pending: map[int64]types.NodeID{2: "P1"}, - pendingTime: map[int64]time.Time{2: now}}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - oldBlockState := sc.getStateAtHeight(tt.args.height) - if err := sc.markProcessed(tt.args.height); (err != nil) != tt.wantErr { - t.Errorf("markProcessed() wantErr %v, error = %v", tt.wantErr, err) - } - if tt.wantErr { - assert.Equal(t, oldBlockState, sc.getStateAtHeight(tt.args.height)) - } else { - assert.Equal(t, blockStateProcessed, sc.getStateAtHeight(tt.args.height)) - } - wantSc := newTestScheduler(tt.wantFields) - checkSameScheduler(t, wantSc, sc) - }) - } -} - -func TestScResetState(t *testing.T) { - tests := []struct { - name string - fields scTestParams - state state.State - wantFields scTestParams 
- }{ - { - name: "updates height and initHeight", - fields: scTestParams{ - height: 0, - initHeight: 0, - }, - state: state.State{LastBlockHeight: 7}, - wantFields: scTestParams{ - height: 8, - initHeight: 8, - }, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - e, err := sc.handleResetState(bcResetState{state: tt.state}) - require.NoError(t, err) - assert.Equal(t, e, noOp) - wantSc := newTestScheduler(tt.wantFields) - checkSameScheduler(t, wantSc, sc) - }) - } -} - -func TestScAllBlocksProcessed(t *testing.T) { - now := time.Now() - - tests := []struct { - name string - fields scTestParams - wantResult bool - }{ - { - name: "no blocks, no peers", - fields: scTestParams{}, - wantResult: false, - }, - { - name: "only New blocks", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - }, - wantResult: false, - }, - { - name: "only Pending blocks", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - pending: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"}, - pendingTime: map[int64]time.Time{1: now, 2: now, 3: now, 4: now}, - }, - wantResult: false, - }, - { - name: "only Received blocks", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - received: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"}, - }, - wantResult: false, - }, - { - name: "only Processed blocks plus highest is received", - fields: scTestParams{ - height: 4, - peers: map[string]*scPeer{ - "P1": {height: 4, state: peerStateReady}}, - allB: []int64{4}, - received: map[int64]types.NodeID{4: "P1"}, - }, - wantResult: true, - }, - { - name: "mixed block states", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - pending: map[int64]types.NodeID{2: "P1", 4: "P1"}, - pendingTime: map[int64]time.Time{2: now, 4: now}, - }, - wantResult: false, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - // allBlocksProcessed() should not mutate the scheduler - wantSc := sc - res := sc.allBlocksProcessed() - assert.Equal(t, tt.wantResult, res) - checkSameScheduler(t, wantSc, sc) - }) - } -} - -func TestScNextHeightToSchedule(t *testing.T) { - now := time.Now() - - tests := []struct { - name string - fields scTestParams - wantHeight int64 - }{ - { - name: "no blocks", - fields: scTestParams{initHeight: 11, height: 11}, - wantHeight: -1, - }, - { - name: "only New blocks", - fields: scTestParams{ - initHeight: 3, - peers: map[string]*scPeer{"P1": {height: 6, state: peerStateReady}}, - allB: []int64{3, 4, 5, 6}, - }, - wantHeight: 3, - }, - { - name: "only Pending blocks", - fields: scTestParams{ - initHeight: 1, - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - pending: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"}, - pendingTime: map[int64]time.Time{1: now, 2: now, 3: now, 4: now}, - }, - wantHeight: -1, - }, - { - name: "only Received blocks", - fields: scTestParams{ - initHeight: 1, - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - received: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"}, - }, - wantHeight: -1, - }, - { - name: "only Processed blocks", - 
fields: scTestParams{ - initHeight: 1, - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - }, - wantHeight: 1, - }, - { - name: "mixed block states", - fields: scTestParams{ - initHeight: 1, - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - pending: map[int64]types.NodeID{2: "P1"}, - pendingTime: map[int64]time.Time{2: now}, - }, - wantHeight: 1, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - // nextHeightToSchedule() should not mutate the scheduler - wantSc := sc - - resMin := sc.nextHeightToSchedule() - assert.Equal(t, tt.wantHeight, resMin) - checkSameScheduler(t, wantSc, sc) - }) - } -} - -func TestScSelectPeer(t *testing.T) { - - type args struct { - height int64 - } - tests := []struct { - name string - fields scTestParams - args args - wantResult types.NodeID - wantError bool - }{ - { - name: "no peers", - fields: scTestParams{peers: map[string]*scPeer{}}, - args: args{height: 10}, - wantResult: "", - wantError: true, - }, - { - name: "only new peers", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1, state: peerStateNew}}}, - args: args{height: 10}, - wantResult: "", - wantError: true, - }, - { - name: "only Removed peers", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}}}, - args: args{height: 2}, - wantResult: "", - wantError: true, - }, - { - name: "one Ready shorter peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - }, - args: args{height: 5}, - wantResult: "", - wantError: true, - }, - { - name: "one Ready equal peer", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - }, - args: args{height: 4}, - wantResult: "P1", - }, - { - name: "one Ready higher peer", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: 6, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6}, - }, - args: args{height: 4}, - wantResult: "P1", - }, - { - name: "one Ready higher peer with higher base", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {base: 4, height: 6, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6}, - }, - args: args{height: 3}, - wantResult: "", - wantError: true, - }, - { - name: "many Ready higher peers with different number of pending requests", - fields: scTestParams{ - height: 4, - peers: map[string]*scPeer{ - "P1": {height: 8, state: peerStateReady}, - "P2": {height: 9, state: peerStateReady}}, - allB: []int64{4, 5, 6, 7, 8, 9}, - pending: map[int64]types.NodeID{ - 4: "P1", 6: "P1", - 5: "P2", - }, - }, - args: args{height: 4}, - wantResult: "P2", - }, - { - name: "many Ready higher peers with same number of pending requests", - fields: scTestParams{ - peers: map[string]*scPeer{ - "P2": {height: 20, state: peerStateReady}, - "P1": {height: 15, state: peerStateReady}, - "P3": {height: 15, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, - pending: map[int64]types.NodeID{ - 1: "P1", 2: "P1", - 3: "P3", 4: "P3", - 5: "P2", 6: "P2", - }, - }, - args: args{height: 7}, - wantResult: "P1", - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - // selectPeer should not mutate the scheduler - wantSc := sc - res, err := sc.selectPeer(tt.args.height) - assert.Equal(t, 
tt.wantResult, res) - assert.Equal(t, tt.wantError, err != nil) - checkSameScheduler(t, wantSc, sc) - }) - } -} - -// makeScBlock makes an empty block. -func makeScBlock(height int64) *types.Block { - return &types.Block{Header: types.Header{Height: height}} -} - -// used in place of assert.Equal(t, want, actual) to avoid failures due to -// scheduler.lastAdvanced timestamp inequalities. -func checkSameScheduler(t *testing.T, want *scheduler, actual *scheduler) { - assert.Equal(t, want.initHeight, actual.initHeight) - assert.Equal(t, want.height, actual.height) - assert.Equal(t, want.peers, actual.peers) - assert.Equal(t, want.blockStates, actual.blockStates) - assert.Equal(t, want.pendingBlocks, actual.pendingBlocks) - assert.Equal(t, want.pendingTime, actual.pendingTime) - assert.Equal(t, want.blockStates, actual.blockStates) - assert.Equal(t, want.receivedBlocks, actual.receivedBlocks) - assert.Equal(t, want.blockStates, actual.blockStates) -} - -// checkScResults checks scheduler handler test results -func checkScResults(t *testing.T, wantErr bool, err error, wantEvent Event, event Event) { - if (err != nil) != wantErr { - t.Errorf("error = %v, wantErr %v", err, wantErr) - return - } - if !assert.IsType(t, wantEvent, event) { - t.Log(fmt.Sprintf("Wrong type received, got: %v", event)) - } - switch wantEvent := wantEvent.(type) { - case scPeerError: - assert.Equal(t, wantEvent.peerID, event.(scPeerError).peerID) - assert.Equal(t, wantEvent.reason != nil, event.(scPeerError).reason != nil) - case scBlockReceived: - assert.Equal(t, wantEvent.peerID, event.(scBlockReceived).peerID) - assert.Equal(t, wantEvent.block, event.(scBlockReceived).block) - case scSchedulerFail: - assert.Equal(t, wantEvent.reason != nil, event.(scSchedulerFail).reason != nil) - } -} - -func TestScHandleBlockResponse(t *testing.T) { - now := time.Now() - block6FromP1 := bcBlockResponse{ - time: now.Add(time.Millisecond), - peerID: types.NodeID("P1"), - size: 100, - block: makeScBlock(6), - } - - type args struct { - event bcBlockResponse - } - - tests := []struct { - name string - fields scTestParams - args args - wantEvent Event - wantErr bool - }{ - { - name: "empty scheduler", - fields: scTestParams{}, - args: args{event: block6FromP1}, - wantEvent: noOpEvent{}, - }, - { - name: "block from removed peer", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: 8, state: peerStateRemoved}}}, - args: args{event: block6FromP1}, - wantEvent: noOpEvent{}, - }, - { - name: "block we haven't asked for", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6, 7, 8}}, - args: args{event: block6FromP1}, - wantEvent: scPeerError{peerID: "P1", reason: fmt.Errorf("some error")}, - }, - { - name: "block from wrong peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P2": {height: 8, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6, 7, 8}, - pending: map[int64]types.NodeID{6: "P2"}, - pendingTime: map[int64]time.Time{6: now}, - }, - args: args{event: block6FromP1}, - wantEvent: noOpEvent{}, - }, - { - name: "block with bad timestamp", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6, 7, 8}, - pending: map[int64]types.NodeID{6: "P1"}, - pendingTime: map[int64]time.Time{6: now.Add(time.Second)}, - }, - args: args{event: block6FromP1}, - wantEvent: scPeerError{peerID: "P1", reason: fmt.Errorf("some error")}, - }, - { - name: "good block, accept", - fields: 
scTestParams{ - peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6, 7, 8}, - pending: map[int64]types.NodeID{6: "P1"}, - pendingTime: map[int64]time.Time{6: now}, - }, - args: args{event: block6FromP1}, - wantEvent: scBlockReceived{peerID: "P1", block: block6FromP1.block}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - event, err := sc.handleBlockResponse(tt.args.event) - checkScResults(t, tt.wantErr, err, tt.wantEvent, event) - }) - } -} - -func TestScHandleNoBlockResponse(t *testing.T) { - now := time.Now() - noBlock6FromP1 := bcNoBlockResponse{ - time: now.Add(time.Millisecond), - peerID: types.NodeID("P1"), - height: 6, - } - - tests := []struct { - name string - fields scTestParams - wantEvent Event - wantFields scTestParams - wantErr bool - }{ - { - name: "empty scheduler", - fields: scTestParams{}, - wantEvent: noOpEvent{}, - wantFields: scTestParams{}, - }, - { - name: "noBlock from removed peer", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: 8, state: peerStateRemoved}}}, - wantEvent: noOpEvent{}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: 8, state: peerStateRemoved}}}, - }, - { - name: "for block we haven't asked for", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6, 7, 8}}, - wantEvent: scPeerError{peerID: "P1", reason: fmt.Errorf("some error")}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: 8, state: peerStateRemoved}}}, - }, - { - name: "noBlock from peer we don't have", - fields: scTestParams{ - peers: map[string]*scPeer{"P2": {height: 8, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6, 7, 8}, - pending: map[int64]types.NodeID{6: "P2"}, - pendingTime: map[int64]time.Time{6: now}, - }, - wantEvent: noOpEvent{}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P2": {height: 8, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6, 7, 8}, - pending: map[int64]types.NodeID{6: "P2"}, - pendingTime: map[int64]time.Time{6: now}, - }, - }, - { - name: "noBlock from existing peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6, 7, 8}, - pending: map[int64]types.NodeID{6: "P1"}, - pendingTime: map[int64]time.Time{6: now}, - }, - wantEvent: scPeerError{peerID: "P1", reason: fmt.Errorf("some error")}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: 8, state: peerStateRemoved}}}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - event, err := sc.handleNoBlockResponse(noBlock6FromP1) - checkScResults(t, tt.wantErr, err, tt.wantEvent, event) - wantSc := newTestScheduler(tt.wantFields) - assert.Equal(t, wantSc, sc) - }) - } -} - -func TestScHandleBlockProcessed(t *testing.T) { - now := time.Now() - processed6FromP1 := pcBlockProcessed{ - peerID: types.NodeID("P1"), - height: 6, - } - - type args struct { - event pcBlockProcessed - } - - tests := []struct { - name string - fields scTestParams - args args - wantEvent Event - wantErr bool - }{ - { - name: "empty scheduler", - fields: scTestParams{height: 6}, - args: args{event: processed6FromP1}, - wantEvent: noOpEvent{}, - }, - { - name: "processed block we don't have", - fields: scTestParams{ - initHeight: 6, - peers: map[string]*scPeer{"P1": {height: 8, state: 
peerStateReady}}, - allB: []int64{6, 7, 8}, - pending: map[int64]types.NodeID{6: "P1"}, - pendingTime: map[int64]time.Time{6: now}, - }, - args: args{event: processed6FromP1}, - wantEvent: noOpEvent{}, - }, - { - name: "processed block ok, we processed all blocks", - fields: scTestParams{ - initHeight: 6, - peers: map[string]*scPeer{"P1": {height: 7, state: peerStateReady}}, - allB: []int64{6, 7}, - received: map[int64]types.NodeID{6: "P1", 7: "P1"}, - }, - args: args{event: processed6FromP1}, - wantEvent: scFinishedEv{}, - }, - { - name: "processed block ok, we still have blocks to process", - fields: scTestParams{ - initHeight: 6, - peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}}, - allB: []int64{6, 7, 8}, - pending: map[int64]types.NodeID{7: "P1", 8: "P1"}, - received: map[int64]types.NodeID{6: "P1"}, - }, - args: args{event: processed6FromP1}, - wantEvent: noOpEvent{}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - event, err := sc.handleBlockProcessed(tt.args.event) - checkScResults(t, tt.wantErr, err, tt.wantEvent, event) - }) - } -} - -func TestScHandleBlockVerificationFailure(t *testing.T) { - now := time.Now() - - type args struct { - event pcBlockVerificationFailure - } - - tests := []struct { - name string - fields scTestParams - args args - wantEvent Event - wantErr bool - }{ - { - name: "empty scheduler", - fields: scTestParams{}, - args: args{event: pcBlockVerificationFailure{height: 10, firstPeerID: "P1", secondPeerID: "P1"}}, - wantEvent: noOpEvent{}, - }, - { - name: "failed block we don't have, single peer is still removed", - fields: scTestParams{ - initHeight: 6, - peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}}, - allB: []int64{6, 7, 8}, - pending: map[int64]types.NodeID{6: "P1"}, - pendingTime: map[int64]time.Time{6: now}, - }, - args: args{event: pcBlockVerificationFailure{height: 10, firstPeerID: "P1", secondPeerID: "P1"}}, - wantEvent: scFinishedEv{}, - }, - { - name: "failed block we don't have, one of two peers are removed", - fields: scTestParams{ - initHeight: 6, - peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}, "P2": {height: 8, state: peerStateReady}}, - allB: []int64{6, 7, 8}, - pending: map[int64]types.NodeID{6: "P1"}, - pendingTime: map[int64]time.Time{6: now}, - }, - args: args{event: pcBlockVerificationFailure{height: 10, firstPeerID: "P1", secondPeerID: "P1"}}, - wantEvent: noOpEvent{}, - }, - { - name: "failed block, all blocks are processed after removal", - fields: scTestParams{ - initHeight: 6, - peers: map[string]*scPeer{"P1": {height: 7, state: peerStateReady}}, - allB: []int64{6, 7}, - received: map[int64]types.NodeID{6: "P1", 7: "P1"}, - }, - args: args{event: pcBlockVerificationFailure{height: 7, firstPeerID: "P1", secondPeerID: "P1"}}, - wantEvent: scFinishedEv{}, - }, - { - name: "failed block, we still have blocks to process", - fields: scTestParams{ - initHeight: 5, - peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}, "P2": {height: 8, state: peerStateReady}}, - allB: []int64{5, 6, 7, 8}, - pending: map[int64]types.NodeID{7: "P1", 8: "P1"}, - received: map[int64]types.NodeID{5: "P1", 6: "P1"}, - }, - args: args{event: pcBlockVerificationFailure{height: 5, firstPeerID: "P1", secondPeerID: "P1"}}, - wantEvent: noOpEvent{}, - }, - { - name: "failed block, H+1 and H+2 delivered by different peers, we still have blocks to process", - fields: scTestParams{ - initHeight: 5, - peers: 
map[string]*scPeer{ - "P1": {height: 8, state: peerStateReady}, - "P2": {height: 8, state: peerStateReady}, - "P3": {height: 8, state: peerStateReady}, - }, - allB: []int64{5, 6, 7, 8}, - pending: map[int64]types.NodeID{7: "P1", 8: "P1"}, - received: map[int64]types.NodeID{5: "P1", 6: "P1"}, - }, - args: args{event: pcBlockVerificationFailure{height: 5, firstPeerID: "P1", secondPeerID: "P2"}}, - wantEvent: noOpEvent{}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - event, err := sc.handleBlockProcessError(tt.args.event) - checkScResults(t, tt.wantErr, err, tt.wantEvent, event) - }) - } -} - -func TestScHandleAddNewPeer(t *testing.T) { - addP1 := bcAddNewPeer{ - peerID: types.NodeID("P1"), - } - type args struct { - event bcAddNewPeer - } - - tests := []struct { - name string - fields scTestParams - args args - wantEvent Event - wantErr bool - }{ - { - name: "add P1 to empty scheduler", - fields: scTestParams{}, - args: args{event: addP1}, - wantEvent: noOpEvent{}, - }, - { - name: "add duplicate peer", - fields: scTestParams{ - initHeight: 6, - peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}}, - allB: []int64{6, 7, 8}, - }, - args: args{event: addP1}, - wantEvent: noOpEvent{}, - }, - { - name: "add P1 to non empty scheduler", - fields: scTestParams{ - initHeight: 6, - peers: map[string]*scPeer{"P2": {height: 8, state: peerStateReady}}, - allB: []int64{6, 7, 8}, - }, - args: args{event: addP1}, - wantEvent: noOpEvent{}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - event, err := sc.handleAddNewPeer(tt.args.event) - checkScResults(t, tt.wantErr, err, tt.wantEvent, event) - }) - } -} - -func TestScHandleTryPrunePeer(t *testing.T) { - now := time.Now() - - pruneEv := rTryPrunePeer{ - time: now.Add(time.Second + time.Millisecond), - } - type args struct { - event rTryPrunePeer - } - - tests := []struct { - name string - fields scTestParams - args args - wantEvent Event - wantErr bool - }{ - { - name: "no peers", - fields: scTestParams{}, - args: args{event: pruneEv}, - wantEvent: noOpEvent{}, - }, - { - name: "no prunable peers", - fields: scTestParams{ - minRecvRate: 100, - peers: map[string]*scPeer{ - // X - removed, active, fast - "P1": {state: peerStateRemoved, lastTouched: now.Add(time.Second), lastRate: 101}, - // X - ready, active, fast - "P2": {state: peerStateReady, lastTouched: now.Add(time.Second), lastRate: 101}, - // X - removed, active, equal - "P3": {state: peerStateRemoved, lastTouched: now.Add(time.Second), lastRate: 100}}, - peerTimeout: time.Second, - }, - args: args{event: pruneEv}, - wantEvent: noOpEvent{}, - }, - { - name: "mixed peers", - fields: scTestParams{ - minRecvRate: 100, - peers: map[string]*scPeer{ - // X - removed, active, fast - "P1": {state: peerStateRemoved, lastTouched: now.Add(time.Second), lastRate: 101, height: 5}, - // X - ready, active, fast - "P2": {state: peerStateReady, lastTouched: now.Add(time.Second), lastRate: 101, height: 5}, - // X - removed, active, equal - "P3": {state: peerStateRemoved, lastTouched: now.Add(time.Second), lastRate: 100, height: 5}, - // V - ready, inactive, equal - "P4": {state: peerStateReady, lastTouched: now, lastRate: 100, height: 7}, - // V - ready, inactive, slow - "P5": {state: peerStateReady, lastTouched: now, lastRate: 99, height: 7}, - // V - ready, active, slow - "P6": {state: peerStateReady, lastTouched: now.Add(time.Second), lastRate: 
90, height: 7}, - }, - allB: []int64{1, 2, 3, 4, 5, 6, 7}, - peerTimeout: time.Second}, - args: args{event: pruneEv}, - wantEvent: scPeersPruned{peers: []types.NodeID{"P4", "P5", "P6"}}, - }, - { - name: "mixed peers, finish after pruning", - fields: scTestParams{ - minRecvRate: 100, - height: 6, - peers: map[string]*scPeer{ - // X - removed, active, fast - "P1": {state: peerStateRemoved, lastTouched: now.Add(time.Second), lastRate: 101, height: 5}, - // X - ready, active, fast - "P2": {state: peerStateReady, lastTouched: now.Add(time.Second), lastRate: 101, height: 5}, - // X - removed, active, equal - "P3": {state: peerStateRemoved, lastTouched: now.Add(time.Second), lastRate: 100, height: 5}, - // V - ready, inactive, equal - "P4": {state: peerStateReady, lastTouched: now, lastRate: 100, height: 7}, - // V - ready, inactive, slow - "P5": {state: peerStateReady, lastTouched: now, lastRate: 99, height: 7}, - // V - ready, active, slow - "P6": {state: peerStateReady, lastTouched: now.Add(time.Second), lastRate: 90, height: 7}, - }, - allB: []int64{6, 7}, - peerTimeout: time.Second}, - args: args{event: pruneEv}, - wantEvent: scFinishedEv{}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - event, err := sc.handleTryPrunePeer(tt.args.event) - checkScResults(t, tt.wantErr, err, tt.wantEvent, event) - }) - } -} - -func TestScHandleTrySchedule(t *testing.T) { - now := time.Now() - tryEv := rTrySchedule{ - time: now.Add(time.Second + time.Millisecond), - } - - type args struct { - event rTrySchedule - } - tests := []struct { - name string - fields scTestParams - args args - wantEvent Event - wantErr bool - }{ - { - name: "no peers", - fields: scTestParams{startTime: now, peers: map[string]*scPeer{}}, - args: args{event: tryEv}, - wantEvent: noOpEvent{}, - }, - { - name: "only new peers", - fields: scTestParams{startTime: now, peers: map[string]*scPeer{"P1": {height: -1, state: peerStateNew}}}, - args: args{event: tryEv}, - wantEvent: noOpEvent{}, - }, - { - name: "only Removed peers", - fields: scTestParams{startTime: now, peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}}}, - args: args{event: tryEv}, - wantEvent: noOpEvent{}, - }, - { - name: "one Ready shorter peer", - fields: scTestParams{ - startTime: now, - height: 6, - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}}, - args: args{event: tryEv}, - wantEvent: noOpEvent{}, - }, - { - name: "one Ready equal peer", - fields: scTestParams{ - startTime: now, - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}}, - args: args{event: tryEv}, - wantEvent: scBlockRequest{peerID: "P1", height: 1}, - }, - { - name: "many Ready higher peers with different number of pending requests", - fields: scTestParams{ - startTime: now, - peers: map[string]*scPeer{ - "P1": {height: 4, state: peerStateReady}, - "P2": {height: 5, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5}, - pending: map[int64]types.NodeID{ - 1: "P1", 2: "P1", - 3: "P2", - }, - }, - args: args{event: tryEv}, - wantEvent: scBlockRequest{peerID: "P2", height: 4}, - }, - - { - name: "many Ready higher peers with same number of pending requests", - fields: scTestParams{ - startTime: now, - peers: map[string]*scPeer{ - "P2": {height: 8, state: peerStateReady}, - "P1": {height: 8, state: peerStateReady}, - "P3": {height: 8, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6, 7, 8}, - pending: map[int64]types.NodeID{ - 1: 
"P1", 2: "P1", - 3: "P3", 4: "P3", - 5: "P2", 6: "P2", - }, - }, - args: args{event: tryEv}, - wantEvent: scBlockRequest{peerID: "P1", height: 7}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - event, err := sc.handleTrySchedule(tt.args.event) - checkScResults(t, tt.wantErr, err, tt.wantEvent, event) - }) - } -} - -func TestScHandleStatusResponse(t *testing.T) { - now := time.Now() - statusRespP1Ev := bcStatusResponse{ - time: now.Add(time.Second + time.Millisecond), - peerID: "P1", - height: 6, - } - - type args struct { - event bcStatusResponse - } - tests := []struct { - name string - fields scTestParams - args args - wantEvent Event - wantErr bool - }{ - { - name: "change height of non existing peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P2": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}, - }, - args: args{event: statusRespP1Ev}, - wantEvent: noOpEvent{}, - }, - - { - name: "increase height of removed peer", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: 2, state: peerStateRemoved}}}, - args: args{event: statusRespP1Ev}, - wantEvent: noOpEvent{}, - }, - - { - name: "decrease height of single peer", - fields: scTestParams{ - height: 5, - peers: map[string]*scPeer{"P1": {height: 10, state: peerStateReady}}, - allB: []int64{5, 6, 7, 8, 9, 10}, - }, - args: args{event: statusRespP1Ev}, - wantEvent: scPeerError{peerID: "P1", reason: fmt.Errorf("some error")}, - }, - - { - name: "increase height of single peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}}, - args: args{event: statusRespP1Ev}, - wantEvent: noOpEvent{}, - }, - { - name: "noop height change of single peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 6, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6}}, - args: args{event: statusRespP1Ev}, - wantEvent: noOpEvent{}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - event, err := sc.handleStatusResponse(tt.args.event) - checkScResults(t, tt.wantErr, err, tt.wantEvent, event) - }) - } -} - -func TestScHandle(t *testing.T) { - now := time.Now() - - type unknownEv struct { - priorityNormal - } - - block1, block2, block3 := makeScBlock(1), makeScBlock(2), makeScBlock(3) - - t0 := time.Now() - tick := make([]time.Time, 100) - for i := range tick { - tick[i] = t0.Add(time.Duration(i) * time.Millisecond) - } - - type args struct { - event Event - } - type scStep struct { - currentSc *scTestParams - args args - wantEvent Event - wantErr bool - wantSc *scTestParams - } - tests := []struct { - name string - steps []scStep - }{ - { - name: "unknown event", - steps: []scStep{ - { // add P1 - currentSc: &scTestParams{}, - args: args{event: unknownEv{}}, - wantEvent: scSchedulerFail{reason: fmt.Errorf("some error")}, - wantSc: &scTestParams{}, - }, - }, - }, - { - name: "single peer, sync 3 blocks", - steps: []scStep{ - { // add P1 - currentSc: &scTestParams{startTime: now, peers: map[string]*scPeer{}, height: 1}, - args: args{event: bcAddNewPeer{peerID: "P1"}}, - wantEvent: noOpEvent{}, - wantSc: &scTestParams{startTime: now, peers: map[string]*scPeer{ - "P1": {base: -1, height: -1, state: peerStateNew}}, height: 1}, - }, - { // set height of P1 - args: args{event: bcStatusResponse{peerID: "P1", time: tick[0], height: 3}}, - wantEvent: noOpEvent{}, - wantSc: &scTestParams{ - startTime: 
now, - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}}, - allB: []int64{1, 2, 3}, - height: 1, - }, - }, - { // schedule block 1 - args: args{event: rTrySchedule{time: tick[1]}}, - wantEvent: scBlockRequest{peerID: "P1", height: 1}, - wantSc: &scTestParams{ - startTime: now, - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}}, - allB: []int64{1, 2, 3}, - pending: map[int64]types.NodeID{1: "P1"}, - pendingTime: map[int64]time.Time{1: tick[1]}, - height: 1, - }, - }, - { // schedule block 2 - args: args{event: rTrySchedule{time: tick[2]}}, - wantEvent: scBlockRequest{peerID: "P1", height: 2}, - wantSc: &scTestParams{ - startTime: now, - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}}, - allB: []int64{1, 2, 3}, - pending: map[int64]types.NodeID{1: "P1", 2: "P1"}, - pendingTime: map[int64]time.Time{1: tick[1], 2: tick[2]}, - height: 1, - }, - }, - { // schedule block 3 - args: args{event: rTrySchedule{time: tick[3]}}, - wantEvent: scBlockRequest{peerID: "P1", height: 3}, - wantSc: &scTestParams{ - startTime: now, - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}}, - allB: []int64{1, 2, 3}, - pending: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1"}, - pendingTime: map[int64]time.Time{1: tick[1], 2: tick[2], 3: tick[3]}, - height: 1, - }, - }, - { // block response 1 - args: args{event: bcBlockResponse{peerID: "P1", time: tick[4], size: 100, block: block1}}, - wantEvent: scBlockReceived{peerID: "P1", block: block1}, - wantSc: &scTestParams{ - startTime: now, - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[4]}}, - allB: []int64{1, 2, 3}, - pending: map[int64]types.NodeID{2: "P1", 3: "P1"}, - pendingTime: map[int64]time.Time{2: tick[2], 3: tick[3]}, - received: map[int64]types.NodeID{1: "P1"}, - height: 1, - }, - }, - { // block response 2 - args: args{event: bcBlockResponse{peerID: "P1", time: tick[5], size: 100, block: block2}}, - wantEvent: scBlockReceived{peerID: "P1", block: block2}, - wantSc: &scTestParams{ - startTime: now, - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[5]}}, - allB: []int64{1, 2, 3}, - pending: map[int64]types.NodeID{3: "P1"}, - pendingTime: map[int64]time.Time{3: tick[3]}, - received: map[int64]types.NodeID{1: "P1", 2: "P1"}, - height: 1, - }, - }, - { // block response 3 - args: args{event: bcBlockResponse{peerID: "P1", time: tick[6], size: 100, block: block3}}, - wantEvent: scBlockReceived{peerID: "P1", block: block3}, - wantSc: &scTestParams{ - startTime: now, - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[6]}}, - allB: []int64{1, 2, 3}, - received: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1"}, - height: 1, - }, - }, - { // processed block 1 - args: args{event: pcBlockProcessed{peerID: types.NodeID("P1"), height: 1}}, - wantEvent: noOpEvent{}, - wantSc: &scTestParams{ - startTime: now, - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[6]}}, - allB: []int64{2, 3}, - received: map[int64]types.NodeID{2: "P1", 3: "P1"}, - height: 2, - }, - }, - { // processed block 2 - args: args{event: pcBlockProcessed{peerID: types.NodeID("P1"), height: 2}}, - wantEvent: scFinishedEv{}, - wantSc: &scTestParams{ - startTime: now, - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[6]}}, - allB: []int64{3}, - received: map[int64]types.NodeID{3: "P1"}, - height: 3, - }, - }, - }, - }, - { - name: "block verification 
failure", - steps: []scStep{ - { // failure processing block 1 - currentSc: &scTestParams{ - startTime: now, - peers: map[string]*scPeer{ - "P1": {height: 4, state: peerStateReady, lastTouched: tick[6]}, - "P2": {height: 3, state: peerStateReady, lastTouched: tick[6]}}, - allB: []int64{1, 2, 3, 4}, - received: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1"}, - height: 1, - }, - args: args{event: pcBlockVerificationFailure{height: 1, firstPeerID: "P1", secondPeerID: "P1"}}, - wantEvent: noOpEvent{}, - wantSc: &scTestParams{ - startTime: now, - peers: map[string]*scPeer{ - "P1": {height: 4, state: peerStateRemoved, lastTouched: tick[6]}, - "P2": {height: 3, state: peerStateReady, lastTouched: tick[6]}}, - allB: []int64{1, 2, 3}, - received: map[int64]types.NodeID{}, - height: 1, - }, - }, - }, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - var sc *scheduler - for i, step := range tt.steps { - // First step must always initialize the currentState as state. - if step.currentSc != nil { - sc = newTestScheduler(*step.currentSc) - } - if sc == nil { - panic("Bad (initial?) step") - } - - nextEvent, err := sc.handle(step.args.event) - wantSc := newTestScheduler(*step.wantSc) - - t.Logf("step %d(%v): %s", i, step.args.event, sc) - checkSameScheduler(t, wantSc, sc) - - checkScResults(t, step.wantErr, err, step.wantEvent, nextEvent) - - // Next step may use the wantedState as their currentState. - sc = newTestScheduler(*step.wantSc) - } - }) - } -} diff --git a/internal/blocksync/v2/types.go b/internal/blocksync/v2/types.go deleted file mode 100644 index 7a73728e46..0000000000 --- a/internal/blocksync/v2/types.go +++ /dev/null @@ -1,65 +0,0 @@ -package v2 - -import ( - "github.com/Workiva/go-datastructures/queue" -) - -// Event is the type that can be added to the priority queue. -type Event queue.Item - -type priority interface { - Compare(other queue.Item) int - Priority() int -} - -type priorityLow struct{} -type priorityNormal struct{} -type priorityHigh struct{} - -func (p priorityLow) Priority() int { - return 1 -} - -func (p priorityNormal) Priority() int { - return 2 -} - -func (p priorityHigh) Priority() int { - return 3 -} - -func (p priorityLow) Compare(other queue.Item) int { - op := other.(priority) - if p.Priority() > op.Priority() { - return 1 - } else if p.Priority() == op.Priority() { - return 0 - } - return -1 -} - -func (p priorityNormal) Compare(other queue.Item) int { - op := other.(priority) - if p.Priority() > op.Priority() { - return 1 - } else if p.Priority() == op.Priority() { - return 0 - } - return -1 -} - -func (p priorityHigh) Compare(other queue.Item) int { - op := other.(priority) - if p.Priority() > op.Priority() { - return 1 - } else if p.Priority() == op.Priority() { - return 0 - } - return -1 -} - -type noOpEvent struct { - priorityLow -} - -var noOp = noOpEvent{} diff --git a/internal/consensus/README.md b/internal/consensus/README.md deleted file mode 100644 index 3f32d7e469..0000000000 --- a/internal/consensus/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Consensus - -See the [consensus spec](https://github.com/tendermint/spec/tree/master/spec/consensus). 
diff --git a/internal/consensus/byzantine_test.go b/internal/consensus/byzantine_test.go index c48d5eaeb4..7e849bd657 100644 --- a/internal/consensus/byzantine_test.go +++ b/internal/consensus/byzantine_test.go @@ -7,21 +7,24 @@ import ( "path" "sync" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" abciclient "github.com/tendermint/tendermint/abci/client" + "github.com/tendermint/tendermint/abci/example/kvstore" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/evidence" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0" + "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/p2p" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" + tmtime "github.com/tendermint/tendermint/libs/time" tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" @@ -30,15 +33,21 @@ import ( // Byzantine node sends two different prevotes (nil and blockID) to the same // validator. func TestByzantinePrevoteEquivocation(t *testing.T) { + // empirically, this test either passes in <1s or hits some + // kind of deadlock and hits the larger timeout. This timeout + // can be extended a bunch if needed, but it's good to avoid + // falling back to a much coarser timeout + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel() + config := configSetup(t) nValidators := 4 prevoteHeight := int64(2) testName := "consensus_byzantine_test" tickerFunc := newMockTickerFunc(true) - appFunc := newKVStore - genDoc, privVals := factory.RandGenesisDoc(config, nValidators, 1) + genDoc, privVals := factory.RandGenesisDoc(config, nValidators, 1, factory.ConsensusParams()) states := make([]*State, nValidators) for i := 0; i < nValidators; i++ { @@ -50,58 +59,57 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { require.NoError(t, err) require.NoError(t, stateStore.Save(state)) - thisConfig, err := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) + thisConfig, err := ResetConfig(t.TempDir(), fmt.Sprintf("%s_%d", testName, i)) require.NoError(t, err) defer os.RemoveAll(thisConfig.RootDir) - ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal - app := appFunc() + ensureDir(t, path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal + app := kvstore.NewApplication() vals := types.TM2PB.ValidatorUpdates(state.Validators) - app.InitChain(abci.RequestInitChain{ValidatorSet: &vals}) + _, err = app.InitChain(ctx, &abci.RequestInitChain{ValidatorSet: &vals}) + require.NoError(t, err) blockDB := dbm.NewMemDB() blockStore := store.NewBlockStore(blockDB) // one for mempool, one for consensus - mtx := new(tmsync.Mutex) - proxyAppConnMem := abciclient.NewLocalClient(mtx, app) - proxyAppConnCon := abciclient.NewLocalClient(mtx, app) + proxyAppConnMem := abciclient.NewLocalClient(logger, app) + proxyAppConnCon := abciclient.NewLocalClient(logger, app) // Make Mempool - mempool := mempoolv0.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0) - mempool.SetLogger(log.TestingLogger().With("module", "mempool")) + mp := 
mempool.NewTxMempool( + log.NewNopLogger().With("module", "mempool"), + thisConfig.Mempool, + proxyAppConnMem, + ) if thisConfig.Consensus.WaitForTxs() { - mempool.EnableTxsAvailable() + mp.EnableTxsAvailable() } + eventBus := eventbus.NewDefault(log.NewNopLogger().With("module", "events")) + require.NoError(t, eventBus.Start(ctx)) + // Make a full instance of the evidence pool evidenceDB := dbm.NewMemDB() - evpool, err := evidence.NewPool(logger.With("module", "evidence"), evidenceDB, stateStore, blockStore) - require.NoError(t, err) + evpool := evidence.NewPool(logger.With("module", "evidence"), evidenceDB, stateStore, blockStore, evidence.NopMetrics(), eventBus) // Make State - blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, proxyAppConnCon, mempool, evpool, blockStore, nil) - cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool) - cs.SetLogger(cs.Logger) + blockExec := sm.NewBlockExecutor(stateStore, log.NewNopLogger(), proxyAppConnCon, mp, evpool, + blockStore, eventBus, sm.NopMetrics()) + cs, err := NewState(logger, thisConfig.Consensus, stateStore, blockExec, blockStore, mp, evpool, eventBus) + require.NoError(t, err) // set private validator pv := privVals[i] - cs.SetPrivValidator(pv) - - eventBus := types.NewEventBus() - eventBus.SetLogger(log.TestingLogger().With("module", "events")) - err = eventBus.Start() - require.NoError(t, err) - cs.SetEventBus(eventBus) + cs.SetPrivValidator(ctx, pv) cs.SetTimeoutTicker(tickerFunc()) - cs.SetLogger(logger) states[i] = cs }() } - rts := setup(t, nValidators, states, 100) // buffer must be large enough to not deadlock + rts := setup(ctx, t, nValidators, states, 512) // buffer must be large enough to not deadlock var bzNodeID types.NodeID @@ -118,45 +126,46 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { bzReactor := rts.reactors[bzNodeID] // alter prevote so that the byzantine node double votes when height is 2 - bzNodeState.doPrevote = func(height int64, round int32, allowOldBlocks bool) { + bzNodeState.doPrevote = func(ctx context.Context, height int64, round int32, allowOldBlocks bool) { // allow first height to happen normally so that byzantine validator is no longer proposer if height == prevoteHeight { - prevote1, err := bzNodeState.signVote( + prevote1, err := bzNodeState.signVote(ctx, tmproto.PrevoteType, bzNodeState.ProposalBlock.Hash(), bzNodeState.ProposalBlockParts.Header(), ) require.NoError(t, err) - prevote2, err := bzNodeState.signVote(tmproto.PrevoteType, nil, types.PartSetHeader{}) + prevote2, err := bzNodeState.signVote(ctx, tmproto.PrevoteType, nil, types.PartSetHeader{}) require.NoError(t, err) // send two votes to all peers (1st to one half, 2nd to another half) i := 0 for _, ps := range bzReactor.peers { + voteCh := rts.voteChannels[bzNodeID] if i < len(bzReactor.peers)/2 { - bzNodeState.Logger.Info("signed and pushed vote", "vote", prevote1, "peer", ps.peerID) - bzReactor.voteCh.Out <- p2p.Envelope{ - To: ps.peerID, - Message: &tmcons.Vote{ - Vote: prevote1.ToProto(), - }, - } + + require.NoError(t, voteCh.Send(ctx, + p2p.Envelope{ + To: ps.peerID, + Message: &tmcons.Vote{ + Vote: prevote1.ToProto(), + }, + })) } else { - bzNodeState.Logger.Info("signed and pushed vote", "vote", prevote2, "peer", ps.peerID) - bzReactor.voteCh.Out <- p2p.Envelope{ - To: ps.peerID, - Message: &tmcons.Vote{ - Vote: prevote2.ToProto(), - }, - } + require.NoError(t, voteCh.Send(ctx, + p2p.Envelope{ + To: ps.peerID, + Message: &tmcons.Vote{ + Vote: prevote2.ToProto(), 
+ }, + })) } i++ } } else { - bzNodeState.Logger.Info("behaving normally") - bzNodeState.defaultDoPrevote(height, round, false) + bzNodeState.defaultDoPrevote(ctx, height, round, false) } } @@ -166,8 +175,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { // lazyProposer := states[1] lazyNodeState := states[1] - lazyNodeState.decideProposal = func(height int64, round int32) { - lazyNodeState.Logger.Info("Lazy Proposer proposing condensed commit") + lazyNodeState.decideProposal = func(ctx context.Context, height int64, round int32) { require.NotNil(t, lazyNodeState.privValidator) var commit *types.Commit @@ -179,30 +187,35 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { case lazyNodeState.LastCommit != nil: commit = lazyNodeState.LastCommit default: // This shouldn't happen. - lazyNodeState.Logger.Error("enterPropose: Cannot propose anything: No commit for the previous block") + lazyNodeState.logger.Error("enterPropose: Cannot propose anything: No commit for the previous block") return } if lazyNodeState.privValidatorProTxHash == nil { // If this node is a validator & proposer in the current round, it will // miss the opportunity to create a block. - lazyNodeState.Logger.Error(fmt.Sprintf("enterPropose: %v", errProTxHashIsNotSet)) + lazyNodeState.logger.Error("enterPropose", "err", errProTxHashIsNotSet) return } proposerProTxHash := lazyNodeState.privValidatorProTxHash - block, blockParts := lazyNodeState.blockExec.CreateProposalBlock( + block, err := lazyNodeState.blockExec.CreateProposalBlock( + ctx, lazyNodeState.Height, lazyNodeState.state, commit, proposerProTxHash, 0, + nil, ) + require.NoError(t, err) + blockParts, err := block.MakePartSet(types.BlockPartSizeBytes) + require.NoError(t, err) // Flush the WAL. Otherwise, we may not recompute the same proposal to sign, // and the privValidator will refuse to sign anything. 
if err := lazyNodeState.wal.FlushAndSync(); err != nil { - lazyNodeState.Logger.Error("Error flushing to disk") + lazyNodeState.logger.Error("error flushing to disk") } // Make proposal @@ -213,341 +226,93 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { round, lazyNodeState.ValidRound, propBlockID, + block.Header.Time, ) p := proposal.ToProto() - _, err := lazyNodeState.privValidator.SignProposal( - context.Background(), + if _, err := lazyNodeState.privValidator.SignProposal( + ctx, lazyNodeState.state.ChainID, lazyNodeState.state.Validators.QuorumType, lazyNodeState.state.Validators.QuorumHash, p, - ) - if err == nil { + ); err == nil { proposal.Signature = p.Signature // send proposal and block parts on internal msg queue - lazyNodeState.sendInternalMessage(msgInfo{&ProposalMessage{proposal}, ""}) + lazyNodeState.sendInternalMessage(ctx, msgInfo{&ProposalMessage{proposal}, "", tmtime.Now()}) for i := 0; i < int(blockParts.Total()); i++ { part := blockParts.GetPart(i) - lazyNodeState.sendInternalMessage(msgInfo{&BlockPartMessage{lazyNodeState.Height, lazyNodeState.Round, part}, ""}) + lazyNodeState.sendInternalMessage(ctx, msgInfo{&BlockPartMessage{ + lazyNodeState.Height, lazyNodeState.Round, part, + }, "", tmtime.Now()}) } - lazyNodeState.Logger.Info("Signed proposal", "height", height, "round", round, "proposal", proposal) - lazyNodeState.Logger.Debug("signed proposal block", "block", block) + lazyNodeState.logger.Debug("signed proposal block", "block", block) } else if !lazyNodeState.replayMode { - lazyNodeState.Logger.Error("enterPropose: Error signing proposal", "height", height, "round", round, "err", err) + lazyNodeState.logger.Error("enterPropose: Error signing proposal", "height", height, "round", round, "err", err) } } for _, reactor := range rts.reactors { - state := reactor.state.GetState() - reactor.SwitchToConsensus(state, false) + reactor.SwitchToConsensus(ctx, reactor.state.GetState(), false) } // Evidence should be submitted and committed at the third height but // we will check the first six just in case evidenceFromEachValidator := make([]types.Evidence, nValidators) - wg := new(sync.WaitGroup) + var wg sync.WaitGroup i := 0 + subctx, subcancel := context.WithCancel(ctx) + defer subcancel() for _, sub := range rts.subs { wg.Add(1) - go func(j int, s types.Subscription) { + go func(j int, s eventbus.Subscription) { defer wg.Done() for { - select { - case msg := <-s.Out(): - require.NotNil(t, msg) - block := msg.Data().(types.EventDataNewBlock).Block - if len(block.Evidence.Evidence) != 0 { - evidenceFromEachValidator[j] = block.Evidence.Evidence[0] - return - } - case <-s.Canceled(): - require.Fail(t, "subscription failed for %d", j) + if subctx.Err() != nil { + return + } + + msg, err := s.Next(subctx) + if subctx.Err() != nil { + return + } + + if err != nil { + t.Errorf("waiting for subscription: %v", err) + subcancel() + return + } + + require.NotNil(t, msg) + block := msg.Data().(types.EventDataNewBlock).Block + if len(block.Evidence) != 0 { + evidenceFromEachValidator[j] = block.Evidence[0] return } } }(i, sub) - i++ } wg.Wait() - proTxHash, err := bzNodeState.privValidator.GetProTxHash(context.Background()) + proTxHash, err := bzNodeState.privValidator.GetProTxHash(ctx) require.NoError(t, err) - for idx, ev := range evidenceFromEachValidator { - if assert.NotNil(t, ev, idx) { - ev, ok := ev.(*types.DuplicateVoteEvidence) - assert.True(t, ok) - assert.Equal(t, proTxHash, ev.VoteA.ValidatorProTxHash) - assert.Equal(t, prevoteHeight, ev.Height()) - } + // 
don't run more assertions if we've encountered a timeout + select { + case <-subctx.Done(): + t.Fatal("encountered timeout") + default: } -} -// 4 validators. 1 is byzantine. The other three are partitioned into A (1 val) and B (2 vals). -// byzantine validator sends conflicting proposals into A and B, -// and prevotes/precommits on both of them. -// B sees a commit, A doesn't. -// Heal partition and ensure A sees the commit -func TestByzantineConflictingProposalsWithPartition(t *testing.T) { - // TODO: https://github.com/tendermint/tendermint/issues/6092 - t.SkipNow() - - // n := 4 - // logger := consensusLogger().With("test", "byzantine") - // app := newCounter - - // states, cleanup := randConsensusState(n, "consensus_byzantine_test", newMockTickerFunc(false), app) - // t.Cleanup(cleanup) - - // // give the byzantine validator a normal ticker - // ticker := NewTimeoutTicker() - // ticker.SetLogger(states[0].Logger) - // states[0].SetTimeoutTicker(ticker) - - // p2pLogger := logger.With("module", "p2p") - - // blocksSubs := make([]types.Subscription, n) - // reactors := make([]p2p.Reactor, n) - // for i := 0; i < n; i++ { - // // enable txs so we can create different proposals - // assertMempool(states[i].txNotifier).EnableTxsAvailable() - - // eventBus := states[i].eventBus - // eventBus.SetLogger(logger.With("module", "events", "validator", i)) - - // var err error - // blocksSubs[i], err = eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock) - // require.NoError(t, err) - - // conR := NewReactor(states[i], true) // so we don't start the consensus states - // conR.SetLogger(logger.With("validator", i)) - // conR.SetEventBus(eventBus) - - // var conRI p2p.Reactor = conR - - // // make first val byzantine - // if i == 0 { - // conRI = NewByzantineReactor(conR) - // } - - // reactors[i] = conRI - // err = states[i].blockExec.Store().Save(states[i].state) // for save height 1's validators info - // require.NoError(t, err) - // } - - // switches := p2p.MakeConnectedSwitches(config.P2P, N, func(i int, sw *p2p.Switch) *p2p.Switch { - // sw.SetLogger(p2pLogger.With("validator", i)) - // sw.AddReactor("CONSENSUS", reactors[i]) - // return sw - // }, func(sws []*p2p.Switch, i, j int) { - // // the network starts partitioned with globally active adversary - // if i != 0 { - // return - // } - // p2p.Connect2Switches(sws, i, j) - // }) - - // // make first val byzantine - // // NOTE: Now, test validators are MockPV, which by default doesn't - // // do any safety checks. - // states[0].privValidator.(types.MockPV).DisableChecks() - // states[0].decideProposal = func(j int32) func(int64, int32) { - // return func(height int64, round int32) { - // byzantineDecideProposalFunc(t, height, round, states[j], switches[j]) - // } - // }(int32(0)) - // // We are setting the prevote function to do nothing because the prevoting - // // and precommitting are done alongside the proposal. - // states[0].doPrevote = func(height int64, round int32) {} - - // defer func() { - // for _, sw := range switches { - // err := sw.Stop() - // require.NoError(t, err) - // } - // }() - - // // start the non-byz state machines. 
- // // note these must be started before the byz - // for i := 1; i < n; i++ { - // cr := reactors[i].(*Reactor) - // cr.SwitchToConsensus(cr.conS.GetState(), false) - // } - - // // start the byzantine state machine - // byzR := reactors[0].(*ByzantineReactor) - // s := byzR.reactor.conS.GetState() - // byzR.reactor.SwitchToConsensus(s, false) - - // // byz proposer sends one block to peers[0] - // // and the other block to peers[1] and peers[2]. - // // note peers and switches order don't match. - // peers := switches[0].Peers().List() - - // // partition A - // ind0 := getSwitchIndex(switches, peers[0]) - - // // partition B - // ind1 := getSwitchIndex(switches, peers[1]) - // ind2 := getSwitchIndex(switches, peers[2]) - // p2p.Connect2Switches(switches, ind1, ind2) - - // // wait for someone in the big partition (B) to make a block - // <-blocksSubs[ind2].Out() - - // t.Log("A block has been committed. Healing partition") - // p2p.Connect2Switches(switches, ind0, ind1) - // p2p.Connect2Switches(switches, ind0, ind2) - - // // wait till everyone makes the first new block - // // (one of them already has) - // wg := new(sync.WaitGroup) - // for i := 1; i < N-1; i++ { - // wg.Add(1) - // go func(j int) { - // <-blocksSubs[j].Out() - // wg.Done() - // }(i) - // } - - // done := make(chan struct{}) - // go func() { - // wg.Wait() - // close(done) - // }() - - // tick := time.NewTicker(time.Second * 10) - // select { - // case <-done: - // case <-tick.C: - // for i, reactor := range reactors { - // t.Log(fmt.Sprintf("Consensus Reactor %v", i)) - // t.Log(fmt.Sprintf("%v", reactor)) - // } - // t.Fatalf("Timed out waiting for all validators to commit first block") - // } + for idx, ev := range evidenceFromEachValidator { + require.NotNil(t, ev, idx) + ev, ok := ev.(*types.DuplicateVoteEvidence) + require.True(t, ok) + assert.Equal(t, proTxHash, ev.VoteA.ValidatorProTxHash) + assert.Equal(t, prevoteHeight, ev.Height()) + } } - -// func byzantineDecideProposalFunc(t *testing.T, height int64, round int32, cs *State, sw *p2p.Switch) { -// // byzantine user should create two proposals and try to split the vote. -// // Avoid sending on internalMsgQueue and running consensus state. - -// // Create a new proposal block from state/txs from the mempool. -// block1, blockParts1 := cs.createProposalBlock() -// polRound, propBlockID := cs.ValidRound, types.BlockID{Hash: block1.Hash(), PartSetHeader: blockParts1.Header()} -// proposal1 := types.NewProposal(height, round, polRound, propBlockID) -// p1 := proposal1.ToProto() -// if err := cs.privValidator.SignProposal(cs.state.ChainID, p1); err != nil { -// t.Error(err) -// } - -// proposal1.Signature = p1.Signature - -// // some new transactions come in (this ensures that the proposals are different) -// deliverTxsRange(cs, 0, 1) - -// // Create a new proposal block from state/txs from the mempool. 
-// block2, blockParts2 := cs.createProposalBlock() -// polRound, propBlockID = cs.ValidRound, types.BlockID{Hash: block2.Hash(), PartSetHeader: blockParts2.Header()} -// proposal2 := types.NewProposal(height, round, polRound, propBlockID) -// p2 := proposal2.ToProto() -// if err := cs.privValidator.SignProposal(cs.state.ChainID, p2); err != nil { -// t.Error(err) -// } - -// proposal2.Signature = p2.Signature - -// block1Hash := block1.Hash() -// block2Hash := block2.Hash() - -// // broadcast conflicting proposals/block parts to peers -// peers := sw.Peers().List() -// t.Logf("Byzantine: broadcasting conflicting proposals to %d peers", len(peers)) -// for i, peer := range peers { -// if i < len(peers)/2 { -// go sendProposalAndParts(height, round, cs, peer, proposal1, block1Hash, blockParts1) -// } else { -// go sendProposalAndParts(height, round, cs, peer, proposal2, block2Hash, blockParts2) -// } -// } -// } - -// func sendProposalAndParts( -// height int64, -// round int32, -// cs *State, -// peer p2p.Peer, -// proposal *types.Proposal, -// blockHash []byte, -// parts *types.PartSet, -// ) { -// // proposal -// msg := &ProposalMessage{Proposal: proposal} -// peer.Send(DataChannel, MustEncode(msg)) - -// // parts -// for i := 0; i < int(parts.Total()); i++ { -// part := parts.GetPart(i) -// msg := &BlockPartMessage{ -// Height: height, // This tells peer that this part applies to us. -// Round: round, // This tells peer that this part applies to us. -// Part: part, -// } -// peer.Send(DataChannel, MustEncode(msg)) -// } - -// // votes -// cs.mtx.Lock() -// prevote, _ := cs.signVote(tmproto.PrevoteType, blockHash, parts.Header()) -// precommit, _ := cs.signVote(tmproto.PrecommitType, blockHash, parts.Header()) -// cs.mtx.Unlock() - -// peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote})) -// peer.Send(VoteChannel, MustEncode(&VoteMessage{precommit})) -// } - -// type ByzantineReactor struct { -// service.Service -// reactor *Reactor -// } - -// func NewByzantineReactor(conR *Reactor) *ByzantineReactor { -// return &ByzantineReactor{ -// Service: conR, -// reactor: conR, -// } -// } - -// func (br *ByzantineReactor) SetSwitch(s *p2p.Switch) { br.reactor.SetSwitch(s) } -// func (br *ByzantineReactor) GetChannels() []*p2p.ChannelDescriptor { return br.reactor.GetChannels() } - -// func (br *ByzantineReactor) AddPeer(peer p2p.Peer) { -// if !br.reactor.IsRunning() { -// return -// } - -// // Create peerState for peer -// peerState := NewPeerState(peer).SetLogger(br.reactor.Logger) -// peer.Set(types.PeerStateKey, peerState) - -// // Send our state to peer. -// // If we're syncing, broadcast a RoundStepMessage later upon SwitchToConsensus(). 
-// if !br.reactor.waitSync { -// br.reactor.sendNewRoundStepMessage(peer) -// } -// } - -// func (br *ByzantineReactor) RemovePeer(peer p2p.Peer, reason interface{}) { -// br.reactor.RemovePeer(peer, reason) -// } - -// func (br *ByzantineReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) { -// br.reactor.Receive(chID, peer, msgBytes) -// } - -// func (br *ByzantineReactor) InitPeer(peer p2p.Peer) p2p.Peer { return peer } diff --git a/internal/consensus/common_test.go b/internal/consensus/common_test.go index 85fe1f7bcc..7b2dfbce3f 100644 --- a/internal/consensus/common_test.go +++ b/internal/consensus/common_test.go @@ -4,36 +4,38 @@ package consensus import ( "bytes" "context" + "errors" "fmt" "io" - "io/ioutil" "os" - "path" "path/filepath" + "sort" "sync" "testing" "time" "github.com/dashevo/dashd-go/btcjson" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" - abcicli "github.com/tendermint/tendermint/abci/client" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/dash/llmq" cstypes "github.com/tendermint/tendermint/internal/consensus/types" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0" + "github.com/tendermint/tendermint/internal/eventbus" + "github.com/tendermint/tendermint/internal/mempool" + tmpubsub "github.com/tendermint/tendermint/internal/pubsub" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/internal/test/factory" tmbytes "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/libs/log" tmos "github.com/tendermint/tendermint/libs/os" - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" + tmtime "github.com/tendermint/tendermint/libs/time" "github.com/tendermint/tendermint/privval" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" @@ -51,37 +53,22 @@ type cleanupFunc func() func configSetup(t *testing.T) *config.Config { t.Helper() - cfg, err := ResetConfig("consensus_reactor_test") + cfg, err := ResetConfig(t.TempDir(), "consensus_reactor_test") require.NoError(t, err) - t.Cleanup(func() { os.RemoveAll(cfg.RootDir) }) - - consensusReplayConfig, err := ResetConfig("consensus_replay_test") - require.NoError(t, err) - t.Cleanup(func() { os.RemoveAll(consensusReplayConfig.RootDir) }) - - configStateTest, err := ResetConfig("consensus_state_test") - require.NoError(t, err) - t.Cleanup(func() { os.RemoveAll(configStateTest.RootDir) }) - - configMempoolTest, err := ResetConfig("consensus_mempool_test") - require.NoError(t, err) - t.Cleanup(func() { os.RemoveAll(configMempoolTest.RootDir) }) - - configByzantineTest, err := ResetConfig("consensus_byzantine_test") - require.NoError(t, err) - t.Cleanup(func() { os.RemoveAll(configByzantineTest.RootDir) }) + t.Cleanup(func() { _ = os.RemoveAll(cfg.RootDir) }) + walDir := filepath.Dir(cfg.Consensus.WalFile()) + ensureDir(t, walDir, 0700) return cfg } -func ensureDir(dir string, mode os.FileMode) { - if err := tmos.EnsureDir(dir, mode); err != nil { - panic(err) - } +func ensureDir(t *testing.T, dir string, mode os.FileMode) { + t.Helper() + require.NoError(t, tmos.EnsureDir(dir, mode)) 
} -func ResetConfig(name string) (*config.Config, error) { - return config.ResetTestRoot(name) +func ResetConfig(dir, name string) (*config.Config, error) { + return config.ResetTestRoot(dir, name) } //------------------------------------------------------------------------------- @@ -91,6 +78,7 @@ type validatorStub struct { Index int32 // Validator index. NOTE: we don't assume validator set changes. Height int64 Round int32 + clock tmtime.Source types.PrivValidator VotingPower int64 lastVote *types.Vote @@ -103,31 +91,34 @@ func newValidatorStub(privValidator types.PrivValidator, valIndex int32, initial Index: valIndex, PrivValidator: privValidator, VotingPower: testMinPower, + clock: tmtime.DefaultSource{}, Height: initialHeight, } } func (vs *validatorStub) signVote( - cfg *config.Config, + ctx context.Context, voteType tmproto.SignedMsgType, - hash []byte, + chainID string, + blockID types.BlockID, lastAppHash []byte, quorumType btcjson.LLMQType, quorumHash crypto.QuorumHash, - header types.PartSetHeader) (*types.Vote, error) { + voteExtension []byte) (*types.Vote, error) { - proTxHash, err := vs.PrivValidator.GetProTxHash(context.Background()) + proTxHash, err := vs.PrivValidator.GetProTxHash(ctx) if err != nil { return nil, fmt.Errorf("can't get proTxHash: %w", err) } vote := &types.Vote{ - ValidatorIndex: vs.Index, - ValidatorProTxHash: proTxHash, + Type: voteType, Height: vs.Height, Round: vs.Round, - Type: voteType, - BlockID: types.BlockID{Hash: hash, PartSetHeader: header}, + BlockID: blockID, + ValidatorProTxHash: proTxHash, + ValidatorIndex: vs.Index, + Extension: voteExtension, } stateID := types.StateID{ @@ -136,29 +127,42 @@ func (vs *validatorStub) signVote( } v := vote.ToProto() - if err := vs.PrivValidator.SignVote(context.Background(), cfg.ChainID(), quorumType, quorumHash, v, stateID, nil); err != nil { + if err := vs.PrivValidator.SignVote(ctx, chainID, quorumType, quorumHash, v, stateID, nil); err != nil { return nil, fmt.Errorf("sign vote failed: %w", err) } - // ref: signVote in FilePV, the vote should use the privious vote info when the sign data is the same. + // ref: signVote in FilePV, the vote should use the previous vote info when the sign data is the same. 
if signDataIsEqual(vs.lastVote, v) { v.BlockSignature = vs.lastVote.BlockSignature v.StateSignature = vs.lastVote.StateSignature + v.ExtensionSignature = vs.lastVote.ExtensionSignature } vote.BlockSignature = v.BlockSignature vote.StateSignature = v.StateSignature + vote.ExtensionSignature = v.ExtensionSignature return vote, err } -// SignDigest vote for type/hash/header -func signVote(vs *validatorStub, cfg *config.Config, voteType tmproto.SignedMsgType, hash []byte, lastAppHash []byte, quorumType btcjson.LLMQType, - quorumHash crypto.QuorumHash, header types.PartSetHeader) *types.Vote { - v, err := vs.signVote(cfg, voteType, hash, lastAppHash, quorumType, quorumHash, header) - if err != nil { - panic(fmt.Errorf("failed to sign vote: %v", err)) +// Sign vote for type/hash/header +func signVote( + ctx context.Context, + t *testing.T, + vs *validatorStub, + voteType tmproto.SignedMsgType, + chainID string, + blockID types.BlockID, + lastAppHash []byte, + quorumType btcjson.LLMQType, + quorumHash crypto.QuorumHash) *types.Vote { + + var ext []byte + if voteType == tmproto.PrecommitType { + ext = []byte("extension") } + v, err := vs.signVote(ctx, voteType, chainID, blockID, lastAppHash, quorumType, quorumHash, ext) + require.NoError(t, err, "failed to sign vote") vs.lastVote = v @@ -166,17 +170,19 @@ func signVote(vs *validatorStub, cfg *config.Config, voteType tmproto.SignedMsgT } func signVotes( - cfg *config.Config, + ctx context.Context, + t *testing.T, voteType tmproto.SignedMsgType, - hash []byte, + chainID string, + blockID types.BlockID, lastAppHash []byte, quorumType btcjson.LLMQType, quorumHash crypto.QuorumHash, - header types.PartSetHeader, - vss ...*validatorStub) []*types.Vote { + vss ...*validatorStub, +) []*types.Vote { votes := make([]*types.Vote, len(vss)) for i, vs := range vss { - votes[i] = signVote(vs, cfg, voteType, hash, lastAppHash, quorumType, quorumHash, header) + votes[i] = signVote(ctx, t, vs, voteType, chainID, blockID, lastAppHash, quorumType, quorumHash) } return votes } @@ -193,53 +199,52 @@ func incrementRound(vss ...*validatorStub) { } } -type ValidatorStubsByPower []*validatorStub +func sortVValidatorStubsByPower(ctx context.Context, t *testing.T, vss []*validatorStub) []*validatorStub { + t.Helper() + sort.Slice(vss, func(i, j int) bool { + vssi, err := vss[i].GetProTxHash(ctx) + require.NoError(t, err) -func (vss ValidatorStubsByPower) Len() int { - return len(vss) -} + vssj, err := vss[j].GetProTxHash(ctx) + require.NoError(t, err) -func (vss ValidatorStubsByPower) Less(i, j int) bool { - vssi, err := vss[i].GetProTxHash(context.Background()) - if err != nil { - panic(err) - } - vssj, err := vss[j].GetProTxHash(context.Background()) - if err != nil { - panic(err) - } + if vss[i].VotingPower == vss[j].VotingPower { + return bytes.Compare(vssi.Bytes(), vssj.Bytes()) == -1 + } + return vss[i].VotingPower > vss[j].VotingPower + }) - if vss[i].VotingPower == vss[j].VotingPower { - return bytes.Compare(vssi.Bytes(), vssj.Bytes()) == -1 + for idx, vs := range vss { + vs.Index = int32(idx) } - return vss[i].VotingPower > vss[j].VotingPower -} -func (vss ValidatorStubsByPower) Swap(i, j int) { - it := vss[i] - vss[i] = vss[j] - vss[i].Index = int32(i) - vss[j] = it - vss[j].Index = int32(j) + return vss } //------------------------------------------------------------------------------- // Functions for transitioning the consensus state -func startTestRound(cs *State, height int64, round int32) { - cs.enterNewRound(height, round) - cs.startRoutines(0) +func 
startTestRound(ctx context.Context, cs *State, height int64, round int32) { + cs.enterNewRound(ctx, height, round) + cs.startRoutines(ctx, 0) } // Create proposal block from cs1 but sign it with vs. func decideProposal( + ctx context.Context, + t *testing.T, cs1 *State, vs *validatorStub, height int64, round int32, ) (proposal *types.Proposal, block *types.Block) { + t.Helper() + cs1.mtx.Lock() - block, blockParts := cs1.createProposalBlock() + block, err := cs1.createProposalBlock(ctx) + require.NoError(t, err) + blockParts, err := block.MakePartSet(types.BlockPartSizeBytes) + require.NoError(t, err) validRound := cs1.ValidRound chainID := cs1.state.ChainID @@ -247,26 +252,23 @@ func decideProposal( quorumType := validatorsAtProposalHeight.QuorumType quorumHash := validatorsAtProposalHeight.QuorumHash cs1.mtx.Unlock() - if block == nil { - panic("Failed to createProposalBlock. Did you forget to add commit for previous block?") - } + + require.NotNil(t, block, "Failed to createProposalBlock. Did you forget to add commit for previous block?") // Make proposal polRound, propBlockID := validRound, types.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()} - proposal = types.NewProposal(height, 1, round, polRound, propBlockID) + proposal = types.NewProposal(height, 1, round, polRound, propBlockID, block.Header.Time) p := proposal.ToProto() - proTxHash, _ := vs.GetProTxHash(context.Background()) - pubKey, _ := vs.GetPubKey(context.Background(), validatorsAtProposalHeight.QuorumHash) + proTxHash, _ := vs.GetProTxHash(ctx) + pubKey, _ := vs.GetPubKey(ctx, validatorsAtProposalHeight.QuorumHash) - signID, err := vs.SignProposal(context.Background(), chainID, quorumType, quorumHash, p) + signID, err := vs.SignProposal(ctx, chainID, quorumType, quorumHash, p) + require.NoError(t, err) - if err != nil { - panic(err) - } - cs1.Logger.Debug("signed proposal common test", "height", proposal.Height, "round", proposal.Round, - "proposerProTxHash", proTxHash.ShortString(), "public key", pubKey.Bytes(), "quorum type", - validatorsAtProposalHeight.QuorumType, "quorum hash", validatorsAtProposalHeight.QuorumHash, "signID", signID) + cs1.logger.Debug("signed proposal common test", "height", proposal.Height, "round", proposal.Round, + "proposerProTxHash", proTxHash.ShortString(), "public key", pubKey.HexString(), "quorum type", + validatorsAtProposalHeight.QuorumType, "quorum hash", validatorsAtProposalHeight.QuorumHash, "signID", signID.String()) proposal.Signature = p.Signature @@ -280,48 +282,55 @@ func addVotes(to *State, votes ...*types.Vote) { } func signAddVotes( - cfg *config.Config, + ctx context.Context, + t *testing.T, to *State, voteType tmproto.SignedMsgType, - hash []byte, - header types.PartSetHeader, + chainID string, + blockID types.BlockID, vss ...*validatorStub, ) { - votes := signVotes(cfg, voteType, hash, to.state.AppHash, to.Validators.QuorumType, to.Validators.QuorumHash, header, vss...) - addVotes(to, votes...) + addVotes(to, signVotes(ctx, t, voteType, chainID, blockID, to.state.AppHash, to.Validators.QuorumType, to.Validators.QuorumHash, vss...)...) 
} -func validatePrevote(t *testing.T, cs *State, round int32, privVal *validatorStub, blockHash []byte) { +func validatePrevote( + ctx context.Context, + t *testing.T, + cs *State, + round int32, + privVal *validatorStub, + blockHash []byte, +) { + t.Helper() + + cs.mtx.RLock() + defer cs.mtx.RUnlock() + prevotes := cs.Votes.Prevotes(round) - proTxHash, err := privVal.GetProTxHash(context.Background()) + proTxHash, err := privVal.GetProTxHash(ctx) require.NoError(t, err) var vote *types.Vote if vote = prevotes.GetByProTxHash(proTxHash); vote == nil { panic("Failed to find prevote from validator") } if blockHash == nil { - if vote.BlockID.Hash != nil { - panic(fmt.Sprintf("Expected prevote to be for nil, got %X", vote.BlockID.Hash)) - } + require.Nil(t, vote.BlockID.Hash, "Expected prevote to be for nil, got %X", vote.BlockID.Hash) } else { - if !bytes.Equal(vote.BlockID.Hash, blockHash) { - panic(fmt.Sprintf("Expected prevote to be for %X, got %X", blockHash, vote.BlockID.Hash)) - } + require.True(t, bytes.Equal(vote.BlockID.Hash, blockHash), "Expected prevote to be for %X, got %X", blockHash, vote.BlockID.Hash) } } -func validateLastCommit(t *testing.T, cs *State, privVal *validatorStub, blockHash []byte) { +func validateLastCommit(ctx context.Context, t *testing.T, cs *State, privVal *validatorStub, blockHash []byte) { + t.Helper() + commit := cs.LastCommit err := commit.ValidateBasic() - if err != nil { - panic(fmt.Sprintf("Expected commit to be valid %v, %v", commit, err)) - } - if !bytes.Equal(commit.BlockID.Hash, blockHash) { - panic(fmt.Sprintf("Expected commit to be for %X, got %X", blockHash, commit.BlockID.Hash)) - } + require.NoError(t, err, "Expected commit to be valid %v, %v", commit, err) + require.True(t, bytes.Equal(commit.BlockID.Hash, blockHash), "Expected commit to be for %X, got %X", blockHash, commit.BlockID.Hash) } func validatePrecommit( + ctx context.Context, t *testing.T, cs *State, thisRound, @@ -330,73 +339,84 @@ func validatePrecommit( votedBlockHash, lockedBlockHash []byte, ) { + t.Helper() + precommits := cs.Votes.Precommits(thisRound) - proTxHash, err := privVal.GetProTxHash(context.Background()) + proTxHash, err := privVal.GetProTxHash(ctx) require.NoError(t, err) - var vote *types.Vote - if vote = precommits.GetByProTxHash(proTxHash); vote == nil { - panic("Failed to find precommit from validator") - } + vote := precommits.GetByProTxHash(proTxHash) + require.NotNil(t, vote, "Failed to find precommit from validator") if votedBlockHash == nil { - if vote.BlockID.Hash != nil { - panic("Expected precommit to be for nil") - } + require.Nil(t, vote.BlockID.Hash, "Expected precommit to be for nil") } else { - if !bytes.Equal(vote.BlockID.Hash, votedBlockHash) { - panic("Expected precommit to be for proposal block") - } + require.True(t, bytes.Equal(vote.BlockID.Hash, votedBlockHash), "Expected precommit to be for proposal block") } + rs := cs.GetRoundState() if lockedBlockHash == nil { - if cs.LockedRound != lockRound || cs.LockedBlock != nil { - panic(fmt.Sprintf( - "Expected to be locked on nil at round %d. Got locked at round %d with block %v", - lockRound, - cs.LockedRound, - cs.LockedBlock)) - } + require.False(t, rs.LockedRound != lockRound || rs.LockedBlock != nil, + "Expected to be locked on nil at round %d. 
Got locked at round %d with block %v", + lockRound, + rs.LockedRound, + rs.LockedBlock) } else { - if cs.LockedRound != lockRound || !bytes.Equal(cs.LockedBlock.Hash(), lockedBlockHash) { - panic(fmt.Sprintf( - "Expected block to be locked on round %d, got %d. Got locked block %X, expected %X", - lockRound, - cs.LockedRound, - cs.LockedBlock.Hash(), - lockedBlockHash)) - } + require.False(t, rs.LockedRound != lockRound || !bytes.Equal(rs.LockedBlock.Hash(), lockedBlockHash), + "Expected block to be locked on round %d, got %d. Got locked block %X, expected %X", + lockRound, + rs.LockedRound, + rs.LockedBlock.Hash(), + lockedBlockHash) } } -func validatePrevoteAndPrecommit( - t *testing.T, - cs *State, - thisRound, - lockRound int32, - privVal *validatorStub, - votedBlockHash, - lockedBlockHash []byte, -) { - // verify the prevote - validatePrevote(t, cs, thisRound, privVal, votedBlockHash) - // verify precommit - cs.mtx.Lock() - validatePrecommit(t, cs, thisRound, lockRound, privVal, votedBlockHash, lockedBlockHash) - cs.mtx.Unlock() +func subscribeToVoter(ctx context.Context, t *testing.T, cs *State, proTxHash []byte) <-chan tmpubsub.Message { + t.Helper() + + ch := make(chan tmpubsub.Message, 1) + if err := cs.eventBus.Observe(ctx, func(msg tmpubsub.Message) error { + vote := msg.Data().(types.EventDataVote) + // we only fire for our own votes + if bytes.Equal(proTxHash, vote.Vote.ValidatorProTxHash) { + select { + case <-ctx.Done(): + return ctx.Err() + case ch <- msg: + } + } + return nil + }, types.EventQueryVote); err != nil { + t.Fatalf("Failed to observe query %v: %v", types.EventQueryVote, err) + } + return ch } -func subscribeToVoter(cs *State, proTxHash []byte) <-chan tmpubsub.Message { - votesSub, err := cs.eventBus.SubscribeUnbuffered(context.Background(), testSubscriber, types.EventQueryVote) +func subscribeToVoterBuffered(ctx context.Context, t *testing.T, cs *State, proTxHash []byte) <-chan tmpubsub.Message { + t.Helper() + votesSub, err := cs.eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: testSubscriber, + Query: types.EventQueryVote, + Limit: 10}) if err != nil { - panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, types.EventQueryVote)) + t.Fatalf("failed to subscribe %s to %v", testSubscriber, types.EventQueryVote) } - ch := make(chan tmpubsub.Message) + ch := make(chan tmpubsub.Message, 10) go func() { - for msg := range votesSub.Out() { + for { + msg, err := votesSub.Next(ctx) + if err != nil { + if !errors.Is(err, tmpubsub.ErrTerminated) && !errors.Is(err, context.Canceled) { + t.Errorf("error terminating pubsub %s", err) + } + return + } vote := msg.Data().(types.EventDataVote) // we only fire for our own votes if bytes.Equal(proTxHash, vote.Vote.ValidatorProTxHash) { - ch <- msg + select { + case <-ctx.Done(): + case ch <- msg: + } } } }() @@ -406,41 +426,59 @@ func subscribeToVoter(cs *State, proTxHash []byte) <-chan tmpubsub.Message { //------------------------------------------------------------------------------- // consensus states -func newState(state sm.State, pv types.PrivValidator, app abci.Application) (*State, error) { - cfg, err := config.ResetTestRoot("consensus_state_test") - if err != nil { - return nil, err - } - return newStateWithConfig(cfg, state, pv, app), nil +func newState( + ctx context.Context, + t *testing.T, + logger log.Logger, + state sm.State, + pv types.PrivValidator, + app abci.Application, +) *State { + t.Helper() + + cfg, err := config.ResetTestRoot(t.TempDir(), "consensus_state_test") + 
require.NoError(t, err) + + return newStateWithConfig(ctx, t, logger, cfg, state, pv, app) } func newStateWithConfig( + ctx context.Context, + t *testing.T, + logger log.Logger, thisConfig *config.Config, state sm.State, pv types.PrivValidator, app abci.Application, ) *State { - blockStore := store.NewBlockStore(dbm.NewMemDB()) - return newStateWithConfigAndBlockStore(thisConfig, state, pv, app, blockStore) + t.Helper() + return newStateWithConfigAndBlockStore(ctx, t, logger, thisConfig, state, pv, app, store.NewBlockStore(dbm.NewMemDB())) } func newStateWithConfigAndBlockStore( + ctx context.Context, + t *testing.T, + logger log.Logger, thisConfig *config.Config, state sm.State, pv types.PrivValidator, app abci.Application, blockStore *store.BlockStore, ) *State { + t.Helper() - // one for mempool, one for consensus, one for signature validation - mtx := new(tmsync.Mutex) - proxyAppConnMem := abcicli.NewLocalClient(mtx, app) - proxyAppConnCon := abcicli.NewLocalClient(mtx, app) - proxyAppConnQry := abcicli.NewLocalClient(mtx, app) + // one for mempool, one for consensus + proxyAppConnMem := abciclient.NewLocalClient(logger, app) + proxyAppConnCon := abciclient.NewLocalClient(logger, app) // Make Mempool - mempool := mempoolv0.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0) - mempool.SetLogger(log.TestingLogger().With("module", "mempool")) + + mempool := mempool.NewTxMempool( + logger.With("module", "mempool"), + thisConfig.Mempool, + proxyAppConnMem, + ) + if thisConfig.Consensus.WaitForTxs() { mempool.EnableTxsAvailable() } @@ -450,275 +488,293 @@ func newStateWithConfigAndBlockStore( // Make State stateDB := dbm.NewMemDB() stateStore := sm.NewStore(stateDB) - if err := stateStore.Save(state); err != nil { // for save height 1's validators info - panic(err) + require.NoError(t, stateStore.Save(state)) + + eventBus := eventbus.NewDefault(logger.With("module", "events")) + require.NoError(t, eventBus.Start(ctx)) + + blockExec := sm.NewBlockExecutor(stateStore, logger, proxyAppConnCon, mempool, evpool, blockStore, eventBus, sm.NopMetrics()) + cs, err := NewState(logger.With("module", "consensus"), + thisConfig.Consensus, + stateStore, + blockExec, + blockStore, + mempool, + evpool, + eventBus, + ) + if err != nil { + t.Fatal(err) } - blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, proxyAppConnQry, mempool, evpool, blockStore, nil) + cs.SetPrivValidator(ctx, pv) - logger := log.TestingLogger().With("module", "consensus") - cs := NewStateWithLogger(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool, logger, 0) - cs.SetLogger(logger) - cs.SetPrivValidator(pv) - - eventBus := types.NewEventBus() - eventBus.SetLogger(log.TestingLogger().With("module", "events")) - err := eventBus.Start() - if err != nil { - panic(err) - } - cs.SetEventBus(eventBus) return cs } -func loadPrivValidator(cfg *config.Config) *privval.FilePV { +func loadPrivValidator(t *testing.T, cfg *config.Config) *privval.FilePV { + t.Helper() privValidatorKeyFile := cfg.PrivValidator.KeyFile() - ensureDir(filepath.Dir(privValidatorKeyFile), 0700) + ensureDir(t, filepath.Dir(privValidatorKeyFile), 0700) privValidatorStateFile := cfg.PrivValidator.StateFile() privValidator, err := privval.LoadOrGenFilePV(privValidatorKeyFile, privValidatorStateFile) - if err != nil { - panic(err) - } - privValidator.Reset() + require.NoError(t, err) + require.NoError(t, privValidator.Reset()) return privValidator } -func randState(cfg *config.Config, nValidators int) (*State, 
[]*validatorStub, error) { +type makeStateArgs struct { + config *config.Config + logger log.Logger + validators int + application abci.Application +} + +func makeState(ctx context.Context, t *testing.T, args makeStateArgs) (*State, []*validatorStub) { + t.Helper() // Get State - state, privVals := randGenesisState(cfg, nValidators, false, 10) + validators := 4 + if args.validators != 0 { + validators = args.validators + } + var app abci.Application + app = kvstore.NewApplication() + if args.application != nil { + app = args.application + } + if args.config == nil { + args.config = configSetup(t) + } + if args.logger == nil { + args.logger = log.NewNopLogger() + } - vss := make([]*validatorStub, nValidators) + consensusParams := factory.ConsensusParams() + // vote timeouts increased because bls12381 signing/verifying operations take longer than ed25519, + // and 10ms (the previous value) is not enough + consensusParams.Timeout.Vote = 50 * time.Millisecond + consensusParams.Timeout.VoteDelta = 5 * time.Millisecond - cs, err := newState(state, privVals[0], kvstore.NewApplication()) - if err != nil { - return nil, nil, err - } + state, privVals := makeGenesisState(ctx, t, args.config, genesisStateArgs{ + Params: consensusParams, + Validators: validators, + }) - for i := 0; i < nValidators; i++ { + vss := make([]*validatorStub, validators) + + cs := newState(ctx, t, args.logger, state, privVals[0], app) + + for i := 0; i < validators; i++ { vss[i] = newValidatorStub(privVals[i], int32(i), cs.state.InitialHeight) } - return cs, vss, nil + return cs, vss } //------------------------------------------------------------------------------- -func ensureNoNewEvent(ch <-chan tmpubsub.Message, timeout time.Duration, +func ensureNoMessageBeforeTimeout(t *testing.T, ch <-chan tmpubsub.Message, timeout time.Duration, errorMessage string) { + t.Helper() select { case <-time.After(timeout): break case <-ch: - panic(errorMessage) + t.Fatal(errorMessage) } } -func ensureNoNewEventOnChannel(ch <-chan tmpubsub.Message) { - ensureNoNewEvent( +func ensureNoNewEventOnChannel(t *testing.T, ch <-chan tmpubsub.Message) { + t.Helper() + ensureNoMessageBeforeTimeout( + t, ch, ensureTimeout, "We should be stuck waiting, not receiving new event on the channel") } -func ensureNoNewRoundStep(stepCh <-chan tmpubsub.Message) { - ensureNoNewEvent( +func ensureNoNewRoundStep(t *testing.T, stepCh <-chan tmpubsub.Message) { + t.Helper() + ensureNoMessageBeforeTimeout( + t, stepCh, ensureTimeout, "We should be stuck waiting, not receiving NewRoundStep event") } -func ensureNoNewUnlock(unlockCh <-chan tmpubsub.Message) { - ensureNoNewEvent( - unlockCh, - ensureTimeout, - "We should be stuck waiting, not receiving Unlock event") -} - -func ensureNoNewTimeout(stepCh <-chan tmpubsub.Message, timeout int64) { +func ensureNoNewTimeout(t *testing.T, stepCh <-chan tmpubsub.Message, timeout int64) { + t.Helper() timeoutDuration := time.Duration(timeout*10) * time.Nanosecond - ensureNoNewEvent( + ensureNoMessageBeforeTimeout( + t, stepCh, timeoutDuration, "We should be stuck waiting, not receiving NewTimeout event") } -func ensureNewEvent(ch <-chan tmpubsub.Message, height int64, round int32, timeout time.Duration, errorMessage string) { - select { - case <-time.After(timeout): - panic(errorMessage) - case msg := <-ch: - roundStateEvent, ok := msg.Data().(types.EventDataRoundState) - if !ok { - panic(fmt.Sprintf("expected a EventDataRoundState, got %T. 
Wrong subscription channel?", - msg.Data())) - } - if roundStateEvent.Height != height { - panic(fmt.Sprintf("expected height %v, got %v", height, roundStateEvent.Height)) - } - if roundStateEvent.Round != round { - panic(fmt.Sprintf("expected round %v, got %v", round, roundStateEvent.Round)) - } - // TODO: We could check also for a step at this point! - } +func ensureNewEvent(t *testing.T, ch <-chan tmpubsub.Message, height int64, round int32, timeout time.Duration) { + t.Helper() + msg := ensureMessageBeforeTimeout(t, ch, ensureTimeout) + roundStateEvent, ok := msg.Data().(types.EventDataRoundState) + require.True(t, ok, + "expected a EventDataRoundState, got %T. Wrong subscription channel?", + msg.Data()) + + require.Equal(t, height, roundStateEvent.Height) + require.Equal(t, round, roundStateEvent.Round) + // TODO: We could check also for a step at this point! } -func ensureNewRound(roundCh <-chan tmpubsub.Message, height int64, round int32) { - select { - case <-time.After(ensureTimeout): - panic("Timeout expired while waiting for NewRound event") - case msg := <-roundCh: - newRoundEvent, ok := msg.Data().(types.EventDataNewRound) - if !ok { - panic(fmt.Sprintf("expected a EventDataNewRound, got %T. Wrong subscription channel?", - msg.Data())) - } - if newRoundEvent.Height != height { - panic(fmt.Sprintf("expected height %v, got %v", height, newRoundEvent.Height)) - } - if newRoundEvent.Round != round { - panic(fmt.Sprintf("expected round %v, got %v", round, newRoundEvent.Round)) - } - } +func ensureNewRound(t *testing.T, roundCh <-chan tmpubsub.Message, height int64, round int32) { + t.Helper() + msg := ensureMessageBeforeTimeout(t, roundCh, ensureTimeout) + newRoundEvent, ok := msg.Data().(types.EventDataNewRound) + require.True(t, ok, "expected a EventDataNewRound, got %T. Wrong subscription channel?", + msg.Data()) + + require.Equal(t, height, newRoundEvent.Height) + require.Equal(t, round, newRoundEvent.Round) } -func ensureNewTimeout(timeoutCh <-chan tmpubsub.Message, height int64, round int32, timeout int64) { +func ensureNewTimeout(t *testing.T, timeoutCh <-chan tmpubsub.Message, height int64, round int32, timeout int64) { + t.Helper() timeoutDuration := time.Duration(timeout*10) * time.Nanosecond - ensureNewEvent(timeoutCh, height, round, timeoutDuration, - "Timeout expired while waiting for NewTimeout event") + ensureNewEvent(t, timeoutCh, height, round, timeoutDuration) } -func ensureNewProposal(proposalCh <-chan tmpubsub.Message, height int64, round int32) { - select { - case <-time.After(ensureTimeout): - panic("Timeout expired while waiting for NewProposal event") - case msg := <-proposalCh: - proposalEvent, ok := msg.Data().(types.EventDataCompleteProposal) - if !ok { - panic(fmt.Sprintf("expected a EventDataCompleteProposal, got %T. Wrong subscription channel?", - msg.Data())) - } - if proposalEvent.Height != height { - panic(fmt.Sprintf("expected height %v, got %v", height, proposalEvent.Height)) - } - if proposalEvent.Round != round { - panic(fmt.Sprintf("expected round %v, got %v", round, proposalEvent.Round)) - } - } +func ensureNewProposal(t *testing.T, proposalCh <-chan tmpubsub.Message, height int64, round int32) types.BlockID { + t.Helper() + msg := ensureMessageBeforeTimeout(t, proposalCh, ensureTimeout) + proposalEvent, ok := msg.Data().(types.EventDataCompleteProposal) + require.True(t, ok, "expected a EventDataCompleteProposal, got %T. 
Wrong subscription channel?", + msg.Data()) + require.Equal(t, height, proposalEvent.Height) + require.Equal(t, round, proposalEvent.Round) + return proposalEvent.BlockID } -func ensureNewValidBlock(validBlockCh <-chan tmpubsub.Message, height int64, round int32) { - ensureNewEvent(validBlockCh, height, round, ensureTimeout, - "Timeout expired while waiting for NewValidBlock event") +func ensureNewValidBlock(t *testing.T, validBlockCh <-chan tmpubsub.Message, height int64, round int32) { + t.Helper() + ensureNewEvent(t, validBlockCh, height, round, ensureTimeout) } -func ensureNewBlock(blockCh <-chan tmpubsub.Message, height int64) { - select { - case <-time.After(ensureTimeout): - panic("Timeout expired while waiting for NewBlock event") - case msg := <-blockCh: - blockEvent, ok := msg.Data().(types.EventDataNewBlock) - if !ok { - panic(fmt.Sprintf("expected a EventDataNewBlock, got %T. Wrong subscription channel?", - msg.Data())) - } - if blockEvent.Block.Height != height { - panic(fmt.Sprintf("expected height %v, got %v", height, blockEvent.Block.Height)) - } - } +func ensureNewBlock(t *testing.T, blockCh <-chan tmpubsub.Message, height int64) { + t.Helper() + msg := ensureMessageBeforeTimeout(t, blockCh, ensureTimeout) + blockEvent, ok := msg.Data().(types.EventDataNewBlock) + require.True(t, ok, "expected a EventDataNewBlock, got %T. Wrong subscription channel?", + msg.Data()) + require.Equal(t, height, blockEvent.Block.Height) } -func ensureNewBlockHeader(blockCh <-chan tmpubsub.Message, height int64, blockHash tmbytes.HexBytes) { - select { - case <-time.After(ensureTimeout): - panic("Timeout expired while waiting for NewBlockHeader event") - case msg := <-blockCh: - blockHeaderEvent, ok := msg.Data().(types.EventDataNewBlockHeader) - if !ok { - panic(fmt.Sprintf("expected a EventDataNewBlockHeader, got %T. Wrong subscription channel?", - msg.Data())) - } - if blockHeaderEvent.Header.Height != height { - panic(fmt.Sprintf("expected height %v, got %v", height, blockHeaderEvent.Header.Height)) - } - if !bytes.Equal(blockHeaderEvent.Header.Hash(), blockHash) { - panic(fmt.Sprintf("expected header %X, got %X", blockHash, blockHeaderEvent.Header.Hash())) - } - } +func ensureNewBlockHeader(t *testing.T, blockCh <-chan tmpubsub.Message, height int64, blockHash tmbytes.HexBytes) { + t.Helper() + msg := ensureMessageBeforeTimeout(t, blockCh, ensureTimeout) + blockHeaderEvent, ok := msg.Data().(types.EventDataNewBlockHeader) + require.True(t, ok, "expected a EventDataNewBlockHeader, got %T. 
Wrong subscription channel?", + msg.Data()) + + require.Equal(t, height, blockHeaderEvent.Header.Height) + require.True(t, bytes.Equal(blockHeaderEvent.Header.Hash(), blockHash)) +} + +func ensureLock(t *testing.T, lockCh <-chan tmpubsub.Message, height int64, round int32) { + t.Helper() + ensureNewEvent(t, lockCh, height, round, ensureTimeout) } -func ensureNewUnlock(unlockCh <-chan tmpubsub.Message, height int64, round int32) { - ensureNewEvent(unlockCh, height, round, ensureTimeout, - "Timeout expired while waiting for NewUnlock event") +func ensureRelock(t *testing.T, relockCh <-chan tmpubsub.Message, height int64, round int32) { + t.Helper() + ensureNewEvent(t, relockCh, height, round, ensureTimeout) } -func ensureProposal(proposalCh <-chan tmpubsub.Message, height int64, round int32, propID types.BlockID) { - select { - case <-time.After(ensureTimeout): - panic("Timeout expired while waiting for NewProposal event") - case msg := <-proposalCh: - proposalEvent, ok := msg.Data().(types.EventDataCompleteProposal) - if !ok { - panic(fmt.Sprintf("expected a EventDataCompleteProposal, got %T. Wrong subscription channel?", - msg.Data())) - } - if proposalEvent.Height != height { - panic(fmt.Sprintf("expected height %v, got %v", height, proposalEvent.Height)) - } - if proposalEvent.Round != round { - panic(fmt.Sprintf("expected round %v, got %v", round, proposalEvent.Round)) - } - if !proposalEvent.BlockID.Equals(propID) { - panic(fmt.Sprintf("Proposed block does not match expected block (%v != %v)", proposalEvent.BlockID, propID)) - } +func ensureProposal(t *testing.T, proposalCh <-chan tmpubsub.Message, height int64, round int32, propID types.BlockID) { + ensureProposalWithTimeout(t, proposalCh, height, round, &propID, ensureTimeout) +} + +func ensureProposalWithTimeout(t *testing.T, proposalCh <-chan tmpubsub.Message, height int64, round int32, propID *types.BlockID, timeout time.Duration) { + t.Helper() + msg := ensureMessageBeforeTimeout(t, proposalCh, timeout) + proposalEvent, ok := msg.Data().(types.EventDataCompleteProposal) + require.True(t, ok, "expected a EventDataCompleteProposal, got %T. 
Wrong subscription channel?", + msg.Data()) + require.Equal(t, height, proposalEvent.Height) + require.Equal(t, round, proposalEvent.Round) + if propID != nil { + require.True(t, proposalEvent.BlockID.Equals(*propID), + "Proposed block does not match expected block (%v != %v)", proposalEvent.BlockID, propID) } } -func ensurePrecommit(voteCh <-chan tmpubsub.Message, height int64, round int32) { - ensureVote(voteCh, height, round, tmproto.PrecommitType) +func ensurePrecommit(t *testing.T, voteCh <-chan tmpubsub.Message, height int64, round int32) { + t.Helper() + ensureVote(t, voteCh, height, round, tmproto.PrecommitType) } -func ensurePrevote(voteCh <-chan tmpubsub.Message, height int64, round int32) { - ensureVote(voteCh, height, round, tmproto.PrevoteType) +func ensurePrevote(t *testing.T, voteCh <-chan tmpubsub.Message, height int64, round int32) { + t.Helper() + ensureVote(t, voteCh, height, round, tmproto.PrevoteType) } -func ensureVote(voteCh <-chan tmpubsub.Message, height int64, round int32, - voteType tmproto.SignedMsgType) { +func ensurePrevoteMatch(t *testing.T, voteCh <-chan tmpubsub.Message, height int64, round int32, hash []byte) { + t.Helper() + ensureVoteMatch(t, voteCh, height, round, hash, tmproto.PrevoteType) +} + +func ensurePrecommitMatch(t *testing.T, voteCh <-chan tmpubsub.Message, height int64, round int32, hash []byte) { + t.Helper() + ensureVoteMatch(t, voteCh, height, round, hash, tmproto.PrecommitType) +} + +func ensureVoteMatch(t *testing.T, voteCh <-chan tmpubsub.Message, height int64, round int32, hash []byte, voteType tmproto.SignedMsgType) { + t.Helper() select { case <-time.After(ensureTimeout): - panic("Timeout expired while waiting for NewVote event") + t.Fatal("Timeout expired while waiting for NewVote event") case msg := <-voteCh: voteEvent, ok := msg.Data().(types.EventDataVote) - if !ok { - panic(fmt.Sprintf("expected a EventDataVote, got %T. Wrong subscription channel?", - msg.Data())) - } + require.True(t, ok, "expected a EventDataVote, got %T. Wrong subscription channel?", + msg.Data()) + vote := voteEvent.Vote - if vote.Height != height { - panic(fmt.Sprintf("expected height %v, got %v", height, vote.Height)) - } - if vote.Round != round { - panic(fmt.Sprintf("expected round %v, got %v", round, vote.Round)) - } - if vote.Type != voteType { - panic(fmt.Sprintf("expected type %v, got %v", voteType, vote.Type)) + assert.Equal(t, height, vote.Height, "expected height %d, but got %d", height, vote.Height) + assert.Equal(t, round, vote.Round, "expected round %d, but got %d", round, vote.Round) + assert.Equal(t, voteType, vote.Type, "expected type %s, but got %s", voteType, vote.Type) + if hash == nil { + require.Nil(t, vote.BlockID.Hash, "Expected prevote to be for nil, got %X", vote.BlockID.Hash) + } else { + require.True(t, bytes.Equal(vote.BlockID.Hash, hash), "Expected prevote to be for %X, got %X", hash, vote.BlockID.Hash) } } } -func ensurePrecommitTimeout(ch <-chan tmpubsub.Message) { - select { - case <-time.After(ensureTimeout): - panic("Timeout expired while waiting for the Precommit to Timeout") - case <-ch: - } +func ensureVote(t *testing.T, voteCh <-chan tmpubsub.Message, height int64, round int32, voteType tmproto.SignedMsgType) { + t.Helper() + msg := ensureMessageBeforeTimeout(t, voteCh, ensureTimeout) + voteEvent, ok := msg.Data().(types.EventDataVote) + require.True(t, ok, "expected a EventDataVote, got %T. 
Wrong subscription channel?", + msg.Data()) + + vote := voteEvent.Vote + require.Equal(t, height, vote.Height, "expected height %d, but got %d", height, vote.Height) + require.Equal(t, round, vote.Round, "expected round %d, but got %d", round, vote.Round) + require.Equal(t, voteType, vote.Type, "expected type %s, but got %s", voteType, vote.Type) } -func ensureNewEventOnChannel(ch <-chan tmpubsub.Message) { +func ensureNewEventOnChannel(t *testing.T, ch <-chan tmpubsub.Message) { + t.Helper() + ensureMessageBeforeTimeout(t, ch, ensureTimeout) +} + +func ensureMessageBeforeTimeout(t *testing.T, ch <-chan tmpubsub.Message, to time.Duration) tmpubsub.Message { + t.Helper() select { - case <-time.After(ensureTimeout): - panic("Timeout expired while waiting for new activity on the channel") - case <-ch: + case <-time.After(to): + t.Fatalf("Timeout expired while waiting for message") + case msg := <-ch: + return msg } + panic("unreachable") } //------------------------------------------------------------------------------- @@ -727,20 +783,22 @@ func ensureNewEventOnChannel(ch <-chan tmpubsub.Message) { // consensusLogger is a TestingLogger which uses a different // color for each validator ("validator" key must exist). func consensusLogger() log.Logger { - return log.TestingLogger().With("module", "consensus") + return log.NewNopLogger().With("module", "consensus") } -func randConsensusState( +func makeConsensusState( + ctx context.Context, t *testing.T, cfg *config.Config, nValidators int, testName string, tickerFunc func() TimeoutTicker, - appFunc func() abci.Application, configOpts ...func(*config.Config), ) ([]*State, cleanupFunc) { + t.Helper() + tempDir := t.TempDir() - genDoc, privVals := factory.RandGenesisDoc(cfg, nValidators, 1) + genDoc, privVals := factory.RandGenesisDoc(cfg, nValidators, 1, factory.ConsensusParams()) css := make([]*State, nValidators) logger := consensusLogger() @@ -751,7 +809,7 @@ func randConsensusState( blockStore := store.NewBlockStore(dbm.NewMemDB()) // each state needs its own db state, err := sm.MakeGenesisState(genDoc) require.NoError(t, err) - thisConfig, err := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) + thisConfig, err := ResetConfig(tempDir, fmt.Sprintf("%s_%d", testName, i)) require.NoError(t, err) configRootDirs = append(configRootDirs, thisConfig.RootDir) @@ -760,20 +818,19 @@ func randConsensusState( opt(thisConfig) } - ensureDir(filepath.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal + walDir := filepath.Dir(thisConfig.Consensus.WalFile()) + ensureDir(t, walDir, 0700) - app := appFunc() - - if appCloser, ok := app.(io.Closer); ok { - closeFuncs = append(closeFuncs, appCloser.Close) - } + app := kvstore.NewApplication() + closeFuncs = append(closeFuncs, app.Close) vals := types.TM2PB.ValidatorUpdates(state.Validators) - app.InitChain(abci.RequestInitChain{ValidatorSet: &vals}) + _, err = app.InitChain(ctx, &abci.RequestInitChain{ValidatorSet: &vals}) + require.NoError(t, err) - css[i] = newStateWithConfigAndBlockStore(thisConfig, state, privVals[i], app, blockStore) + l := logger.With("validator", i, "module", "consensus") + css[i] = newStateWithConfigAndBlockStore(ctx, t, l, thisConfig, state, privVals[i], app, blockStore) css[i].SetTimeoutTicker(tickerFunc()) - css[i].SetLogger(logger.With("validator", i, "module", "consensus")) } return css, func() { @@ -788,28 +845,34 @@ func randConsensusState( // nPeers = nValidators + nNotValidator func randConsensusNetWithPeers( + ctx context.Context, + t *testing.T, cfg *config.Config, - 
+	nValidators int,
 	nPeers int,
 	testName string,
 	tickerFunc func() TimeoutTicker,
-	appFunc func(string) abci.Application,
+	appFunc func(log.Logger, string) abci.Application,
 ) ([]*State, *types.GenesisDoc, *config.Config, cleanupFunc) {
-	genDoc, privVals := factory.RandGenesisDoc(cfg, nValidators, 1)
+	t.Helper()
+
+	consParams := factory.ConsensusParams()
+	consParams.Timeout.Propose = 1 * time.Second
+
+	genDoc, privVals := factory.RandGenesisDoc(cfg, nValidators, 1, consParams)
 	css := make([]*State, nPeers)
 	logger := consensusLogger()
 	var peer0Config *config.Config
 	closeFuncs := make([]func() error, 0, nValidators)
 	configRootDirs := make([]string, 0, nPeers)
 	for i := 0; i < nPeers; i++ {
 		state, _ := sm.MakeGenesisState(genDoc)
-		thisConfig, err := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
-		if err != nil {
-			panic(err)
-		}
+		thisConfig, err := ResetConfig(t.TempDir(), fmt.Sprintf("%s_%d", testName, i))
+		require.NoError(t, err)
 		configRootDirs = append(configRootDirs, thisConfig.RootDir)
-		ensureDir(filepath.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
+		ensureDir(t, filepath.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
 		if i == 0 {
 			peer0Config = thisConfig
 		}
@@ -817,36 +880,40 @@ func randConsensusNetWithPeers(
 		if i < nValidators {
 			privVal = privVals[i]
 		} else {
-			tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_")
-			if err != nil {
-				panic(err)
-			}
-			tempStateFile, err := ioutil.TempFile("", "priv_validator_state_")
-			if err != nil {
-				panic(err)
-			}
+			tempKeyFile, err := os.CreateTemp(t.TempDir(), "priv_validator_key_")
+			require.NoError(t, err)
+
+			tempStateFile, err := os.CreateTemp(t.TempDir(), "priv_validator_state_")
+			require.NoError(t, err)
+
			privVal = privval.GenFilePV(tempKeyFile.Name(), tempStateFile.Name())

			// These validators might not have the public keys; for testing purposes, let's assume they don't
			state.Validators.HasPublicKeys = false
			state.NextValidators.HasPublicKeys = false
		}

-		app := appFunc(path.Join(cfg.DBDir(), fmt.Sprintf("%s_%d", testName, i)))
+		app := appFunc(logger, filepath.Join(cfg.DBDir(), fmt.Sprintf("%s_%d", testName, i)))
		if appCloser, ok := app.(io.Closer); ok {
			closeFuncs = append(closeFuncs, appCloser.Close)
		}

		vals := types.TM2PB.ValidatorUpdates(state.Validators)
-		if _, ok := app.(*kvstore.PersistentKVStoreApplication); ok {
-			// simulate handshake, receive app version. If don't do this, replay test will fail
+		switch app.(type) {
+		// simulate handshake, receive app version. If we don't do this, the replay test will fail
+		case *kvstore.PersistentKVStoreApplication:
+			state.Version.Consensus.App = kvstore.ProtocolVersion
+		case *kvstore.Application:
			state.Version.Consensus.App = kvstore.ProtocolVersion
		}
-		app.InitChain(abci.RequestInitChain{ValidatorSet: &vals})
+		_, err = app.InitChain(ctx, &abci.RequestInitChain{ValidatorSet: &vals})
+		require.NoError(t, err)

		// sm.SaveState(stateDB,state) //height 1's validatorsInfo already saved in LoadStateFromDBOrGenesisDoc above
-		proTxHash, _ := privVal.GetProTxHash(context.Background())
-		css[i] = newStateWithConfig(thisConfig, state, privVal, app)
-		css[i].SetLogger(logger.With("validator", i, "node_proTxHash", proTxHash.ShortString(), "module", "consensus"))
+		proTxHash, _ := privVal.GetProTxHash(ctx)
+		css[i] = newStateWithConfig(ctx, t,
+			logger.With("validator", i, "node_proTxHash", proTxHash.ShortString(), "module", "consensus"),
+			thisConfig, state, privVal, app)
		css[i].SetTimeoutTicker(tickerFunc())
	}
	return css, genDoc, peer0Config, func() {
@@ -859,24 +926,47 @@ func randConsensusNetWithPeers(
	}
}

-//-------------------------------------------------------------------------------
-// genesis
+type genesisStateArgs struct {
+	Validators int
+	Power      int64
+	Params     *types.ConsensusParams
+	Time       time.Time
+}

-func randGenesisState(cfg *config.Config, numValidators int, randPower bool, minPower int64) (sm.State, []types.PrivValidator) {
-	genDoc, privValidators := factory.RandGenesisDoc(cfg, numValidators, 1)
-	s0, _ := sm.MakeGenesisState(genDoc)
-	return s0, privValidators
+func makeGenesisState(ctx context.Context, t *testing.T, cfg *config.Config, args genesisStateArgs) (sm.State, []types.PrivValidator) {
+	t.Helper()
+	if args.Power == 0 {
+		args.Power = 1
+	}
+	if args.Validators == 0 {
+		args.Validators = 4
+	}
+	if args.Params == nil {
+		args.Params = types.DefaultConsensusParams()
+	}
+	if args.Time.IsZero() {
+		args.Time = time.Now()
+	}
+	genDoc, privVals := factory.RandGenesisDoc(cfg, args.Validators, 1, args.Params)
+	genDoc.GenesisTime = args.Time
+	s0, err := sm.MakeGenesisState(genDoc)
+	require.NoError(t, err)
+	return s0, privVals
}

func newMockTickerFunc(onlyOnce bool) func() TimeoutTicker {
	return func() TimeoutTicker {
		return &mockTicker{
-			c:        make(chan timeoutInfo, 10),
+			c:        make(chan timeoutInfo, 100),
			onlyOnce: onlyOnce,
		}
	}
}

+func newTickerFunc() func() TimeoutTicker {
+	return func() TimeoutTicker { return NewTimeoutTicker(log.NewNopLogger()) }
+}
+
 // mock ticker only fires on RoundStepNewHeight
 // and only once if onlyOnce=true
 type mockTicker struct {
@@ -887,13 +977,9 @@ type mockTicker struct {
	fired    bool
}

-func (m *mockTicker) Start() error {
-	return nil
-}
-
-func (m *mockTicker) Stop() error {
-	return nil
-}
+func (m *mockTicker) Start(context.Context) error { return nil }
+func (m *mockTicker) Stop()                       {}
+func (m *mockTicker) IsRunning() bool             { return false }

func (m *mockTicker) ScheduleTimeout(ti timeoutInfo) {
	m.mtx.Lock()
@@ -911,16 +997,10 @@ func (m *mockTicker) Chan() <-chan timeoutInfo {
	return m.c
}

-func (*mockTicker) SetLogger(log.Logger) {}
-
-func newKVStore() abci.Application {
+func newEpehemeralKVStore(_ log.Logger, _ string) abci.Application {
	return kvstore.NewApplication()
}

-func newPersistentKVStoreWithPath(dbDir string) abci.Application {
-	return kvstore.NewPersistentKVStoreApplication(dbDir)
-}
-
func signDataIsEqual(v1 *types.Vote, v2
*tmproto.Vote) bool { v1.Height == v2.GetHeight() && v1.Round == v2.Round && bytes.Equal(v1.ValidatorProTxHash.Bytes(), v2.GetValidatorProTxHash()) && - v1.ValidatorIndex == v2.GetValidatorIndex() + v1.ValidatorIndex == v2.GetValidatorIndex() && + bytes.Equal(v1.Extension, v2.Extension) } type stateQuorumManager struct { diff --git a/internal/consensus/core_chainlock_test.go b/internal/consensus/core_chainlock_test.go index 156455f5e4..ca2fef600a 100644 --- a/internal/consensus/core_chainlock_test.go +++ b/internal/consensus/core_chainlock_test.go @@ -12,8 +12,12 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/abci/example/counter" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/eventbus" + "github.com/tendermint/tendermint/libs/log" + tmtime "github.com/tendermint/tendermint/libs/time" "github.com/tendermint/tendermint/types" ) @@ -22,29 +26,26 @@ func TestValidProposalChainLocks(t *testing.T) { nVals = 4 nPeers = nVals ) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() conf := configSetup(t) - states, _, _, cleanup := randConsensusNetWithPeers( + states, _, _, cleanup := randConsensusNetWithPeers(ctx, t, conf, nVals, nPeers, "consensus_chainlocks_test", - newMockTickerFunc(true), + newTickerFunc(), newCounterWithCoreChainLocks, ) t.Cleanup(cleanup) - for i := 0; i < nVals; i++ { - ticker := NewTimeoutTicker() - ticker.SetLogger(states[i].Logger) - states[i].SetTimeoutTicker(ticker) - } - - rts := setupReactor(t, nVals, states, 40) + rts := setupReactor(ctx, t, nVals, states, 100) for i := 0; i < 3; i++ { - timeoutWaitGroup(t, rts.subs, states, func(sub types.Subscription) { - msg := <-sub.Out() + timeoutWaitGroup(t, rts.subs, states, func(sub eventbus.Subscription) { + msg, err := sub.Next(ctx) + require.NoError(t, err) block := msg.Data().(types.EventDataNewBlock).Block // this is true just because of this test where each new height has a new chain lock that is incremented by 1 state := states[0].GetState() @@ -60,40 +61,37 @@ func TestReactorInvalidProposalHeightForChainLocks(t *testing.T) { nVals = 4 nPeers = nVals ) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() conf := configSetup(t) - states, _, _, cleanup := randConsensusNetWithPeers( + states, _, _, cleanup := randConsensusNetWithPeers(ctx, t, conf, nVals, nPeers, "consensus_chainlocks_test", - newMockTickerFunc(true), + newTickerFunc(), newCounterWithCoreChainLocks, ) t.Cleanup(cleanup) - for i := 0; i < nVals; i++ { - ticker := NewTimeoutTicker() - ticker.SetLogger(states[i].Logger) - states[i].SetTimeoutTicker(ticker) - } - // this proposer sends a chain lock at each height byzProposerID := 0 byzProposer := states[byzProposerID] // update the decide proposal to propose the incorrect height - byzProposer.decideProposal = func(j int32) func(int64, int32) { - return func(height int64, round int32) { - invalidProposeCoreChainLockFunc(height, round, states[j]) + byzProposer.decideProposal = func() func(context.Context, int64, int32) { + return func(_ context.Context, height int64, round int32) { + invalidProposeCoreChainLockFunc(ctx, t, height, round, states[byzProposerID]) } - }(int32(0)) + }() - rts := setupReactor(t, nVals, states, 40) + rts := setupReactor(ctx, t, nVals, states, 100) for i := 0; i < 3; i++ { - timeoutWaitGroup(t, rts.subs, states, func(sub types.Subscription) { - msg := <-sub.Out() + timeoutWaitGroup(t, rts.subs, states, func(sub 
eventbus.Subscription) {
+			msg, err := sub.Next(ctx)
+			require.NoError(t, err)
			block := msg.Data().(types.EventDataNewBlock).Block
			// this is true just because of this test where each new height has a new chain lock that is incremented by 1
			state := states[0].GetState()
@@ -109,29 +107,26 @@ func TestReactorInvalidBlockChainLock(t *testing.T) {
		nVals = 4
		nPeers = nVals
	)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()

	conf := configSetup(t)

-	states, _, _, cleanup := randConsensusNetWithPeers(
+	states, _, _, cleanup := randConsensusNetWithPeers(ctx, t,
		conf,
		nVals,
		nPeers,
		"consensus_chainlocks_test",
-		newMockTickerFunc(true),
+		newTickerFunc(),
		newCounterWithBackwardsCoreChainLocks,
	)
	t.Cleanup(cleanup)

-	for i := 0; i < nVals; i++ {
-		ticker := NewTimeoutTicker()
-		ticker.SetLogger(states[i].Logger)
-		states[i].SetTimeoutTicker(ticker)
-	}
-
-	rts := setupReactor(t, nVals, states, 100)
+	rts := setupReactor(ctx, t, nVals, states, 100)

	for i := 0; i < 10; i++ {
-		timeoutWaitGroup(t, rts.subs, states, func(sub types.Subscription) {
-			msg := <-sub.Out()
+		timeoutWaitGroup(t, rts.subs, states, func(sub eventbus.Subscription) {
+			msg, err := sub.Next(ctx)
+			require.NoError(t, err)
			block := msg.Data().(types.EventDataNewBlock).Block
			// this is true just because of this test where each new height has a new chain lock that is incremented by 1
			state := states[0].GetState()
@@ -146,14 +141,14 @@ func TestReactorInvalidBlockChainLock(t *testing.T) {
	}
}

-func newCounterWithCoreChainLocks(_ string) abci.Application {
+func newCounterWithCoreChainLocks(logger log.Logger, _ string) abci.Application {
	counterApp := counter.NewApplication(true)
	counterApp.HasCoreChainLocks = true
	counterApp.CurrentCoreChainLockHeight = 1
	return counterApp
}

-func newCounterWithBackwardsCoreChainLocks(_ string) abci.Application {
+func newCounterWithBackwardsCoreChainLocks(logger log.Logger, _ string) abci.Application {
	counterApp := counter.NewApplication(true)
	counterApp.HasCoreChainLocks = true
	counterApp.CurrentCoreChainLockHeight = 100
@@ -161,24 +156,27 @@ func newCounterWithBackwardsCoreChainLocks(_ string) abci.Application {
	return counterApp
}

-func setupReactor(t *testing.T, n int, states []*State, size int) *reactorTestSuite {
+func setupReactor(ctx context.Context, t *testing.T, n int, states []*State, size int) *reactorTestSuite {
	t.Helper()
-	rts := setup(t, n, states, size)
+	rts := setup(ctx, t, n, states, size)
	for _, reactor := range rts.reactors {
		state := reactor.state.GetState()
-		reactor.SwitchToConsensus(state, false)
+		reactor.SwitchToConsensus(ctx, state, false)
	}
	return rts
}

-func invalidProposeCoreChainLockFunc(height int64, round int32, cs *State) {
+func invalidProposeCoreChainLockFunc(ctx context.Context, t *testing.T, height int64, round int32, cs *State) {
-	// routine to:
-	// - precommit for a random block
-	// - send precommit to all peers
-	// - disable privValidator (so we don't do normal precommits)
+	// routine to propose a block with a deliberately stale (non-updated) core
+	// chain locked height and send it on the internal message queue
-	var block *types.Block
-	var blockParts *types.PartSet
+	var (
+		block      *types.Block
+		blockParts *types.PartSet
+		err        error
+	)

	// Decide on block
	if cs.ValidBlock != nil {
@@ -186,57 +184,59 @@ func invalidProposeCoreChainLockFunc(height int64, round int32, cs *State) {
		block, blockParts = cs.ValidBlock, cs.ValidBlockParts
	} else {
		// Create a new proposal block from state/txs from the mempool.
- block, blockParts = cs.createProposalBlock() - if block == nil { - return - } + block, err = cs.createProposalBlock(ctx) + require.NoError(t, err) + blockParts, err = block.MakePartSet(types.BlockPartSizeBytes) + require.NoError(t, err) } // Flush the WAL. Otherwise, we may not recompute the same proposal to sign, // and the privValidator will refuse to sign anything. if err := cs.wal.FlushAndSync(); err != nil { - cs.Logger.Error("Error flushing to disk") + cs.logger.Error("Error flushing to disk") } // Make proposal propBlockID := types.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()} // It is byzantine because it is not updating the LastCoreChainLockedBlockHeight - proposal := types.NewProposal(height, cs.state.LastCoreChainLockedBlockHeight, round, cs.ValidRound, propBlockID) + proposal := types.NewProposal(height, cs.state.LastCoreChainLockedBlockHeight, round, cs.ValidRound, propBlockID, block.Header.Time) p := proposal.ToProto() - _, err := cs.privValidator.SignProposal( - context.Background(), - cs.state.ChainID, - cs.Validators.QuorumType, - cs.Validators.QuorumHash, - p, - ) - if err == nil { - proposal.Signature = p.Signature - - // send proposal and block parts on internal msg queue - cs.sendInternalMessage(msgInfo{&ProposalMessage{proposal}, ""}) - for i := 0; i < int(blockParts.Total()); i++ { - part := blockParts.GetPart(i) - cs.sendInternalMessage(msgInfo{&BlockPartMessage{cs.Height, cs.Round, part}, ""}) + + validatorsAtProposalHeight := cs.state.ValidatorsAtHeight(p.Height) + quorumHash := validatorsAtProposalHeight.QuorumHash + + _, err = cs.privValidator.SignProposal(ctx, cs.state.ChainID, cs.Validators.QuorumType, quorumHash, p) + if err != nil { + if !cs.replayMode { + cs.logger.Error("enterPropose: Error signing proposal", "height", height, "round", round, "err", err) } - cs.Logger.Info("Signed proposal", "height", height, "round", round, "proposal", proposal) - cs.Logger.Debug(fmt.Sprintf("Signed proposal block: %v", block)) - } else if !cs.replayMode { - cs.Logger.Error("enterPropose: Error signing proposal", "height", height, "round", round, "err", err) + return } + + proposal.Signature = p.Signature + + // send proposal and block parts on internal msg queue + cs.sendInternalMessage(ctx, msgInfo{&ProposalMessage{proposal}, "", tmtime.Now()}) + for i := 0; i < int(blockParts.Total()); i++ { + part := blockParts.GetPart(i) + cs.sendInternalMessage(ctx, msgInfo{&BlockPartMessage{cs.Height, cs.Round, part}, "", tmtime.Now()}) + } + + cs.logger.Info("Signed proposal", "height", height, "round", round, "proposal", proposal) + cs.logger.Debug(fmt.Sprintf("Signed proposal block: %v", block)) } func timeoutWaitGroup( t *testing.T, - subs map[types.NodeID]types.Subscription, + subs map[types.NodeID]eventbus.Subscription, states []*State, - f func(types.Subscription), + f func(eventbus.Subscription), ) { var wg sync.WaitGroup wg.Add(len(subs)) for _, sub := range subs { - go func(sub types.Subscription) { + go func(sub eventbus.Subscription) { f(sub) wg.Done() }(sub) diff --git a/internal/consensus/invalid_test.go b/internal/consensus/invalid_test.go index c66cf6ab1c..fcbd5ce3e9 100644 --- a/internal/consensus/invalid_test.go +++ b/internal/consensus/invalid_test.go @@ -2,10 +2,15 @@ package consensus import ( "context" + "errors" "sync" "testing" + "time" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/p2p" 
"github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/libs/log" @@ -16,25 +21,27 @@ import ( ) func TestReactorInvalidPrecommit(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + config := configSetup(t) - n := 4 - states, cleanup := randConsensusState(t, + const n = 2 + states, cleanup := makeConsensusState(ctx, t, config, n, "consensus_reactor_test", - newMockTickerFunc(true), newKVStore) + newMockTickerFunc(true)) t.Cleanup(cleanup) - for i := 0; i < 4; i++ { - ticker := NewTimeoutTicker() - ticker.SetLogger(states[i].Logger) + for i := 0; i < n; i++ { + ticker := NewTimeoutTicker(states[i].logger) states[i].SetTimeoutTicker(ticker) } - rts := setup(t, n, states, 100) // buffer must be large enough to not deadlock + rts := setup(ctx, t, n, states, 100) // buffer must be large enough to not deadlock for _, reactor := range rts.reactors { state := reactor.state.GetState() - reactor.SwitchToConsensus(state, false) + reactor.SwitchToConsensus(ctx, state, false) } // this val sends a random precommit at each height @@ -43,12 +50,14 @@ func TestReactorInvalidPrecommit(t *testing.T) { byzState := rts.states[node.NodeID] byzReactor := rts.reactors[node.NodeID] + signal := make(chan struct{}) // Update the doPrevote function to just send a valid precommit for a random // block and otherwise disable the priv validator. byzState.mtx.Lock() privVal := byzState.privValidator - byzState.doPrevote = func(height int64, round int32, _ bool) { - invalidDoPrevoteFunc(t, height, round, byzState, byzReactor, privVal) + byzState.doPrevote = func(ctx context.Context, height int64, round int32, _ bool) { + defer close(signal) + invalidDoPrevoteFunc(ctx, t, height, round, byzState, byzReactor, rts.voteChannels[node.NodeID], privVal) } byzState.mtx.Unlock() @@ -56,21 +65,51 @@ func TestReactorInvalidPrecommit(t *testing.T) { // // TODO: Make this tighter by ensuring the halt happens by block 2. 
	var wg sync.WaitGroup
+
	for i := 0; i < 10; i++ {
		for _, sub := range rts.subs {
			wg.Add(1)
-			go func(s types.Subscription) {
-				<-s.Out()
-				wg.Done()
+			go func(s eventbus.Subscription) {
+				defer wg.Done()
+				_, err := s.Next(ctx)
+				if ctx.Err() != nil {
+					return
+				}
+				if !assert.NoError(t, err) {
+					cancel() // cancel other subscribers on failure
+				}
			}(sub)
		}
	}
+	wait := make(chan struct{})
+	go func() { defer close(wait); wg.Wait() }()

-	wg.Wait()
+	select {
+	case <-wait:
+		if _, ok := <-signal; !ok {
+			t.Fatal("test condition did not fire")
+		}
+	case <-ctx.Done():
+		if _, ok := <-signal; !ok {
+			t.Fatal("test condition did not fire after timeout")
+			return
+		}
+	case <-signal:
+		// test passed
+	}
}

-func invalidDoPrevoteFunc(t *testing.T, height int64, round int32, cs *State, r *Reactor, pv types.PrivValidator) {
+func invalidDoPrevoteFunc(
+	ctx context.Context,
+	t *testing.T,
+	height int64,
+	round int32,
+	cs *State,
+	r *Reactor,
+	voteCh *p2p.Channel,
+	pv types.PrivValidator,
+) {
	// routine to:
	// - precommit for a random block
	// - send precommit to all peers
@@ -79,7 +118,7 @@ func invalidDoPrevoteFunc(t *testing.T, height int64, round int32, cs *State, r
	cs.mtx.Lock()
	cs.privValidator = pv

-	proTxHash, err := cs.privValidator.GetProTxHash(context.Background())
+	proTxHash, err := cs.privValidator.GetProTxHash(ctx)
	require.NoError(t, err)
	valIndex, _ := cs.Validators.GetByProTxHash(proTxHash)
@@ -99,7 +138,7 @@ func invalidDoPrevoteFunc(t *testing.T, height int64, round int32, cs *State, r
		p := precommit.ToProto()
		err = cs.privValidator.SignVote(
-			context.Background(),
+			ctx,
			cs.state.ChainID,
			cs.Validators.QuorumType,
			cs.Validators.QuorumHash,
@@ -114,14 +153,30 @@ func invalidDoPrevoteFunc(t *testing.T, height int64, round int32, cs *State, r
		cs.privValidator = nil // disable priv val so we don't do normal votes
		cs.mtx.Unlock()

+		r.mtx.Lock()
+		ids := make([]types.NodeID, 0, len(r.peers))
		for _, ps := range r.peers {
-			cs.Logger.Info("sending bad vote", "block", blockHash, "peer", ps.peerID)
-			_ = r.voteCh.Send(p2p.Envelope{
-				To: ps.peerID,
+			ids = append(ids, ps.peerID)
+		}
+		r.mtx.Unlock()
+
+		count := 0
+		for _, peerID := range ids {
+			count++
+			err := voteCh.Send(ctx, p2p.Envelope{
+				To: peerID,
				Message: &tmcons.Vote{
					Vote: precommit.ToProto(),
				},
			})
+			// We want to have sent at least some of these votes: tolerate the
+			// context being canceled once the first vote has gone out, but
+			// fail the test on any other send error.
+ if errors.Is(err, context.Canceled) && count > 0 { + break + } + require.NoError(t, err) } }() } diff --git a/internal/consensus/mempool_test.go b/internal/consensus/mempool_test.go index 2932ff1536..93133c9a88 100644 --- a/internal/consensus/mempool_test.go +++ b/internal/consensus/mempool_test.go @@ -17,134 +17,163 @@ import ( "github.com/tendermint/tendermint/internal/mempool" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/store" + "github.com/tendermint/tendermint/internal/test/factory" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/types" ) // for testing -func assertMempool(txn txNotifier) mempool.Mempool { - return txn.(mempool.Mempool) +func assertMempool(t *testing.T, txn txNotifier) mempool.Mempool { + t.Helper() + mp, ok := txn.(mempool.Mempool) + require.True(t, ok) + return mp } func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + baseConfig := configSetup(t) for proofBlockRange := int64(1); proofBlockRange <= 3; proofBlockRange++ { t.Logf("Checking proof block range %d", proofBlockRange) - config, err := ResetConfig("consensus_mempool_txs_available_test") + config, err := ResetConfig(t.TempDir(), "consensus_mempool_txs_available_test") require.NoError(t, err) t.Cleanup(func() { _ = os.RemoveAll(config.RootDir) }) config.Consensus.CreateEmptyBlocks = false config.Consensus.CreateProofBlockRange = proofBlockRange - state, privVals := randGenesisState(baseConfig, 1, false, types.DefaultDashVotingPower) - cs := newStateWithConfig(config, state, privVals[0], NewCounterApplication()) - assertMempool(cs.txNotifier).EnableTxsAvailable() + state, privVals := makeGenesisState(ctx, t, baseConfig, genesisStateArgs{ + Validators: 1, + Power: types.DefaultDashVotingPower, + Params: factory.ConsensusParams()}) + cs := newStateWithConfig(ctx, t, log.NewNopLogger(), config, state, privVals[0], NewCounterApplication()) + assertMempool(t, cs.txNotifier).EnableTxsAvailable() height, round := cs.Height, cs.Round - newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock) - startTestRound(cs, height, round) + newBlockCh := subscribe(ctx, t, cs.eventBus, types.EventQueryNewBlock) + startTestRound(ctx, cs, height, round) - ensureNewEventOnChannel(newBlockCh) // first block gets committed - ensureNoNewEventOnChannel(newBlockCh) - deliverTxsRange(cs, 0, 1) - ensureNewEventOnChannel(newBlockCh) // commit txs + ensureNewEventOnChannel(t, newBlockCh) // first block gets committed + ensureNoNewEventOnChannel(t, newBlockCh) + checkTxsRange(ctx, t, cs, 0, 1) + ensureNewEventOnChannel(t, newBlockCh) // commit txs for i := int64(0); i < proofBlockRange; i++ { - ensureNewEventOnChannel(newBlockCh) // commit updated app hash + ensureNewEventOnChannel(t, newBlockCh) // commit updated app hash } - ensureNoNewEventOnChannel(newBlockCh) + ensureNoNewEventOnChannel(t, newBlockCh) } } func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) { baseConfig := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - config, err := ResetConfig("consensus_mempool_txs_available_test") + config, err := ResetConfig(t.TempDir(), "consensus_mempool_txs_available_test") require.NoError(t, err) t.Cleanup(func() { _ = os.RemoveAll(config.RootDir) }) config.Consensus.CreateEmptyBlocksInterval = ensureTimeout + state, privVals := makeGenesisState(ctx, t, baseConfig, genesisStateArgs{ + Validators: 1, + Power: 
types.DefaultDashVotingPower,
+		Params:     factory.ConsensusParams()})
+	cs := newStateWithConfig(ctx, t, log.NewNopLogger(), config, state, privVals[0], NewCounterApplication())

-	state, privVals := randGenesisState(baseConfig, 1, false, types.DefaultDashVotingPower)
-
-	cs := newStateWithConfig(config, state, privVals[0], NewCounterApplication())
-
-	assertMempool(cs.txNotifier).EnableTxsAvailable()
+	assertMempool(t, cs.txNotifier).EnableTxsAvailable()

-	newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock)
-	startTestRound(cs, cs.Height, cs.Round)
+	newBlockCh := subscribe(ctx, t, cs.eventBus, types.EventQueryNewBlock)
+	startTestRound(ctx, cs, cs.Height, cs.Round)

-	ensureNewEventOnChannel(newBlockCh)   // first block gets committed
-	ensureNoNewEventOnChannel(newBlockCh) // then we dont make a block ...
-	ensureNewEventOnChannel(newBlockCh)   // until the CreateEmptyBlocksInterval has passed
+	ensureNewEventOnChannel(t, newBlockCh)   // first block gets committed
+	ensureNoNewEventOnChannel(t, newBlockCh) // then we don't make a block ...
+	ensureNewEventOnChannel(t, newBlockCh)   // until the CreateEmptyBlocksInterval has passed
}

func TestMempoolProgressInHigherRound(t *testing.T) {
	baseConfig := configSetup(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()

-	config, err := ResetConfig("consensus_mempool_txs_available_test")
+	config, err := ResetConfig(t.TempDir(), "consensus_mempool_txs_available_test")
	require.NoError(t, err)
	t.Cleanup(func() { _ = os.RemoveAll(config.RootDir) })
	config.Consensus.CreateEmptyBlocks = false
-	state, privVals := randGenesisState(baseConfig, 1, false, 10)
-	cs := newStateWithConfig(config, state, privVals[0], NewCounterApplication())
-	assertMempool(cs.txNotifier).EnableTxsAvailable()
+	state, privVals := makeGenesisState(ctx, t, baseConfig, genesisStateArgs{
+		Validators: 1,
+		Power:      10,
+		Params:     factory.ConsensusParams()})
+	cs := newStateWithConfig(ctx, t, log.NewNopLogger(), config, state, privVals[0], NewCounterApplication())
+	assertMempool(t, cs.txNotifier).EnableTxsAvailable()

	height, round := cs.Height, cs.Round
-	newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock)
-	newRoundCh := subscribe(cs.eventBus, types.EventQueryNewRound)
-	timeoutCh := subscribe(cs.eventBus, types.EventQueryTimeoutPropose)
-	cs.setProposal = func(proposal *types.Proposal) error {
+	newBlockCh := subscribe(ctx, t, cs.eventBus, types.EventQueryNewBlock)
+	newRoundCh := subscribe(ctx, t, cs.eventBus, types.EventQueryNewRound)
+	timeoutCh := subscribe(ctx, t, cs.eventBus, types.EventQueryTimeoutPropose)
+	cs.setProposal = func(proposal *types.Proposal, recvTime time.Time) error {
		if cs.Height == cs.state.InitialHeight+1 && cs.Round == 0 {
			// don't set the proposal in round 0 so we timeout and
			// go to next round
-			cs.Logger.Info("Ignoring set proposal at height 2, round 0")
			return nil
		}
-		return cs.defaultSetProposal(proposal)
+		return cs.defaultSetProposal(proposal, recvTime)
	}
-	startTestRound(cs, height, round)
+	startTestRound(ctx, cs, height, round)

-	ensureNewRound(newRoundCh, height, round) // first round at first height
-	ensureNewEventOnChannel(newBlockCh)       // first block gets committed
+	ensureNewRound(t, newRoundCh, height, round) // first round at first height
+	ensureNewEventOnChannel(t, newBlockCh)       // first block gets committed

	height++ // moving to the next height
	round = 0

-	ensureNewRound(newRoundCh, height, round) // first round at next height
-	deliverTxsRange(cs, 0, 1)                 // we deliver txs, but dont set a proposal so
we get the next round - ensureNewTimeout(timeoutCh, height, round, cs.config.TimeoutPropose.Nanoseconds()) + ensureNewRound(t, newRoundCh, height, round) // first round at next height + checkTxsRange(ctx, t, cs, 0, 1) // we deliver txs, but don't set a proposal so we get the next round + ensureNewTimeout(t, timeoutCh, height, round, cs.state.ConsensusParams.Timeout.ProposeTimeout(round).Nanoseconds()) - round++ // moving to the next round - ensureNewRound(newRoundCh, height, round) // wait for the next round - ensureNewEventOnChannel(newBlockCh) // now we can commit the block + round++ // moving to the next round + ensureNewRound(t, newRoundCh, height, round) // wait for the next round + ensureNewEventOnChannel(t, newBlockCh) // now we can commit the block } -func deliverTxsRange(cs *State, start, end int) { +func checkTxsRange(ctx context.Context, t *testing.T, cs *State, start, end int) { + t.Helper() // Deliver some txs. for i := start; i < end; i++ { txBytes := make([]byte, 8) binary.BigEndian.PutUint64(txBytes, uint64(i)) - err := assertMempool(cs.txNotifier).CheckTx(context.Background(), txBytes, nil, mempool.TxInfo{}) - if err != nil { - panic(fmt.Sprintf("Error after CheckTx: %v", err)) - } + err := assertMempool(t, cs.txNotifier).CheckTx(ctx, txBytes, nil, mempool.TxInfo{}) + require.NoError(t, err, "error after checkTx") } } func TestMempoolTxConcurrentWithCommit(t *testing.T) { - config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - state, privVals := randGenesisState(config, 1, false, 10) + config := configSetup(t) + logger := log.NewNopLogger() + state, privVals := makeGenesisState(ctx, t, config, genesisStateArgs{ + Validators: 1, + Power: 10, + Params: factory.ConsensusParams(), + }) stateStore := sm.NewStore(dbm.NewMemDB()) blockStore := store.NewBlockStore(dbm.NewMemDB()) - cs := newStateWithConfigAndBlockStore(config, state, privVals[0], NewCounterApplication(), blockStore) + + cs := newStateWithConfigAndBlockStore( + ctx, + t, + logger, config, state, privVals[0], NewCounterApplication(), blockStore) + err := stateStore.Save(state) require.NoError(t, err) - newBlockHeaderCh := subscribe(cs.eventBus, types.EventQueryNewBlockHeader) + newBlockHeaderCh := subscribe(ctx, t, cs.eventBus, types.EventQueryNewBlockHeader) const numTxs int64 = 3000 - go deliverTxsRange(cs, 0, int(numTxs)) + go checkTxsRange(ctx, t, cs, 0, int(numTxs)) - startTestRound(cs, cs.Height, cs.Round) + startTestRound(ctx, cs, cs.Height, cs.Round) for n := int64(0); n < numTxs; { select { case msg := <-newBlockHeaderCh: @@ -158,12 +187,17 @@ func TestMempoolTxConcurrentWithCommit(t *testing.T) { func TestMempoolRmBadTx(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - state, privVals := randGenesisState(config, 1, false, 10) + state, privVals := makeGenesisState(ctx, t, config, genesisStateArgs{ + Validators: 1, + Power: 10, + Params: factory.ConsensusParams()}) app := NewCounterApplication() stateStore := sm.NewStore(dbm.NewMemDB()) blockStore := store.NewBlockStore(dbm.NewMemDB()) - cs := newStateWithConfigAndBlockStore(config, state, privVals[0], app, blockStore) + cs := newStateWithConfigAndBlockStore(ctx, t, log.NewNopLogger(), config, state, privVals[0], app, blockStore) err := stateStore.Save(state) require.NoError(t, err) @@ -171,10 +205,12 @@ func TestMempoolRmBadTx(t *testing.T) { txBytes := make([]byte, 8) binary.BigEndian.PutUint64(txBytes, uint64(0)) - resDeliver := 
app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) - assert.False(t, resDeliver.IsErr(), fmt.Sprintf("expected no error. got %v", resDeliver)) + resFinalize, err := app.FinalizeBlock(ctx, &abci.RequestFinalizeBlock{Txs: [][]byte{txBytes}}) + require.NoError(t, err) + assert.False(t, resFinalize.TxResults[0].IsErr(), fmt.Sprintf("expected no error. got %v", resFinalize)) - resCommit := app.Commit() + resCommit, err := app.Commit(ctx) + require.NoError(t, err) assert.True(t, len(resCommit.Data) > 0) emptyMempoolCh := make(chan struct{}) @@ -183,8 +219,8 @@ func TestMempoolRmBadTx(t *testing.T) { // Try to send the tx through the mempool. // CheckTx should not err, but the app should return a bad abci code // and the tx should get removed from the pool - err := assertMempool(cs.txNotifier).CheckTx(context.Background(), txBytes, func(r *abci.Response) { - if r.GetCheckTx().Code != code.CodeTypeBadNonce { + err := assertMempool(t, cs.txNotifier).CheckTx(ctx, txBytes, func(r *abci.ResponseCheckTx) { + if r.Code != code.CodeTypeBadNonce { t.Errorf("expected checktx to return bad nonce, got %v", r) return } @@ -197,7 +233,7 @@ func TestMempoolRmBadTx(t *testing.T) { // check for the tx for { - txs := assertMempool(cs.txNotifier).ReapMaxBytesMaxGas(int64(len(txBytes)), -1) + txs := assertMempool(t, cs.txNotifier).ReapMaxBytesMaxGas(int64(len(txBytes)), -1) if len(txs) == 0 { emptyMempoolCh <- struct{}{} return @@ -239,30 +275,37 @@ func NewCounterApplication() *CounterApplication { return &CounterApplication{} } -func (app *CounterApplication) Info(req abci.RequestInfo) abci.ResponseInfo { - return abci.ResponseInfo{Data: fmt.Sprintf("txs:%v", app.txCount)} +func (app *CounterApplication) Info(_ context.Context, req *abci.RequestInfo) (*abci.ResponseInfo, error) { + return &abci.ResponseInfo{Data: fmt.Sprintf("txs:%v", app.txCount)}, nil } -func (app *CounterApplication) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx { - txValue := txAsUint64(req.Tx) - if txValue != uint64(app.txCount) { - return abci.ResponseDeliverTx{ - Code: code.CodeTypeBadNonce, - Log: fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, txValue)} +func (app *CounterApplication) FinalizeBlock(_ context.Context, req *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) { + respTxs := make([]*abci.ExecTxResult, len(req.Txs)) + for i, tx := range req.Txs { + txValue := txAsUint64(tx) + if txValue != uint64(app.txCount) { + respTxs[i] = &abci.ExecTxResult{ + Code: code.CodeTypeBadNonce, + Log: fmt.Sprintf("Invalid nonce. Expected %d, got %d", app.txCount, txValue), + } + continue + } + app.txCount++ + respTxs[i] = &abci.ExecTxResult{Code: code.CodeTypeOK} } - app.txCount++ - return abci.ResponseDeliverTx{Code: code.CodeTypeOK} + return &abci.ResponseFinalizeBlock{TxResults: respTxs}, nil } -func (app *CounterApplication) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx { +func (app *CounterApplication) CheckTx(_ context.Context, req *abci.RequestCheckTx) (*abci.ResponseCheckTx, error) { txValue := txAsUint64(req.Tx) if txValue != uint64(app.mempoolTxCount) { - return abci.ResponseCheckTx{ + return &abci.ResponseCheckTx{ Code: code.CodeTypeBadNonce, - Log: fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.mempoolTxCount, txValue)} + Log: fmt.Sprintf("Invalid nonce. 
Expected %v, got %v", app.mempoolTxCount, txValue), + }, nil } app.mempoolTxCount++ - return abci.ResponseCheckTx{Code: code.CodeTypeOK} + return &abci.ResponseCheckTx{Code: code.CodeTypeOK}, nil } func txAsUint64(tx []byte) uint64 { @@ -271,13 +314,33 @@ func txAsUint64(tx []byte) uint64 { return binary.BigEndian.Uint64(tx8) } -func (app *CounterApplication) Commit() abci.ResponseCommit { +func (app *CounterApplication) Commit(context.Context) (*abci.ResponseCommit, error) { app.mempoolTxCount = app.txCount if app.txCount == 0 { - return abci.ResponseCommit{} + return &abci.ResponseCommit{}, nil } hash := make([]byte, 32) binary.BigEndian.PutUint64(hash, uint64(app.txCount)) - return abci.ResponseCommit{Data: hash} + return &abci.ResponseCommit{Data: hash}, nil +} + +func (app *CounterApplication) PrepareProposal(_ context.Context, req *abci.RequestPrepareProposal) (*abci.ResponsePrepareProposal, error) { + trs := make([]*abci.TxRecord, 0, len(req.Txs)) + var totalBytes int64 + for _, tx := range req.Txs { + totalBytes += int64(len(tx)) + if totalBytes > req.MaxTxBytes { + break + } + trs = append(trs, &abci.TxRecord{ + Action: abci.TxRecord_UNMODIFIED, + Tx: tx, + }) + } + return &abci.ResponsePrepareProposal{TxRecords: trs}, nil +} + +func (app *CounterApplication) ProcessProposal(_ context.Context, req *abci.RequestProcessProposal) (*abci.ResponseProcessProposal, error) { + return &abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_ACCEPT}, nil } diff --git a/internal/consensus/metrics.go b/internal/consensus/metrics.go index 2ab8f94119..2cf8f2f73c 100644 --- a/internal/consensus/metrics.go +++ b/internal/consensus/metrics.go @@ -6,11 +6,11 @@ import ( "github.com/go-kit/kit/metrics" "github.com/go-kit/kit/metrics/discard" - cstypes "github.com/tendermint/tendermint/internal/consensus/types" - "github.com/tendermint/tendermint/types" - prometheus "github.com/go-kit/kit/metrics/prometheus" + "github.com/go-kit/kit/metrics/prometheus" stdprometheus "github.com/prometheus/client_golang/prometheus" + cstypes "github.com/tendermint/tendermint/internal/consensus/types" + "github.com/tendermint/tendermint/types" ) const ( @@ -91,12 +91,17 @@ type Metrics struct { // be above 2/3 of the total voting power of the network defines the endpoint // the endpoint of the interval. Subtract the proposal timestamp from this endpoint // to obtain the quorum delay. - QuorumPrevoteMessageDelay metrics.Gauge + QuorumPrevoteDelay metrics.Gauge - // FullPrevoteMessageDelay is the interval in seconds between the proposal + // FullPrevoteDelay is the interval in seconds between the proposal // timestamp and the timestamp of the latest prevote in a round where 100% // of the voting power on the network issued prevotes. - FullPrevoteMessageDelay metrics.Gauge + FullPrevoteDelay metrics.Gauge + + // ProposalTimestampDifference is the difference between the timestamp in + // the proposal message and the local time of the validator at the time + // that the validator received the message. + ProposalTimestampDifference metrics.Histogram } // PrometheusMetrics returns Metrics build using Prometheus client library. 
@@ -251,20 +256,29 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Help: "Time spent per step.", Buckets: stdprometheus.ExponentialBucketsRange(0.1, 100, 8), }, append(labels, "step")).With(labelsAndValues...), - QuorumPrevoteMessageDelay: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + QuorumPrevoteDelay: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, - Name: "quorum_prevote_message_delay", + Name: "quorum_prevote_delay", Help: "Difference in seconds between the proposal timestamp and the timestamp " + "of the latest prevote that achieved a quorum in the prevote step.", - }, labels).With(labelsAndValues...), - FullPrevoteMessageDelay: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + }, append(labels, "proposer_address")).With(labelsAndValues...), + FullPrevoteDelay: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, - Name: "full_prevote_message_delay", + Name: "full_prevote_delay", Help: "Difference in seconds between the proposal timestamp and the timestamp " + "of the latest prevote that achieved 100% of the voting power in the prevote step.", - }, labels).With(labelsAndValues...), + }, append(labels, "proposer_address")).With(labelsAndValues...), + ProposalTimestampDifference: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "proposal_timestamp_difference", + Help: "Difference in seconds between the timestamp in the proposal " + + "message and the local time when the message was received. " + + "Only calculated when a new block is proposed.", + Buckets: []float64{-10, -.5, -.025, 0, .1, .5, 1, 1.5, 2, 10}, + }, append(labels, "is_timely")).With(labelsAndValues...), } } @@ -290,17 +304,18 @@ func NopMetrics() *Metrics { BlockIntervalSeconds: discard.NewHistogram(), - NumTxs: discard.NewGauge(), - BlockSizeBytes: discard.NewHistogram(), - TotalTxs: discard.NewGauge(), - CommittedHeight: discard.NewGauge(), - BlockSyncing: discard.NewGauge(), - StateSyncing: discard.NewGauge(), - BlockParts: discard.NewCounter(), - BlockGossipReceiveLatency: discard.NewHistogram(), - BlockGossipPartsReceived: discard.NewCounter(), - QuorumPrevoteMessageDelay: discard.NewGauge(), - FullPrevoteMessageDelay: discard.NewGauge(), + NumTxs: discard.NewGauge(), + BlockSizeBytes: discard.NewHistogram(), + TotalTxs: discard.NewGauge(), + CommittedHeight: discard.NewGauge(), + BlockSyncing: discard.NewGauge(), + StateSyncing: discard.NewGauge(), + BlockParts: discard.NewCounter(), + BlockGossipReceiveLatency: discard.NewHistogram(), + BlockGossipPartsReceived: discard.NewCounter(), + QuorumPrevoteDelay: discard.NewGauge(), + FullPrevoteDelay: discard.NewGauge(), + ProposalTimestampDifference: discard.NewHistogram(), } } diff --git a/internal/consensus/mocks/cons_sync_reactor.go b/internal/consensus/mocks/cons_sync_reactor.go deleted file mode 100644 index 5ac592f0d4..0000000000 --- a/internal/consensus/mocks/cons_sync_reactor.go +++ /dev/null @@ -1,28 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. 
- -package mocks - -import ( - mock "github.com/stretchr/testify/mock" - state "github.com/tendermint/tendermint/internal/state" -) - -// ConsSyncReactor is an autogenerated mock type for the ConsSyncReactor type -type ConsSyncReactor struct { - mock.Mock -} - -// SetBlockSyncingMetrics provides a mock function with given fields: _a0 -func (_m *ConsSyncReactor) SetBlockSyncingMetrics(_a0 float64) { - _m.Called(_a0) -} - -// SetStateSyncingMetrics provides a mock function with given fields: _a0 -func (_m *ConsSyncReactor) SetStateSyncingMetrics(_a0 float64) { - _m.Called(_a0) -} - -// SwitchToConsensus provides a mock function with given fields: _a0, _a1 -func (_m *ConsSyncReactor) SwitchToConsensus(_a0 state.State, _a1 bool) { - _m.Called(_a0, _a1) -} diff --git a/internal/consensus/mocks/fast_sync_reactor.go b/internal/consensus/mocks/fast_sync_reactor.go index 9da8510659..06886de27b 100644 --- a/internal/consensus/mocks/fast_sync_reactor.go +++ b/internal/consensus/mocks/fast_sync_reactor.go @@ -4,6 +4,7 @@ package mocks import ( mock "github.com/stretchr/testify/mock" + state "github.com/tendermint/tendermint/internal/state" time "time" diff --git a/internal/consensus/msgs.go b/internal/consensus/msgs.go index 026234cba2..3085dbb4bf 100644 --- a/internal/consensus/msgs.go +++ b/internal/consensus/msgs.go @@ -5,8 +5,8 @@ import ( "fmt" cstypes "github.com/tendermint/tendermint/internal/consensus/types" + "github.com/tendermint/tendermint/internal/jsontypes" "github.com/tendermint/tendermint/libs/bits" - tmjson "github.com/tendermint/tendermint/libs/json" tmmath "github.com/tendermint/tendermint/libs/math" tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" @@ -18,31 +18,35 @@ import ( // converted to a Message via MsgFromProto. type Message interface { ValidateBasic() error + + jsontypes.Tagged } func init() { - tmjson.RegisterType(&NewRoundStepMessage{}, "tendermint/NewRoundStepMessage") - tmjson.RegisterType(&NewValidBlockMessage{}, "tendermint/NewValidBlockMessage") - tmjson.RegisterType(&ProposalMessage{}, "tendermint/Proposal") - tmjson.RegisterType(&ProposalPOLMessage{}, "tendermint/ProposalPOL") - tmjson.RegisterType(&BlockPartMessage{}, "tendermint/BlockPart") - tmjson.RegisterType(&VoteMessage{}, "tendermint/Vote") - tmjson.RegisterType(&HasVoteMessage{}, "tendermint/HasVote") - tmjson.RegisterType(&VoteSetMaj23Message{}, "tendermint/VoteSetMaj23") - tmjson.RegisterType(&VoteSetBitsMessage{}, "tendermint/VoteSetBits") - tmjson.RegisterType(&CommitMessage{}, "tendermint/CommitMessage") + jsontypes.MustRegister(&NewRoundStepMessage{}) + jsontypes.MustRegister(&NewValidBlockMessage{}) + jsontypes.MustRegister(&ProposalMessage{}) + jsontypes.MustRegister(&ProposalPOLMessage{}) + jsontypes.MustRegister(&BlockPartMessage{}) + jsontypes.MustRegister(&VoteMessage{}) + jsontypes.MustRegister(&HasVoteMessage{}) + jsontypes.MustRegister(&VoteSetMaj23Message{}) + jsontypes.MustRegister(&VoteSetBitsMessage{}) + jsontypes.MustRegister(&CommitMessage{}) } // NewRoundStepMessage is sent for every step taken in the ConsensusState. 
// For every height/round/step transition type NewRoundStepMessage struct { - Height int64 + Height int64 `json:",string"` Round int32 Step cstypes.RoundStepType - SecondsSinceStartTime int64 + SecondsSinceStartTime int64 `json:",string"` LastCommitRound int32 } +func (*NewRoundStepMessage) TypeTag() string { return "tendermint/NewRoundStepMessage" } + // ValidateBasic performs basic validation. func (m *NewRoundStepMessage) ValidateBasic() error { if m.Height < 0 { @@ -94,13 +98,15 @@ func (m *NewRoundStepMessage) String() string { // i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in the round r. // In case the block is also committed, then IsCommit flag is set to true. type NewValidBlockMessage struct { - Height int64 + Height int64 `json:",string"` Round int32 BlockPartSetHeader types.PartSetHeader BlockParts *bits.BitArray IsCommit bool } +func (*NewValidBlockMessage) TypeTag() string { return "tendermint/NewValidBlockMessage" } + // ValidateBasic performs basic validation. func (m *NewValidBlockMessage) ValidateBasic() error { if m.Height < 0 { @@ -110,7 +116,7 @@ func (m *NewValidBlockMessage) ValidateBasic() error { return errors.New("negative Round") } if err := m.BlockPartSetHeader.ValidateBasic(); err != nil { - return fmt.Errorf("wrong BlockPartSetHeader: %v", err) + return fmt.Errorf("wrong BlockPartSetHeader: %w", err) } if m.BlockParts.Size() == 0 { return errors.New("empty blockParts") @@ -137,6 +143,8 @@ type ProposalMessage struct { Proposal *types.Proposal } +func (*ProposalMessage) TypeTag() string { return "tendermint/Proposal" } + // ValidateBasic performs basic validation. func (m *ProposalMessage) ValidateBasic() error { return m.Proposal.ValidateBasic() @@ -149,11 +157,13 @@ func (m *ProposalMessage) String() string { // ProposalPOLMessage is sent when a previous proposal is re-proposed. type ProposalPOLMessage struct { - Height int64 + Height int64 `json:",string"` ProposalPOLRound int32 ProposalPOL *bits.BitArray } +func (*ProposalPOLMessage) TypeTag() string { return "tendermint/ProposalPOL" } + // ValidateBasic performs basic validation. func (m *ProposalPOLMessage) ValidateBasic() error { if m.Height < 0 { @@ -178,11 +188,13 @@ func (m *ProposalPOLMessage) String() string { // BlockPartMessage is sent when gossipping a piece of the proposed block. type BlockPartMessage struct { - Height int64 + Height int64 `json:",string"` Round int32 Part *types.Part } +func (*BlockPartMessage) TypeTag() string { return "tendermint/BlockPart" } + // ValidateBasic performs basic validation. func (m *BlockPartMessage) ValidateBasic() error { if m.Height < 0 { @@ -192,7 +204,7 @@ func (m *BlockPartMessage) ValidateBasic() error { return errors.New("negative Round") } if err := m.Part.ValidateBasic(); err != nil { - return fmt.Errorf("wrong Part: %v", err) + return fmt.Errorf("wrong Part: %w", err) } return nil } @@ -207,9 +219,15 @@ type VoteMessage struct { Vote *types.Vote } -// ValidateBasic performs basic validation. +func (*VoteMessage) TypeTag() string { return "tendermint/Vote" } + +// ValidateBasic checks whether the vote within the message is well-formed. func (m *VoteMessage) ValidateBasic() error { - return m.Vote.ValidateBasic() + // Here we validate votes with vote extensions, since we require vote + // extensions to be sent in precommit messages during consensus. Prevote + // messages should never have vote extensions, and this is also validated + // here. + return m.Vote.ValidateWithExtension() } // String returns a string representation. 
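The `json:",string"` tags added to the 64-bit fields above use a standard `encoding/json` option: the integer is marshaled as a quoted string, so JSON consumers without native 64-bit integers (notably JavaScript) do not lose precision. A minimal self-contained illustration (field names chosen to mirror the messages above):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type roundStep struct {
	Height int64 `json:",string"` // 64-bit value encoded as a JSON string
	Round  int32 // 32-bit values stay plain JSON numbers
}

func main() {
	b, _ := json.Marshal(roundStep{Height: 42, Round: 1})
	fmt.Println(string(b)) // {"Height":"42","Round":1}

	var rs roundStep
	_ = json.Unmarshal([]byte(`{"Height":"9223372036854775807","Round":1}`), &rs)
	fmt.Println(rs.Height) // 9223372036854775807
}
```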
@@ -219,12 +237,14 @@ func (m *VoteMessage) String() string { // HasVoteMessage is sent to indicate that a particular vote has been received. type HasVoteMessage struct { - Height int64 + Height int64 `json:",string"` Round int32 Type tmproto.SignedMsgType Index int32 } +func (*HasVoteMessage) TypeTag() string { return "tendermint/HasVote" } + // ValidateBasic performs basic validation. func (m *HasVoteMessage) ValidateBasic() error { if m.Height < 0 { @@ -252,6 +272,8 @@ type CommitMessage struct { Commit *types.Commit } +func (*CommitMessage) TypeTag() string { return "tendermint/Commit" } + // ValidateBasic performs basic validation. func (m *CommitMessage) ValidateBasic() error { return m.Commit.ValidateBasic() @@ -263,6 +285,8 @@ type HasCommitMessage struct { Round int32 } +func (*HasCommitMessage) TypeTag() string { return "tendermint/HasCommit" } + // ValidateBasic performs basic validation. func (m *HasCommitMessage) ValidateBasic() error { if m.Height < 0 { @@ -281,12 +305,14 @@ func (m *HasCommitMessage) String() string { // VoteSetMaj23Message is sent to indicate that a given BlockID has seen +2/3 votes. type VoteSetMaj23Message struct { - Height int64 + Height int64 `json:",string"` Round int32 Type tmproto.SignedMsgType BlockID types.BlockID } +func (*VoteSetMaj23Message) TypeTag() string { return "tendermint/VoteSetMaj23" } + // ValidateBasic performs basic validation. func (m *VoteSetMaj23Message) ValidateBasic() error { if m.Height < 0 { @@ -299,7 +325,7 @@ func (m *VoteSetMaj23Message) ValidateBasic() error { return errors.New("invalid Type") } if err := m.BlockID.ValidateBasic(); err != nil { - return fmt.Errorf("wrong BlockID: %v", err) + return fmt.Errorf("wrong BlockID: %w", err) } return nil @@ -313,13 +339,15 @@ func (m *VoteSetMaj23Message) String() string { // VoteSetBitsMessage is sent to communicate the bit-array of votes seen for the // BlockID. type VoteSetBitsMessage struct { - Height int64 + Height int64 `json:",string"` Round int32 Type tmproto.SignedMsgType BlockID types.BlockID Votes *bits.BitArray } +func (*VoteSetBitsMessage) TypeTag() string { return "tendermint/VoteSetBits" } + // ValidateBasic performs basic validation. func (m *VoteSetBitsMessage) ValidateBasic() error { if m.Height < 0 { @@ -329,7 +357,7 @@ func (m *VoteSetBitsMessage) ValidateBasic() error { return errors.New("invalid Type") } if err := m.BlockID.ValidateBasic(); err != nil { - return fmt.Errorf("wrong BlockID: %v", err) + return fmt.Errorf("wrong BlockID: %w", err) } // NOTE: Votes.Size() can be zero if the node does not have any @@ -565,6 +593,8 @@ func MsgFromProto(msg *tmcons.Message) (Message, error) { Part: parts, } case *tmcons.Message_Vote: + // Vote validation will be handled in the vote message ValidateBasic + // call below. 
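+			// (Per VoteMessage.ValidateBasic above, that check uses
+			// ValidateWithExtension: precommits are required to carry vote
+			// extensions, and prevotes must never have them.)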
vote, err := types.VoteFromProto(msg.Vote.Vote) if err != nil { return nil, fmt.Errorf("vote msg to proto error: %w", err) diff --git a/internal/consensus/msgs_test.go b/internal/consensus/msgs_test.go index 17a0458c0c..69a1552406 100644 --- a/internal/consensus/msgs_test.go +++ b/internal/consensus/msgs_test.go @@ -14,7 +14,6 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/merkle" - "github.com/tendermint/tendermint/crypto/tmhash" cstypes "github.com/tendermint/tendermint/internal/consensus/types" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/bits" @@ -26,6 +25,9 @@ import ( ) func TestMsgToProto(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + psh := types.PartSetHeader{ Total: 1, Hash: tmrand.Bytes(32), @@ -66,11 +68,12 @@ func TestMsgToProto(t *testing.T) { quorumHash := crypto.RandQuorumHash() pv := types.NewMockPVForQuorum(quorumHash) - pk, err := pv.GetPubKey(context.Background(), quorumHash) - require.NoError(t, err) + pk, err := pv.GetPubKey(ctx, quorumHash) val := types.NewValidatorDefaultVotingPower(pk, pv.ProTxHash) + require.NoError(t, err) vote, err := factory.MakeVote( + ctx, pv, &types.ValidatorSet{Proposer: val, Validators: []*types.Validator{val}, QuorumHash: quorumHash, ThresholdPublicKey: pk}, "chainID", @@ -379,6 +382,7 @@ func TestConsMsgsVectors(t *testing.T) { Round: 0, Type: tmproto.PrecommitType, BlockID: bi, + Extension: []byte("extension"), } vpb := v.ToProto() @@ -415,7 +419,7 @@ func TestConsMsgsVectors(t *testing.T) { "2a36080110011a3008011204746573741a26080110011a206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d"}, {"Vote", &tmcons.Message{Sum: &tmcons.Message_Vote{ Vote: &tmcons.Vote{Vote: vpb}}}, - "32680a660802100122480a206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d1224080112206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d32146164645f6d6f72655f6578636c616d6174696f6e3801"}, + "32730a710802100122480a206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d1224080112206164645f6d6f72655f6578636c616d6174696f6e5f6d61726b735f636f64652d32146164645f6d6f72655f6578636c616d6174696f6e38015a09657874656e73696f6e"}, {"HasVote", &tmcons.Message{Sum: &tmcons.Message_HasVote{ HasVote: &tmcons.HasVote{Height: 1, Round: 1, Type: tmproto.PrevoteType, Index: 1}}}, "3a080801100118012001"}, @@ -680,7 +684,7 @@ func TestProposalPOLMessageValidateBasic(t *testing.T) { func TestBlockPartMessageValidateBasic(t *testing.T) { testPart := new(types.Part) - testPart.Proof.LeafHash = tmhash.Sum([]byte("leaf")) + testPart.Proof.LeafHash = crypto.Checksum([]byte("leaf")) testCases := []struct { testName string messageHeight int64 diff --git a/internal/consensus/pbts_test.go b/internal/consensus/pbts_test.go new file mode 100644 index 0000000000..985a68c98f --- /dev/null +++ b/internal/consensus/pbts_test.go @@ -0,0 +1,506 @@ +package consensus + +import ( + "bytes" + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/abci/example/kvstore" + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/internal/eventbus" + tmpubsub "github.com/tendermint/tendermint/internal/pubsub" + "github.com/tendermint/tendermint/internal/test/factory" + "github.com/tendermint/tendermint/libs/log" + tmtimemocks "github.com/tendermint/tendermint/libs/time/mocks" + tmproto 
"github.com/tendermint/tendermint/proto/tendermint/types" + "github.com/tendermint/tendermint/types" +) + +const ( + // blockTimeIota is used in the test harness as the time between + // blocks when not otherwise specified. + blockTimeIota = time.Millisecond +) + +// pbtsTestHarness constructs a Tendermint network that can be used for testing the +// implementation of the Proposer-Based timestamps algorithm. +// It runs a series of consensus heights and captures timing of votes and events. +type pbtsTestHarness struct { + // configuration options set by the user of the test harness. + pbtsTestConfiguration + + // The timestamp of the first block produced by the network. + firstBlockTime time.Time + + // The Tendermint consensus state machine being run during + // a run of the pbtsTestHarness. + observedState *State + + // A stub for signing votes and messages using the key + // from the observedState. + observedValidator *validatorStub + + // A list of simulated validators that interact with the observedState and are + // fully controlled by the test harness. + otherValidators []*validatorStub + + // The mock time source used by all of the validator stubs in the test harness. + // This mock clock allows the test harness to produce votes and blocks with arbitrary + // timestamps. + validatorClock *tmtimemocks.Source + + chainID string + + // channels for verifying that the observed validator completes certain actions. + ensureProposalCh, roundCh, blockCh, ensureVoteCh <-chan tmpubsub.Message + + // channel of events from the observed validator annotated with the timestamp + // the event was received. + eventCh <-chan timestampedEvent + + currentHeight int64 + currentRound int32 +} + +type pbtsTestConfiguration struct { + // The timestamp consensus parameters to be used by the state machine under test. + synchronyParams types.SynchronyParams + + // The setting to use for the TimeoutPropose configuration parameter. + timeoutPropose time.Duration + + // The genesis time + genesisTime time.Time + + // The times offset from height 1 block time of the block proposed at height 2. + height2ProposedBlockOffset time.Duration + + // The time offset from height 1 block time at which the proposal at height 2 should be delivered. + height2ProposalTimeDeliveryOffset time.Duration + + // The time offset from height 1 block time of the block proposed at height 4. + // At height 4, the proposed block and the deliver offsets are the same so + // that timely-ness does not affect height 4. + height4ProposedBlockOffset time.Duration +} + +func newPBTSTestHarness(ctx context.Context, t *testing.T, tc pbtsTestConfiguration) pbtsTestHarness { + t.Helper() + const validators = 4 + cfg := configSetup(t) + clock := new(tmtimemocks.Source) + + if tc.genesisTime.IsZero() { + tc.genesisTime = time.Now() + } + + if tc.height4ProposedBlockOffset == 0 { + + // Set a default height4ProposedBlockOffset. + // Use a proposed block time that is greater than the time that the + // block at height 2 was delivered. Height 3 is not relevant for testing + // and always occurs blockTimeIota before height 4. If not otherwise specified, + // height 4 therefore occurs 2*blockTimeIota after height 2. 
+ tc.height4ProposedBlockOffset = tc.height2ProposalTimeDeliveryOffset + 2*blockTimeIota + } + consensusParams := factory.ConsensusParams() + consensusParams.Timeout.Propose = tc.timeoutPropose + consensusParams.Synchrony = tc.synchronyParams + + state, privVals := makeGenesisState(ctx, t, cfg, genesisStateArgs{ + Params: consensusParams, + Time: tc.genesisTime, + Validators: validators, + }) + cs := newState(ctx, t, log.NewNopLogger(), state, privVals[0], kvstore.NewApplication()) + vss := make([]*validatorStub, validators) + for i := 0; i < validators; i++ { + vss[i] = newValidatorStub(privVals[i], int32(i), 0) + } + incrementHeight(vss[1:]...) + + for _, vs := range vss { + vs.clock = clock + } + proTxHash, err := vss[0].PrivValidator.GetProTxHash(ctx) + require.NoError(t, err) + + eventCh := timestampedCollector(ctx, t, cs.eventBus) + + return pbtsTestHarness{ + pbtsTestConfiguration: tc, + observedValidator: vss[0], + observedState: cs, + otherValidators: vss[1:], + validatorClock: clock, + currentHeight: 1, + chainID: cfg.ChainID(), + roundCh: subscribe(ctx, t, cs.eventBus, types.EventQueryNewRound), + ensureProposalCh: subscribe(ctx, t, cs.eventBus, types.EventQueryCompleteProposal), + blockCh: subscribe(ctx, t, cs.eventBus, types.EventQueryNewBlock), + ensureVoteCh: subscribeToVoterBuffered(ctx, t, cs, proTxHash), + eventCh: eventCh, + } +} + +func (p *pbtsTestHarness) observedValidatorProposerHeight(ctx context.Context, t *testing.T, previousBlockTime time.Time) (heightResult, time.Time) { + p.validatorClock.On("Now").Return(p.genesisTime.Add(p.height2ProposedBlockOffset)).Times(6) + + ensureNewRound(t, p.roundCh, p.currentHeight, p.currentRound) + + timeout := time.Until(previousBlockTime.Add(ensureTimeout)) + ensureProposalWithTimeout(t, p.ensureProposalCh, p.currentHeight, p.currentRound, nil, timeout) + + rs := p.observedState.GetRoundState() + bid := types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()} + ensurePrevote(t, p.ensureVoteCh, p.currentHeight, p.currentRound) + signAddVotes(ctx, t, p.observedState, tmproto.PrevoteType, p.chainID, bid, p.otherValidators...) + + signAddVotes(ctx, t, p.observedState, tmproto.PrecommitType, p.chainID, bid, p.otherValidators...) + ensurePrecommit(t, p.ensureVoteCh, p.currentHeight, p.currentRound) + + ensureNewBlock(t, p.blockCh, p.currentHeight) + + proTxHash, err := p.observedValidator.GetProTxHash(ctx) + require.NoError(t, err) + res := collectHeightResults(ctx, t, p.eventCh, p.currentHeight, proTxHash) + + p.currentHeight++ + incrementHeight(p.otherValidators...) 
+ return res, rs.ProposalBlock.Time +} + +func (p *pbtsTestHarness) height2(ctx context.Context, t *testing.T) heightResult { + signer := p.otherValidators[0].PrivValidator + return p.nextHeight(ctx, t, signer, + p.firstBlockTime.Add(p.height2ProposalTimeDeliveryOffset), + p.firstBlockTime.Add(p.height2ProposedBlockOffset), + p.firstBlockTime.Add(p.height2ProposedBlockOffset+10*blockTimeIota)) +} + +func (p *pbtsTestHarness) intermediateHeights(ctx context.Context, t *testing.T) { + signer := p.otherValidators[1].PrivValidator + p.nextHeight(ctx, t, signer, + p.firstBlockTime.Add(p.height2ProposedBlockOffset+10*blockTimeIota), + p.firstBlockTime.Add(p.height2ProposedBlockOffset+10*blockTimeIota), + p.firstBlockTime.Add(p.height4ProposedBlockOffset)) + + signer = p.otherValidators[2].PrivValidator + p.nextHeight(ctx, t, signer, + p.firstBlockTime.Add(p.height4ProposedBlockOffset), + p.firstBlockTime.Add(p.height4ProposedBlockOffset), + time.Now()) +} + +func (p *pbtsTestHarness) height5(ctx context.Context, t *testing.T) (heightResult, time.Time) { + return p.observedValidatorProposerHeight(ctx, t, p.firstBlockTime.Add(p.height4ProposedBlockOffset)) +} + +func (p *pbtsTestHarness) nextHeight(ctx context.Context, t *testing.T, proposer types.PrivValidator, deliverTime, proposedTime, nextProposedTime time.Time) heightResult { + state := p.observedState.GetState() + quorumType := state.Validators.QuorumType + quorumHash := state.Validators.QuorumHash + + p.validatorClock.On("Now").Return(nextProposedTime).Times(6) + + ensureNewRound(t, p.roundCh, p.currentHeight, p.currentRound) + + b, err := p.observedState.createProposalBlock(ctx) + require.NoError(t, err) + b.Height = p.currentHeight + b.Header.Height = p.currentHeight + b.Header.Time = proposedTime + + b.Header.ProposerProTxHash, err = proposer.GetProTxHash(ctx) + require.NoError(t, err) + ps, err := b.MakePartSet(types.BlockPartSizeBytes) + require.NoError(t, err) + bid := types.BlockID{Hash: b.Hash(), PartSetHeader: ps.Header()} + coreChainLockedHeight := p.observedState.state.LastCoreChainLockedBlockHeight + prop := types.NewProposal(p.currentHeight, coreChainLockedHeight, 0, -1, bid, proposedTime) + tp := prop.ToProto() + + if _, err := proposer.SignProposal(ctx, p.observedState.state.ChainID, quorumType, quorumHash, tp); err != nil { + t.Fatalf("error signing proposal: %s", err) + } + + time.Sleep(time.Until(deliverTime)) + prop.Signature = tp.Signature + if err := p.observedState.SetProposalAndBlock(ctx, prop, b, ps, "peerID"); err != nil { + t.Fatal(err) + } + ensureProposal(t, p.ensureProposalCh, p.currentHeight, 0, bid) + + ensurePrevote(t, p.ensureVoteCh, p.currentHeight, p.currentRound) + signAddVotes(ctx, t, p.observedState, tmproto.PrevoteType, p.chainID, bid, p.otherValidators...) + + signAddVotes(ctx, t, p.observedState, tmproto.PrecommitType, p.chainID, bid, p.otherValidators...) + ensurePrecommit(t, p.ensureVoteCh, p.currentHeight, p.currentRound) + + proTxHash, err := p.observedValidator.GetProTxHash(ctx) + require.NoError(t, err) + res := collectHeightResults(ctx, t, p.eventCh, p.currentHeight, proTxHash) + ensureNewBlock(t, p.blockCh, p.currentHeight) + + p.currentHeight++ + incrementHeight(p.otherValidators...) + return res +} + +func timestampedCollector(ctx context.Context, t *testing.T, eb *eventbus.EventBus) <-chan timestampedEvent { + t.Helper() + + // Since eventCh is not read until the end of each height, it must be large + // enough to hold all of the events produced during a single height. 
+ eventCh := make(chan timestampedEvent, 100) + + if err := eb.Observe(ctx, func(msg tmpubsub.Message) error { + eventCh <- timestampedEvent{ + ts: time.Now(), + m: msg, + } + return nil + }, types.EventQueryVote, types.EventQueryCompleteProposal); err != nil { + t.Fatalf("Failed to observe query %v: %v", types.EventQueryVote, err) + } + return eventCh +} + +func collectHeightResults(ctx context.Context, t *testing.T, eventCh <-chan timestampedEvent, height int64, proTxHash crypto.ProTxHash) heightResult { + t.Helper() + var res heightResult + for event := range eventCh { + switch v := event.m.Data().(type) { + case types.EventDataVote: + if v.Vote.Height > height { + t.Fatalf("received prevote from unexpected height, expected: %d, saw: %d", height, v.Vote.Height) + } + if !bytes.Equal(proTxHash, v.Vote.ValidatorProTxHash) { + continue + } + if v.Vote.Type != tmproto.PrevoteType { + continue + } + res.prevote = v.Vote + res.prevoteIssuedAt = event.ts + + case types.EventDataCompleteProposal: + if v.Height > height { + t.Fatalf("received proposal from unexpected height, expected: %d, saw: %d", height, v.Height) + } + res.proposalIssuedAt = event.ts + } + if res.isComplete() { + return res + } + } + t.Fatalf("complete height result never seen for height %d", height) + + panic("unreachable") +} + +type timestampedEvent struct { + ts time.Time + m tmpubsub.Message +} + +func (p *pbtsTestHarness) run(ctx context.Context, t *testing.T) resultSet { + startTestRound(ctx, p.observedState, p.currentHeight, p.currentRound) + + r1, proposalBlockTime := p.observedValidatorProposerHeight(ctx, t, p.genesisTime) + p.firstBlockTime = proposalBlockTime + r2 := p.height2(ctx, t) + p.intermediateHeights(ctx, t) + r5, _ := p.height5(ctx, t) + return resultSet{ + genesisHeight: r1, + height2: r2, + height5: r5, + } +} + +type resultSet struct { + genesisHeight heightResult + height2 heightResult + height5 heightResult +} + +type heightResult struct { + proposalIssuedAt time.Time + prevote *types.Vote + prevoteIssuedAt time.Time +} + +func (hr heightResult) isComplete() bool { + return !hr.proposalIssuedAt.IsZero() && !hr.prevoteIssuedAt.IsZero() && hr.prevote != nil +} + +// TestProposerWaitsForGenesisTime tests that a proposer will not propose a block +// until after the genesis time has passed. The test sets the genesis time in the +// future and then ensures that the observed validator waits to propose a block. +func TestProposerWaitsForGenesisTime(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // create a genesis time far (enough) in the future. + initialTime := time.Now().Add(800 * time.Millisecond) + cfg := pbtsTestConfiguration{ + synchronyParams: types.SynchronyParams{ + Precision: 10 * time.Millisecond, + MessageDelay: 10 * time.Millisecond, + }, + timeoutPropose: 10 * time.Millisecond, + genesisTime: initialTime, + height2ProposalTimeDeliveryOffset: 10 * time.Millisecond, + height2ProposedBlockOffset: 10 * time.Millisecond, + height4ProposedBlockOffset: 30 * time.Millisecond, + } + + pbtsTest := newPBTSTestHarness(ctx, t, cfg) + results := pbtsTest.run(ctx, t) + + // ensure that the proposal was issued after the genesis time. + assert.True(t, results.genesisHeight.proposalIssuedAt.After(cfg.genesisTime)) +} + +// TestProposerWaitsForPreviousBlock tests that the proposer of a block waits until +// the block time of the previous height has passed to propose the next block. 
+// The test harness ensures that the observed validator will be the proposer at +// height 1 and height 5. The test sets the block time of height 4 in the future +// and then verifies that the observed validator waits until after the block time +// of height 4 to propose a block at height 5. +func TestProposerWaitsForPreviousBlock(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + initialTime := time.Now().Add(time.Millisecond * 50) + cfg := pbtsTestConfiguration{ + synchronyParams: types.SynchronyParams{ + Precision: 100 * time.Millisecond, + MessageDelay: 500 * time.Millisecond, + }, + timeoutPropose: 50 * time.Millisecond, + genesisTime: initialTime, + height2ProposalTimeDeliveryOffset: 150 * time.Millisecond, + height2ProposedBlockOffset: 100 * time.Millisecond, + height4ProposedBlockOffset: 800 * time.Millisecond, + } + + pbtsTest := newPBTSTestHarness(ctx, t, cfg) + results := pbtsTest.run(ctx, t) + + // the observed validator is the proposer at height 5. + // ensure that the observed validator did not propose a block until after + // the time configured for height 4. + assert.True(t, results.height5.proposalIssuedAt.After(pbtsTest.firstBlockTime.Add(cfg.height4ProposedBlockOffset))) + + // Ensure that the validator issued a prevote for a non-nil block. + assert.NotNil(t, results.height5.prevote.BlockID.Hash) +} + +func TestProposerWaitTime(t *testing.T) { + genesisTime, err := time.Parse(time.RFC3339, "2019-03-13T23:00:00Z") + require.NoError(t, err) + testCases := []struct { + name string + previousBlockTime time.Time + localTime time.Time + expectedWait time.Duration + }{ + { + name: "block time greater than local time", + previousBlockTime: genesisTime.Add(5 * time.Nanosecond), + localTime: genesisTime.Add(1 * time.Nanosecond), + expectedWait: 4 * time.Nanosecond, + }, + { + name: "local time greater than block time", + previousBlockTime: genesisTime.Add(1 * time.Nanosecond), + localTime: genesisTime.Add(5 * time.Nanosecond), + expectedWait: 0, + }, + { + name: "both times equal", + previousBlockTime: genesisTime.Add(5 * time.Nanosecond), + localTime: genesisTime.Add(5 * time.Nanosecond), + expectedWait: 0, + }, + } + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + mockSource := new(tmtimemocks.Source) + mockSource.On("Now").Return(testCase.localTime) + + ti := proposerWaitTime(mockSource, testCase.previousBlockTime) + assert.Equal(t, testCase.expectedWait, ti) + }) + } +} + +func TestTimelyProposal(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + initialTime := time.Now() + + cfg := pbtsTestConfiguration{ + synchronyParams: types.SynchronyParams{ + Precision: 10 * time.Millisecond, + MessageDelay: 140 * time.Millisecond, + }, + timeoutPropose: 40 * time.Millisecond, + genesisTime: initialTime, + height2ProposedBlockOffset: 15 * time.Millisecond, + height2ProposalTimeDeliveryOffset: 30 * time.Millisecond, + } + + pbtsTest := newPBTSTestHarness(ctx, t, cfg) + results := pbtsTest.run(ctx, t) + require.NotNil(t, results.height2.prevote.BlockID.Hash) +} + +func TestTooFarInThePastProposal(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // localtime > proposedBlockTime + MsgDelay + Precision + cfg := pbtsTestConfiguration{ + synchronyParams: types.SynchronyParams{ + Precision: 1 * time.Millisecond, + MessageDelay: 10 * time.Millisecond, + }, + timeoutPropose: 50 * time.Millisecond, + height2ProposedBlockOffset: 15 * 
time.Millisecond, + height2ProposalTimeDeliveryOffset: 27 * time.Millisecond, + } + + pbtsTest := newPBTSTestHarness(ctx, t, cfg) + results := pbtsTest.run(ctx, t) + + require.Nil(t, results.height2.prevote.BlockID.Hash) +} + +func TestTooFarInTheFutureProposal(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // localtime < proposedBlockTime - Precision + cfg := pbtsTestConfiguration{ + synchronyParams: types.SynchronyParams{ + Precision: 1 * time.Millisecond, + MessageDelay: 10 * time.Millisecond, + }, + timeoutPropose: 50 * time.Millisecond, + height2ProposedBlockOffset: 100 * time.Millisecond, + height2ProposalTimeDeliveryOffset: 10 * time.Millisecond, + height4ProposedBlockOffset: 150 * time.Millisecond, + } + + pbtsTest := newPBTSTestHarness(ctx, t, cfg) + results := pbtsTest.run(ctx, t) + + require.Nil(t, results.height2.prevote.BlockID.Hash) +} diff --git a/internal/consensus/peer_state.go b/internal/consensus/peer_state.go index c366fb3228..6bbc088693 100644 --- a/internal/consensus/peer_state.go +++ b/internal/consensus/peer_state.go @@ -1,16 +1,17 @@ package consensus import ( + "context" + "encoding/json" "errors" "fmt" + "sync" "time" "github.com/rs/zerolog" cstypes "github.com/tendermint/tendermint/internal/consensus/types" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/bits" - tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" tmtime "github.com/tendermint/tendermint/libs/time" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" @@ -18,14 +19,17 @@ import ( ) var ( - errPeerClosed = errors.New("peer is closed") + ErrPeerStateHeightRegression = errors.New("peer state height regression") + ErrPeerStateInvalidStartTime = errors.New("peer state invalid startTime") + ErrPeerStateSetNilVote = errors.New("peer state set a nil vote") + ErrPeerStateInvalidVoteIndex = errors.New("peer sent a vote with an invalid vote index") ) // peerStateStats holds internal statistics for a peer. type peerStateStats struct { - Votes int `json:"votes"` - Commits int `json:"commits"` - BlockParts int `json:"block_parts"` + Votes int `json:"votes,string"` + BlockParts int `json:"block_parts,string"` + Commits int `json:"commits,string"` } func (pss peerStateStats) String() string { @@ -41,14 +45,14 @@ type PeerState struct { logger log.Logger // NOTE: Modify below using setters, never directly. - mtx tmsync.RWMutex + mtx sync.RWMutex + cancel context.CancelFunc running bool PRS cstypes.PeerRoundState `json:"round_state"` Stats *peerStateStats `json:"stats"` + // ProTxHash is accessible only for the validator ProTxHash types.ProTxHash - - closer *tmsync.Closer } // NewPeerState returns a new PeerState for the given node ID. 
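The `json:",string"` tags introduced in this change (on the consensus message Height fields earlier and on the peerStateStats counters here) make encoding/json emit these integers as quoted decimal strings, so 64-bit values survive JavaScript consumers whose numbers are IEEE-754 doubles. A minimal, self-contained sketch of the round-trip; the hasVote struct below is illustrative, not the package's actual type:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// hasVote mirrors the tagging pattern used in the patch: the empty name
// keeps the field name, and the "string" option quotes the integer.
type hasVote struct {
	Height int64 `json:",string"`
	Round  int32
}

func main() {
	b, err := json.Marshal(hasVote{Height: 9223372036854775807, Round: 1})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"Height":"9223372036854775807","Round":1}

	// Unmarshal accepts the quoted form and restores the exact int64.
	var hv hasVote
	if err := json.Unmarshal(b, &hv); err != nil {
		panic(err)
	}
	fmt.Println(hv.Height) // 9223372036854775807
}
```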
@@ -56,7 +60,6 @@ func NewPeerState(logger log.Logger, peerID types.NodeID) *PeerState { return &PeerState{ peerID: peerID, logger: logger, - closer: tmsync.NewCloser(), PRS: cstypes.PeerRoundState{ Round: -1, ProposalPOLRound: -1, @@ -105,8 +108,7 @@ func (ps *PeerState) UpdateRoundState(fn func(prs *cstypes.PeerRoundState)) { func (ps *PeerState) ToJSON() ([]byte, error) { ps.mtx.Lock() defer ps.mtx.Unlock() - - return tmjson.Marshal(ps) + return json.Marshal(ps) } // GetHeight returns an atomic snapshot of the PeerRoundState's height used by @@ -403,18 +405,19 @@ func (ps *PeerState) GetProTxHash() types.ProTxHash { } // SetHasVote sets the given vote as known by the peer -func (ps *PeerState) SetHasVote(vote *types.Vote) { +func (ps *PeerState) SetHasVote(vote *types.Vote) error { + // sanity check if vote == nil { - return + return ErrPeerStateSetNilVote } ps.mtx.Lock() defer ps.mtx.Unlock() - ps.setHasVote(vote.Height, vote.Round, vote.Type, vote.ValidatorIndex) + return ps.setHasVote(vote.Height, vote.Round, vote.Type, vote.ValidatorIndex) } -func (ps *PeerState) setHasVote(height int64, round int32, voteType tmproto.SignedMsgType, index int32) { - +// setHasVote will return an error when the index exceeds the bitArray length +func (ps *PeerState) setHasVote(height int64, round int32, voteType tmproto.SignedMsgType, index int32) error { ps.logger.Debug( "peerState setHasVote", "peer", ps.peerID, @@ -430,8 +433,12 @@ func (ps *PeerState) setHasVote(height int64, round int32, voteType tmproto.Sign // NOTE: some may be nil BitArrays -> no side effects psVotes := ps.getVoteBitArray(height, round, voteType) if psVotes != nil { - psVotes.SetIndex(int(index), true) + if ok := psVotes.SetIndex(int(index), true); !ok { + // https://github.com/tendermint/tendermint/issues/2871 + return ErrPeerStateInvalidVoteIndex + } } + return nil } // SetHasCommit sets the given vote as known by the peer @@ -561,15 +568,15 @@ func (ps *PeerState) ApplyProposalPOLMessage(msg *ProposalPOLMessage) { } // ApplyHasVoteMessage updates the peer state for the new vote. -func (ps *PeerState) ApplyHasVoteMessage(msg *HasVoteMessage) { +func (ps *PeerState) ApplyHasVoteMessage(msg *HasVoteMessage) error { ps.mtx.Lock() defer ps.mtx.Unlock() if ps.PRS.Height != msg.Height { - return + return nil } - ps.setHasVote(msg.Height, msg.Round, msg.Type, msg.Index) + return ps.setHasVote(msg.Height, msg.Round, msg.Type, msg.Index) } // ApplyHasCommitMessage updates the peer state for the new commit. 
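SetHasVote and setHasVote now surface a failed BitArray.SetIndex as ErrPeerStateInvalidVoteIndex instead of silently dropping the write (see tendermint/tendermint#2871). A reduced model of the bit-array semantics involved, not the real libs/bits implementation:

```go
package main

import "fmt"

// bitArray is a toy stand-in for libs/bits.BitArray: setting a bit
// outside the array reports false rather than panicking, which the
// patched setHasVote maps to ErrPeerStateInvalidVoteIndex.
type bitArray struct{ bits []bool }

func (b *bitArray) SetIndex(i int, v bool) bool {
	if i < 0 || i >= len(b.bits) {
		return false
	}
	b.bits[i] = v
	return true
}

func main() {
	votes := &bitArray{bits: make([]bool, 1)} // one validator at this height
	fmt.Println(votes.SetIndex(0, true))  // true: vote recorded
	fmt.Println(votes.SetIndex(1, true))  // false: reported as an invalid vote index
	fmt.Println(votes.SetIndex(-1, true)) // false: likewise rejected
}
```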
diff --git a/internal/consensus/peer_state_test.go b/internal/consensus/peer_state_test.go new file mode 100644 index 0000000000..97be569ff0 --- /dev/null +++ b/internal/consensus/peer_state_test.go @@ -0,0 +1,101 @@ +package consensus + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/libs/log" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + "github.com/tendermint/tendermint/types" +) + +func peerStateSetup(h, r, v int) *PeerState { + ps := NewPeerState(log.NewNopLogger(), "testPeerState") + ps.PRS.Height = int64(h) + ps.PRS.Round = int32(r) + ps.ensureVoteBitArrays(int64(h), v) + return ps +} + +func TestSetHasVote(t *testing.T) { + ps := peerStateSetup(1, 1, 1) + pva := ps.PRS.Prevotes.Copy() + + // a nil vote should return ErrPeerStateSetNilVote + err := ps.SetHasVote(nil) + require.Equal(t, ErrPeerStateSetNilVote, err) + + // a negative validator index should return ErrPeerStateInvalidVoteIndex + v0 := &types.Vote{ + Height: 1, + ValidatorIndex: -1, + Round: 1, + Type: tmproto.PrevoteType, + } + + err = ps.SetHasVote(v0) + require.Equal(t, ErrPeerStateInvalidVoteIndex, err) + + // a validator index beyond the bit-array length should return ErrPeerStateInvalidVoteIndex + v1 := &types.Vote{ + Height: 1, + ValidatorIndex: 1, + Round: 1, + Type: tmproto.PrevoteType, + } + + err = ps.SetHasVote(v1) + require.Equal(t, ErrPeerStateInvalidVoteIndex, err) + + // a valid index should return nil (the vote has been set) + v2 := &types.Vote{ + Height: 1, + ValidatorIndex: 0, + Round: 1, + Type: tmproto.PrevoteType, + } + require.Nil(t, ps.SetHasVote(v2)) + + // verify vote + pva.SetIndex(0, true) + require.Equal(t, pva, ps.getVoteBitArray(1, 1, tmproto.PrevoteType)) + + // a vote for a different height/round/type should return nil (the vote is ignored) + v3 := &types.Vote{ + Height: 2, + ValidatorIndex: 0, + Round: 1, + Type: tmproto.PrevoteType, + } + require.Nil(t, ps.SetHasVote(v3)) + // the prevote bitarray is unchanged + require.Equal(t, pva, ps.getVoteBitArray(1, 1, tmproto.PrevoteType)) +} + +func TestApplyHasVoteMessage(t *testing.T) { + ps := peerStateSetup(1, 1, 1) + pva := ps.PRS.Prevotes.Copy() + + // a message with a non-matching height is ignored + msg := &HasVoteMessage{ + Height: 2, + } + require.Nil(t, ps.ApplyHasVoteMessage(msg)) + + // apply a message like v2 in TestSetHasVote + msg2 := &HasVoteMessage{ + Height: 1, + Index: 0, + Round: 1, + Type: tmproto.PrevoteType, + } + + require.Nil(t, ps.ApplyHasVoteMessage(msg2)) + + // verify vote + pva.SetIndex(0, true) + require.Equal(t, pva, ps.getVoteBitArray(1, 1, tmproto.PrevoteType)) + + // cases like v0, v1, and v3 in TestSetHasVote are omitted here because they exercise the same code path +} diff --git a/internal/consensus/reactor.go b/internal/consensus/reactor.go index 960cf2ee73..217789cf6e 100644 --- a/internal/consensus/reactor.go +++ b/internal/consensus/reactor.go @@ -1,22 +1,24 @@ package consensus import ( + "context" "errors" "fmt" "runtime/debug" "sync" "time" - proto "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/proto" cstypes "github.com/tendermint/tendermint/internal/consensus/types" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/p2p" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/libs/bits" tmevents "github.com/tendermint/tendermint/libs/events" "github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service" + tmtime "github.com/tendermint/tendermint/libs/time" tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" @@ -25,66 +27,53 @@ import ( var ( _ service.Service = (*Reactor)(nil) _ p2p.Wrapper = (*tmcons.Message)(nil) +) - // ChannelShims contains a map of ChannelDescriptorShim objects, where each - // object wraps a reference to a legacy p2p ChannelDescriptor and the corresponding - // p2p proto.Message the new p2p Channel is responsible for handling. - // - // - // TODO: Remove once p2p refactor is complete. - // ref: https://github.com/tendermint/tendermint/issues/5670 - ChannelShims = map[p2p.ChannelID]*p2p.ChannelDescriptorShim{ +// GetChannelDescriptor produces an instance of a descriptor for this +// package's required channels. +func getChannelDescriptors() map[p2p.ChannelID]*p2p.ChannelDescriptor { + return map[p2p.ChannelID]*p2p.ChannelDescriptor{ StateChannel: { - MsgType: new(tmcons.Message), - Descriptor: &p2p.ChannelDescriptor{ - ID: byte(StateChannel), - Priority: 8, - SendQueueCapacity: 64, - RecvMessageCapacity: maxMsgSize, - RecvBufferCapacity: 128, - MaxSendBytes: 12000, - }, + ID: StateChannel, + MessageType: new(tmcons.Message), + Priority: 8, + SendQueueCapacity: 64, + RecvMessageCapacity: maxMsgSize, + RecvBufferCapacity: 128, + Name: "state", }, DataChannel: { - MsgType: new(tmcons.Message), - Descriptor: &p2p.ChannelDescriptor{ - // TODO: Consider a split between gossiping current block and catchup - // stuff. Once we gossip the whole block there is nothing left to send - // until next height or round. - ID: byte(DataChannel), - Priority: 12, - SendQueueCapacity: 64, - RecvBufferCapacity: 512, - RecvMessageCapacity: maxMsgSize, - MaxSendBytes: 40000, - }, + // TODO: Consider a split between gossiping current block and catchup + // stuff. Once we gossip the whole block there is nothing left to send + // until next height or round. + ID: DataChannel, + MessageType: new(tmcons.Message), + Priority: 12, + SendQueueCapacity: 64, + RecvBufferCapacity: 512, + RecvMessageCapacity: maxMsgSize, + Name: "data", }, VoteChannel: { - MsgType: new(tmcons.Message), - Descriptor: &p2p.ChannelDescriptor{ - ID: byte(VoteChannel), - Priority: 10, - SendQueueCapacity: 64, - RecvBufferCapacity: 4096, - RecvMessageCapacity: maxMsgSize, - MaxSendBytes: 4096, - }, + ID: VoteChannel, + MessageType: new(tmcons.Message), + Priority: 10, + SendQueueCapacity: 64, + RecvBufferCapacity: 4096, + RecvMessageCapacity: maxMsgSize, + Name: "vote", }, VoteSetBitsChannel: { - MsgType: new(tmcons.Message), - Descriptor: &p2p.ChannelDescriptor{ - ID: byte(VoteSetBitsChannel), - Priority: 5, - SendQueueCapacity: 8, - RecvBufferCapacity: 128, - RecvMessageCapacity: maxMsgSize, - MaxSendBytes: 50, - }, + ID: VoteSetBitsChannel, + MessageType: new(tmcons.Message), + Priority: 5, + SendQueueCapacity: 8, + RecvBufferCapacity: 128, + RecvMessageCapacity: maxMsgSize, + Name: "voteSet", }, } - - errReactorClosed = errors.New("reactor is closed") -) +} const ( StateChannel = p2p.ChannelID(0x20) @@ -101,12 +90,12 @@ const ( listenerIDConsensus = "consensus-reactor" ) -type ReactorOption func(*Reactor) +var errReactorClosed = errors.New("reactor is closed") // NOTE: Temporary interface for switching to block sync, we should get rid of v0. 
// See: https://github.com/tendermint/tendermint/issues/4595 type BlockSyncReactor interface { - SwitchToBlockSync(sm.State) error + SwitchToBlockSync(context.Context, sm.State) error GetMaxPeerBlockHeight() int64 @@ -119,34 +108,23 @@ type BlockSyncReactor interface { GetRemainingSyncTime() time.Duration } -//go:generate ../../scripts/mockery_generate.sh ConsSyncReactor -// ConsSyncReactor defines an interface used for testing abilities of node.startStateSync. -type ConsSyncReactor interface { - SwitchToConsensus(sm.State, bool) - SetStateSyncingMetrics(float64) - SetBlockSyncingMetrics(float64) -} - // Reactor defines a reactor for the consensus service. type Reactor struct { service.BaseService + logger log.Logger state *State - eventBus *types.EventBus + eventBus *eventbus.EventBus Metrics *Metrics - mtx tmsync.RWMutex + mtx sync.RWMutex peers map[types.NodeID]*PeerState waitSync bool + rs *cstypes.RoundState readySignal chan struct{} // closed when the node is ready to start consensus - stateCh *p2p.Channel - dataCh *p2p.Channel - voteCh *p2p.Channel - voteSetBitsCh *p2p.Channel - peerUpdates *p2p.PeerUpdates - - closeCh chan struct{} + peerEvents p2p.PeerEventSubscriber + chCreator p2p.ChannelCreator } // NewReactor returns a reference to a new consensus reactor, which implements @@ -156,34 +134,27 @@ type Reactor struct { func NewReactor( logger log.Logger, cs *State, - stateCh *p2p.Channel, - dataCh *p2p.Channel, - voteCh *p2p.Channel, - voteSetBitsCh *p2p.Channel, - peerUpdates *p2p.PeerUpdates, + channelCreator p2p.ChannelCreator, + peerEvents p2p.PeerEventSubscriber, + eventBus *eventbus.EventBus, waitSync bool, - options ...ReactorOption, + metrics *Metrics, ) *Reactor { - r := &Reactor{ - state: cs, - waitSync: waitSync, - peers: make(map[types.NodeID]*PeerState), - Metrics: NopMetrics(), - stateCh: stateCh, - dataCh: dataCh, - voteCh: voteCh, - voteSetBitsCh: voteSetBitsCh, - peerUpdates: peerUpdates, - readySignal: make(chan struct{}), - closeCh: make(chan struct{}), + r := &Reactor{ + logger: logger, + state: cs, + waitSync: waitSync, + rs: cs.GetRoundState(), + peers: make(map[types.NodeID]*PeerState), + eventBus: eventBus, + Metrics: metrics, + peerEvents: peerEvents, + chCreator: channelCreator, + readySignal: make(chan struct{}), } r.BaseService = *service.NewBaseService(logger, "Consensus", r) - for _, opt := range options { - opt(r) - } - if !r.waitSync { close(r.readySignal) } @@ -191,32 +161,67 @@ func NewReactor( return r } +type channelBundle struct { + state *p2p.Channel + data *p2p.Channel + vote *p2p.Channel + votSet *p2p.Channel +} + // OnStart starts separate go routines for each p2p Channel and listens for // envelopes on each. In addition, it also listens for peer updates and handles // messages on that p2p channel accordingly. The caller must be sure to execute // OnStop to ensure the outbound p2p Channels are closed.
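The constructor change above inverts the channel dependency: rather than receiving four pre-built p2p channels, the reactor stores a p2p.ChannelCreator and opens its channels during OnStart, failing startup if any channel cannot be created. A toy sketch of that wiring, with simplified stand-in types rather than the real p2p package:

```go
package main

import (
	"context"
	"fmt"
)

// channel and channelDescriptor are simplified stand-ins for
// p2p.Channel and p2p.ChannelDescriptor.
type channel struct{ name string }

type channelDescriptor struct{ name string }

// channelCreator plays the role of p2p.ChannelCreator: the node injects
// it, and the reactor opens channels on demand.
type channelCreator func(context.Context, channelDescriptor) (*channel, error)

type reactor struct{ chCreator channelCreator }

func (r *reactor) onStart(ctx context.Context) error {
	for _, desc := range []channelDescriptor{{"state"}, {"data"}, {"vote"}, {"voteSet"}} {
		ch, err := r.chCreator(ctx, desc)
		if err != nil {
			return err // a single failed channel aborts startup
		}
		fmt.Println("opened channel:", ch.name)
	}
	return nil
}

func main() {
	r := &reactor{chCreator: func(_ context.Context, d channelDescriptor) (*channel, error) {
		return &channel{name: d.name}, nil
	}}
	if err := r.onStart(context.Background()); err != nil {
		fmt.Println("start failed:", err)
	}
}
```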
-func (r *Reactor) OnStart() error { - r.Logger.Debug("consensus wait sync", "wait_sync", r.WaitSync()) +func (r *Reactor) OnStart(ctx context.Context) error { + r.logger.Debug("consensus wait sync", "wait_sync", r.WaitSync()) + + peerUpdates := r.peerEvents(ctx) + + var chBundle channelBundle + var err error + + chans := getChannelDescriptors() + chBundle.state, err = r.chCreator(ctx, chans[StateChannel]) + if err != nil { + return err + } + + chBundle.data, err = r.chCreator(ctx, chans[DataChannel]) + if err != nil { + return err + } + + chBundle.vote, err = r.chCreator(ctx, chans[VoteChannel]) + if err != nil { + return err + } + + chBundle.votSet, err = r.chCreator(ctx, chans[VoteSetBitsChannel]) + if err != nil { + return err + } // start routine that computes peer statistics for evaluating peer quality // // TODO: Evaluate if we need this to be synchronized via WaitGroup as to not // leak the goroutine when stopping the reactor. - go r.peerStatsRoutine() + go r.peerStatsRoutine(ctx, peerUpdates) - r.subscribeToBroadcastEvents() + r.subscribeToBroadcastEvents(ctx, chBundle.state) if !r.WaitSync() { - if err := r.state.Start(); err != nil { + if err := r.state.Start(ctx); err != nil { return err } } - go r.processMsgCh(r.stateCh) - go r.processMsgCh(r.dataCh) - go r.processMsgCh(r.voteCh) - go r.processMsgCh(r.voteSetBitsCh) - go r.processPeerUpdates() + go r.updateRoundStateRoutine(ctx) + + go r.processMsgCh(ctx, chBundle.state, chBundle) + go r.processMsgCh(ctx, chBundle.data, chBundle) + go r.processMsgCh(ctx, chBundle.vote, chBundle) + go r.processMsgCh(ctx, chBundle.votSet, chBundle) + go r.processPeerUpdates(ctx, peerUpdates, chBundle) return nil } @@ -225,35 +230,11 @@ func (r *Reactor) OnStart() error { // blocking until they all exit, as well as unsubscribing from events and stopping // state. func (r *Reactor) OnStop() { - - r.unsubscribeFromBroadcastEvents() - - if err := r.state.Stop(); err != nil { - r.Logger.Error("failed to stop consensus state", "err", err) - } + r.state.Stop() if !r.WaitSync() { r.state.Wait() } - - r.mtx.Lock() - // Close and wait for each of the peers to shutdown. - // This is safe to perform with the lock since none of the peers require the - // lock to complete any of the methods that the waitgroup is waiting on. - for _, state := range r.peers { - state.closer.Close() - } - r.mtx.Unlock() - - // Close closeCh to signal to all spawned goroutines to gracefully exit. All - // p2p Channels should execute Close(). - close(r.closeCh) -} - -// SetEventBus sets the reactor's event bus. -func (r *Reactor) SetEventBus(b *types.EventBus) { - r.eventBus = b - r.state.SetEventBus(b) } // WaitSync returns whether the consensus reactor is waiting for state/block sync. @@ -264,15 +245,10 @@ func (r *Reactor) WaitSync() bool { return r.waitSync } -// ReactorMetrics sets the reactor's metrics as an option function. -func ReactorMetrics(metrics *Metrics) ReactorOption { - return func(r *Reactor) { r.Metrics = metrics } -} - // SwitchToConsensus switches from block-sync mode to consensus mode. It resets // the state, turns off block-sync, and starts the consensus state-machine. -func (r *Reactor) SwitchToConsensus(state sm.State, skipWAL bool) { - r.Logger.Info("switching to consensus") +func (r *Reactor) SwitchToConsensus(ctx context.Context, state sm.State, skipWAL bool) { + r.logger.Info("switching to consensus") // We have no votes, so reconstruct LastPrecommits from SeenCommit. 
if state.LastBlockHeight > 0 { @@ -282,6 +258,15 @@ func (r *Reactor) SwitchToConsensus(state sm.State, skipWAL bool) { // NOTE: The line below causes broadcastNewRoundStepRoutine() to broadcast a // NewRoundStepMessage. r.state.updateToState(state, nil) + if err := r.state.Start(ctx); err != nil { + panic(fmt.Sprintf(`failed to start consensus state: %v + +conS: +%+v + +conR: +%+v`, err, r.state, r)) + } r.mtx.Lock() r.waitSync = false @@ -295,19 +280,9 @@ func (r *Reactor) SwitchToConsensus(state sm.State, skipWAL bool) { r.state.doWALCatchup = false } - if err := r.state.Start(); err != nil { - panic(fmt.Sprintf(`failed to start consensus state: %v - -conS: -%+v - -conR: -%+v`, err, r.state, r)) - } - d := types.EventDataBlockSyncStatus{Complete: true, Height: state.LastBlockHeight} if err := r.eventBus.PublishEventBlockSyncStatus(d); err != nil { - r.Logger.Error("failed to emit the blocksync complete event", "err", err) + r.logger.Error("failed to emit the blocksync complete event", "err", err) } } @@ -321,22 +296,6 @@ func (r *Reactor) String() string { return "ConsensusReactor" } -// StringIndented returns an indented string representation of the Reactor. -func (r *Reactor) StringIndented(indent string) string { - r.mtx.RLock() - defer r.mtx.RUnlock() - - s := "ConsensusReactor{\n" - s += indent + " " + r.state.StringIndented(indent+" ") + "\n" - - for _, ps := range r.peers { - s += indent + " " + ps.StringIndented(indent+" ") + "\n" - } - - s += indent + "}" - return s -} - // GetPeerState returns PeerState for a given NodeID. func (r *Reactor) GetPeerState(peerID types.NodeID) (*PeerState, bool) { r.mtx.RLock() @@ -349,67 +308,97 @@ func (r *Reactor) GetPeerState(peerID types.NodeID) (*PeerState, bool) { // subscribeToBroadcastEvents subscribes for new round steps and votes using the // internal pubsub defined in the consensus state to broadcast them to peers // upon receiving. 
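The listener callbacks registered in subscribeToBroadcastEvents now return an error, so a failed broadcast propagates to the event switch instead of being dropped. A toy event switch illustrating the shape of that contract (the real libs/events API also takes a listener ID and differs in detail):

```go
package main

import "fmt"

// eventSwitch is a toy model of libs/events: listeners are callbacks
// keyed by event name, and firing an event stops at the first error.
type eventSwitch struct {
	listeners map[string][]func(data interface{}) error
}

func (s *eventSwitch) AddListenerForEvent(event string, cb func(data interface{}) error) {
	if s.listeners == nil {
		s.listeners = make(map[string][]func(data interface{}) error)
	}
	s.listeners[event] = append(s.listeners[event], cb)
}

func (s *eventSwitch) FireEvent(event string, data interface{}) error {
	for _, cb := range s.listeners[event] {
		if err := cb(data); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	var evsw eventSwitch
	evsw.AddListenerForEvent("NewRoundStep", func(data interface{}) error {
		fmt.Println("broadcasting round step:", data)
		return nil // a failed channel send would be returned here
	})
	if err := evsw.FireEvent("NewRoundStep", "height=1/round=0"); err != nil {
		fmt.Println("listener failed:", err)
	}
}
```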
-func (r *Reactor) subscribeToBroadcastEvents() { +func (r *Reactor) subscribeToBroadcastEvents(ctx context.Context, stateCh *p2p.Channel) { + onStopCh := r.state.getOnStopCh() + err := r.state.evsw.AddListenerForEvent( listenerIDConsensus, types.EventNewRoundStepValue, - func(data tmevents.EventData) { + func(data tmevents.EventData) error { rs := data.(*cstypes.RoundState) - err := r.broadcast(r.stateCh, rs.NewRoundStepMessage()) - r.logResult(err, r.Logger, "broadcasting round step message", "height", rs.Height, "round", rs.Round) + err := r.broadcast(ctx, stateCh, rs.NewRoundStepMessage()) + if err != nil { + return err + } + r.logResult(err, r.logger, "broadcasting round step message", "height", rs.Height, "round", rs.Round) select { - case r.state.onStopCh <- data.(*cstypes.RoundState): + case onStopCh <- data.(*cstypes.RoundState): + return nil + case <-ctx.Done(): + return ctx.Err() default: + return nil } }, ) if err != nil { - r.Logger.Error("failed to add listener for events", "err", err) + r.logger.Error("failed to add listener for events", "err", err) } err = r.state.evsw.AddListenerForEvent( listenerIDConsensus, types.EventValidBlockValue, - func(data tmevents.EventData) { + func(data tmevents.EventData) error { rs := data.(*cstypes.RoundState) - err := r.broadcast(r.stateCh, rs.NewValidBlockMessage()) - r.logResult(err, r.Logger, "broadcasting new valid block message", "height", rs.Height, "round", rs.Round) - + err := r.broadcast(ctx, stateCh, rs.NewValidBlockMessage()) + r.logResult(err, r.logger, "broadcasting new valid block message", "height", rs.Height, "round", rs.Round) + return err }, ) if err != nil { - r.Logger.Error("failed to add listener for events", "err", err) + r.logger.Error("failed to add listener for events", "err", err) } err = r.state.evsw.AddListenerForEvent( listenerIDConsensus, types.EventVoteValue, - func(data tmevents.EventData) { + func(data tmevents.EventData) error { vote := data.(*types.Vote) - err := r.broadcast(r.stateCh, vote.HasVoteMessage()) - r.logResult(err, r.Logger, "broadcasting HasVote message", "height", vote.Height, "round", vote.Round) + err := r.broadcast(ctx, stateCh, vote.HasVoteMessage()) + r.logResult(err, r.logger, "broadcasting HasVote message", "height", vote.Height, "round", vote.Round) + return err }, ) if err != nil { - r.Logger.Error("failed to add listener for events", "err", err) + r.logger.Error("failed to add listener for events", "err", err) } if err := r.state.evsw.AddListenerForEvent(listenerIDConsensus, types.EventCommitValue, - func(data tmevents.EventData) { + func(data tmevents.EventData) error { commit := data.(*types.Commit) - err := r.broadcast(r.stateCh, commit.HasCommitMessage()) - r.logResult(err, r.Logger, "broadcasting HasVote message", "height", commit.Height, "round", commit.Round) + err := r.broadcast(ctx, stateCh, commit.HasCommitMessage()) + r.logResult(err, r.logger, "broadcasting HasCommit message", "height", commit.Height, "round", commit.Round) + return err }); err != nil { - r.Logger.Error("Error adding listener for events", "err", err) + r.logger.Error("failed to add listener for events", "err", err) } } -func (r *Reactor) unsubscribeFromBroadcastEvents() { - r.state.evsw.RemoveListener(listenerIDConsensus) +func (r *Reactor) updateRoundStateRoutine(ctx context.Context) { + t := time.NewTicker(100 * time.Microsecond) + defer t.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-t.C: + rs := r.state.GetRoundState() + r.mtx.Lock() + r.rs = rs + r.mtx.Unlock() + } + } } -func (r
*Reactor) gossipDataForCatchup(rs *cstypes.RoundState, prs *cstypes.PeerRoundState, ps *PeerState) { - logger := r.Logger.With("height", prs.Height).With("peer", ps.peerID) +func (r *Reactor) getRoundState() *cstypes.RoundState { + r.mtx.RLock() + defer r.mtx.RUnlock() + return r.rs +} + +func (r *Reactor) gossipDataForCatchup(ctx context.Context, rs *cstypes.RoundState, prs *cstypes.PeerRoundState, ps *PeerState, chans channelBundle) { + logger := r.logger.With("height", prs.Height).With("peer", ps.peerID) if index, ok := prs.ProposalBlockParts.Not().PickRandom(); ok { // ensure that the peer's PartSetHeader is correct @@ -448,7 +437,7 @@ func (r *Reactor) gossipDataForCatchup(rs *cstypes.RoundState, prs *cstypes.Peer return } - if err := r.sendProposalBlockPart(ps, part, prs.Height, prs.Round); err != nil { + if err := r.sendProposalBlockPart(ctx, chans.data, ps, part, prs.Height, prs.Round); err != nil { logger.Error("cannot send proposal block part to the peer", "error", err) time.Sleep(r.state.config.PeerGossipSleepDuration) } @@ -458,7 +447,7 @@ func (r *Reactor) gossipDataForCatchup(rs *cstypes.RoundState, prs *cstypes.Peer // block parts already delivered - send commits? if rs.Height > 0 && !prs.HasCommit { - if err := r.gossipCommit(rs, ps, prs); err != nil { + if err := r.gossipCommit(ctx, chans.vote, rs, ps, prs); err != nil { logger.Error("cannot gossip commit to peer", "error", err) } else { time.Sleep(r.state.config.PeerGossipSleepDuration) @@ -470,8 +459,11 @@ func (r *Reactor) gossipDataForCatchup(rs *cstypes.RoundState, prs *cstypes.Peer time.Sleep(r.state.config.PeerGossipSleepDuration) } -func (r *Reactor) gossipDataRoutine(ps *PeerState) { - logger := r.Logger.With("peer", ps.peerID) +func (r *Reactor) gossipDataRoutine(ctx context.Context, ps *PeerState, dataCh *p2p.Channel, chans channelBundle) { + logger := r.logger.With("peer", ps.peerID) + + timer := time.NewTimer(0) + defer timer.Stop() OUTER_LOOP: for { @@ -480,20 +472,15 @@ OUTER_LOOP: } select { - case <-r.closeCh: - return - case <-ps.closer.Done(): - // The peer is marked for removal via a PeerUpdate as the doneCh was - // explicitly closed to signal we should exit. + case <-ctx.Done(): return - default: } - rs := r.state.GetRoundState() + rs := r.getRoundState() prs := ps.GetRoundState() - isValidator := r.isValidator(ps.ProTxHash) + isValidator := r.isValidator(ps.GetProTxHash()) // Send proposal Block parts? 
if (isValidator && rs.ProposalBlockParts.HasHeader(prs.ProposalBlockPartSetHeader)) || @@ -508,7 +495,7 @@ OUTER_LOOP: if index, ok := rs.ProposalBlockParts.BitArray().Sub(prs.ProposalBlockParts.Copy()).PickRandom(); ok { part := rs.ProposalBlockParts.GetPart(index) - if err := r.sendProposalBlockPart(ps, part, prs.Height, prs.Round); err != nil { + if err := r.sendProposalBlockPart(ctx, chans.data, ps, part, prs.Height, prs.Round); err != nil { logger.Error("cannot send proposal block part to the peer", "error", err) time.Sleep(r.state.config.PeerGossipSleepDuration) } @@ -531,7 +518,13 @@ OUTER_LOOP: "blockstoreBase", blockStoreBase, "blockstoreHeight", r.state.blockStore.Height(), ) - time.Sleep(r.state.config.PeerGossipSleepDuration) + + timer.Reset(r.state.config.PeerGossipSleepDuration) + select { + case <-timer.C: + case <-ctx.Done(): + return + } } else { ps.InitProposalBlockParts(blockMeta.BlockID.PartSetHeader) } @@ -541,13 +534,18 @@ OUTER_LOOP: continue OUTER_LOOP } - r.gossipDataForCatchup(rs, prs, ps) + r.gossipDataForCatchup(ctx, rs, prs, ps, chans) continue OUTER_LOOP } // if height and round don't match, sleep if (rs.Height != prs.Height) || (rs.Round != prs.Round) { - time.Sleep(r.state.config.PeerGossipSleepDuration) + timer.Reset(r.state.config.PeerGossipSleepDuration) + select { + case <-timer.C: + case <-ctx.Done(): + return + } continue OUTER_LOOP } @@ -561,7 +559,7 @@ OUTER_LOOP: // Proposal: share the proposal metadata with peer. { propProto := rs.Proposal.ToProto() - err := r.send(ps, r.dataCh, &tmcons.Proposal{ + err := r.send(ctx, ps, dataCh, &tmcons.Proposal{ Proposal: *propProto, }) r.logResult(err, logger, "sending proposal", "height", prs.Height, "round", prs.Round) @@ -579,7 +577,7 @@ OUTER_LOOP: pPol := rs.Votes.Prevotes(rs.Proposal.POLRound).BitArray() pPolProto := pPol.ToProto() - err := r.send(ps, r.dataCh, &tmcons.ProposalPOL{ + err := r.send(ctx, ps, dataCh, &tmcons.ProposalPOL{ Height: rs.Height, ProposalPolRound: rs.Proposal.POLRound, ProposalPol: *pPolProto, @@ -591,24 +589,29 @@ OUTER_LOOP: } // nothing to do -- sleep - time.Sleep(r.state.config.PeerGossipSleepDuration) + timer.Reset(r.state.config.PeerGossipSleepDuration) + select { + case <-timer.C: + case <-ctx.Done(): + return + } continue OUTER_LOOP } } -func (r *Reactor) sendProposalBlockPart(ps *PeerState, part *types.Part, height int64, round int32) error { +func (r *Reactor) sendProposalBlockPart(ctx context.Context, dataCh *p2p.Channel, ps *PeerState, part *types.Part, height int64, round int32) error { partProto, err := part.ToProto() if err != nil { return fmt.Errorf("failed to convert block part to proto, error: %w", err) } - err = r.send(ps, r.dataCh, &tmcons.BlockPart{ + err = r.send(ctx, ps, dataCh, &tmcons.BlockPart{ Height: height, // not our height, so it does not matter Round: round, // not our height, so it does not matter Part: *partProto, }) - r.logResult(err, r.Logger, "sending block part for catchup", "round", round, "height", height, "index", part.Index, "peer", ps.peerID) + r.logResult(err, r.logger, "sending block part for catchup", "round", round, "height", height, "index", part.Index, "peer", ps.peerID) if err == nil { ps.SetHasProposalBlockPart(height, round, int(part.Index)) } @@ -617,60 +620,62 @@ func (r *Reactor) sendProposalBlockPart(ps *PeerState, part *types.Part, height // pickSendVote picks a vote and sends it to the peer. It will return true if // there is a vote to send and false otherwise. 
-func (r *Reactor) pickSendVote(ps *PeerState, votes types.VoteSetReader) bool { - if vote, ok := ps.PickVoteToSend(votes); ok { - psJSON, _ := ps.ToJSON() - voteProto := vote.ToProto() - err := r.send(ps, r.voteCh, &tmcons.Vote{ - Vote: voteProto, - }) - r.logResult( - err, - r.Logger, - "sending vote message", - "ps", psJSON, - "peer", ps.peerID, - "vote", vote, - "peer_proTxHash", ps.ProTxHash.ShortString(), - "val_proTxHash", vote.ValidatorProTxHash.ShortString(), - "height", vote.Height, - "round", vote.Round, - "size", voteProto.Size(), - "isValidator", r.isValidator(vote.ValidatorProTxHash), - ) - - if err == nil { - ps.SetHasVote(vote) - return true - } +func (r *Reactor) pickSendVote(ctx context.Context, ps *PeerState, votes types.VoteSetReader, voteCh *p2p.Channel) (bool, error) { + vote, ok := ps.PickVoteToSend(votes) + if !ok { + return false, nil + } + + psJSON, _ := ps.ToJSON() + voteProto := vote.ToProto() + err := r.send(ctx, ps, voteCh, &tmcons.Vote{ + Vote: voteProto, + }) + r.logResult( + err, + r.logger, + "sending vote message", + "ps", psJSON, + "peer", ps.peerID, + "vote", vote, + "peer_proTxHash", ps.ProTxHash.ShortString(), + "val_proTxHash", vote.ValidatorProTxHash.ShortString(), + "height", vote.Height, + "round", vote.Round, + "size", voteProto.Size(), + "isValidator", r.isValidator(vote.ValidatorProTxHash), + ) + if err != nil { + return false, err + } + if err := ps.SetHasVote(vote); err != nil { + return false, err } - return false + return true, nil } -func (r *Reactor) sendCommit(ps *PeerState, commit *types.Commit) error { +func (r *Reactor) sendCommit(ctx context.Context, ps *PeerState, commit *types.Commit, voteCh *p2p.Channel) error { if commit == nil { return fmt.Errorf("attempt to send nil commit to peer %s", ps.peerID) } protoCommit := commit.ToProto() - err := r.send(ps, r.voteCh, &tmcons.Commit{ + err := r.send(ctx, ps, voteCh, &tmcons.Commit{ Commit: protoCommit, }) - r.logResult(err, r.Logger, "sending commit message", "height", commit.Height, "round", commit.Round, "peer", ps.peerID) + r.logResult(err, r.logger, "sending commit message", "height", commit.Height, "round", commit.Round, "peer", ps.peerID) return err } // send sends a message to provided channel. // If to is nil, message will be broadcasted. -func (r *Reactor) send(ps *PeerState, channel *p2p.Channel, msg proto.Message) error { +func (r *Reactor) send(ctx context.Context, ps *PeerState, channel *p2p.Channel, msg proto.Message) error { select { - case <-ps.closer.Done(): - return errPeerClosed - case <-r.closeCh: + case <-ctx.Done(): return errReactorClosed default: - return channel.Send(p2p.Envelope{ + return channel.Send(ctx, p2p.Envelope{ To: ps.peerID, Message: msg, }) @@ -678,12 +683,12 @@ func (r *Reactor) send(ps *PeerState, channel *p2p.Channel, msg proto.Message) e } // broadcast sends a broadcast message to all peers connected to the `channel`. -func (r *Reactor) broadcast(channel *p2p.Channel, msg proto.Message) error { +func (r *Reactor) broadcast(ctx context.Context, channel *p2p.Channel, msg proto.Message) error { select { - case <-r.closeCh: + case <-ctx.Done(): return errReactorClosed default: - return channel.Send(p2p.Envelope{ + return channel.Send(ctx, p2p.Envelope{ Broadcast: true, Message: msg, }) @@ -696,71 +701,88 @@ func (r *Reactor) logResult(err error, logger log.Logger, message string, keyval logger.Debug("error "+message, append(keyvals, "error", err)) return false } - logger.Debug("success "+message, keyvals...) return true } -func (r *Reactor) gossipVotesForHeight(rs *cstypes.RoundState, prs *cstypes.PeerRoundState, ps *PeerState) bool { - logger := r.Logger.With("height", prs.Height).With("peer", ps.peerID) +func (r *Reactor) gossipVotesForHeight( + ctx context.Context, + rs *cstypes.RoundState, + prs *cstypes.PeerRoundState, + ps *PeerState, + voteCh *p2p.Channel, +) (bool, error) { + logger := r.logger.With("height", prs.Height).With("peer", ps.peerID) // If there are lastPrecommits to send... if prs.Step == cstypes.RoundStepNewHeight { - if r.pickSendVote(ps, rs.LastPrecommits) { + if ok, err := r.pickSendVote(ctx, ps, rs.LastPrecommits, voteCh); err != nil { - logger.Debug("picked previous precommit vote to send") - return true + return false, err + } else if ok { + logger.Debug("picked rs.LastPrecommits to send") + return true, nil + } } // if there are POL prevotes to send... if prs.Step <= cstypes.RoundStepPropose && prs.Round != -1 && prs.Round <= rs.Round && prs.ProposalPOLRound != -1 { if polPrevotes := rs.Votes.Prevotes(prs.ProposalPOLRound); polPrevotes != nil { - if r.pickSendVote(ps, polPrevotes) { + if ok, err := r.pickSendVote(ctx, ps, polPrevotes, voteCh); err != nil { + return false, err + } else if ok { logger.Debug("picked rs.Prevotes(prs.ProposalPOLRound) to send", "round", prs.ProposalPOLRound) - return true + return true, nil } } } // if there are prevotes to send... if prs.Step <= cstypes.RoundStepPrevoteWait && prs.Round != -1 && prs.Round <= rs.Round { - if r.pickSendVote(ps, rs.Votes.Prevotes(prs.Round)) { + if ok, err := r.pickSendVote(ctx, ps, rs.Votes.Prevotes(prs.Round), voteCh); err != nil { + return false, err + } else if ok { logger.Debug("picked rs.Prevotes(prs.Round) to send", "round", prs.Round) - return true + return true, nil } } // if there are precommits to send... if prs.Step <= cstypes.RoundStepPrecommitWait && prs.Round != -1 && prs.Round <= rs.Round { - if r.pickSendVote(ps, rs.Votes.Precommits(prs.Round)) { + if ok, err := r.pickSendVote(ctx, ps, rs.Votes.Precommits(prs.Round), voteCh); err != nil { + return false, err + } else if ok { logger.Debug("picked rs.Precommits(prs.Round) to send", "round", prs.Round) - return true + return true, nil } } // if there are prevotes to send...(which are needed because of validBlock mechanism) if prs.Round != -1 && prs.Round <= rs.Round { - if r.pickSendVote(ps, rs.Votes.Prevotes(prs.Round)) { + if ok, err := r.pickSendVote(ctx, ps, rs.Votes.Prevotes(prs.Round), voteCh); err != nil { + return false, err + } else if ok { logger.Debug("picked rs.Prevotes(prs.Round) to send", "round", prs.Round) - return true + return true, nil } } // if there are POLPrevotes to send...
if prs.ProposalPOLRound != -1 { if polPrevotes := rs.Votes.Prevotes(prs.ProposalPOLRound); polPrevotes != nil { - if r.pickSendVote(ps, polPrevotes) { + if ok, err := r.pickSendVote(ctx, ps, polPrevotes, voteCh); err != nil { + return false, err + } else if ok { logger.Debug("picked rs.Prevotes(prs.ProposalPOLRound) to send", "round", prs.ProposalPOLRound) - return true + return true, nil } } } - return false + return false, nil } // gossipCommit sends a commit to the peer -func (r *Reactor) gossipCommit(rs *cstypes.RoundState, ps *PeerState, prs *cstypes.PeerRoundState) error { +func (r *Reactor) gossipCommit(ctx context.Context, voteCh *p2p.Channel, rs *cstypes.RoundState, ps *PeerState, prs *cstypes.PeerRoundState) error { // logger := r.Logger.With("height", rs.Height, "peer_height", prs.Height, "peer", ps.peerID) var commit *types.Commit blockStoreBase := r.state.blockStore.Base() @@ -774,10 +799,16 @@ func (r *Reactor) gossipCommit(rs *cstypes.RoundState, ps *PeerState, prs *cstyp } if commit == nil { + //r.logger.Debug("cannot find a block", + // "prs.Height", prs.Height, + // "rs.Height", rs.Height, + // "blockStoreBase", blockStoreBase, + // "stack", string(debug.Stack()), + //) return fmt.Errorf("commit at height %d not found", prs.Height) } - if err := r.sendCommit(ps, commit); err != nil { + if err := r.sendCommit(ctx, ps, commit, voteCh); err != nil { return fmt.Errorf("failed to send commit to peer: %w", err) } @@ -785,43 +816,32 @@ func (r *Reactor) gossipCommit(rs *cstypes.RoundState, ps *PeerState, prs *cstyp return nil // success } -func (r *Reactor) gossipVotesAndCommitRoutine(ps *PeerState) { - logger := r.Logger.With("peer", ps.peerID) +func (r *Reactor) gossipVotesAndCommitRoutine(ctx context.Context, voteCh *p2p.Channel, ps *PeerState) { + logger := r.logger.With("peer", ps.peerID) - // XXX: simple hack to throttle logs upon sleep - logThrottle := 0 + timer := time.NewTimer(0) + defer timer.Stop() -OUTER_LOOP: for { if !r.IsRunning() { return } select { - case <-ps.closer.Done(): - // The peer is marked for removal via a PeerUpdate as the doneCh was - // explicitly closed to signal we should exit. + case <-ctx.Done(): return - default: } - rs := r.state.GetRoundState() + rs := r.getRoundState() prs := ps.GetRoundState() isValidator := r.isValidator(ps.GetProTxHash()) - switch logThrottle { - case 1: // first sleep - logThrottle = 2 - case 2: // no more sleep - logThrottle = 0 - } - // If there are lastCommits to send... 
//prs.Step == cstypes.RoundStepNewHeight && if prs.Height > 0 && prs.Height+1 == rs.Height && !prs.HasCommit { - if err := r.gossipCommit(rs, ps, prs); err != nil { + if err := r.gossipCommit(ctx, voteCh, rs, ps, prs); err != nil { logger.Error("cannot send LastCommit to peer node", "error", err) } else { logger.Info("sending LastCommit to peer node", "peer_height", prs.Height) @@ -831,8 +851,10 @@ OUTER_LOOP: // if height matches, then send LastCommit, Prevotes, and Precommits if isValidator && rs.Height == prs.Height { - if r.gossipVotesForHeight(rs, prs, ps) { - continue OUTER_LOOP + if ok, err := r.gossipVotesForHeight(ctx, rs, prs, ps, voteCh); err != nil { + return + } else if ok { + continue } } @@ -841,51 +863,36 @@ OUTER_LOOP: // so we might need to resend it until it notifies us that it's all right blockStoreBase := r.state.blockStore.Base() if rs.Height >= prs.Height+2 && prs.Height >= blockStoreBase && !prs.HasCommit { - if err := r.gossipCommit(rs, ps, prs); err != nil { + if err := r.gossipCommit(ctx, voteCh, rs, ps, prs); err != nil { logger.Error("cannot gossip commit to peer", "error", err) } } - if logThrottle == 0 { - // we sent nothing -- sleep - logThrottle = 1 - logger.Debug( - "no votes to send; sleeping", - "peer_protxhash", ps.ProTxHash, - "rs.Height", rs.Height, - "prs.Height", prs.Height, - "localPV", rs.Votes.Prevotes(rs.Round).BitArray(), "peerPV", prs.Prevotes, - "localPC", rs.Votes.Precommits(rs.Round).BitArray(), "peerPC", prs.Precommits, - "isValidator", isValidator, - "validators", rs.Validators, - ) - } else if logThrottle == 2 { - logThrottle = 1 + timer.Reset(r.state.config.PeerGossipSleepDuration) + select { + case <-ctx.Done(): + return + case <-timer.C: } - - time.Sleep(r.state.config.PeerGossipSleepDuration) - continue OUTER_LOOP } } // NOTE: `queryMaj23Routine` has a simple crude design since it only comes // into play for liveness when there's a signature DDoS attack happening. -func (r *Reactor) queryMaj23Routine(ps *PeerState) { +func (r *Reactor) queryMaj23Routine(ctx context.Context, stateCh *p2p.Channel, ps *PeerState) { timer := time.NewTimer(0) defer timer.Stop() -OUTER_LOOP: + ctx, cancel := context.WithCancel(ctx) + defer cancel() + for { if !ps.IsRunning() { return } select { - case <-r.closeCh: - return - case <-ps.closer.Done(): - // The peer is marked for removal via a PeerUpdate as the doneCh was - // explicitly closed to signal we should exit. 
+ case <-ctx.Done(): return case <-timer.C: } @@ -897,16 +904,13 @@ OUTER_LOOP: // If peer is not a validator, we do nothing if !r.isValidator(ps.ProTxHash) { time.Sleep(r.state.config.PeerQueryMaj23SleepDuration) - continue OUTER_LOOP + continue } - // maybe send Height/Round/Prevotes - - rs := r.state.GetRoundState() - prs := ps.GetRoundState() - - // TODO create more reliable coppies of these + // TODO create more reliable copies of these // structures so the following go routines don't race + rs := r.getRoundState() + prs := ps.GetRoundState() wg := &sync.WaitGroup{} @@ -917,74 +921,79 @@ OUTER_LOOP: // maybe send Height/Round/Prevotes if maj23, ok := rs.Votes.Prevotes(prs.Round).TwoThirdsMajority(); ok { - err := r.send(ps, r.stateCh, &tmcons.VoteSetMaj23{ + err := r.send(ctx, ps, stateCh, &tmcons.VoteSetMaj23{ Height: prs.Height, Round: prs.Round, Type: tmproto.PrevoteType, BlockID: maj23.ToProto(), }) - r.logResult(err, r.Logger, "sending prevotes", "height", prs.Height, "round", prs.Round) + if err != nil { + cancel() + } + r.logResult(err, r.logger, "sending prevotes", "height", prs.Height, "round", prs.Round) } }(rs, prs) - } - wg.Add(1) - go func(rs *cstypes.RoundState, prs *cstypes.PeerRoundState) { - defer wg.Done() - - // maybe send Height/Round/Precommits - if rs.Height == prs.Height { - if maj23, ok := rs.Votes.Precommits(prs.Round).TwoThirdsMajority(); ok { - err := r.send(ps, r.stateCh, &tmcons.VoteSetMaj23{ - Height: prs.Height, - Round: prs.Round, - Type: tmproto.PrecommitType, - BlockID: maj23.ToProto(), - }, - ) - r.logResult(err, r.Logger, "sending precommits", "height", prs.Height, "round", prs.Round) - time.Sleep(r.state.config.PeerQueryMaj23SleepDuration) - } - } - }(rs, prs) + if prs.ProposalPOLRound >= 0 { + wg.Add(1) + go func(rs *cstypes.RoundState, prs *cstypes.PeerRoundState) { + defer wg.Done() - if prs.ProposalPOLRound >= 0 { - wg.Add(1) - go func(rs *cstypes.RoundState, prs *cstypes.PeerRoundState) { - defer wg.Done() - // maybe send Height/Round/ProposalPOL - if rs.Height == prs.Height && prs.ProposalPOLRound >= 0 { + // maybe send Height/Round/ProposalPOL if maj23, ok := rs.Votes.Prevotes(prs.ProposalPOLRound).TwoThirdsMajority(); ok { - err := r.send(ps, r.stateCh, &tmcons.VoteSetMaj23{ + err := r.send(ctx, ps, stateCh, &tmcons.VoteSetMaj23{ Height: prs.Height, Round: prs.ProposalPOLRound, Type: tmproto.PrevoteType, BlockID: maj23.ToProto(), }) - r.logResult(err, r.Logger, "sending POL prevotes", "height", prs.Height, "round", prs.Round) + if err != nil { + cancel() + } + } + }(rs, prs) + } + + wg.Add(1) + go func(rs *cstypes.RoundState, prs *cstypes.PeerRoundState) { + defer wg.Done() + + // maybe send Height/Round/Precommits + if maj23, ok := rs.Votes.Precommits(prs.Round).TwoThirdsMajority(); ok { + err := r.send(ctx, ps, stateCh, &tmcons.VoteSetMaj23{ + Height: prs.Height, + Round: prs.Round, + Type: tmproto.PrecommitType, + BlockID: maj23.ToProto(), + }) + if err != nil { + cancel() } + r.logResult(err, r.logger, "sending precommits", "height", prs.Height, "round", prs.Round) } }(rs, prs) + } - // Little point sending LastCommitRound/LastCommit, these are fleeting and - // non-blocking. + // Little point sending LastCommitRound/LastCommit, these are fleeting and + // non-blocking. 
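// Editor's aside (illustrative sketch, not part of this diff): the shape the
// hunks above converge on — one goroutine per message kind sharing a
// WaitGroup, with cancel() invoked on the first failed send so the sibling
// sends stop early. Here send is a hypothetical stand-in for r.send.
//
//	ctx, cancel := context.WithCancel(ctx)
//	defer cancel()
//	var wg sync.WaitGroup
//	for _, send := range sends {
//		wg.Add(1)
//		go func(send func(context.Context) error) {
//			defer wg.Done()
//			if err := send(ctx); err != nil {
//				cancel() // first failure short-circuits the remaining sends
//			}
//		}(send)
//	}
//	wg.Wait()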
+ if prs.CatchupCommitRound != -1 && prs.Height > 0 { wg.Add(1) go func(rs *cstypes.RoundState, prs *cstypes.PeerRoundState) { defer wg.Done() - // maybe send Height/CatchupCommitRound/CatchupCommit - if prs.CatchupCommitRound != -1 && prs.Height > 0 && prs.Height <= r.state.blockStore.Height() && - prs.Height >= r.state.blockStore.Base() { + if prs.Height <= r.state.blockStore.Height() && prs.Height >= r.state.blockStore.Base() { + // maybe send Height/CatchupCommitRound/CatchupCommit if commit := r.state.LoadCommit(prs.Height); commit != nil { - err := r.send(ps, r.stateCh, &tmcons.VoteSetMaj23{ + err := r.send(ctx, ps, stateCh, &tmcons.VoteSetMaj23{ Height: prs.Height, Round: commit.Round, Type: tmproto.PrecommitType, BlockID: commit.BlockID.ToProto(), }) - r.logResult(err, r.Logger, "sending catchup precommits", "height", prs.Height, "round", prs.Round) - - time.Sleep(r.state.config.PeerQueryMaj23SleepDuration) + if err != nil { + cancel() + } + r.logResult(err, r.logger, "sending catchup precommits", "height", prs.Height, "round", prs.Round) } } }(rs, prs) @@ -994,14 +1003,10 @@ OUTER_LOOP: go func() { defer close(waitSignal); wg.Wait() }() select { - case <-r.closeCh: - return - case <-ps.closer.Done(): - // The peer is marked for removal via a PeerUpdate as the doneCh was - // explicitly closed to signal we should exit. - return case <-waitSignal: timer.Reset(r.state.config.PeerQueryMaj23SleepDuration) + case <-ctx.Done(): + return } } } @@ -1016,28 +1021,28 @@ func (r *Reactor) isValidator(proTxHash types.ProTxHash) bool { // be the case, and we spawn all the relevant goroutine to broadcast messages to // the peer. During peer removal, we remove the peer for our set of peers and // signal to all spawned goroutines to gracefully exit in a non-blocking manner. -func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { - r.Logger.Debug("received peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status, "peer_protxhash", peerUpdate.ProTxHash.ShortString()) +func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpdate, chans channelBundle) { + r.logger.Debug("received peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status, + "peer_proTxHash", peerUpdate.ProTxHash.ShortString()) switch peerUpdate.Status { case p2p.PeerStatusUp: // Do not allow starting new broadcasting goroutines after reactor shutdown // has been initiated. This can happen after we've manually closed all - // peer goroutines and closed r.closeCh, but the router still sends in-flight - // peer updates. + // peer goroutines, but the router still sends in-flight peer updates. if !r.IsRunning() { return } - r.peerUp(peerUpdate, 3) + r.peerUp(ctx, peerUpdate, 3, chans) case p2p.PeerStatusDown: - r.peerDown(peerUpdate) + r.peerDown(ctx, peerUpdate, chans) } } // peerUp starts the peer. It recursively retries up to `retries` times if the peer is already closing. 
-func (r *Reactor) peerUp(peerUpdate p2p.PeerUpdate, retries int) { +func (r *Reactor) peerUp(ctx context.Context, peerUpdate p2p.PeerUpdate, retries int, chans channelBundle) { if retries < 1 { - r.Logger.Error("peer up failed: max retries exceeded", "peer", peerUpdate.NodeID) + r.logger.Error("peer up failed: max retries exceeded", "peer", peerUpdate.NodeID) return } @@ -1046,7 +1051,7 @@ func (r *Reactor) peerUp(peerUpdate p2p.PeerUpdate, retries int) { ps, ok := r.peers[peerUpdate.NodeID] if !ok { - ps = NewPeerState(r.Logger, peerUpdate.NodeID) + ps = NewPeerState(r.logger, peerUpdate.NodeID) ps.SetProTxHash(peerUpdate.ProTxHash) r.peers[peerUpdate.NodeID] = ps } else if len(peerUpdate.ProTxHash) > 0 { @@ -1054,12 +1059,12 @@ func (r *Reactor) peerUp(peerUpdate p2p.PeerUpdate, retries int) { } select { - case <-ps.closer.Done(): + case <-ctx.Done(): // Hmm, someone is closing this peer right now, let's wait and retry // Note: we run this in a goroutine to not block main goroutine in ps.broadcastWG.Wait() go func() { time.Sleep(r.state.config.PeerGossipSleepDuration) - r.peerUp(peerUpdate, retries-1) + r.peerUp(ctx, peerUpdate, retries-1, chans) }() return default: @@ -1071,52 +1076,51 @@ func (r *Reactor) peerUp(peerUpdate p2p.PeerUpdate, retries int) { // do not spawn multiple instances of the same goroutines and finally we // set the waitgroup counter so we know when all goroutines have exited. ps.SetRunning(true) + ctx, ps.cancel = context.WithCancel(ctx) go func() { select { - case <-r.closeCh: + case <-ctx.Done(): return case <-r.readySignal: - // do nothing if the peer has - // stopped while we've been waiting. - if !ps.IsRunning() { - return - } - // start goroutines for this peer - go r.gossipDataRoutine(ps) - go r.gossipVotesAndCommitRoutine(ps) - go r.queryMaj23Routine(ps) - - // Send our state to the peer. If we're block-syncing, broadcast a - // RoundStepMessage later upon SwitchToConsensus(). - if !r.WaitSync() { - go func() { - rs := r.state.GetRoundState() - err := r.send(ps, r.stateCh, rs.NewRoundStepMessage()) - r.logResult(err, r.Logger, "sending round step msg", "height", rs.Height, "round", rs.Round) - - }() - } + } + // do nothing if the peer has + // stopped while we've been waiting. + if !ps.IsRunning() { + return + } + // start goroutines for this peer + go r.gossipDataRoutine(ctx, ps, chans.data, chans) + go r.gossipVotesAndCommitRoutine(ctx, chans.vote, ps) + go r.queryMaj23Routine(ctx, chans.state, ps) + + // Send our state to the peer. If we're block-syncing, broadcast a + // RoundStepMessage later upon SwitchToConsensus(). + if !r.WaitSync() { + go func() { + rs := r.state.GetRoundState() + err := r.send(ctx, ps, chans.state, rs.NewRoundStepMessage()) + r.logResult(err, r.logger, "sending round step msg", "height", rs.Height, "round", rs.Round) + }() } }() } } -func (r *Reactor) peerDown(peerUpdate p2p.PeerUpdate) { - r.mtx.Lock() - defer r.mtx.Unlock() - +func (r *Reactor) peerDown(ctx context.Context, peerUpdate p2p.PeerUpdate, chans channelBundle) { + r.mtx.RLock() ps, ok := r.peers[peerUpdate.NodeID] + r.mtx.RUnlock() + if ok && ps.IsRunning() { // signal to all spawned goroutines for the peer to gracefully exit - ps.closer.Close() - go func() { r.mtx.Lock() delete(r.peers, peerUpdate.NodeID) r.mtx.Unlock() ps.SetRunning(false) + ps.cancel() }() } } @@ -1126,10 +1130,10 @@ func (r *Reactor) peerDown(peerUpdate p2p.PeerUpdate) { // If we fail to find the peer state for the envelope sender, we perform a no-op // and return. 
This can happen when we process the envelope after the peer is // removed. -func (r *Reactor) handleStateMessage(envelope p2p.Envelope, msgI Message) error { +func (r *Reactor) handleStateMessage(ctx context.Context, envelope *p2p.Envelope, msgI Message, voteSetCh *p2p.Channel) error { ps, ok := r.GetPeerState(envelope.From) if !ok || ps == nil { - r.Logger.Debug("failed to find peer state", "peer", envelope.From, "ch_id", "StateChannel") + r.logger.Debug("failed to find peer state", "peer", envelope.From, "ch_id", "StateChannel") return nil } @@ -1138,7 +1142,7 @@ func (r *Reactor) handleStateMessage(envelope p2p.Envelope, msgI Message) error initialHeight := r.state.InitialHeight() if err := msgI.(*NewRoundStepMessage).ValidateHeight(initialHeight); err != nil { - r.Logger.Error("peer sent us an invalid msg", "msg", msg, "err", err) + r.logger.Error("peer sent us an invalid msg", "msg", msg, "err", err) return err } @@ -1151,8 +1155,10 @@ func (r *Reactor) handleStateMessage(envelope p2p.Envelope, msgI Message) error ps.ApplyHasCommitMessage(msgI.(*HasCommitMessage)) case *tmcons.HasVote: - ps.ApplyHasVoteMessage(msgI.(*HasVoteMessage)) - + if err := ps.ApplyHasVoteMessage(msgI.(*HasVoteMessage)); err != nil { + r.logger.Error("applying HasVote message", "msg", msg, "err", err) + return err + } case *tmcons.VoteSetMaj23: height := r.state.CurrentHeight() votes := r.state.HeightVoteSet() @@ -1194,9 +1200,11 @@ func (r *Reactor) handleStateMessage(envelope p2p.Envelope, msgI Message) error eMsg.Votes = *votesProto } - r.voteSetBitsCh.Out <- p2p.Envelope{ + if err := voteSetCh.Send(ctx, p2p.Envelope{ To: envelope.From, Message: eMsg, + }); err != nil { + return err } default: @@ -1210,12 +1218,12 @@ func (r *Reactor) handleStateMessage(envelope p2p.Envelope, msgI Message) error // fail to find the peer state for the envelope sender, we perform a no-op and // return. This can happen when we process the envelope after the peer is // removed. 
// removed.
-func (r *Reactor) handleDataMessage(envelope p2p.Envelope, msgI Message) error { - logger := r.Logger.With("peer", envelope.From, "ch_id", "DataChannel") +func (r *Reactor) handleDataMessage(ctx context.Context, envelope *p2p.Envelope, msgI Message) error { + logger := r.logger.With("peer", envelope.From, "ch_id", "DataChannel") ps, ok := r.GetPeerState(envelope.From) if !ok || ps == nil { - r.Logger.Debug("failed to find peer state") + r.logger.Debug("failed to find peer state") return nil } @@ -1231,17 +1239,24 @@ func (r *Reactor) handleDataMessage(envelope p2p.Envelope, msgI Message) error { pMsg := msgI.(*ProposalMessage) ps.SetHasProposal(pMsg.Proposal) - r.state.peerMsgQueue <- msgInfo{pMsg, envelope.From} - + select { + case <-ctx.Done(): + return ctx.Err() + case r.state.peerMsgQueue <- msgInfo{pMsg, envelope.From, tmtime.Now()}: + } case *tmcons.ProposalPOL: ps.ApplyProposalPOLMessage(msgI.(*ProposalPOLMessage)) - case *tmcons.BlockPart: bpMsg := msgI.(*BlockPartMessage) ps.SetHasProposalBlockPart(bpMsg.Height, bpMsg.Round, int(bpMsg.Part.Index)) r.Metrics.BlockParts.With("peer_id", string(envelope.From)).Add(1) - r.state.peerMsgQueue <- msgInfo{bpMsg, envelope.From} + select { + case r.state.peerMsgQueue <- msgInfo{bpMsg, envelope.From, tmtime.Now()}: + return nil + case <-ctx.Done(): + return ctx.Err() + } default: return fmt.Errorf("received unknown message on DataChannel: %T", msg) @@ -1254,8 +1269,8 @@ func (r *Reactor) handleDataMessage(envelope p2p.Envelope, msgI Message) error { // fail to find the peer state for the envelope sender, we perform a no-op and // return. This can happen when we process the envelope after the peer is // removed. -func (r *Reactor) handleVoteMessage(envelope p2p.Envelope, msgI Message) error { - logger := r.Logger.With("peer", envelope.From, "ch_id", "VoteChannel") +func (r *Reactor) handleVoteMessage(ctx context.Context, envelope *p2p.Envelope, msgI Message) error { + logger := r.logger.With("peer", envelope.From, "ch_id", "VoteChannel") ps, ok := r.GetPeerState(envelope.From) if !ok || ps == nil { @@ -1269,6 +1284,7 @@ func (r *Reactor) handleVoteMessage(envelope p2p.Envelope, msgI Message) error { } logger.Debug("vote channel processing", "msg", envelope.Message, "type", fmt.Sprintf("%T", envelope.Message)) + switch msg := envelope.Message.(type) { case *tmcons.Commit: c, err := types.CommitFromProto(msg.Commit) @@ -1278,7 +1294,7 @@ func (r *Reactor) handleVoteMessage(envelope p2p.Envelope, msgI Message) error { ps.SetHasCommit(c) cMsg := msgI.(*CommitMessage) - r.state.peerMsgQueue <- msgInfo{cMsg, envelope.From} + r.state.peerMsgQueue <- msgInfo{cMsg, envelope.From, tmtime.Now()} case *tmcons.Vote: r.state.mtx.RLock() isValidator := r.state.Validators.HasProTxHash(r.state.privValidatorProTxHash) @@ -1290,9 +1306,16 @@ func (r *Reactor) handleVoteMessage(envelope p2p.Envelope, msgI Message) error { ps.EnsureVoteBitArrays(height, valSize) ps.EnsureVoteBitArrays(height-1, lastCommitSize) - ps.SetHasVote(vMsg.Vote) + if err := ps.SetHasVote(vMsg.Vote); err != nil { + return err + } - r.state.peerMsgQueue <- msgInfo{vMsg, envelope.From} + select { + case r.state.peerMsgQueue <- msgInfo{vMsg, envelope.From, tmtime.Now()}: + return nil + case <-ctx.Done(): + return ctx.Err() + } } default: return fmt.Errorf("received unknown message on VoteChannel: %T", msg) @@ -1305,12 +1328,12 @@ func (r *Reactor) handleVoteMessage(envelope p2p.Envelope, msgI Message) error { // VoteSetBitsChannel. 
If we fail to find the peer state for the envelope sender, // we perform a no-op and return. This can happen when we process the envelope // after the peer is removed. -func (r *Reactor) handleVoteSetBitsMessage(envelope p2p.Envelope, msgI Message) error { - logger := r.Logger.With("peer", envelope.From, "ch_id", "VoteSetBitsChannel") +func (r *Reactor) handleVoteSetBitsMessage(ctx context.Context, envelope *p2p.Envelope, msgI Message) error { + logger := r.logger.With("peer", envelope.From, "ch_id", "VoteSetBitsChannel") ps, ok := r.GetPeerState(envelope.From) if !ok || ps == nil { - r.Logger.Debug("failed to find peer state") + r.logger.Debug("failed to find peer state") return nil } @@ -1363,11 +1386,11 @@ func (r *Reactor) handleVoteSetBitsMessage(envelope p2p.Envelope, msgI Message) // the p2p channel. // // NOTE: We block on consensus state for proposals, block parts, and votes. -func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) { +func (r *Reactor) handleMessage(ctx context.Context, envelope *p2p.Envelope, chans channelBundle) (err error) { defer func() { if e := recover(); e != nil { err = fmt.Errorf("panic in processing message: %v", e) - r.Logger.Error( + r.logger.Error( "recovering from processing message panic", "err", err, "stack", string(debug.Stack()), @@ -1381,32 +1404,29 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err // and because a large part of the core business logic depends on these // domain types opposed to simply working with the Proto types. protoMsg := new(tmcons.Message) - if err := protoMsg.Wrap(envelope.Message); err != nil { + if err = protoMsg.Wrap(envelope.Message); err != nil { return err } - msgI, err := MsgFromProto(protoMsg) + var msgI Message + msgI, err = MsgFromProto(protoMsg) if err != nil { return err } - // r.Logger.Debug("received message on channel", "ch_id", chID, "msg", msgI, "peer", envelope.From, "type", fmt.Sprintf("%T", msgI)) + //r.logger.Debug("received message", "ch_id", envelope.ChannelID, "message", msgI, "peer", envelope.From) - switch chID { + switch envelope.ChannelID { case StateChannel: - err = r.handleStateMessage(envelope, msgI) - + err = r.handleStateMessage(ctx, envelope, msgI, chans.votSet) case DataChannel: - err = r.handleDataMessage(envelope, msgI) - + err = r.handleDataMessage(ctx, envelope, msgI) case VoteChannel: - err = r.handleVoteMessage(envelope, msgI) - + err = r.handleVoteMessage(ctx, envelope, msgI) case VoteSetBitsChannel: - err = r.handleVoteSetBitsMessage(envelope, msgI) - + err = r.handleVoteSetBitsMessage(ctx, envelope, msgI) default: - err = fmt.Errorf("unknown channel ID (%d) for envelope (%v)", chID, envelope) + err = fmt.Errorf("unknown channel ID (%d) for envelope (%v)", envelope.ChannelID, envelope) } return err @@ -1417,20 +1437,17 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err // Any error encountered during message execution will result in a PeerError being sent // on the StateChannel or DataChannel or VoteChannel or VoteSetBitsChannel. // When the reactor is stopped, we will catch the signal and close the p2p Channel gracefully. 
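// Editor's aside (sketch of the receive-loop shape introduced just below,
// built only from the iterator API visible in this diff — Receive, Next,
// Envelope, SendError — not a definitive rendering of the p2p package):
//
//	iter := msgCh.Receive(ctx)
//	for iter.Next(ctx) { // Next returns false once ctx is canceled
//		envelope := iter.Envelope()
//		if err := r.handleMessage(ctx, envelope, chans); err != nil {
//			peerErr := p2p.PeerError{NodeID: envelope.From, Err: err}
//			if serr := msgCh.SendError(ctx, peerErr); serr != nil {
//				return // the channel itself is shutting down
//			}
//		}
//	}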
-func (r *Reactor) processMsgCh(msgCh *p2p.Channel) { - defer msgCh.Close() - for { - select { - case <-r.closeCh: - return - default: - envelope := <-msgCh.In - if err := r.handleMessage(msgCh.ID, envelope); err != nil { - r.Logger.Error("failed to process message", "ch_id", msgCh.ID, "envelope", envelope, "err", err) - msgCh.Error <- p2p.PeerError{ - NodeID: envelope.From, - Err: err, - } +func (r *Reactor) processMsgCh(ctx context.Context, msgCh *p2p.Channel, chBundle channelBundle) { + iter := msgCh.Receive(ctx) + for iter.Next(ctx) { + envelope := iter.Envelope() + if err := r.handleMessage(ctx, envelope, chBundle); err != nil { + r.logger.Error("failed to process message", "ch_id", envelope.ChannelID, "envelope", envelope, "err", err) + if serr := msgCh.SendError(ctx, p2p.PeerError{ + NodeID: envelope.From, + Err: err, + }); serr != nil { + return } } } @@ -1439,25 +1456,21 @@ func (r *Reactor) processMsgCh(msgCh *p2p.Channel) { // processPeerUpdates initiates a blocking process where we listen for and handle // PeerUpdate messages. When the reactor is stopped, we will catch the signal and // close the p2p PeerUpdatesCh gracefully. -func (r *Reactor) processPeerUpdates() { - defer r.peerUpdates.Close() - +func (r *Reactor) processPeerUpdates(ctx context.Context, peerUpdates *p2p.PeerUpdates, chans channelBundle) { for { select { - case peerUpdate := <-r.peerUpdates.Updates(): - r.processPeerUpdate(peerUpdate) - - case <-r.closeCh: - r.Logger.Debug("stopped listening on peer updates channel; closing...") + case <-ctx.Done(): return + case peerUpdate := <-peerUpdates.Updates(): + r.processPeerUpdate(ctx, peerUpdate, chans) } } } -func (r *Reactor) peerStatsRoutine() { +func (r *Reactor) peerStatsRoutine(ctx context.Context, peerUpdates *p2p.PeerUpdates) { for { if !r.IsRunning() { - r.Logger.Info("stopping peerStatsRoutine") + r.logger.Info("stopping peerStatsRoutine") return } @@ -1465,14 +1478,14 @@ func (r *Reactor) peerStatsRoutine() { case msg := <-r.state.statsMsgQueue: ps, ok := r.GetPeerState(msg.PeerID) if !ok || ps == nil { - r.Logger.Debug("attempt to update stats for non-existent peer", "peer", msg.PeerID) + r.logger.Debug("attempt to update stats for non-existent peer", "peer", msg.PeerID) continue } switch msg.Msg.(type) { case *CommitMessage: if numCommits := ps.RecordCommit(); numCommits%commitsToContributeToBecomeGoodPeer == 0 { - r.peerUpdates.SendUpdate(p2p.PeerUpdate{ + peerUpdates.SendUpdate(ctx, p2p.PeerUpdate{ NodeID: msg.PeerID, Status: p2p.PeerStatusGood, }) @@ -1480,7 +1493,7 @@ func (r *Reactor) peerStatsRoutine() { case *VoteMessage: if numVotes := ps.RecordVote(); numVotes%votesToContributeToBecomeGoodPeer == 0 { - r.peerUpdates.SendUpdate(p2p.PeerUpdate{ + peerUpdates.SendUpdate(ctx, p2p.PeerUpdate{ NodeID: msg.PeerID, Status: p2p.PeerStatusGood, }) @@ -1488,13 +1501,13 @@ func (r *Reactor) peerStatsRoutine() { case *BlockPartMessage: if numParts := ps.RecordBlockPart(); numParts%blocksToContributeToBecomeGoodPeer == 0 { - r.peerUpdates.SendUpdate(p2p.PeerUpdate{ + peerUpdates.SendUpdate(ctx, p2p.PeerUpdate{ NodeID: msg.PeerID, Status: p2p.PeerStatusGood, }) } } - case <-r.closeCh: + case <-ctx.Done(): return } } diff --git a/internal/consensus/reactor_test.go b/internal/consensus/reactor_test.go index f549be4873..f6e3661bd8 100644 --- a/internal/consensus/reactor_test.go +++ b/internal/consensus/reactor_test.go @@ -3,9 +3,9 @@ package consensus import ( "bytes" "context" + "errors" "fmt" "os" - "path" "sync" "testing" "time" @@ -22,17 +22,16 @@ import ( 
"github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/dash/llmq" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/mempool" - mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/p2ptest" + tmpubsub "github.com/tendermint/tendermint/internal/pubsub" sm "github.com/tendermint/tendermint/internal/state" statemocks "github.com/tendermint/tendermint/internal/state/mocks" "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus" "github.com/tendermint/tendermint/types" ) @@ -45,21 +44,29 @@ type reactorTestSuite struct { network *p2ptest.Network states map[types.NodeID]*State reactors map[types.NodeID]*Reactor - subs map[types.NodeID]types.Subscription - blocksyncSubs map[types.NodeID]types.Subscription + subs map[types.NodeID]eventbus.Subscription + blocksyncSubs map[types.NodeID]eventbus.Subscription stateChannels map[types.NodeID]*p2p.Channel dataChannels map[types.NodeID]*p2p.Channel voteChannels map[types.NodeID]*p2p.Channel voteSetBitsChannels map[types.NodeID]*p2p.Channel } -func chDesc(chID p2p.ChannelID) p2p.ChannelDescriptor { - return p2p.ChannelDescriptor{ - ID: byte(chID), +func chDesc(chID p2p.ChannelID, size int) *p2p.ChannelDescriptor { + return &p2p.ChannelDescriptor{ + ID: chID, + MessageType: new(tmcons.Message), + RecvBufferCapacity: size, } } -func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSuite { +func setup( + ctx context.Context, + t *testing.T, + numNodes int, + states []*State, + size int, +) *reactorTestSuite { t.Helper() privProTxHashes := make([]crypto.ProTxHash, len(states)) @@ -67,43 +74,65 @@ func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSu privProTxHashes[i] = state.privValidatorProTxHash } rts := &reactorTestSuite{ - network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes, ProTxHashes: privProTxHashes}), + network: p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: numNodes, ProTxHashes: privProTxHashes}), states: make(map[types.NodeID]*State), reactors: make(map[types.NodeID]*Reactor, numNodes), - subs: make(map[types.NodeID]types.Subscription, numNodes), - blocksyncSubs: make(map[types.NodeID]types.Subscription, numNodes), + subs: make(map[types.NodeID]eventbus.Subscription, numNodes), + blocksyncSubs: make(map[types.NodeID]eventbus.Subscription, numNodes), + } + + rts.stateChannels = rts.network.MakeChannelsNoCleanup(ctx, t, chDesc(StateChannel, size)) + rts.dataChannels = rts.network.MakeChannelsNoCleanup(ctx, t, chDesc(DataChannel, size)) + rts.voteChannels = rts.network.MakeChannelsNoCleanup(ctx, t, chDesc(VoteChannel, size)) + rts.voteSetBitsChannels = rts.network.MakeChannelsNoCleanup(ctx, t, chDesc(VoteSetBitsChannel, size)) + + ctx, cancel := context.WithCancel(ctx) + t.Cleanup(cancel) + + chCreator := func(nodeID types.NodeID) p2p.ChannelCreator { + return func(ctx context.Context, desc *p2p.ChannelDescriptor) (*p2p.Channel, error) { + switch desc.ID { + case StateChannel: + return rts.stateChannels[nodeID], nil + case DataChannel: + return rts.dataChannels[nodeID], 
nil + case VoteChannel: + return rts.voteChannels[nodeID], nil + case VoteSetBitsChannel: + return rts.voteSetBitsChannels[nodeID], nil + default: + return nil, fmt.Errorf("invalid channel; %v", desc.ID) + } + } } - rts.stateChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(StateChannel), new(tmcons.Message), size) - rts.dataChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(DataChannel), new(tmcons.Message), size) - rts.voteChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(VoteChannel), new(tmcons.Message), size) - rts.voteSetBitsChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(VoteSetBitsChannel), new(tmcons.Message), size) - - ctx, cancel := context.WithCancel(context.Background()) - for i := 0; i < numNodes; i++ { state := states[i] node := rts.network.NodeByProTxHash(state.privValidatorProTxHash) require.NotNil(t, node) nodeID := node.NodeID reactor := NewReactor( - state.Logger.With("node", nodeID), + state.logger.With("node", nodeID), state, - rts.stateChannels[nodeID], - rts.dataChannels[nodeID], - rts.voteChannels[nodeID], - rts.voteSetBitsChannels[nodeID], - node.MakePeerUpdates(t), + chCreator(nodeID), + func(ctx context.Context) *p2p.PeerUpdates { return node.MakePeerUpdates(ctx, t) }, + state.eventBus, true, + NopMetrics(), ) - state.timeoutTicker.SetLogger(state.Logger.With("impl", "TimeoutTicker")) - - reactor.SetEventBus(state.eventBus) - blocksSub, err := state.eventBus.Subscribe(ctx, testSubscriber, types.EventQueryNewBlock, size) + blocksSub, err := state.eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: testSubscriber, + Query: types.EventQueryNewBlock, + Limit: size, + }) require.NoError(t, err) - fsSub, err := state.eventBus.Subscribe(ctx, testSubscriber, types.EventQueryBlockSyncStatus, size) + fsSub, err := state.eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: testSubscriber, + Query: types.EventQueryBlockSyncStatus, + Limit: size, + }) require.NoError(t, err) rts.states[nodeID] = state @@ -116,25 +145,17 @@ func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSu require.NoError(t, state.blockExec.Store().Save(state.state)) } - require.NoError(t, reactor.Start()) + require.NoError(t, reactor.Start(ctx)) require.True(t, reactor.IsRunning()) + t.Cleanup(reactor.Wait) } require.Len(t, rts.reactors, numNodes) // start the in-memory network and connect all peers with each other - rts.network.Start(t) + rts.network.Start(ctx, t) - t.Cleanup(func() { - for _, r := range rts.reactors { - require.NoError(t, r.eventBus.Stop()) - require.NoError(t, r.Stop()) - require.False(t, r.IsRunning()) - } - - leaktest.Check(t) - cancel() - }) + t.Cleanup(leaktest.Check(t)) return rts } @@ -147,54 +168,89 @@ func validateBlock(block *types.Block, activeVals map[string]struct{}) error { } func waitForAndValidateBlock( + bctx context.Context, t *testing.T, n int, activeVals map[string]struct{}, - blocksSubs []types.Subscription, + blocksSubs []eventbus.Subscription, states []*State, txs ...[]byte, ) { + t.Helper() + + ctx, cancel := context.WithCancel(bctx) + defer cancel() fn := func(j int) { - msg := <-blocksSubs[j].Out() - newBlock := msg.Data().(types.EventDataNewBlock).Block + msg, err := blocksSubs[j].Next(ctx) + switch { + case errors.Is(err, context.DeadlineExceeded): + return + case errors.Is(err, context.Canceled): + return + case err != nil: + cancel() // terminate other workers + require.NoError(t, err) + return + } + newBlock := msg.Data().(types.EventDataNewBlock).Block require.NoError(t, 
validateBlock(newBlock, activeVals)) for _, tx := range txs { - require.NoError(t, assertMempool(states[j].txNotifier).CheckTx(context.Background(), tx, nil, mempool.TxInfo{})) + err := assertMempool(t, states[j].txNotifier).CheckTx(ctx, tx, nil, mempool.TxInfo{}) + if errors.Is(err, types.ErrTxInCache) { + continue + } + require.NoError(t, err) } } var wg sync.WaitGroup - wg.Add(n) - for i := 0; i < n; i++ { + wg.Add(1) go func(j int) { + defer wg.Done() fn(j) - wg.Done() }(i) } wg.Wait() + + if err := ctx.Err(); errors.Is(err, context.DeadlineExceeded) { + t.Fatal("encountered timeout") + } } func waitForAndValidateBlockWithTx( + bctx context.Context, t *testing.T, n int, activeVals map[string]struct{}, - blocksSubs []types.Subscription, + blocksSubs []eventbus.Subscription, states []*State, txs ...[]byte, ) { + t.Helper() + ctx, cancel := context.WithCancel(bctx) + defer cancel() fn := func(j int) { ntxs := 0 - BLOCK_TX_LOOP: for { - msg := <-blocksSubs[j].Out() - newBlock := msg.Data().(types.EventDataNewBlock).Block + msg, err := blocksSubs[j].Next(ctx) + switch { + case errors.Is(err, context.DeadlineExceeded): + return + case errors.Is(err, context.Canceled): + return + case err != nil: + cancel() // terminate other workers + t.Fatalf("problem waiting for %d subscription: %v", j, err) + return + } + newBlock := msg.Data().(types.EventDataNewBlock).Block require.NoError(t, validateBlock(newBlock, activeVals)) // check that txs match the txs we're waiting for. @@ -206,43 +262,59 @@ func waitForAndValidateBlockWithTx( } if ntxs == len(txs) { - break BLOCK_TX_LOOP + break } } } var wg sync.WaitGroup - wg.Add(n) - for i := 0; i < n; i++ { + wg.Add(1) go func(j int) { + defer wg.Done() fn(j) - wg.Done() }(i) } wg.Wait() + if err := ctx.Err(); errors.Is(err, context.DeadlineExceeded) { + t.Fatal("encountered timeout") + } } func waitForBlockWithUpdatedValsAndValidateIt( + bctx context.Context, t *testing.T, n int, quorumHash crypto.QuorumHash, - blocksSubs []types.Subscription, - states []*State, + blocksSubs []eventbus.Subscription, + css []*State, ) { + t.Helper() + ctx, cancel := context.WithCancel(bctx) + defer cancel() fn := func(j int) { var newBlock *types.Block - LOOP: for { - msg := <-blocksSubs[j].Out() + msg, err := blocksSubs[j].Next(ctx) + switch { + case errors.Is(err, context.DeadlineExceeded): + return + case errors.Is(err, context.Canceled): + return + case err != nil: + cancel() // terminate other workers + t.Fatalf("problem waiting for %d subscription: %v", j, err) + return + } + newBlock = msg.Data().(types.EventDataNewBlock).Block if bytes.Equal(newBlock.LastCommit.QuorumHash, quorumHash) { - break LOOP + break } - states[j].Logger.Info( + css[j].logger.Info( "waitForBlockWithUpdatedValsAndValidateIt: Got block with no new validators. 
Skipping", "height", newBlock.Height, @@ -254,16 +326,18 @@ func waitForBlockWithUpdatedValsAndValidateIt( } var wg sync.WaitGroup - wg.Add(n) - for i := 0; i < n; i++ { + wg.Add(1) go func(j int) { + defer wg.Done() fn(j) - wg.Done() }(i) } wg.Wait() + if err := ctx.Err(); errors.Is(err, context.DeadlineExceeded) { + t.Fatal("encountered timeout") + } } func ensureBlockSyncStatus(t *testing.T, msg tmpubsub.Message, complete bool, height int64) { @@ -276,57 +350,112 @@ func ensureBlockSyncStatus(t *testing.T, msg tmpubsub.Message, complete bool, he } func TestReactorBasic(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + cfg := configSetup(t) - n := 4 - states, cleanup := randConsensusState(t, + n := 2 + states, cleanup := makeConsensusState(ctx, t, cfg, n, "consensus_reactor_test", - newMockTickerFunc(true), newKVStore) + newMockTickerFunc(true)) t.Cleanup(cleanup) - rts := setup(t, n, states, 100) // buffer must be large enough to not deadlock + rts := setup(ctx, t, n, states, 100) // buffer must be large enough to not deadlock for _, reactor := range rts.reactors { state := reactor.state.GetState() - reactor.SwitchToConsensus(state, false) + reactor.SwitchToConsensus(ctx, state, false) } var wg sync.WaitGroup + errCh := make(chan error, len(rts.subs)) + for _, sub := range rts.subs { wg.Add(1) // wait till everyone makes the first new block - go func(s types.Subscription) { + go func(s eventbus.Subscription) { defer wg.Done() - <-s.Out() + _, err := s.Next(ctx) + switch { + case errors.Is(err, context.DeadlineExceeded): + return + case errors.Is(err, context.Canceled): + return + case err != nil: + errCh <- err + cancel() // terminate other workers + return + } }(sub) } wg.Wait() + if err := ctx.Err(); errors.Is(err, context.DeadlineExceeded) { + t.Fatal("encountered timeout") + } + select { + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + default: + } + errCh = make(chan error, len(rts.blocksyncSubs)) for _, sub := range rts.blocksyncSubs { wg.Add(1) // wait till everyone makes the consensus switch - go func(s types.Subscription) { + go func(s eventbus.Subscription) { defer wg.Done() - msg := <-s.Out() + msg, err := s.Next(ctx) + switch { + case errors.Is(err, context.DeadlineExceeded): + return + case errors.Is(err, context.Canceled): + return + case err != nil: + errCh <- err + cancel() // terminate other workers + return + } ensureBlockSyncStatus(t, msg, true, 0) }(sub) } wg.Wait() + if err := ctx.Err(); errors.Is(err, context.DeadlineExceeded) { + t.Fatal("encountered timeout") + } + + select { + case err := <-errCh: + if err != nil { + t.Fatal(err) + } + default: + } } func TestReactorWithEvidence(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + cfg := configSetup(t) n := 4 testName := "consensus_reactor_test" - tickerFunc := newMockTickerFunc(true) - appFunc := newKVStore + tickerFunc := newTickerFunc() + + consParams := factory.ConsensusParams() + + // if this parameter is not increased, then with a high probability the code will be stuck on proposal step + // due to a timeout handler performs before than validators will be ready for the message + consParams.Timeout.Propose = 1 * time.Second - genDoc, privVals := factory.RandGenesisDoc(cfg, n, 1) + genDoc, privVals := factory.RandGenesisDoc(cfg, n, 1, consParams) states := make([]*State, n) logger := consensusLogger() @@ -335,27 +464,31 @@ func TestReactorWithEvidence(t *testing.T) { stateStore := 
sm.NewStore(stateDB) state, err := sm.MakeGenesisState(genDoc) require.NoError(t, err) - thisConfig, err := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) + require.NoError(t, stateStore.Save(state)) + thisConfig, err := ResetConfig(t.TempDir(), fmt.Sprintf("%s_%d", testName, i)) require.NoError(t, err) defer os.RemoveAll(thisConfig.RootDir) - ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal - app := appFunc() + app := kvstore.NewApplication() vals := types.TM2PB.ValidatorUpdates(state.Validators) - app.InitChain(abci.RequestInitChain{ValidatorSet: &vals}) + _, err = app.InitChain(ctx, &abci.RequestInitChain{ValidatorSet: &vals}) + require.NoError(t, err) pv := privVals[i] blockDB := dbm.NewMemDB() blockStore := store.NewBlockStore(blockDB) // one for mempool, one for consensus - mtx := new(tmsync.Mutex) - proxyAppConnMem := abciclient.NewLocalClient(mtx, app) - proxyAppConnCon := abciclient.NewLocalClient(mtx, app) + proxyAppConnMem := abciclient.NewLocalClient(logger, app) + proxyAppConnCon := abciclient.NewLocalClient(logger, app) + + mempool := mempool.NewTxMempool( + log.NewNopLogger().With("module", "mempool"), + thisConfig.Mempool, + proxyAppConnMem, + ) - mempool := mempoolv0.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0) - mempool.SetLogger(log.TestingLogger().With("module", "mempool")) if thisConfig.Consensus.WaitForTxs() { mempool.EnableTxsAvailable() } @@ -363,37 +496,37 @@ func TestReactorWithEvidence(t *testing.T) { // mock the evidence pool // everyone includes evidence of another double signing vIdx := (i + 1) % n - ev, _ := types.NewMockDuplicateVoteEvidenceWithValidator(1, defaultTestTime, privVals[vIdx], cfg.ChainID(), state.Validators.QuorumType, state.Validators.QuorumHash) + + ev, err := types.NewMockDuplicateVoteEvidenceWithValidator(ctx, 1, defaultTestTime, privVals[vIdx], cfg.ChainID(), state.Validators.QuorumType, state.Validators.QuorumHash) + require.NoError(t, err) evpool := &statemocks.EvidencePool{} - evpool.On("CheckEvidence", mock.AnythingOfType("types.EvidenceList")).Return(nil) + evpool.On("CheckEvidence", ctx, mock.AnythingOfType("types.EvidenceList")).Return(nil) evpool.On("PendingEvidence", mock.AnythingOfType("int64")).Return([]types.Evidence{ ev}, int64(len(ev.Bytes()))) - evpool.On("Update", mock.AnythingOfType("state.State"), mock.AnythingOfType("types.EvidenceList")).Return() + evpool.On("Update", ctx, mock.AnythingOfType("state.State"), mock.AnythingOfType("types.EvidenceList")).Return() evpool2 := sm.EmptyEvidencePool{} - blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, proxyAppConnCon, mempool, evpool, blockStore, nil) - cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool2) - cs.SetLogger(log.TestingLogger().With("module", "consensus")) - cs.SetPrivValidator(pv) + eventBus := eventbus.NewDefault(log.NewNopLogger().With("module", "events")) + require.NoError(t, eventBus.Start(ctx)) + + blockExec := sm.NewBlockExecutor(stateStore, log.NewNopLogger(), proxyAppConnCon, mempool, evpool, blockStore, eventBus, sm.NopMetrics()) - eventBus := types.NewEventBus() - eventBus.SetLogger(log.TestingLogger().With("module", "events")) - err = eventBus.Start() + cs, err := NewState(logger.With("validator", i, "module", "consensus"), + thisConfig.Consensus, stateStore, blockExec, blockStore, mempool, evpool2, eventBus) require.NoError(t, err) - cs.SetEventBus(eventBus) + cs.SetPrivValidator(ctx, pv) cs.SetTimeoutTicker(tickerFunc()) - 
cs.SetLogger(logger.With("validator", i, "module", "consensus")) states[i] = cs } - rts := setup(t, n, states, 100) // buffer must be large enough to not deadlock + rts := setup(ctx, t, n, states, 100) // buffer must be large enough to not deadlock for _, reactor := range rts.reactors { state := reactor.state.GetState() - reactor.SwitchToConsensus(state, false) + reactor.SwitchToConsensus(ctx, state, false) } var wg sync.WaitGroup @@ -402,12 +535,16 @@ func TestReactorWithEvidence(t *testing.T) { // We expect for each validator that is the proposer to propose one piece of // evidence. - go func(s types.Subscription) { - msg := <-s.Out() - block := msg.Data().(types.EventDataNewBlock).Block + go func(s eventbus.Subscription) { + defer wg.Done() + msg, err := s.Next(ctx) + if !assert.NoError(t, err) { + cancel() + return + } - require.Len(t, block.Evidence.Evidence, 1) - wg.Done() + block := msg.Data().(types.EventDataNewBlock).Block + require.Len(t, block.Evidence, 1) }(sub) } @@ -415,35 +552,36 @@ func TestReactorWithEvidence(t *testing.T) { } func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + cfg := configSetup(t) - n := 4 - states, cleanup := randConsensusState( + n := 2 + states, cleanup := makeConsensusState(ctx, t, cfg, n, "consensus_reactor_test", newMockTickerFunc(true), - newKVStore, func(c *config.Config) { c.Consensus.CreateEmptyBlocks = false }, ) - t.Cleanup(cleanup) - rts := setup(t, n, states, 100) // buffer must be large enough to not deadlock + rts := setup(ctx, t, n, states, 100) // buffer must be large enough to not deadlock for _, reactor := range rts.reactors { state := reactor.state.GetState() - reactor.SwitchToConsensus(state, false) + reactor.SwitchToConsensus(ctx, state, false) } // send a tx require.NoError( t, - assertMempool(states[3].txNotifier).CheckTx( - context.Background(), + assertMempool(t, states[1].txNotifier).CheckTx( + ctx, []byte{1, 2, 3}, nil, mempool.TxInfo{}, @@ -455,9 +593,12 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) { wg.Add(1) // wait till everyone makes the first new block - go func(s types.Subscription) { - <-s.Out() - wg.Done() + go func(s eventbus.Subscription) { + defer wg.Done() + _, err := s.Next(ctx) + if !assert.NoError(t, err) { + cancel() + } }(sub) } @@ -465,19 +606,22 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) { } func TestReactorRecordsVotesAndBlockParts(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + cfg := configSetup(t) - n := 4 - states, cleanup := randConsensusState(t, + n := 2 + states, cleanup := makeConsensusState(ctx, t, cfg, n, "consensus_reactor_test", - newMockTickerFunc(true), newKVStore) + newMockTickerFunc(true)) t.Cleanup(cleanup) - rts := setup(t, n, states, 100) // buffer must be large enough to not deadlock + rts := setup(ctx, t, n, states, 100) // buffer must be large enough to not deadlock for _, reactor := range rts.reactors { state := reactor.state.GetState() - reactor.SwitchToConsensus(state, false) + reactor.SwitchToConsensus(ctx, state, false) } var wg sync.WaitGroup @@ -485,9 +629,12 @@ func TestReactorRecordsVotesAndBlockParts(t *testing.T) { wg.Add(1) // wait till everyone makes the first new block - go func(s types.Subscription) { - <-s.Out() - wg.Done() + go func(s eventbus.Subscription) { + defer wg.Done() + _, err := s.Next(ctx) + if !assert.NoError(t, err) { + cancel() + } }(sub) } @@ 
-524,32 +671,36 @@ func TestReactorRecordsVotesAndBlockParts(t *testing.T) { } func TestReactorValidatorSetChanges(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() + cfg := configSetup(t) - cfg.Consensus.TimeoutPropose = 2 * time.Second nPeers := 7 nVals := 4 states, _, _, cleanup := randConsensusNetWithPeers( + ctx, + t, cfg, nVals, nPeers, "consensus_val_set_changes_test", - func() TimeoutTicker { return NewTimeoutTicker() }, - newPersistentKVStoreWithPath, + newTickerFunc(), + newEpehemeralKVStore, ) t.Cleanup(cleanup) - rts := setup(t, nPeers, states, 100) // buffer must be large enough to not deadlock + rts := setup(ctx, t, nPeers, states, 100) // buffer must be large enough to not deadlock for _, reactor := range rts.reactors { state := reactor.state.GetState() - reactor.SwitchToConsensus(state, false) + reactor.SwitchToConsensus(ctx, state, false) } // map of active validators activeVals := make(map[string]struct{}) for i := 0; i < nVals; i++ { - proTxHash, err := states[i].privValidator.GetProTxHash(context.Background()) + proTxHash, err := states[i].privValidator.GetProTxHash(ctx) require.NoError(t, err) activeVals[proTxHash.String()] = struct{}{} @@ -560,15 +711,22 @@ func TestReactorValidatorSetChanges(t *testing.T) { wg.Add(1) // wait till everyone makes the first new block - go func(s types.Subscription) { - <-s.Out() - wg.Done() + go func(s eventbus.Subscription) { + defer wg.Done() + _, err := s.Next(ctx) + switch { + case err == nil: + case errors.Is(err, context.DeadlineExceeded): + default: + t.Log(err) + cancel() + } }(sub) } wg.Wait() - blocksSubs := []types.Subscription{} + blocksSubs := []eventbus.Subscription{} for _, sub := range rts.subs { blocksSubs = append(blocksSubs, sub) } @@ -577,55 +735,55 @@ func TestReactorValidatorSetChanges(t *testing.T) { require.NoError(t, err) // add one validator to a validator set - addOneVal, err := valsUpdater.addValidatorsAt(5, 1) + addOneVal, err := valsUpdater.addValidatorsAt(ctx, 5, 1) require.NoError(t, err) // add two validators to the validator set - addTwoVals, err := valsUpdater.addValidatorsAt(10, 2) + addTwoVals, err := valsUpdater.addValidatorsAt(ctx, 10, 2) require.NoError(t, err) // remove two validators from the validator set - removeTwoVals, err := valsUpdater.removeValidatorsAt(15, 2) + removeTwoVals, err := valsUpdater.removeValidatorsAt(ctx, 15, 2) require.NoError(t, err) // wait till everyone makes block 2 // ensure the commit includes all validators // send newValTx to change vals in block 3 - waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, states, addOneVal.tx) + waitForAndValidateBlock(ctx, t, nPeers, activeVals, blocksSubs, states, addOneVal.tx) // wait till everyone makes block 3. // it includes the commit for block 2, which is by the original validator set - waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, states, addOneVal.tx) + waitForAndValidateBlockWithTx(ctx, t, nPeers, activeVals, blocksSubs, states, addOneVal.tx) // wait till everyone makes block 4. 
// it includes the commit for block 3, which is by the original validator set - waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, states) + waitForAndValidateBlock(ctx, t, nPeers, activeVals, blocksSubs, states) // the commits for block 4 should be with the updated validator set activeVals = makeProTxHashMap(addOneVal.ProTxHashes) // wait till everyone makes block 5 // it includes the commit for block 4, which should have the updated validator set - waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, addOneVal.quorumHash, blocksSubs, states) + waitForBlockWithUpdatedValsAndValidateIt(ctx, t, nPeers, addOneVal.quorumHash, blocksSubs, states) validate(t, states) - waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, states, addTwoVals.tx) - waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, states, addTwoVals.tx) - waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, states) + waitForAndValidateBlock(ctx, t, nPeers, activeVals, blocksSubs, states, addTwoVals.tx) + waitForAndValidateBlockWithTx(ctx, t, nPeers, activeVals, blocksSubs, states, addTwoVals.tx) + waitForAndValidateBlock(ctx, t, nPeers, activeVals, blocksSubs, states) // the commits for block 8 should be with the updated validator set activeVals = makeProTxHashMap(addTwoVals.ProTxHashes) - waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, addTwoVals.quorumHash, blocksSubs, states) + waitForBlockWithUpdatedValsAndValidateIt(ctx, t, nPeers, addTwoVals.quorumHash, blocksSubs, states) validate(t, states) - waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, states, removeTwoVals.tx) - waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, states, removeTwoVals.tx) - waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, states) + waitForAndValidateBlock(ctx, t, nPeers, activeVals, blocksSubs, states, removeTwoVals.tx) + waitForAndValidateBlockWithTx(ctx, t, nPeers, activeVals, blocksSubs, states, removeTwoVals.tx) + waitForAndValidateBlock(ctx, t, nPeers, activeVals, blocksSubs, states) - waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, removeTwoVals.quorumHash, blocksSubs, states) + waitForBlockWithUpdatedValsAndValidateIt(ctx, t, nPeers, removeTwoVals.quorumHash, blocksSubs, states) validate(t, states) } @@ -670,12 +828,12 @@ func newValidatorUpdater(states []*State, nVals int) (*validatorUpdater, error) return &updater, nil } -func (u *validatorUpdater) addValidatorsAt(height int64, count int) (*quorumData, error) { +func (u *validatorUpdater) addValidatorsAt(ctx context.Context, height int64, count int) (*quorumData, error) { proTxHashes := u.lastProTxHashes l := len(proTxHashes) // add new newProTxHashes for i := l; i < l+count; i++ { - proTxHash, err := u.states[i].privValidator.GetProTxHash(context.Background()) + proTxHash, err := u.states[i].privValidator.GetProTxHash(ctx) if err != nil { return nil, err } @@ -685,18 +843,18 @@ func (u *validatorUpdater) addValidatorsAt(height int64, count int) (*quorumData if err != nil { return nil, err } - u.updateStatePrivVals(res, height) + u.updateStatePrivVals(ctx, res, height) return res, nil } -func (u *validatorUpdater) removeValidatorsAt(height int64, count int) (*quorumData, error) { +func (u *validatorUpdater) removeValidatorsAt(ctx context.Context, height int64, count int) (*quorumData, error) { l := len(u.lastProTxHashes) if count >= l { return nil, fmt.Errorf("you can not remove all validators") } var newProTxHashes []crypto.ProTxHash for i := 0; i < l-count; i++ { - proTxHash, err := 
u.states[i].privValidator.GetProTxHash(context.Background()) + proTxHash, err := u.states[i].privValidator.GetProTxHash(ctx) if err != nil { return nil, err } @@ -706,18 +864,18 @@ func (u *validatorUpdater) removeValidatorsAt(height int64, count int) (*quorumD if err != nil { return nil, err } - u.updateStatePrivVals(priValUpdate, height) + u.updateStatePrivVals(ctx, priValUpdate, height) return priValUpdate, nil } -func (u *validatorUpdater) updateStatePrivVals(data *quorumData, height int64) { +func (u *validatorUpdater) updateStatePrivVals(ctx context.Context, data *quorumData, height int64) { iter := data.Iter() for iter.Next() { proTxHash, qks := iter.Value() j := u.stateIndexMap[proTxHash.String()] priVal := u.states[j].PrivValidator() priVal.UpdatePrivateKey( - context.Background(), + ctx, qks.PrivKey, data.quorumHash, data.ThresholdPubKey, diff --git a/internal/consensus/replay.go b/internal/consensus/replay.go index 4881e182d3..591597474f 100644 --- a/internal/consensus/replay.go +++ b/internal/consensus/replay.go @@ -10,10 +10,11 @@ import ( "reflect" "time" - "github.com/tendermint/tendermint/crypto" - + abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/merkle" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/proxy" sm "github.com/tendermint/tendermint/internal/state" tmbytes "github.com/tendermint/tendermint/libs/bytes" @@ -40,7 +41,7 @@ var crc32c = crc32.MakeTable(crc32.Castagnoli) // Unmarshal and apply a single message to the consensus state as if it were // received in receiveRoutine. Lines that start with "#" are ignored. // NOTE: receiveRoutine should not be running. -func (cs *State) readReplayMessage(msg *TimedWALMessage, newStepSub types.Subscription) error { +func (cs *State) readReplayMessage(ctx context.Context, msg *TimedWALMessage, newStepSub eventbus.Subscription) error { // Skip meta messages which exist for demarcating boundaries. if _, ok := msg.Msg.(EndHeightMessage); ok { return nil @@ -49,20 +50,20 @@ func (cs *State) readReplayMessage(msg *TimedWALMessage, newStepSub types.Subscr // for logging switch m := msg.Msg.(type) { case types.EventDataRoundState: - cs.Logger.Info("Replay: New Step", "height", m.Height, "round", m.Round, "step", m.Step) + cs.logger.Info("Replay: New Step", "height", m.Height, "round", m.Round, "step", m.Step) // these are playback checks - ticker := time.After(time.Second * 2) if newStepSub != nil { - select { - case stepMsg := <-newStepSub.Out(): - m2 := stepMsg.Data().(types.EventDataRoundState) - if m.Height != m2.Height || m.Round != m2.Round || m.Step != m2.Step { - return fmt.Errorf("roundState mismatch. Got %v; Expected %v", m2, m) - } - case <-newStepSub.Canceled(): - return fmt.Errorf("failed to read off newStepSub.Out(). newStepSub was canceled") - case <-ticker: - return fmt.Errorf("failed to read off newStepSub.Out()") + ctxto, cancel := context.WithTimeout(ctx, 2*time.Second) + defer cancel() + stepMsg, err := newStepSub.Next(ctxto) + if errors.Is(err, context.DeadlineExceeded) { + return fmt.Errorf("subscription timed out: %w", err) + } else if err != nil { + return fmt.Errorf("subscription canceled: %w", err) + } + m2 := stepMsg.Data().(types.EventDataRoundState) + if m.Height != m2.Height || m.Round != m2.Round || m.Step != m2.Step { + return fmt.Errorf("roundState mismatch. 
Got %v; Expected %v", m2, m) } } case msgInfo: @@ -73,20 +74,20 @@ func (cs *State) readReplayMessage(msg *TimedWALMessage, newStepSub types.Subscr switch msg := m.Msg.(type) { case *ProposalMessage: p := msg.Proposal - cs.Logger.Info("Replay: Proposal", "height", p.Height, "round", p.Round, "header", + cs.logger.Info("Replay: Proposal", "height", p.Height, "round", p.Round, "header", p.BlockID.PartSetHeader, "pol", p.POLRound, "peer", peerID) case *BlockPartMessage: - cs.Logger.Info("Replay: BlockPart", "height", msg.Height, "round", msg.Round, "peer", peerID) + cs.logger.Info("Replay: BlockPart", "height", msg.Height, "round", msg.Round, "peer", peerID) case *VoteMessage: v := msg.Vote - cs.Logger.Info("Replay: Vote", "height", v.Height, "round", v.Round, "type", v.Type, + cs.logger.Info("Replay: Vote", "height", v.Height, "round", v.Round, "type", v.Type, "blockID", v.BlockID, "peer", peerID) } - cs.handleMsg(m, true) + cs.handleMsg(ctx, m, true) case timeoutInfo: - cs.Logger.Info("Replay: Timeout", "height", m.Height, "round", m.Round, "step", m.Step.String(), "dur", m.Duration) - cs.handleTimeout(m, cs.RoundState) + cs.logger.Info("Replay: Timeout", "height", m.Height, "round", m.Round, "step", m.Step, "dur", m.Duration) + cs.handleTimeout(ctx, m, cs.RoundState) default: return fmt.Errorf("replay: Unknown TimedWALMessage type: %v", reflect.TypeOf(msg.Msg)) } @@ -95,7 +96,7 @@ func (cs *State) readReplayMessage(msg *TimedWALMessage, newStepSub types.Subscr // Replay only those messages since the last block. `timeoutRoutine` should // run concurrently to read off tickChan. -func (cs *State) catchupReplay(csHeight int64) error { +func (cs *State) catchupReplay(ctx context.Context, csHeight int64) error { // Set replayMode to true so we don't log signing errors. 
cs.replayMode = true @@ -142,7 +143,7 @@ func (cs *State) catchupReplay(csHeight int64) error { &WALSearchOptions{IgnoreDataCorruptionErrors: true}, ) if err == io.EOF { - cs.Logger.Error("Replay: wal.group.Search returned EOF", "#ENDHEIGHT", endHeight) + cs.logger.Error("Replay: wal.group.Search returned EOF", "#ENDHEIGHT", endHeight) } else if err != nil { return err } @@ -155,25 +156,25 @@ func (cs *State) catchupReplay(csHeight int64) error { } defer gr.Close() - cs.Logger.Info("Catchup by replaying consensus messages", "height", csHeight) + cs.logger.Info("Catchup by replaying consensus messages", "height", csHeight) iter := newWalIter(&WALDecoder{gr}) for iter.Next() { // NOTE: since the priv key is set when the msgs are received // it will attempt to eg double sign but we can just ignore it // since the votes will be replayed and we'll get to the next step - if err := cs.readReplayMessage(iter.Value(), nil); err != nil { + if err := cs.readReplayMessage(ctx, iter.Value(), nil); err != nil { return err } } err = iter.Err() if err != nil { if IsDataCorruptionError(err) { - cs.Logger.Error("data has been corrupted in last height of consensus WAL", "err", err, "height", csHeight) + cs.logger.Error("data has been corrupted in last height of consensus WAL", "err", err, "height", csHeight) } return err } - cs.Logger.Info("Replay: Done") + cs.logger.Info("Replay: Done") return nil } @@ -210,61 +211,51 @@ func makeHeightSearchFunc(height int64) auto.SearchFunc { //--------------------------------------------------- type Handshaker struct { - stateStore sm.Store - initialState sm.State - store sm.BlockStore - eventBus types.BlockEventPublisher - genDoc *types.GenesisDoc - nodeProTxHash crypto.ProTxHash - logger log.Logger + stateStore sm.Store + initialState sm.State + store sm.BlockStore + eventBus *eventbus.EventBus + genDoc *types.GenesisDoc + logger log.Logger nBlocks int // number of blocks applied to the state - appHashSize int + appHashSize int + nodeProTxHash crypto.ProTxHash } func NewHandshaker( + logger log.Logger, stateStore sm.Store, state sm.State, store sm.BlockStore, + eventBus *eventbus.EventBus, genDoc *types.GenesisDoc, nodeProTxHash crypto.ProTxHash, appHashSize int, ) *Handshaker { - return &Handshaker{ stateStore: stateStore, initialState: state, store: store, - eventBus: types.NopEventBus{}, + eventBus: eventBus, genDoc: genDoc, - logger: log.NewNopLogger(), - nBlocks: 0, + logger: logger, appHashSize: appHashSize, nodeProTxHash: nodeProTxHash, } } -func (h *Handshaker) SetLogger(l log.Logger) { - h.logger = l -} - -// SetEventBus - sets the event bus for publishing block related events. -// If not called, it defaults to types.NopEventBus. -func (h *Handshaker) SetEventBus(eventBus types.BlockEventPublisher) { - h.eventBus = eventBus -} - // NBlocks returns the number of blocks applied to the state. func (h *Handshaker) NBlocks() int { return h.nBlocks } // TODO: retry the handshake/replay if it fails ? -func (h *Handshaker) Handshake(proxyApp proxy.AppConns) (uint64, error) { +func (h *Handshaker) Handshake(ctx context.Context, appClient abciclient.Client) (uint64, error) { // Handshake is done via ABCI Info on the query conn. 
- res, err := proxyApp.Query().InfoSync(context.Background(), proxy.RequestInfo) + res, err := appClient.Info(ctx, &proxy.RequestInfo) if err != nil { return 0, fmt.Errorf("error calling Info: %v", err) } @@ -288,9 +279,9 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) (uint64, error) { } // Replay blocks up to the latest in the blockstore. - _, err = h.ReplayBlocks(h.initialState, appHash, blockHeight, proxyApp) + _, err = h.ReplayBlocks(ctx, h.initialState, appHash, blockHeight, appClient) if err != nil { - return 0, fmt.Errorf("error on replay: %v", err) + return 0, fmt.Errorf("error on replay: %w", err) } h.logger.Info("Completed ABCI Handshake - Tendermint and App are synced", @@ -305,10 +296,11 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) (uint64, error) { // matches the current state. // Returns the final AppHash or an error. func (h *Handshaker) ReplayBlocks( + ctx context.Context, state sm.State, appHash []byte, appBlockHeight int64, - proxyApp proxy.AppConns, + appClient abciclient.Client, ) ([]byte, error) { storeBlockBase := h.store.Base() storeBlockHeight := h.store.Height() @@ -361,7 +353,7 @@ func (h *Handshaker) ReplayBlocks( return nil, errors.New("the initial core chain locked height in genesis can not be 0") } pbParams := h.genDoc.ConsensusParams.ToProto() - req := abci.RequestInitChain{ + res, err := appClient.InitChain(ctx, &abci.RequestInitChain{ Time: h.genDoc.GenesisTime, ChainId: h.genDoc.ChainID, InitialHeight: h.genDoc.InitialHeight, @@ -369,8 +361,7 @@ func (h *Handshaker) ReplayBlocks( ValidatorSet: nextVals, AppStateBytes: h.genDoc.AppState, InitialCoreHeight: h.genDoc.InitialCoreChainLockedHeight, - } - res, err := proxyApp.Consensus().InitChainSync(context.Background(), req) + }) if err != nil { return nil, fmt.Errorf("initChain error from abci: %v", err) } @@ -422,7 +413,7 @@ func (h *Handshaker) ReplayBlocks( } // If we received non-zero initial core height, we set it here - if res.InitialCoreHeight > 0 && res.InitialCoreHeight != req.InitialCoreHeight { + if res.InitialCoreHeight > 0 && res.InitialCoreHeight != h.genDoc.InitialCoreChainLockedHeight { state.LastCoreChainLockedBlockHeight = res.InitialCoreHeight h.initialState.LastCoreChainLockedBlockHeight = res.InitialCoreHeight } @@ -438,7 +429,9 @@ func (h *Handshaker) ReplayBlocks( // First handle edge cases and constraints on the storeBlockHeight and storeBlockBase.
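// The case analysis below leans on two invariants, now reported as errors rather than
// panics when violated; as a sketch, in the variable names used by this function:
//
//	stateBlockHeight <= storeBlockHeight <= stateBlockHeight+1
//	appBlockHeight   <= storeBlockHeight // the app is never ahead of the store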
switch { case storeBlockHeight == 0: - assertAppHashEqualsOneFromState(appHash, state) + if err := checkAppHashEqualsOneFromState(appHash, state); err != nil { + return nil, err + } return appHash, nil case appBlockHeight == 0 && state.InitialHeight < storeBlockBase: @@ -464,23 +457,11 @@ func (h *Handshaker) ReplayBlocks( case storeBlockHeight < stateBlockHeight: // the state should never be ahead of the store (this is under tendermint's control) - panic( - fmt.Sprintf( - "StateBlockHeight (%d) > StoreBlockHeight (%d)", - stateBlockHeight, - storeBlockHeight, - ), - ) + return nil, fmt.Errorf("StateBlockHeight (%d) > StoreBlockHeight (%d)", stateBlockHeight, storeBlockHeight) case storeBlockHeight > stateBlockHeight+1: // store should be at most one ahead of the state (this is under tendermint's control) - panic( - fmt.Sprintf( - "StoreBlockHeight (%d) > StateBlockHeight + 1 (%d)", - storeBlockHeight, - stateBlockHeight+1, - ), - ) + return nil, fmt.Errorf("StoreBlockHeight (%d) > StateBlockHeight + 1 (%d)", storeBlockHeight, stateBlockHeight+1) } var err error @@ -491,11 +472,13 @@ func (h *Handshaker) ReplayBlocks( // Either the app is asking for replay, or we're all synced up. if appBlockHeight < storeBlockHeight { // the app is behind, so replay blocks, but no need to go through WAL (state is already synced to store) - return h.replayBlocks(state, proxyApp, appBlockHeight, storeBlockHeight, false) + return h.replayBlocks(ctx, state, appClient, appBlockHeight, storeBlockHeight, false) } else if appBlockHeight == storeBlockHeight { // We're good! - assertAppHashEqualsOneFromState(appHash, state) + if err := checkAppHashEqualsOneFromState(appHash, state); err != nil { + return nil, err + } return appHash, nil } @@ -506,7 +489,7 @@ func (h *Handshaker) ReplayBlocks( case appBlockHeight < stateBlockHeight: // the app is further behind than it should be, so replay blocks // but leave the last block to go through the WAL - return h.replayBlocks(ctx, state, appClient, appBlockHeight, storeBlockHeight, true) case appBlockHeight == stateBlockHeight: // We haven't run Commit (both the state and app are one block behind), // NOTE: We could instead use the cs.WAL on cs.Start, // but we'd have to allow the WAL to replay a block that wrote its #ENDHEIGHT h.logger.Info("Replay last block using real app") - state, err = h.replayBlock(state, storeBlockHeight, proxyApp.Consensus(), proxyApp.Query()) - return state.AppHash, err + state, err = h.replayBlock(ctx, state, storeBlockHeight, appClient) + if err != nil { + return nil, err + } + return state.AppHash, nil case appBlockHeight == storeBlockHeight: // We ran Commit, but didn't save the state, so replayBlock with mock app.
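// Why a mock app here, in sketch form: Commit already ran on the real app for this
// block, so replaying it against the real app would commit the same block twice.
// Instead the stored ABCI responses are replayed through a stub; the calls below
// mirror the code that follows:
//
//	mockApp, err := newMockProxyApp(h.logger, appHash, abciResponses)
//	if err := mockApp.Start(ctx); err != nil { ... }
//	state, err = h.replayBlock(ctx, state, storeBlockHeight, mockApp)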
@@ -523,25 +509,38 @@ func (h *Handshaker) ReplayBlocks( if err != nil { return nil, err } - mockApp := newMockProxyApp(appHash, abciResponses) + mockApp, err := newMockProxyApp(h.logger, appHash, abciResponses) + if err != nil { + return nil, err + } + if err := mockApp.Start(ctx); err != nil { + return nil, err + } + h.logger.Info("Replay last block using mock app") // TODO: we could optimize by passing a mockValidationApp since all signatures were already verified - state, err = h.replayBlock(state, storeBlockHeight, mockApp, proxyApp.Query()) - return state.AppHash, err + state, err = h.replayBlock(ctx, state, storeBlockHeight, mockApp) + if err != nil { + return nil, err + } + + return state.AppHash, nil } } - panic(fmt.Sprintf("uncovered case! appHeight: %d, storeHeight: %d, stateHeight: %d", - appBlockHeight, storeBlockHeight, stateBlockHeight)) + return nil, fmt.Errorf("uncovered case! appHeight: %d, storeHeight: %d, stateHeight: %d", + appBlockHeight, storeBlockHeight, stateBlockHeight) } func (h *Handshaker) replayBlocks( + ctx context.Context, state sm.State, - proxyApp proxy.AppConns, + appClient abciclient.Client, appBlockHeight, storeBlockHeight int64, - mutateState bool) ([]byte, error) { + mutateState bool, +) ([]byte, error) { // App is further behind than it should be, so we need to replay blocks. // We replay all blocks from appBlockHeight+1. // @@ -567,23 +566,23 @@ func (h *Handshaker) replayBlocks( block := h.store.LoadBlock(i) // Extra check to ensure the app was not changed in a way it shouldn't have. if len(appHash) > 0 { - assertAppHashEqualsOneFromBlock(appHash, block) + if err := checkAppHashEqualsOneFromBlock(appHash, block); err != nil { + return nil, err + } } if i == finalBlock && !mutateState { // We emit events for the indexing services at the final block because of the sync issue that // occurs when the node shuts down while the block is being committed. - blockExec := sm.NewBlockExecutor( - h.stateStore, h.logger, proxyApp.Consensus(), proxyApp.Query(), emptyMempool{}, sm.EmptyEvidencePool{}, h.store, nil) - blockExec.SetEventBus(h.eventBus) - appHash, err = sm.ExecCommitBlock( - blockExec, proxyApp.Consensus(), block, h.logger, h.stateStore, h.genDoc.InitialHeight, state) + blockExec := sm.NewBlockExecutor(h.stateStore, h.logger, appClient, emptyMempool{}, sm.EmptyEvidencePool{}, h.store, h.eventBus, sm.NopMetrics()) + appHash, err = sm.ExecCommitBlock(ctx, + blockExec, appClient, block, h.logger, h.stateStore, h.genDoc.InitialHeight, state) if err != nil { return nil, err } } else { - appHash, err = sm.ExecCommitBlock( - nil, proxyApp.Consensus(), block, h.logger, h.stateStore, h.genDoc.InitialHeight, state) + appHash, err = sm.ExecCommitBlock(ctx, + nil, appClient, block, h.logger, h.stateStore, h.genDoc.InitialHeight, state) if err != nil { return nil, err } @@ -594,20 +593,26 @@ func (h *Handshaker) replayBlocks( if mutateState { // sync the final block - state, err = h.replayBlock(state, storeBlockHeight, proxyApp.Consensus(), proxyApp.Query()) + state, err = h.replayBlock(ctx, state, storeBlockHeight, appClient) if err != nil { return nil, err } appHash = state.AppHash } - assertAppHashEqualsOneFromState(appHash, state) + if err := checkAppHashEqualsOneFromState(appHash, state); err != nil { + return nil, err + } return appHash, nil } // ApplyBlock on the proxyApp with the last block.
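// Note on the BlockExecutor wiring used in both replay paths above: the event bus and
// metrics moved into the constructor, and the app-hash size is now applied with a setter
// instead of a functional option. A sketch with the argument order used in this file:
//
//	blockExec := sm.NewBlockExecutor(
//		h.stateStore, h.logger, appClient,
//		emptyMempool{}, sm.EmptyEvidencePool{},
//		h.store, h.eventBus, sm.NopMetrics(),
//	)
//	blockExec.SetAppHashSize(h.appHashSize)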
-func (h *Handshaker) replayBlock(state sm.State, height int64, proxyApp proxy.AppConnConsensus, - proxyAppQuery proxy.AppConnQuery) (sm.State, error) { +func (h *Handshaker) replayBlock( + ctx context.Context, + state sm.State, + height int64, + appClient abciclient.Client, +) (sm.State, error) { block := h.store.LoadBlock(height) meta := h.store.LoadBlockMeta(height) @@ -616,18 +621,17 @@ func (h *Handshaker) replayBlock(state sm.State, height int64, proxyApp proxy.Ap blockExec := sm.NewBlockExecutor( h.stateStore, h.logger, - proxyApp, - proxyAppQuery, + appClient, emptyMempool{}, sm.EmptyEvidencePool{}, h.store, - nil, - sm.BlockExecutorWithAppHashSize(h.appHashSize), + h.eventBus, + sm.NopMetrics(), ) - blockExec.SetEventBus(h.eventBus) + blockExec.SetAppHashSize(h.appHashSize) var err error - state, err = blockExec.ApplyBlock(state, h.nodeProTxHash, meta.BlockID, block) + state, err = blockExec.ApplyBlock(ctx, state, h.nodeProTxHash, meta.BlockID, block) if err != nil { return sm.State{}, err } @@ -637,24 +641,25 @@ func (h *Handshaker) replayBlock(state sm.State, height int64, proxyApp proxy.Ap return state, nil } -func assertAppHashEqualsOneFromBlock(appHash []byte, block *types.Block) { +func checkAppHashEqualsOneFromBlock(appHash []byte, block *types.Block) error { if !bytes.Equal(appHash, block.AppHash) { - panic(fmt.Sprintf(`block.AppHash does not match AppHash after replay. Got %X, expected %X. + return fmt.Errorf(`block.AppHash does not match AppHash after replay. Got '%X', expected '%X'. -Block: %v -`, - appHash, block.AppHash, block)) +Block: %v`, + appHash, block.AppHash, block) } + return nil } -func assertAppHashEqualsOneFromState(appHash []byte, state sm.State) { +func checkAppHashEqualsOneFromState(appHash []byte, state sm.State) error { if !bytes.Equal(appHash, state.AppHash) { - panic(fmt.Sprintf(`state.AppHash does not match AppHash after replay. Got -%X, expected %X. + return fmt.Errorf(`state.AppHash does not match AppHash after replay. Got '%X', expected '%X'. 
State: %v Did you reset Tendermint without resetting your application's data?`, - appHash, state.AppHash, state)) + appHash, state.AppHash, state) } + + return nil } diff --git a/internal/consensus/replay_file.go b/internal/consensus/replay_file.go index f2645287a7..be000f4837 100644 --- a/internal/consensus/replay_file.go +++ b/internal/consensus/replay_file.go @@ -13,12 +13,12 @@ import ( dbm "github.com/tendermint/tm-db" "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/proxy" + tmpubsub "github.com/tendermint/tendermint/internal/pubsub" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/libs/log" - tmos "github.com/tendermint/tendermint/libs/os" - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" "github.com/tendermint/tendermint/types" ) @@ -31,16 +31,27 @@ const ( // replay messages interactively or all at once // replay the wal file -func RunReplayFile(cfg config.BaseConfig, csConfig *config.ConsensusConfig, console bool) { - consensusState := newConsensusStateForReplay(cfg, csConfig) +func RunReplayFile( + ctx context.Context, + logger log.Logger, + cfg config.BaseConfig, + csConfig *config.ConsensusConfig, + console bool, +) error { + consensusState, err := newConsensusStateForReplay(ctx, cfg, logger, csConfig) + if err != nil { + return err + } - if err := consensusState.ReplayFile(csConfig.WalFile(), console); err != nil { - tmos.Exit(fmt.Sprintf("Error during consensus replay: %v", err)) + if err := consensusState.ReplayFile(ctx, csConfig.WalFile(), console); err != nil { + return fmt.Errorf("consensus replay: %w", err) } + + return nil } // Replay msgs in file or start the console -func (cs *State) ReplayFile(file string, console bool) error { +func (cs *State) ReplayFile(ctx context.Context, file string, console bool) error { if cs.IsRunning() { return errors.New("cs is already running, cannot replay") @@ -53,15 +64,17 @@ func (cs *State) ReplayFile(file string, console bool) error { // ensure all new step events are regenerated as expected - ctx := context.Background() - newStepSub, err := cs.eventBus.Subscribe(ctx, subscriber, types.EventQueryNewRoundStep) + newStepSub, err := cs.eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: subscriber, + Query: types.EventQueryNewRoundStep, + }) if err != nil { return fmt.Errorf("failed to subscribe %s to %v", subscriber, types.EventQueryNewRoundStep) } defer func() { args := tmpubsub.UnsubscribeArgs{Subscriber: subscriber, Query: types.EventQueryNewRoundStep} if err := cs.eventBus.Unsubscribe(ctx, args); err != nil { - cs.Logger.Error("Error unsubscribing to event bus", "err", err) + cs.logger.Error("error unsubscribing from event bus", "err", err) } }() @@ -71,14 +84,17 @@ func (cs *State) ReplayFile(file string, console bool) error { return err } - pb := newPlayback(file, fp, cs, cs.state.Copy()) + pb := newPlayback(file, fp, cs, cs.stateStore) defer pb.fp.Close() var nextN int // apply N msgs in a row var msg *TimedWALMessage for { if nextN == 0 && console { - nextN = pb.replayConsoleLoop() + nextN, err = pb.replayConsoleLoop(ctx) + if err != nil { + return err + } } msg, err = pb.dec.Decode() @@ -88,7 +104,7 @@ func (cs *State) ReplayFile(file string, console bool) error { return err } - if err := pb.cs.readReplayMessage(msg, newStepSub); err != nil { + if err := pb.cs.readReplayMessage(ctx, msg, newStepSub); err != nil { return err }
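// The replay loop above, reduced to its shape (a sketch; error handling elided):
//
//	for {
//		if nextN == 0 && console {
//			nextN, err = pb.replayConsoleLoop(ctx) // ask how many msgs to apply
//		}
//		msg, err = pb.dec.Decode() // io.EOF ends the session
//		if err := pb.cs.readReplayMessage(ctx, msg, newStepSub); err != nil { ... }
//		if nextN > 0 {
//			nextN--
//		}
//		pb.count++
//	}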
@@ -110,29 +126,30 @@ type playback struct { count int // how many lines/msgs into the file are we // replays can be reset to beginning - fileName string // so we can close/reopen the file - genesisState sm.State // so the replay session knows where to restart from + fileName string // so we can close/reopen the file + stateStore sm.Store } -func newPlayback(fileName string, fp *os.File, cs *State, genState sm.State) *playback { +func newPlayback(fileName string, fp *os.File, cs *State, store sm.Store) *playback { return &playback{ - cs: cs, - fp: fp, - fileName: fileName, - genesisState: genState, - dec: NewWALDecoder(fp), + cs: cs, + fp: fp, + fileName: fileName, + stateStore: store, + dec: NewWALDecoder(fp), } } // go back count steps by resetting the state and running (pb.count - count) steps -func (pb *playback) replayReset(count int, newStepSub types.Subscription) error { - if err := pb.cs.Stop(); err != nil { +func (pb *playback) replayReset(ctx context.Context, count int, newStepSub eventbus.Subscription) error { + pb.cs.Stop() + pb.cs.Wait() + + newCS, err := NewState(pb.cs.logger, pb.cs.config, pb.stateStore, pb.cs.blockExec, + pb.cs.blockStore, pb.cs.txNotifier, pb.cs.evpool, pb.cs.eventBus) + if err != nil { return err } - pb.cs.Wait() - newCS := NewStateWithLogger(pb.cs.config, pb.genesisState.Copy(), pb.cs.blockExec, - pb.cs.blockStore, pb.cs.txNotifier, pb.cs.evpool, pb.cs.Logger, 0) - newCS.SetEventBus(pb.cs.eventBus) newCS.startForReplay() if err := pb.fp.Close(); err != nil { @@ -156,7 +173,7 @@ func (pb *playback) replayReset(count int, newStepSub types.Subscription) error } else if err != nil { return err } - if err := pb.cs.readReplayMessage(msg, newStepSub); err != nil { + if err := pb.cs.readReplayMessage(ctx, msg, newStepSub); err != nil { return err } pb.count++ @@ -165,30 +182,20 @@ func (pb *playback) replayReset(count int, newStepSub types.Subscription) error } func (cs *State) startForReplay() { - cs.Logger.Error("Replay commands are disabled until someone updates them and writes tests") - /* TODO:! - // since we replay tocks we just ignore ticks - go func() { - for { - select { - case <-cs.tickChan: - case <-cs.Quit: - return - } - } - }()*/ + cs.logger.Error("Replay commands are disabled until someone updates them and writes tests") } -// console function for parsing input and running commands -func (pb *playback) replayConsoleLoop() int { +// console function for parsing input and running commands. The integer +// return value is invalid unless the error is nil. 
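// An illustrative console session (only the commands visible in this file are shown;
// output elided):
//
//	> next 5   // apply the next 5 WAL messages
//	> next     // apply messages one at a time
//	> back 2   // reset the state and replay up to 2 steps back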
+func (pb *playback) replayConsoleLoop(ctx context.Context) (int, error) { for { fmt.Printf("> ") bufReader := bufio.NewReader(os.Stdin) line, more, err := bufReader.ReadLine() if more { - tmos.Exit("input is too long") + return 0, errors.New("input is too long") } else if err != nil { - tmos.Exit(err.Error()) + return 0, err } tokens := strings.Split(string(line), " ") @@ -202,13 +209,13 @@ func (pb *playback) replayConsoleLoop() int { // "next N" -> replay next N messages if len(tokens) == 1 { - return 0 + return 0, nil } i, err := strconv.Atoi(tokens[1]) if err != nil { fmt.Println("next takes an integer argument") } else { - return i + return i, nil } case "back": @@ -218,33 +225,25 @@ func (pb *playback) replayConsoleLoop() int { // NOTE: "back" is not supported in the state machine design, // so we restart and replay up to - ctx := context.Background() // ensure all new step events are regenerated as expected - newStepSub, err := pb.cs.eventBus.Subscribe( - ctx, - subscriber, - types.EventQueryNewRoundStep, - ) + newStepSub, err := pb.cs.eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: subscriber, + Query: types.EventQueryNewRoundStep, + }) if err != nil { - tmos.Exit( - fmt.Sprintf( - "failed to subscribe %s to %v", - subscriber, - types.EventQueryNewRoundStep, - ), - ) + return 0, fmt.Errorf("failed to subscribe %s to %v", subscriber, types.EventQueryNewRoundStep) } defer func() { args := tmpubsub.UnsubscribeArgs{Subscriber: subscriber, Query: types.EventQueryNewRoundStep} if err := pb.cs.eventBus.Unsubscribe(ctx, args); err != nil { - pb.cs.Logger.Error("Error unsubscribing from eventBus", "err", err) + pb.cs.logger.Error("error unsubscribing from eventBus", "err", err) } }() if len(tokens) == 1 { - if err := pb.replayReset(1, newStepSub); err != nil { - pb.cs.Logger.Error("Replay reset error", "err", err) + if err := pb.replayReset(ctx, 1, newStepSub); err != nil { + pb.cs.logger.Error("Replay reset error", "err", err) } } else { i, err := strconv.Atoi(tokens[1]) @@ -252,8 +251,8 @@ func (pb *playback) replayConsoleLoop() int { fmt.Println("back takes an integer argument") } else if i > pb.count { fmt.Printf("argument to back must not be larger than the current count (%d)\n", pb.count) - } else if err := pb.replayReset(i, newStepSub); err != nil { - pb.cs.Logger.Error("Replay reset error", "err", err) + } else if err := pb.replayReset(ctx, i, newStepSub); err != nil { + pb.cs.logger.Error("Replay reset error", "err", err) } } @@ -295,67 +294,77 @@ func (pb *playback) replayConsoleLoop() int { //-------------------------------------------------------------------------------- // convenience for replay mode -func newConsensusStateForReplay(cfg config.BaseConfig, csConfig *config.ConsensusConfig) *State { +func newConsensusStateForReplay( + ctx context.Context, + cfg config.BaseConfig, + logger log.Logger, + csConfig *config.ConsensusConfig, +) (*State, error) { dbType := dbm.BackendType(cfg.DBBackend) // Get BlockStore blockStoreDB, err := dbm.NewDB("blockstore", dbType, cfg.DBDir()) if err != nil { - tmos.Exit(err.Error()) + return nil, err } blockStore := store.NewBlockStore(blockStoreDB) // Get State stateDB, err := dbm.NewDB("state", dbType, cfg.DBDir()) if err != nil { - tmos.Exit(err.Error()) + return nil, err } + stateStore := sm.NewStore(stateDB) gdoc, err := sm.MakeGenesisDocFromFile(cfg.GenesisFile()) if err != nil { - tmos.Exit(err.Error()) + return nil, err } + state, err := sm.MakeGenesisState(gdoc) if err != nil { - tmos.Exit(err.Error()) + return 
nil, err + } + + client, _, err := proxy.ClientFactory(logger, cfg.ProxyApp, cfg.ABCI, cfg.DBDir()) + if err != nil { + return nil, err } - // Create proxyAppConn connection (consensus, mempool, query) - clientCreator, _ := proxy.DefaultClientCreator(cfg.ProxyApp, cfg.ABCI, cfg.DBDir()) - proxyApp := proxy.NewAppConns(clientCreator, proxy.NopMetrics()) - err = proxyApp.Start() + proxyApp := proxy.New(client, logger, proxy.NopMetrics()) + err = proxyApp.Start(ctx) if err != nil { - tmos.Exit(fmt.Sprintf("Error starting proxy app conns: %v", err)) + return nil, fmt.Errorf("starting proxy app conns: %w", err) } - eventBus := types.NewEventBus() - if err := eventBus.Start(); err != nil { - tmos.Exit(fmt.Sprintf("Failed to start event bus: %v", err)) + eventBus := eventbus.NewDefault(logger) + if err := eventBus.Start(ctx); err != nil { + return nil, fmt.Errorf("failed to start event bus: %w", err) } // We should be able to just pass nil here for the node pro tx hash - handshaker := NewHandshaker(stateStore, state, blockStore, gdoc, nil, csConfig.AppHashSize) - handshaker.SetEventBus(eventBus) - _, err = handshaker.Handshake(proxyApp) + handshaker := NewHandshaker(logger, stateStore, state, blockStore, eventBus, gdoc, nil, csConfig.AppHashSize) + _, err = handshaker.Handshake(ctx, proxyApp) if err != nil { - tmos.Exit(fmt.Sprintf("Error on handshake: %v", err)) + return nil, err } mempool, evpool := emptyMempool{}, sm.EmptyEvidencePool{} blockExec := sm.NewBlockExecutor( stateStore, - log.TestingLogger(), - proxyApp.Consensus(), - proxyApp.Query(), + logger, + proxyApp, mempool, evpool, blockStore, - nil, - sm.BlockExecutorWithAppHashSize(csConfig.AppHashSize), + eventBus, + sm.NopMetrics(), ) + blockExec.SetAppHashSize(csConfig.AppHashSize) - consensusState := NewState(csConfig, state.Copy(), blockExec, - blockStore, mempool, evpool) - - consensusState.SetEventBus(eventBus) - return consensusState + consensusState, err := NewState(logger, csConfig, stateStore, blockExec, + blockStore, mempool, evpool, eventBus) + if err != nil { + return nil, err + } + return consensusState, nil } diff --git a/internal/consensus/replay_stubs.go b/internal/consensus/replay_stubs.go index 1235baccbf..3cd5bdac03 100644 --- a/internal/consensus/replay_stubs.go +++ b/internal/consensus/replay_stubs.go @@ -8,6 +8,7 @@ import ( "github.com/tendermint/tendermint/internal/libs/clist" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/proxy" + "github.com/tendermint/tendermint/libs/log" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -21,26 +22,27 @@ var _ mempool.Mempool = emptyMempool{} func (emptyMempool) Lock() {} func (emptyMempool) Unlock() {} func (emptyMempool) Size() int { return 0 } -func (emptyMempool) CheckTx(_ context.Context, _ types.Tx, _ func(*abci.Response), _ mempool.TxInfo) error { +func (emptyMempool) CheckTx(context.Context, types.Tx, func(*abci.ResponseCheckTx), mempool.TxInfo) error { return nil } func (emptyMempool) RemoveTxByKey(txKey types.TxKey) error { return nil } func (emptyMempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} } func (emptyMempool) ReapMaxTxs(n int) types.Txs { return types.Txs{} } func (emptyMempool) Update( + _ context.Context, _ int64, _ types.Txs, - _ []*abci.ResponseDeliverTx, + _ []*abci.ExecTxResult, _ mempool.PreCheckFunc, _ mempool.PostCheckFunc, ) error { return nil } -func (emptyMempool) Flush() {} -func (emptyMempool) FlushAppConn() error 
{ return nil } -func (emptyMempool) TxsAvailable() <-chan struct{} { return make(chan struct{}) } -func (emptyMempool) EnableTxsAvailable() {} -func (emptyMempool) SizeBytes() int64 { return 0 } +func (emptyMempool) Flush() {} +func (emptyMempool) FlushAppConn(ctx context.Context) error { return nil } +func (emptyMempool) TxsAvailable() <-chan struct{} { return make(chan struct{}) } +func (emptyMempool) EnableTxsAvailable() {} +func (emptyMempool) SizeBytes() int64 { return 0 } func (emptyMempool) TxsFront() *clist.CElement { return nil } func (emptyMempool) TxsWaitChan() <-chan struct{} { return nil } @@ -54,17 +56,15 @@ func (emptyMempool) CloseWAL() {} // Useful because we don't want to call Commit() twice for the same block on // the real app. -func newMockProxyApp(appHash []byte, abciResponses *tmstate.ABCIResponses) proxy.AppConnConsensus { - clientCreator := abciclient.NewLocalCreator(&mockProxyApp{ +func newMockProxyApp( + logger log.Logger, + appHash []byte, + abciResponses *tmstate.ABCIResponses, +) (abciclient.Client, error) { + return proxy.New(abciclient.NewLocalClient(logger, &mockProxyApp{ appHash: appHash, abciResponses: abciResponses, - }) - cli, _ := clientCreator() - err := cli.Start() - if err != nil { - panic(err) - } - return proxy.NewAppConnConsensus(cli, proxy.NopMetrics()) + }), logger, proxy.NopMetrics()), nil } type mockProxyApp struct { @@ -75,20 +75,15 @@ type mockProxyApp struct { abciResponses *tmstate.ABCIResponses } -func (mock *mockProxyApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx { - r := mock.abciResponses.DeliverTxs[mock.txCount] +func (mock *mockProxyApp) FinalizeBlock(_ context.Context, req *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) { + r := mock.abciResponses.FinalizeBlock mock.txCount++ if r == nil { - return abci.ResponseDeliverTx{} + return &abci.ResponseFinalizeBlock{}, nil } - return *r -} - -func (mock *mockProxyApp) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock { - mock.txCount = 0 - return *mock.abciResponses.EndBlock + return r, nil } -func (mock *mockProxyApp) Commit() abci.ResponseCommit { - return abci.ResponseCommit{Data: mock.appHash} +func (mock *mockProxyApp) Commit(context.Context) (*abci.ResponseCommit, error) { + return &abci.ResponseCommit{Data: mock.appHash}, nil } diff --git a/internal/consensus/replay_test.go b/internal/consensus/replay_test.go index 67dd30f5cb..c21aa97561 100644 --- a/internal/consensus/replay_test.go +++ b/internal/consensus/replay_test.go @@ -3,18 +3,16 @@ package consensus import ( "bytes" "context" + "errors" "fmt" "io" - "io/ioutil" - "math/rand" "os" - "path/filepath" "runtime" - "sort" "testing" "time" "github.com/dashevo/dashd-go/btcjson" + "github.com/fortytw2/leaktest" "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -25,15 +23,16 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" - mempl "github.com/tendermint/tendermint/internal/mempool" + "github.com/tendermint/tendermint/internal/eventbus" + "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/proxy" + "github.com/tendermint/tendermint/internal/pubsub" sm "github.com/tendermint/tendermint/internal/state" sf "github.com/tendermint/tendermint/internal/state/test/factory" "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/libs/log" tmrand 
"github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/privval" - tmstate "github.com/tendermint/tendermint/proto/tendermint/state" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" ) @@ -53,62 +52,63 @@ import ( // and which ones we need the wal for - then we'd also be able to only flush the // wal writer when we need to, instead of with every message. -func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *config.Config, +func startNewStateAndWaitForBlock(ctx context.Context, t *testing.T, consensusReplayConfig *config.Config, lastBlockHeight int64, blockDB dbm.DB, stateStore sm.Store) { - logger := log.TestingLogger() + logger := log.NewNopLogger() state, err := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile()) require.NoError(t, err) - privValidator := loadPrivValidator(consensusReplayConfig) + privValidator := loadPrivValidator(t, consensusReplayConfig) blockStore := store.NewBlockStore(dbm.NewMemDB()) cs := newStateWithConfigAndBlockStore( + ctx, + t, + logger, consensusReplayConfig, state, privValidator, kvstore.NewApplication(), blockStore, ) - cs.SetLogger(logger) - bytes, _ := ioutil.ReadFile(cs.config.WalFile()) - t.Logf("====== WAL: \n\r%X\n", bytes) - - err = cs.Start() + bytes, err := os.ReadFile(cs.config.WalFile()) require.NoError(t, err) + require.NotNil(t, bytes) + + require.NoError(t, cs.Start(ctx)) defer func() { - if err := cs.Stop(); err != nil { - t.Error(err) - } + cs.Stop() }() - + t.Cleanup(cs.Wait) // This is just a signal that we haven't halted; its not something contained // in the WAL itself. Assuming the consensus state is running, replay of any // WAL, including the empty one, should eventually be followed by a new // block, or else something is wrong. 
- newBlockSub, err := cs.eventBus.Subscribe( - context.Background(), - testSubscriber, - types.EventQueryNewBlock, - ) + newBlockSub, err := cs.eventBus.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: testSubscriber, + Query: types.EventQueryNewBlock, + }) require.NoError(t, err) - select { - case <-newBlockSub.Out(): - case <-newBlockSub.Canceled(): - t.Fatal("newBlockSub was canceled") - case <-time.After(240 * time.Second): + ctxto, cancel := context.WithTimeout(ctx, 120*time.Second) + defer cancel() + _, err = newBlockSub.Next(ctxto) + if errors.Is(err, context.DeadlineExceeded) { t.Fatal("Timed out waiting for new block (see trace above)") + } else if err != nil { + t.Fatal("newBlockSub was canceled") } } -func sendTxs(ctx context.Context, cs *State) { +func sendTxs(ctx context.Context, t *testing.T, cs *State) { + t.Helper() for i := 0; i < 256; i++ { select { case <-ctx.Done(): return default: tx := []byte{byte(i)} - if err := assertMempool(cs.txNotifier).CheckTx(context.Background(), tx, nil, mempl.TxInfo{}); err != nil { - panic(err) - } + + require.NoError(t, assertMempool(t, cs.txNotifier).CheckTx(ctx, tx, nil, mempool.TxInfo{})) + i++ } } @@ -126,7 +126,7 @@ func TestWALCrash(t *testing.T) { 1}, {"many non-empty blocks", func(stateDB dbm.DB, cs *State, ctx context.Context) { - go sendTxs(ctx, cs) + go sendTxs(ctx, t, cs) }, 3}, } @@ -134,14 +134,17 @@ for _, tc := range testCases { tc := tc t.Run(tc.name, func(t *testing.T) { - consensusReplayConfig, err := ResetConfig(tc.name) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + consensusReplayConfig, err := ResetConfig(t.TempDir(), tc.name) require.NoError(t, err) - crashWALandCheckLiveness(t, consensusReplayConfig, tc.initFn, tc.heightToStop) + crashWALandCheckLiveness(ctx, t, consensusReplayConfig, tc.initFn, tc.heightToStop) }) } } -func crashWALandCheckLiveness(t *testing.T, consensusReplayConfig *config.Config, +func crashWALandCheckLiveness(rctx context.Context, t *testing.T, consensusReplayConfig *config.Config, initFn func(dbm.DB, *State, context.Context), heightToStop int64) { walPanicked := make(chan error) crashingWal := &crashingWAL{panicCh: walPanicked, heightToStop: heightToStop} @@ -149,8 +152,6 @@ i := 1 LOOP: for { - t.Logf("====== LOOP %d\n", i) - // create consensus state from a clean slate logger := log.NewNopLogger() blockDB := dbm.NewMemDB() @@ -159,18 +160,26 @@ LOOP: blockStore := store.NewBlockStore(blockDB) state, err := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile()) require.NoError(t, err) - privValidator := loadPrivValidator(consensusReplayConfig) + + // The default propose timeout (30ms) is not enough: if it is not increased, + // the test is likely to get stuck on the proposal step, because the timeout + // handler fires before the validators are ready for the message. + state.ConsensusParams.Timeout.Propose = 1 * time.Second + + privValidator := loadPrivValidator(t, consensusReplayConfig) cs := newStateWithConfigAndBlockStore( + rctx, + t, + logger, consensusReplayConfig, state, privValidator, kvstore.NewApplication(), blockStore, ) - cs.SetLogger(logger) // start sending transactions - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(rctx) initFn(stateDB, cs, ctx) // clean up WAL file from the previous iteration @@ -178,7 +187,7 @@ LOOP: os.Remove(walFile) // set 
crashing WAL - csWal, err := cs.OpenWAL(walFile) + csWal, err := cs.OpenWAL(ctx, walFile) require.NoError(t, err) crashingWal.next = csWal @@ -187,20 +196,20 @@ LOOP: cs.wal = crashingWal // start consensus state - err = cs.Start() + err = cs.Start(ctx) require.NoError(t, err) i++ select { + case <-rctx.Done(): + t.Fatal("context canceled before test completed") case err := <-walPanicked: - t.Logf("WAL panicked: %v", err) - // make sure we can make blocks after a crash - startNewStateAndWaitForBlock(t, consensusReplayConfig, cs.Height, blockDB, stateStore) + startNewStateAndWaitForBlock(ctx, t, consensusReplayConfig, cs.Height, blockDB, stateStore) // stop consensus state and transactions sender (initFn) - cs.Stop() //nolint:errcheck // Logging this error causes failure + cs.Stop() cancel() // if we reached the required height, exit @@ -283,9 +292,9 @@ func (w *crashingWAL) SearchForEndHeight( return w.next.SearchForEndHeight(height, options) } -func (w *crashingWAL) Start() error { return w.next.Start() } -func (w *crashingWAL) Stop() error { return w.next.Stop() } -func (w *crashingWAL) Wait() { w.next.Wait() } +func (w *crashingWAL) Start(ctx context.Context) error { return w.next.Start(ctx) } +func (w *crashingWAL) Stop() { w.next.Stop() } +func (w *crashingWAL) Wait() { w.next.Wait() } //------------------------------------------------------------------------------------------ type simulatorTestSuite struct { @@ -295,7 +304,7 @@ type simulatorTestSuite struct { Commits []*types.Commit CleanupFunc cleanupFunc - Mempool mempl.Mempool + Mempool mempool.Mempool Evpool sm.EvidencePool } @@ -312,21 +321,22 @@ const ( // 3 - save block and committed with truncated block store and state behind var modes = []uint{0, 1, 2, 3} -func findProposer(validatorStubs []*validatorStub, proTxHash crypto.ProTxHash) *validatorStub { +func findProposer(ctx context.Context, t *testing.T, validatorStubs []*validatorStub, proTxHash crypto.ProTxHash) *validatorStub { for _, validatorStub := range validatorStubs { - valProTxHash, _ := validatorStub.GetProTxHash(context.Background()) + valProTxHash, err := validatorStub.GetProTxHash(ctx) + require.NoError(t, err) if bytes.Equal(valProTxHash, proTxHash) { return validatorStub } } - panic("validator not found") + t.Error("validator not found") + return nil } // This is actually not a test, it's for storing validator change tx data for testHandshakeReplay -func setupSimulator(t *testing.T) *simulatorTestSuite { +func setupSimulator(ctx context.Context, t *testing.T) *simulatorTestSuite { t.Helper() cfg := configSetup(t) - ctx := context.Background() sim := &simulatorTestSuite{ Mempool: emptyMempool{}, @@ -336,54 +346,49 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { nPeers := 7 nVals := 4 css, genDoc, cfg, cleanup := randConsensusNetWithPeers( + ctx, + t, cfg, nVals, nPeers, "replay_test", newMockTickerFunc(true), - newPersistentKVStoreWithPath) - fmt.Printf("initial quorum hash is %X\n", genDoc.QuorumHash) + newEpehemeralKVStore) + t.Logf("genesis quorum hash is %X\n", genDoc.QuorumHash) sim.Config = cfg - sim.GenesisState, _ = sm.MakeGenesisState(genDoc) - sim.CleanupFunc = cleanup + defer func() { t.Cleanup(cleanup) }() + + var err error + sim.GenesisState, err = sm.MakeGenesisState(genDoc) + require.NoError(t, err) partSize := types.BlockPartSizeBytes - newRoundCh := subscribe(css[0].eventBus, types.EventQueryNewRound) - proposalCh := subscribe(css[0].eventBus, types.EventQueryCompleteProposal) + newRoundCh := subscribe(ctx, t, css[0].eventBus, 
types.EventQueryNewRound) + proposalCh := subscribe(ctx, t, css[0].eventBus, types.EventQueryCompleteProposal) vss := make([]*validatorStub, nPeers) for i := 0; i < nPeers; i++ { - vss[i] = newValidatorStub(css[i].privValidator, int32(i), genDoc.InitialHeight) + vss[i] = newValidatorStub(css[i].privValidator, int32(i), 0) } height, round := css[0].Height, css[0].Round // start the machine; note height should be equal to InitialHeight here, // so we don't need to increment it - startTestRound(css[0], height, round) - ensureNewRound(newRoundCh, height, 0) - ensureNewProposal(proposalCh, height, round) + startTestRound(ctx, css[0], height, round) + incrementHeight(vss...) + ensureNewRound(t, newRoundCh, height, 0) + ensureNewProposal(t, proposalCh, height, round) + rs := css[0].GetRoundState() // Stop auto proposing blocks, as this could lead to issues based on the // randomness of proposer selection css[0].config.DontAutoPropose = true - rs := css[0].GetRoundState() - signAddVotes( - sim.Config, - css[0], - tmproto.PrevoteType, - rs.ProposalBlock.Hash(), - rs.ProposalBlockParts.Header(), - vss[1:nVals]...) - signAddVotes( - sim.Config, - css[0], - tmproto.PrecommitType, - rs.ProposalBlock.Hash(), - rs.ProposalBlockParts.Header(), - vss[1:nVals]...) - ensureNewRound(newRoundCh, height+1, 0) + blockID := types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()} + signAddVotes(ctx, t, css[0], tmproto.PrecommitType, sim.Config.ChainID(), blockID, vss[1:nVals]...) + + ensureNewRound(t, newRoundCh, height+1, 0) sqm, err := newStateQuorumManager(css) require.NoError(t, err) @@ -394,15 +399,15 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { height++ incrementHeight(vss...) - err = assertMempool(css[0].txNotifier).CheckTx(ctx, hvsu2.tx, nil, mempl.TxInfo{}) + err = assertMempool(t, css[0].txNotifier).CheckTx(ctx, hvsu2.tx, nil, mempool.TxInfo{}) assert.Nil(t, err) - propBlock, _ := css[0].createProposalBlock() // changeProposer(t, cs1, vs2) - propBlockParts := propBlock.MakePartSet(partSize) - blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} - // stateID := types.StateID{LastAppHash: css[0].state.AppHash} + propBlock, _ := css[0].createProposalBlock(ctx) // changeProposer(t, cs1, vs2) + propBlockParts, err := propBlock.MakePartSet(partSize) + require.NoError(t, err) + blockID = types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} - proposal := types.NewProposal(vss[1].Height, 1, round, -1, blockID) + proposal := types.NewProposal(vss[1].Height, 1, round, -1, blockID, propBlock.Header.Time) p := proposal.ToProto() if _, err := vss[1].SignProposal(ctx, cfg.ChainID(), genDoc.QuorumType, genDoc.QuorumHash, p); err != nil { t.Fatal("failed to sign bad proposal", err) @@ -411,35 +416,25 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { // set the proposal block to state on node 0, this will result in a signed prevote, // so we do not need to prevote with it again (hence the vss[1:nVals]) - if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { + if err := css[0].SetProposalAndBlock(ctx, proposal, propBlock, propBlockParts, "some peer"); err != nil { t.Fatal(err) } - ensureNewProposal(proposalCh, height, round) + ensureNewProposal(t, proposalCh, height, round) rs = css[0].GetRoundState() - signAddVotes( - sim.Config, - css[0], - tmproto.PrevoteType, - rs.ProposalBlock.Hash(), - rs.ProposalBlockParts.Header(), - vss[1:nVals]...) 
- signAddVotes( - sim.Config, - css[0], - tmproto.PrecommitType, - rs.ProposalBlock.Hash(), - rs.ProposalBlockParts.Header(), + signAddVotes(ctx, t, css[0], tmproto.PrecommitType, sim.Config.ChainID(), + types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()}, vss[1:nVals]...) - ensureNewRound(newRoundCh, height+1, 0) + ensureNewRound(t, newRoundCh, height+1, 0) // HEIGHT 3 height++ incrementHeight(vss...) - propBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2) - propBlockParts = propBlock.MakePartSet(partSize) + propBlock, _ = css[0].createProposalBlock(ctx) // changeProposer(t, cs1, vs2) + propBlockParts, err = propBlock.MakePartSet(partSize) + require.NoError(t, err) blockID = types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} - proposal = types.NewProposal(vss[2].Height, 1, round, -1, blockID) + proposal = types.NewProposal(vss[2].Height, 1, round, -1, blockID, propBlock.Header.Time) p = proposal.ToProto() if _, err := vss[2].SignProposal(ctx, cfg.ChainID(), genDoc.QuorumType, genDoc.QuorumHash, p); err != nil { t.Fatal("failed to sign bad proposal", err) @@ -447,26 +442,15 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { proposal.Signature = p.Signature // set the proposal block - if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { + if err := css[0].SetProposalAndBlock(ctx, proposal, propBlock, propBlockParts, "some peer"); err != nil { t.Fatal(err) } - ensureNewProposal(proposalCh, height, round) + ensureNewProposal(t, proposalCh, height, round) rs = css[0].GetRoundState() - signAddVotes( - sim.Config, - css[0], - tmproto.PrevoteType, - rs.ProposalBlock.Hash(), - rs.ProposalBlockParts.Header(), - vss[1:nVals]...) - signAddVotes( - sim.Config, - css[0], - tmproto.PrecommitType, - rs.ProposalBlock.Hash(), - rs.ProposalBlockParts.Header(), + signAddVotes(ctx, t, css[0], tmproto.PrecommitType, sim.Config.ChainID(), + types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()}, vss[1:nVals]...) - ensureNewRound(newRoundCh, height+1, 0) + ensureNewRound(t, newRoundCh, height+1, 0) // HEIGHT 4 // 1 new validator comes in here from block 2 @@ -475,17 +459,19 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { height++ incrementHeight(vss...) 
- err = assertMempool(css[0].txNotifier).CheckTx(ctx, hvsu4.tx, nil, mempl.TxInfo{}) + err = assertMempool(t, css[0].txNotifier).CheckTx(ctx, hvsu4.tx, nil, mempool.TxInfo{}) assert.Nil(t, err) - propBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2) - propBlockParts = propBlock.MakePartSet(partSize) + propBlock, err = css[0].createProposalBlock(ctx) // changeProposer(t, cs1, vs2) + require.NoError(t, err) + propBlockParts, err = propBlock.MakePartSet(partSize) + require.NoError(t, err) require.Equal(t, 1, len(propBlock.Txs), "there should be 1 transaction") require.Equal(t, hvsu4.tx, []byte(propBlock.Txs[0])) blockID = types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} - vssProposer := findProposer(vss, css[0].Validators.Proposer.ProTxHash) - proposal = types.NewProposal(vss[3].Height, 1, round, -1, blockID) + vssProposer := findProposer(ctx, t, vss, css[0].Validators.Proposer.ProTxHash) + proposal = types.NewProposal(vss[3].Height, 1, round, -1, blockID, propBlock.Header.Time) p = proposal.ToProto() if _, err := vssProposer.SignProposal(ctx, cfg.ChainID(), genDoc.QuorumType, hvsu2.quorumHash, p); err != nil { t.Fatal("failed to sign bad proposal", err) @@ -493,13 +479,12 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { proposal.Signature = p.Signature // set the proposal block - if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { + if err := css[0].SetProposalAndBlock(ctx, proposal, propBlock, propBlockParts, "some peer"); err != nil { t.Fatal(err) } - ensureNewProposal(proposalCh, height, round) - rs = css[0].GetRoundState() - vssForSigning := vss[0 : nVals+1] - sort.Sort(ValidatorStubsByPower(vssForSigning)) + ensureNewProposal(t, proposalCh, height, round) + vssForSigning := vss[:nVals+1] + vssForSigning = sortVValidatorStubsByPower(ctx, t, vssForSigning) valIndexFn := func(cssIdx int) int { for i, vs := range vssForSigning { @@ -513,10 +498,12 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { return i } } - panic(fmt.Sprintf("validator css[%d] not found in newVss", cssIdx)) + t.Fatalf("validator css[%d] not found in newVss", cssIdx) + return -1 } selfIndex := valIndexFn(0) + require.NotEqual(t, -1, selfIndex) // A new validator should come in for i := 0; i < nVals+1; i++ { @@ -524,37 +511,25 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { continue } signAddVotes( - sim.Config, - css[0], - tmproto.PrevoteType, - rs.ProposalBlock.Hash(), - rs.ProposalBlockParts.Header(), - vssForSigning[i], - ) - } - for i := 0; i < nVals+1; i++ { - if i == selfIndex { - continue - } - signAddVotes( - sim.Config, + ctx, t, css[0], tmproto.PrecommitType, - rs.ProposalBlock.Hash(), - rs.ProposalBlockParts.Header(), + sim.Config.ChainID(), + blockID, vssForSigning[i], ) } - ensureNewRound(newRoundCh, height+1, 0) + ensureNewRound(t, newRoundCh, height+1, 0) // HEIGHT 5 height++ incrementHeight(vss...) 
- propBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2) - propBlockParts = propBlock.MakePartSet(partSize) + propBlock, _ = css[0].createProposalBlock(ctx) // changeProposer(t, cs1, vs2) + propBlockParts, err = propBlock.MakePartSet(partSize) + require.NoError(t, err) blockID = types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} - proposal = types.NewProposal(vss[2].Height, 1, round, -1, blockID) + proposal = types.NewProposal(vss[2].Height, 1, round, -1, blockID, propBlock.Header.Time) p = proposal.ToProto() proposerProTxHash := css[0].RoundState.Validators.GetProposer().ProTxHash valIndexFnByProTxHash := func(proTxHash crypto.ProTxHash) int { @@ -575,38 +550,24 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { proposal.Signature = p.Signature // set the proposal block - if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { + if err := css[0].SetProposalAndBlock(ctx, proposal, propBlock, propBlockParts, "some peer"); err != nil { t.Fatal(err) } - ensureNewProposal(proposalCh, height, round) - rs = css[0].GetRoundState() + ensureNewProposal(t, proposalCh, height, round) for i := 0; i < nVals+1; i++ { if i == selfIndex { continue } signAddVotes( - sim.Config, - css[0], - tmproto.PrevoteType, - rs.ProposalBlock.Hash(), - rs.ProposalBlockParts.Header(), - vssForSigning[i], - ) - } - for i := 0; i < nVals+1; i++ { - if i == selfIndex { - continue - } - signAddVotes( - sim.Config, + ctx, t, css[0], tmproto.PrecommitType, - rs.ProposalBlock.Hash(), - rs.ProposalBlockParts.Header(), + sim.Config.ChainID(), + blockID, vssForSigning[i], ) } - ensureNewRound(newRoundCh, height+1, 0) + ensureNewRound(t, newRoundCh, height+1, 0) // HEIGHT 6 hvsu6, err := sqm.remValidators(height, 1) @@ -614,21 +575,22 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { height++ incrementHeight(vss...) 
- err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), hvsu6.tx, nil, mempl.TxInfo{}) + err = assertMempool(t, css[0].txNotifier).CheckTx(ctx, hvsu6.tx, nil, mempool.TxInfo{}) require.NoError(t, err) - propBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2) - propBlockParts = propBlock.MakePartSet(partSize) + propBlock, _ = css[0].createProposalBlock(ctx) // changeProposer(t, cs1, vs2) + propBlockParts, err = propBlock.MakePartSet(partSize) + require.NoError(t, err) blockID = types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} - proposal = types.NewProposal(vss[2].Height, 1, round, -1, blockID) + proposal = types.NewProposal(vss[2].Height, 1, round, -1, blockID, propBlock.Header.Time) p = proposal.ToProto() proposer := css[0].RoundState.Validators.GetProposer() proposerProTxHash = proposer.ProTxHash proposerPubKey := proposer.PubKey valIndexFnByProTxHash = func(proTxHash crypto.ProTxHash) int { for i, vs := range vss { - vsProTxHash, err := vs.GetProTxHash(context.Background()) + vsProTxHash, err := vs.GetProTxHash(ctx) require.NoError(t, err) if bytes.Equal(vsProTxHash, proposerProTxHash) { @@ -645,7 +607,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { signID, err := vss[proposerIndex].SignProposal( - context.Background(), + ctx, cfg.ChainID(), genDoc.QuorumType, validatorsAtProposalHeight.QuorumHash, @@ -669,7 +631,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { t.Fatal("wrong proposer pubKey", err) } - css[0].Logger.Debug( + css[0].logger.Debug( "signed proposal", "height", proposal.Height, "round", proposal.Round, @@ -684,55 +646,42 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { proposal.Signature = p.Signature // set the proposal block - if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { + if err := css[0].SetProposalAndBlock(ctx, proposal, propBlock, propBlockParts, "some peer"); err != nil { t.Fatal(err) } - ensureNewProposal(proposalCh, height, round) + ensureNewProposal(t, proposalCh, height, round) vssForSigning = vss[0 : nVals+3] - sort.Sort(ValidatorStubsByPower(vssForSigning)) + vssForSigning = sortVValidatorStubsByPower(ctx, t, vssForSigning) selfIndex = valIndexFn(0) // All validators should be in now - rs = css[0].GetRoundState() for i := 0; i < nVals+3; i++ { if i == selfIndex { continue } signAddVotes( - sim.Config, - css[0], - tmproto.PrevoteType, - rs.ProposalBlock.Hash(), - rs.ProposalBlockParts.Header(), - vssForSigning[i], - ) - } - for i := 0; i < nVals+3; i++ { - if i == selfIndex { - continue - } - signAddVotes( - sim.Config, + ctx, t, css[0], tmproto.PrecommitType, - rs.ProposalBlock.Hash(), - rs.ProposalBlockParts.Header(), + sim.Config.ChainID(), + blockID, vssForSigning[i], ) } - ensureNewRound(newRoundCh, height+1, 0) + ensureNewRound(t, newRoundCh, height+1, 0) // HEIGHT 7 height++ incrementHeight(vss...) 
- propBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2) - propBlockParts = propBlock.MakePartSet(partSize) + propBlock, _ = css[0].createProposalBlock(ctx) // changeProposer(t, cs1, vs2) + propBlockParts, err = propBlock.MakePartSet(partSize) + require.NoError(t, err) blockID = types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} - proposal = types.NewProposal(vss[2].Height, 1, round, -1, blockID) + proposal = types.NewProposal(vss[2].Height, 1, round, -1, blockID, propBlock.Header.Time) p = proposal.ToProto() proposerProTxHash = css[0].RoundState.Validators.GetProposer().ProTxHash proposerIndex = valIndexFnByProTxHash(proposerProTxHash) @@ -749,12 +698,12 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { proposal.Signature = p.Signature // set the proposal block - if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { + if err := css[0].SetProposalAndBlock(ctx, proposal, propBlock, propBlockParts, "some peer"); err != nil { t.Fatal(err) } selfIndex = valIndexFn(0) - ensureNewProposal(proposalCh, height, round) - rs = css[0].GetRoundState() + require.NotEqual(t, -1, selfIndex) + ensureNewProposal(t, proposalCh, height, round) // Still have 7 validators for i := 0; i < nVals+3; i++ { @@ -762,28 +711,15 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { continue } signAddVotes( - sim.Config, - css[0], - tmproto.PrevoteType, - rs.ProposalBlock.Hash(), - rs.ProposalBlockParts.Header(), - vssForSigning[i], - ) - } - for i := 0; i < nVals+3; i++ { - if i == selfIndex { - continue - } - signAddVotes( - sim.Config, + ctx, t, css[0], tmproto.PrecommitType, - rs.ProposalBlock.Hash(), - rs.ProposalBlockParts.Header(), + sim.Config.ChainID(), + blockID, vssForSigning[i], ) } - ensureNewRound(newRoundCh, height+1, 0) + ensureNewRound(t, newRoundCh, height+1, 0) // HEIGHT 8 proTxHashToRemove := hvsu6.ProTxHashes[len(hvsu6.ProTxHashes)-1] @@ -792,14 +728,15 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { height++ incrementHeight(vss...) 
- err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), hvsu8.tx, nil, mempl.TxInfo{}) + err = assertMempool(t, css[0].txNotifier).CheckTx(ctx, hvsu8.tx, nil, mempool.TxInfo{}) assert.Nil(t, err) - propBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2) - propBlockParts = propBlock.MakePartSet(partSize) + propBlock, _ = css[0].createProposalBlock(ctx) // changeProposer(t, cs1, vs2) + propBlockParts, err = propBlock.MakePartSet(partSize) + require.NoError(t, err) blockID = types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} - proposal = types.NewProposal(vss[5].Height, 1, round, -1, blockID) + proposal = types.NewProposal(vss[5].Height, 1, round, -1, blockID, propBlock.Header.Time) p = proposal.ToProto() proposer = css[0].RoundState.Validators.GetProposer() proposerProTxHash = proposer.ProTxHash @@ -825,7 +762,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { //t.Fatal("wrong proposer pubKey", err) }*/ - css[0].Logger.Debug( + css[0].logger.Debug( "signed proposal", "height", proposal.Height, "round", proposal.Round, "proposer", proposerProTxHash.ShortString(), "signature", p.Signature, "pubkey", proposerPubKey.Bytes(), "quorum type", @@ -835,44 +772,31 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { proposal.Signature = p.Signature // set the proposal block - if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { + if err := css[0].SetProposalAndBlock(ctx, proposal, propBlock, propBlockParts, "some peer"); err != nil { t.Fatal(err) } - ensureNewProposal(proposalCh, height, round) - rs = css[0].GetRoundState() + ensureNewProposal(t, proposalCh, height, round) // Reflect the changes to vss[nVals] at height 3 and resort newVss.
vssForSigning = vss[0 : nVals+3] - sort.Sort(ValidatorStubsByPower(vssForSigning)) + vssForSigning = sortVValidatorStubsByPower(ctx, t, vssForSigning) vssForSigning = vssForSigning[0 : nVals+2] selfIndex = valIndexFn(0) + for i := 0; i < nVals+2; i++ { if i == selfIndex { continue } signAddVotes( - sim.Config, - css[0], - tmproto.PrevoteType, - rs.ProposalBlock.Hash(), - rs.ProposalBlockParts.Header(), - vssForSigning[i], - ) - } - for i := 0; i < nVals+2; i++ { - if i == selfIndex { - continue - } - signAddVotes( - sim.Config, + ctx, t, css[0], tmproto.PrecommitType, - rs.ProposalBlock.Hash(), - rs.ProposalBlockParts.Header(), + sim.Config.ChainID(), + blockID, vssForSigning[i], ) } - ensureNewRound(newRoundCh, height+1, 0) + ensureNewRound(t, newRoundCh, height+1, 0) sim.Chain = make([]*types.Block, 0) sim.Commits = make([]*types.Commit, 0) @@ -880,137 +804,94 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { sim.Chain = append(sim.Chain, css[0].blockStore.LoadBlock(int64(i))) sim.Commits = append(sim.Commits, css[0].blockStore.LoadBlockCommit(int64(i))) } - if sim.CleanupFunc != nil { - t.Cleanup(sim.CleanupFunc) - } return sim } // Sync from scratch func TestHandshakeReplayAll(t *testing.T) { - sim := setupSimulator(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sim := setupSimulator(ctx, t) + + t.Cleanup(leaktest.Check(t)) for _, m := range modes { - testHandshakeReplay(t, sim, 0, m, false) + testHandshakeReplay(ctx, t, sim, 0, m, false) } for _, m := range modes { - testHandshakeReplay(t, sim, 0, m, true) + testHandshakeReplay(ctx, t, sim, 0, m, true) } } // Sync many, not from scratch func TestHandshakeReplaySome(t *testing.T) { - sim := setupSimulator(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sim := setupSimulator(ctx, t) + + t.Cleanup(leaktest.Check(t)) for _, m := range modes { - testHandshakeReplay(t, sim, 2, m, false) + testHandshakeReplay(ctx, t, sim, 2, m, false) } for _, m := range modes { - testHandshakeReplay(t, sim, 2, m, true) + testHandshakeReplay(ctx, t, sim, 2, m, true) } } // Sync from lagging by one func TestHandshakeReplayOne(t *testing.T) { - sim := setupSimulator(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sim := setupSimulator(ctx, t) for _, m := range modes { - testHandshakeReplay(t, sim, numBlocks-1, m, false) + testHandshakeReplay(ctx, t, sim, numBlocks-1, m, false) } for _, m := range modes { - testHandshakeReplay(t, sim, numBlocks-1, m, true) + testHandshakeReplay(ctx, t, sim, numBlocks-1, m, true) } } // Sync from caught up func TestHandshakeReplayNone(t *testing.T) { - sim := setupSimulator(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sim := setupSimulator(ctx, t) + + t.Cleanup(leaktest.Check(t)) for _, m := range modes { - testHandshakeReplay(t, sim, numBlocks, m, false) + testHandshakeReplay(ctx, t, sim, numBlocks, m, false) } for _, m := range modes { - testHandshakeReplay(t, sim, numBlocks, m, true) + testHandshakeReplay(ctx, t, sim, numBlocks, m, true) } } -// Test mockProxyApp should not panic when app return ABCIResponses with some empty ResponseDeliverTx -func TestMockProxyApp(t *testing.T) { - sim := setupSimulator(t) // setup config and simulator - cfg := sim.Config - assert.NotNil(t, cfg) - - logger := log.TestingLogger() - var validTxs, invalidTxs = 0, 0 - txIndex := 0 - - assert.NotPanics(t, func() { - abciResWithEmptyDeliverTx := new(tmstate.ABCIResponses) - abciResWithEmptyDeliverTx.DeliverTxs = 
make([]*abci.ResponseDeliverTx, 0) - abciResWithEmptyDeliverTx.DeliverTxs = append( - abciResWithEmptyDeliverTx.DeliverTxs, - &abci.ResponseDeliverTx{}, - ) - - // called when saveABCIResponses: - bytes, err := proto.Marshal(abciResWithEmptyDeliverTx) - require.NoError(t, err) - loadedAbciRes := new(tmstate.ABCIResponses) - - // this also happens sm.LoadABCIResponses - err = proto.Unmarshal(bytes, loadedAbciRes) - require.NoError(t, err) - - mock := newMockProxyApp([]byte("mock_hash"), loadedAbciRes) - - abciRes := new(tmstate.ABCIResponses) - abciRes.DeliverTxs = make([]*abci.ResponseDeliverTx, len(loadedAbciRes.DeliverTxs)) - // Execute transactions and get hash. - proxyCb := func(req *abci.Request, res *abci.Response) { - if r, ok := res.Value.(*abci.Response_DeliverTx); ok { - // TODO: make use of res.Log - // TODO: make use of this info - // Blocks may include invalid txs. - txRes := r.DeliverTx - if txRes.Code == abci.CodeTypeOK { - validTxs++ - } else { - logger.Debug("Invalid tx", "code", txRes.Code, "log", txRes.Log) - invalidTxs++ - } - abciRes.DeliverTxs[txIndex] = txRes - txIndex++ - } - } - mock.SetResponseCallback(proxyCb) +func tempWALWithData(t *testing.T, data []byte) string { + t.Helper() - someTx := []byte("tx") - _, err = mock.DeliverTxAsync(context.Background(), abci.RequestDeliverTx{Tx: someTx}) - assert.NoError(t, err) - }) - assert.True(t, validTxs == 1) - assert.True(t, invalidTxs == 0) -} + walFile, err := os.CreateTemp(t.TempDir(), "wal") + require.NoError(t, err, "failed to create temp WAL file") + t.Cleanup(func() { _ = os.RemoveAll(walFile.Name()) }) -func tempWALWithData(data []byte) string { - walFile, err := ioutil.TempFile("", "wal") - if err != nil { - panic(fmt.Sprintf("failed to create temp WAL file: %v", err)) - } _, err = walFile.Write(data) - if err != nil { - panic(fmt.Sprintf("failed to write to temp WAL file: %v", err)) - } - if err := walFile.Close(); err != nil { - panic(fmt.Sprintf("failed to close temp WAL file: %v", err)) - } + require.NoError(t, err, "failed to write to temp WAL file") + + require.NoError(t, walFile.Close(), "failed to close temp WAL file") return walFile.Name() } // Make some blocks. Start a fresh app and apply nBlocks blocks. // Then restart the app and sync it up with the remaining blocks func testHandshakeReplay( + rctx context.Context, t *testing.T, sim *simulatorTestSuite, nBlocks int, @@ -1024,10 +905,14 @@ func testHandshakeReplay( var genesisState sm.State var privVal types.PrivValidator + ctx, cancel := context.WithCancel(rctx) + t.Cleanup(cancel) + cfg := sim.Config + logger := log.NewNopLogger() if testValidatorsChange { - testConfig, err := ResetConfig(fmt.Sprintf("%s_%v_m", t.Name(), mode)) + testConfig, err := ResetConfig(t.TempDir(), fmt.Sprintf("%s_%v_m", t.Name(), mode)) require.NoError(t, err) defer func() { _ = os.RemoveAll(testConfig.RootDir) }() stateDB = dbm.NewMemDB() @@ -1036,16 +921,16 @@ func testHandshakeReplay( cfg = sim.Config chain = append([]*types.Block{}, sim.Chain...) 
// copy chain commits = sim.Commits - store = newMockBlockStore(cfg, genesisState.ConsensusParams) + store = newMockBlockStore(t, cfg, genesisState.ConsensusParams) privVal, err = privval.LoadFilePV(cfg.PrivValidator.KeyFile(), cfg.PrivValidator.StateFile()) require.NoError(t, err) } else { // test single node - testConfig, err := ResetConfig(fmt.Sprintf("%s_%v_s", t.Name(), mode)) + testConfig, err := ResetConfig(t.TempDir(), fmt.Sprintf("%s_%v_s", t.Name(), mode)) require.NoError(t, err) defer func() { _ = os.RemoveAll(testConfig.RootDir) }() - walBody, err := WALWithNBlocks(t, numBlocks) + walBody, err := WALWithNBlocks(ctx, t, logger, numBlocks) require.NoError(t, err) - walFile := tempWALWithData(walBody) + walFile := tempWALWithData(t, walBody) cfg.Consensus.SetWalFile(walFile) privVal, err = privval.LoadFilePV(cfg.PrivValidator.KeyFile(), cfg.PrivValidator.StateFile()) @@ -1056,24 +941,17 @@ func testHandshakeReplay( t.Error(err) } - wal, err := NewWAL(walFile) + wal, err := NewWAL(ctx, logger, walFile) require.NoError(t, err) - wal.SetLogger(log.TestingLogger()) - err = wal.Start() + err = wal.Start(ctx) require.NoError(t, err) - t.Cleanup(func() { - if err := wal.Stop(); err != nil { - t.Error(err) - } - }) - chain, commits, err = makeBlockchainFromWAL(wal, gdoc) - require.NoError(t, err) - pubKey, err := privVal.GetPubKey(context.Background(), gdoc.QuorumHash) + t.Cleanup(func() { cancel(); wal.Wait() }) + chain, commits = makeBlockchainFromWAL(t, wal, gdoc) + pubKey, err := privVal.GetPubKey(ctx, gdoc.QuorumHash) require.NoError(t, err) - stateDB, genesisState, store = stateAndStore(cfg, pubKey, kvstore.ProtocolVersion) - + stateDB, genesisState, store = stateAndStore(t, cfg, pubKey, kvstore.ProtocolVersion) } - proTxHash, err := privVal.GetProTxHash(context.Background()) + proTxHash, err := privVal.GetProTxHash(ctx) require.NoError(t, err) stateStore := sm.NewStore(stateDB) @@ -1084,7 +962,10 @@ func testHandshakeReplay( firstValidatorProTxHash, _ := state.Validators.GetByIndex(0) // run the chain through state.ApplyBlock to build up the tendermint state state = buildTMStateFromChain( + ctx, + t, cfg, + logger, sim.Mempool, sim.Evpool, stateStore, @@ -1097,21 +978,20 @@ func testHandshakeReplay( ) latestAppHash := state.AppHash - // make a new client creator - kvstoreApp := kvstore.NewPersistentKVStoreApplication( - filepath.Join(cfg.DBDir(), fmt.Sprintf("replay_test_%d_%d_a_r%d", nBlocks, mode, rand.Int()))) - t.Cleanup(func() { require.NoError(t, kvstoreApp.Close()) }) + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) - clientCreator2 := abciclient.NewLocalCreator(kvstoreApp) + client := abciclient.NewLocalClient(logger, kvstore.NewApplication()) if nBlocks > 0 { // run nBlocks against a new client to build up the app state. 
// use a throwaway tendermint state - proxyApp := proxy.NewAppConns(clientCreator2, proxy.NopMetrics()) + proxyApp := proxy.New(client, logger, proxy.NopMetrics()) stateDB1 := dbm.NewMemDB() stateStore := sm.NewStore(stateDB1) err := stateStore.Save(genesisState) require.NoError(t, err) buildAppStateFromChain( + ctx, t, proxyApp, stateStore, firstValidatorProTxHash, @@ -1119,6 +999,7 @@ func testHandshakeReplay( sim.Evpool, genesisState, chain, + eventBus, nBlocks, mode, store, @@ -1135,36 +1016,33 @@ func testHandshakeReplay( } // now start the app using the handshake - it should sync - genDoc, _ := sm.MakeGenesisDocFromFile(cfg.GenesisFile()) + genDoc, err := sm.MakeGenesisDocFromFile(cfg.GenesisFile()) + require.NoError(t, err) handshaker := NewHandshaker( + logger, stateStore, state, store, + eventBus, genDoc, proTxHash, cfg.Consensus.AppHashSize, ) - proxyApp := proxy.NewAppConns(clientCreator2, proxy.NopMetrics()) - if err := proxyApp.Start(); err != nil { - t.Fatalf("Error starting proxy app connections: %v", err) - } - - t.Cleanup(func() { - if err := proxyApp.Stop(); err != nil { - t.Error(err) - } - }) + proxyApp := proxy.New(client, logger, proxy.NopMetrics()) + require.NoError(t, proxyApp.Start(ctx), "Error starting proxy app connections") + require.True(t, proxyApp.IsRunning()) + require.NotNil(t, proxyApp) + t.Cleanup(func() { cancel(); proxyApp.Wait() }) - _, err = handshaker.Handshake(proxyApp) + _, err = handshaker.Handshake(ctx, proxyApp) if expectError { require.Error(t, err) return - } else if err != nil { - t.Fatalf("Error on abci handshake: %v", err) } + require.NoError(t, err, "Error on abci handshake") // get the latest app hash from the app - res, err := proxyApp.Query().InfoSync(context.Background(), abci.RequestInfo{Version: ""}) + res, err := proxyApp.Info(ctx, &abci.RequestInfo{Version: ""}) if err != nil { t.Fatal(err) } @@ -1194,88 +1072,95 @@ func testHandshakeReplay( } func applyBlock( + ctx context.Context, + t *testing.T, stateStore sm.Store, - mempool mempl.Mempool, + mempool mempool.Mempool, evpool sm.EvidencePool, st sm.State, nodeProTxHash crypto.ProTxHash, blk *types.Block, - proxyApp proxy.AppConns, + appClient abciclient.Client, blockStore *mockBlockStore, + eventBus *eventbus.EventBus, ) sm.State { testPartSize := types.BlockPartSizeBytes blockExec := sm.NewBlockExecutor( stateStore, - log.TestingLogger(), - proxyApp.Consensus(), - proxyApp.Query(), + log.NewNopLogger(), + appClient, mempool, evpool, blockStore, - nil, + eventBus, + sm.NopMetrics(), ) - blkID := types.BlockID{Hash: blk.Hash(), PartSetHeader: blk.MakePartSet(testPartSize).Header()} - newState, err := blockExec.ApplyBlock(st, nodeProTxHash, blkID, blk) - if err != nil { - panic(err) - } + bps, err := blk.MakePartSet(testPartSize) + require.NoError(t, err) + blkID := types.BlockID{Hash: blk.Hash(), PartSetHeader: bps.Header()} + newState, err := blockExec.ApplyBlock(ctx, st, nodeProTxHash, blkID, blk) + require.NoError(t, err) return newState } func buildAppStateFromChain( - proxyApp proxy.AppConns, + ctx context.Context, + t *testing.T, + appClient abciclient.Client, stateStore sm.Store, nodeProTxHash crypto.ProTxHash, - mempool mempl.Mempool, + mempool mempool.Mempool, evpool sm.EvidencePool, state sm.State, chain []*types.Block, + eventBus *eventbus.EventBus, nBlocks int, mode uint, - blockStore *mockBlockStore) { + blockStore *mockBlockStore, +) { + t.Helper() // start a new app without handshake, play nBlocks blocks - if err := proxyApp.Start(); err != nil { - panic(err) - } - 
defer proxyApp.Stop() //nolint:errcheck // ignore + require.NoError(t, appClient.Start(ctx)) state.Version.Consensus.App = kvstore.ProtocolVersion // simulate handshake, receive app version validators := types.TM2PB.ValidatorUpdates(state.Validators) - if _, err := proxyApp.Consensus().InitChainSync(context.Background(), abci.RequestInitChain{ + _, err := appClient.InitChain(ctx, &abci.RequestInitChain{ ValidatorSet: &validators, - }); err != nil { - panic(err) - } - if err := stateStore.Save(state); err != nil { // save height 1's validatorsInfo - panic(err) - } + }) + require.NoError(t, err) + + require.NoError(t, stateStore.Save(state)) // save height 1's validatorsInfo + switch mode { case 0: for i := 0; i < nBlocks; i++ { block := chain[i] - state = applyBlock(stateStore, mempool, evpool, state, nodeProTxHash, block, proxyApp, blockStore) + state = applyBlock(ctx, t, stateStore, mempool, evpool, state, nodeProTxHash, block, appClient, blockStore, eventBus) } case 1, 2, 3: for i := 0; i < nBlocks-1; i++ { block := chain[i] - state = applyBlock(stateStore, mempool, evpool, state, nodeProTxHash, block, proxyApp, blockStore) + state = applyBlock(ctx, t, stateStore, mempool, evpool, state, nodeProTxHash, block, appClient, blockStore, eventBus) } if mode == 2 || mode == 3 { // update the kvstore height and apphash // as if we ran commit but not - state = applyBlock(stateStore, mempool, evpool, state, nodeProTxHash, chain[nBlocks-1], proxyApp, blockStore) + state = applyBlock(ctx, t, stateStore, mempool, evpool, state, nodeProTxHash, chain[nBlocks-1], appClient, blockStore, eventBus) } default: - panic(fmt.Sprintf("unknown mode %v", mode)) + require.Fail(t, "unknown mode %v", mode) } } func buildTMStateFromChain( + ctx context.Context, + t *testing.T, cfg *config.Config, - mempool mempl.Mempool, + logger log.Logger, + mempool mempool.Mempool, evpool sm.EvidencePool, stateStore sm.Store, nodeProTxHash crypto.ProTxHash, @@ -1283,108 +1168,112 @@ func buildTMStateFromChain( chain []*types.Block, nBlocks int, mode uint, - blockStore *mockBlockStore) sm.State { + blockStore *mockBlockStore, +) sm.State { + t.Helper() + // run the whole chain against this client to build up the tendermint state - kvstoreApp := kvstore.NewPersistentKVStoreApplication( - filepath.Join(cfg.DBDir(), fmt.Sprintf("replay_test_%d_%d_t", nBlocks, mode))) - defer kvstoreApp.Close() - clientCreator := abciclient.NewLocalCreator(kvstoreApp) + client := abciclient.NewLocalClient(logger, kvstore.NewApplication()) - proxyApp := proxy.NewAppConns(clientCreator, proxy.NopMetrics()) - if err := proxyApp.Start(); err != nil { - panic(err) - } - defer proxyApp.Stop() //nolint:errcheck + proxyApp := proxy.New(client, logger, proxy.NopMetrics()) + require.NoError(t, proxyApp.Start(ctx)) state.Version.Consensus.App = kvstore.ProtocolVersion // simulate handshake, receive app version validators := types.TM2PB.ValidatorUpdates(state.Validators) - if _, err := proxyApp.Consensus().InitChainSync(context.Background(), abci.RequestInitChain{ + _, err := proxyApp.InitChain(ctx, &abci.RequestInitChain{ ValidatorSet: &validators, - }); err != nil { - panic(err) - } - if err := stateStore.Save(state); err != nil { // save height 1's validatorsInfo - panic(err) - } + }) + require.NoError(t, err) + + require.NoError(t, stateStore.Save(state)) + + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) + switch mode { case 0: // sync right up for _, block := range chain { - state = applyBlock(stateStore, mempool, evpool, 
state, nodeProTxHash, block, proxyApp, blockStore) + state = applyBlock(ctx, t, stateStore, mempool, evpool, state, nodeProTxHash, block, proxyApp, blockStore, eventBus) } case 1, 2, 3: // sync up to the penultimate as if we stored the block. // whether we commit or not depends on the appHash for _, block := range chain[:len(chain)-1] { - state = applyBlock(stateStore, mempool, evpool, state, nodeProTxHash, block, proxyApp, blockStore) + state = applyBlock(ctx, t, stateStore, mempool, evpool, state, nodeProTxHash, block, proxyApp, blockStore, eventBus) } // apply the final block to a state copy so we can // get the right next appHash but keep the state back - applyBlock(stateStore, mempool, evpool, state, nodeProTxHash, chain[len(chain)-1], proxyApp, blockStore) + applyBlock(ctx, t, stateStore, mempool, evpool, state, nodeProTxHash, chain[len(chain)-1], proxyApp, blockStore, eventBus) default: - panic(fmt.Sprintf("unknown mode %v", mode)) + require.Fail(t, "unknown mode %v", mode) } return state } -func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { +func TestHandshakeErrorsIfAppReturnsWrongAppHash(t *testing.T) { // 1. Initialize tendermint and commit 3 blocks with the following app hashes: // - 0x01 // - 0x02 // - 0x03 - cfg, err := ResetConfig("handshake_test_") + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cfg, err := ResetConfig(t.TempDir(), "handshake_test_") require.NoError(t, err) t.Cleanup(func() { os.RemoveAll(cfg.RootDir) }) privVal, err := privval.LoadFilePV(cfg.PrivValidator.KeyFile(), cfg.PrivValidator.StateFile()) require.NoError(t, err) const appVersion = 0x0 - quorumHash, err := privVal.GetFirstQuorumHash(context.Background()) + quorumHash, err := privVal.GetFirstQuorumHash(ctx) require.NoError(t, err) - pubKey, err := privVal.GetPubKey(context.Background(), quorumHash) + pubKey, err := privVal.GetPubKey(ctx, quorumHash) require.NoError(t, err) - proTxHash, err := privVal.GetProTxHash(context.Background()) + proTxHash, err := privVal.GetProTxHash(ctx) require.NoError(t, err) - stateDB, state, store := stateAndStore(cfg, pubKey, appVersion) + stateDB, state, store := stateAndStore(t, cfg, pubKey, appVersion) stateStore := sm.NewStore(stateDB) - genDoc, _ := sm.MakeGenesisDocFromFile(cfg.GenesisFile()) + genDoc, err := sm.MakeGenesisDocFromFile(cfg.GenesisFile()) + require.NoError(t, err) state.LastValidators = state.Validators.Copy() // mode = 0 for committing all the blocks - blocks, err := sf.MakeBlocks(3, &state, privVal) - require.NoError(t, err) + blocks := sf.MakeBlocks(ctx, t, 3, &state, privVal) + store.chain = blocks + logger := log.NewNopLogger() + + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) + // 2. 
Tendermint must return an error if the app returns a wrong hash for the first block
 	// - RANDOM HASH
 	// - 0x02
 	// - 0x03
 	{
 		app := &badApp{numBlocks: 3, allHashesAreWrong: true}
-		clientCreator := abciclient.NewLocalCreator(app)
-		proxyApp := proxy.NewAppConns(clientCreator, proxy.NopMetrics())
-		err := proxyApp.Start()
+		client := abciclient.NewLocalClient(logger, app)
+		proxyApp := proxy.New(client, logger, proxy.NopMetrics())
+		err := proxyApp.Start(ctx)
 		require.NoError(t, err)
-		t.Cleanup(func() {
-			if err := proxyApp.Stop(); err != nil {
-				t.Error(err)
-			}
-		})
+		t.Cleanup(func() { cancel(); proxyApp.Wait() })

-		assert.Panics(t, func() {
-			h := NewHandshaker(
-				stateStore,
-				state,
-				store,
-				genDoc,
-				proTxHash,
-				cfg.Consensus.AppHashSize,
-			)
-			if _, err = h.Handshake(proxyApp); err != nil {
-				t.Log(err)
-			}
-		})
+		h := NewHandshaker(
+			logger,
+			stateStore,
+			state,
+			store,
+			eventBus,
+			genDoc,
+			proTxHash,
+			cfg.Consensus.AppHashSize,
+		)
+		_, err = h.Handshake(ctx, proxyApp)
+		assert.Error(t, err)
 	}

 	// 3. Tendermint must return an error if the app returns a wrong hash for the last block
@@ -1393,29 +1282,23 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) {
 	// - RANDOM HASH
 	{
 		app := &badApp{numBlocks: 3, onlyLastHashIsWrong: true}
-		clientCreator := abciclient.NewLocalCreator(app)
-		proxyApp := proxy.NewAppConns(clientCreator, proxy.NopMetrics())
-		err := proxyApp.Start()
+		client := abciclient.NewLocalClient(logger, app)
+		proxyApp := proxy.New(client, logger, proxy.NopMetrics())
+		err := proxyApp.Start(ctx)
 		require.NoError(t, err)
-		t.Cleanup(func() {
-			if err := proxyApp.Stop(); err != nil {
-				t.Error(err)
-			}
-		})
+		t.Cleanup(func() { cancel(); proxyApp.Wait() })

-		assert.Panics(t, func() {
-			h := NewHandshaker(
-				stateStore,
-				state,
-				store,
-				genDoc,
-				proTxHash,
-				cfg.Consensus.AppHashSize,
-			)
-			if _, err = h.Handshake(proxyApp); err != nil {
-				t.Log(err)
-			}
-		})
+		h := NewHandshaker(
+			logger,
+			stateStore,
+			state, store,
+			eventBus,
+			genDoc,
+			proTxHash,
+			cfg.Consensus.AppHashSize,
+		)
+		_, err = h.Handshake(ctx, proxyApp)
+		require.Error(t, err)
 	}
 }
@@ -1427,18 +1310,16 @@ type badApp struct {
 	onlyLastHashIsWrong bool
 }

-func (app *badApp) Commit() abci.ResponseCommit {
+func (app *badApp) Commit(context.Context) (*abci.ResponseCommit, error) {
 	app.height++
 	if app.onlyLastHashIsWrong {
 		if app.height == app.numBlocks {
-			return abci.ResponseCommit{Data: tmrand.Bytes(32)}
-		}
-		return abci.ResponseCommit{
-			Data: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-				0, 0, 0, 0, 0, 0, 0, 0, app.height},
+			return &abci.ResponseCommit{Data: tmrand.Bytes(32)}, nil
 		}
+		return &abci.ResponseCommit{Data: []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+			0, 0, 0, 0, 0, 0, 0, 0, app.height}}, nil
 	} else if app.allHashesAreWrong {
-		return abci.ResponseCommit{Data: tmrand.Bytes(32)}
+		return &abci.ResponseCommit{Data: tmrand.Bytes(32)}, nil
 	}

 	panic("either allHashesAreWrong or onlyLastHashIsWrong must be set")
@@ -1447,17 +1328,14 @@ func (app *badApp) Commit() abci.ResponseCommit {
 //--------------------------
 // utils for making blocks

-func makeBlockchainFromWAL(wal WAL, genDoc *types.GenesisDoc) ([]*types.Block, []*types.Commit, error) {
+func makeBlockchainFromWAL(t *testing.T, wal WAL, genDoc *types.GenesisDoc) ([]*types.Block, []*types.Commit) {
+	t.Helper()
 	var height int64

 	// Search for height marker
 	gr, found, err := wal.SearchForEndHeight(height, &WALSearchOptions{})
-	if err != nil {
-		return nil, nil, err
-	}
-	if !found {
-		return nil, nil, fmt.Errorf("wal does not contain height %d", height)
-	}
+	require.NoError(t, err)
+	require.True(t, found, "wal does not contain height %d", height)
 	defer gr.Close()

 	// log.Notice("Build a blockchain by reading from the WAL")
@@ -1474,9 +1352,8 @@ func makeBlockchainFromWAL(wal WAL, genDoc *types.GenesisDoc) ([]*types.Block, [
 		msg, err := dec.Decode()
 		if err == io.EOF {
 			break
-		} else if err != nil {
-			return nil, nil, err
 		}
+		require.NoError(t, err)

 		piece := readPieceFromWAL(msg)
 		if piece == nil {
@@ -1488,26 +1365,21 @@ func makeBlockchainFromWAL(wal WAL, genDoc *types.GenesisDoc) ([]*types.Block, [
 			// if it's not the first one, we have a full block
 			if thisBlockParts != nil {
 				var pbb = new(tmproto.Block)
-				bz, err := ioutil.ReadAll(thisBlockParts.GetReader())
-				if err != nil {
-					panic(err)
-				}
-				err = proto.Unmarshal(bz, pbb)
-				if err != nil {
-					panic(err)
-				}
+				bz, err := io.ReadAll(thisBlockParts.GetReader())
+				require.NoError(t, err)
+
+				require.NoError(t, proto.Unmarshal(bz, pbb))
+
 				block, err := types.BlockFromProto(pbb)
-				if err != nil {
-					panic(err)
-				}
+				require.NoError(t, err)
+
+				require.Equal(t, block.Height, height+1,
+					"read bad block from wal. got height %d, expected %d", block.Height, height+1)

-				if block.Height != height+1 {
-					panic(fmt.Sprintf("read bad block from wal. got height %d, expected %d", block.Height, height+1))
-				}
 				commitHeight := thisBlockCommit.Height
-				if commitHeight != height+1 {
-					panic(fmt.Sprintf("commit doesnt match. got height %d, expected %d", commitHeight, height+1))
-				}
+				require.Equal(t, commitHeight, height+1,
+					"commit does not match. got height %d, expected %d", commitHeight, height+1)
+
 				blocks = append(blocks, block)
 				commits = append(commits, thisBlockCommit)
 				height++
@@ -1516,9 +1388,7 @@
 			thisBlockParts = types.NewPartSetFromHeader(*p)
 		case *types.Part:
 			_, err := thisBlockParts.AddPart(p)
-			if err != nil {
-				return nil, nil, err
-			}
+			require.NoError(t, err)
 		case *types.Vote:
 			if p.Type == tmproto.PrecommitType {
 				// previous block, needed to determine StateID
 			}
 		}
 	}
 	// grab the last block too
-	bz, err := ioutil.ReadAll(thisBlockParts.GetReader())
-	if err != nil {
-		panic(err)
-	}
+	bz, err := io.ReadAll(thisBlockParts.GetReader())
+	require.NoError(t, err)
+
 	var pbb = new(tmproto.Block)
-	err = proto.Unmarshal(bz, pbb)
-	if err != nil {
-		panic(err)
-	}
+	require.NoError(t, proto.Unmarshal(bz, pbb))
+
 	block, err := types.BlockFromProto(pbb)
-	if err != nil {
-		panic(err)
-	}
-	if block.Height != height+1 {
-		panic(
-			fmt.Sprintf(
-				"read bad block from wal. got height %d, expected %d",
-				block.Height,
-				height+1,
-			),
-		)
-	}
+	require.NoError(t, err)
+
+	require.Equal(t, block.Height, height+1, "read bad block from wal. got height %d, expected %d", block.Height, height+1)
 	commitHeight := thisBlockCommit.Height
-	if commitHeight != height+1 {
-		panic(
-			fmt.Sprintf("commit doesnt match. got height %d, expected %d", commitHeight, height+1),
-		)
-	}
+	require.Equal(t, commitHeight, height+1, "commit does not match.
got height %d, expected %d", commitHeight, height+1) + blocks = append(blocks, block) commits = append(commits, thisBlockCommit) - return blocks, commits, nil + return blocks, commits } func readPieceFromWAL(msg *TimedWALMessage) interface{} { @@ -1590,17 +1445,19 @@ func readPieceFromWAL(msg *TimedWALMessage) interface{} { // fresh state and mock store func stateAndStore( + t *testing.T, cfg *config.Config, pubKey crypto.PubKey, - appVersion uint64) (dbm.DB, sm.State, *mockBlockStore) { + appVersion uint64, +) (dbm.DB, sm.State, *mockBlockStore) { stateDB := dbm.NewMemDB() stateStore := sm.NewStore(stateDB) - state, _ := sm.MakeGenesisStateFromFile(cfg.GenesisFile()) + state, err := sm.MakeGenesisStateFromFile(cfg.GenesisFile()) + require.NoError(t, err) state.Version.Consensus.App = appVersion - store := newMockBlockStore(cfg, state.ConsensusParams) - if err := stateStore.Save(state); err != nil { - panic(err) - } + store := newMockBlockStore(t, cfg, state.ConsensusParams) + require.NoError(t, stateStore.Save(state)) + return stateDB, state, store } @@ -1608,17 +1465,25 @@ func stateAndStore( // mock block store type mockBlockStore struct { - cfg *config.Config - params types.ConsensusParams - chain []*types.Block - commits []*types.Commit - base int64 + cfg *config.Config + params types.ConsensusParams + chain []*types.Block + commits []*types.Commit + base int64 + t *testing.T + coreChainLockedHeight uint32 } // TODO: NewBlockStore(db.NewMemDB) ... -func newMockBlockStore(cfg *config.Config, params types.ConsensusParams) *mockBlockStore { - return &mockBlockStore{cfg, params, nil, nil, 0, 1} +func newMockBlockStore(t *testing.T, cfg *config.Config, params types.ConsensusParams) *mockBlockStore { + return &mockBlockStore{ + cfg: cfg, + params: params, + t: t, + + coreChainLockedHeight: 1, + } } func (bs *mockBlockStore) Height() int64 { return int64(len(bs.chain)) } @@ -1634,12 +1499,11 @@ func (bs *mockBlockStore) LoadBlockByHash(hash []byte) *types.Block { func (bs *mockBlockStore) LoadBlockMetaByHash(hash []byte) *types.BlockMeta { return nil } func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta { block := bs.chain[height-1] + bps, err := block.MakePartSet(types.BlockPartSizeBytes) + require.NoError(bs.t, err) return &types.BlockMeta{ - BlockID: types.BlockID{ - Hash: block.Hash(), - PartSetHeader: block.MakePartSet(types.BlockPartSizeBytes).Header(), - }, - Header: block.Header, + BlockID: types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()}, + Header: block.Header, } } func (bs *mockBlockStore) LoadBlockPart(height int64, index int) *types.Part { return nil } @@ -1672,14 +1536,19 @@ func (bs *mockBlockStore) PruneBlocks(height int64) (uint64, error) { // Test handshake/init chain func TestHandshakeUpdatesValidators(t *testing.T) { - cfg, err := ResetConfig("handshake_test_") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cfg, err := ResetConfig(t.TempDir(), "handshake_test_") require.NoError(t, err) - defer os.RemoveAll(cfg.RootDir) + t.Cleanup(func() { _ = os.RemoveAll(cfg.RootDir) }) + privVal, err := privval.LoadFilePV(cfg.PrivValidator.KeyFile(), cfg.PrivValidator.StateFile()) require.NoError(t, err) + logger := log.NewNopLogger() val, _ := randValidator() - randQuorumHash, err := privVal.GetFirstQuorumHash(context.Background()) + randQuorumHash, err := privVal.GetFirstQuorumHash(ctx) require.NoError(t, err) vals := types.NewValidatorSet( []*types.Validator{val}, @@ -1690,39 +1559,40 @@ func 
TestHandshakeUpdatesValidators(t *testing.T) { ) abciValidatorSetUpdates := types.TM2PB.ValidatorUpdates(vals) app := &initChainApp{vals: &abciValidatorSetUpdates} - clientCreator := abciclient.NewLocalCreator(app) + client := abciclient.NewLocalClient(logger, app) - pubKey, err := privVal.GetPubKey(context.Background(), randQuorumHash) + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) + + pubKey, err := privVal.GetPubKey(ctx, randQuorumHash) require.NoError(t, err) - proTxHash, err := privVal.GetProTxHash(context.Background()) + proTxHash, err := privVal.GetProTxHash(ctx) require.NoError(t, err) - stateDB, state, store := stateAndStore(cfg, pubKey, 0x0) + stateDB, state, store := stateAndStore(t, cfg, pubKey, 0x0) stateStore := sm.NewStore(stateDB) oldValProTxHash := state.Validators.Validators[0].ProTxHash // now start the app using the handshake - it should sync - genDoc, _ := sm.MakeGenesisDocFromFile(cfg.GenesisFile()) + genDoc, err := sm.MakeGenesisDocFromFile(cfg.GenesisFile()) + require.NoError(t, err) + handshaker := NewHandshaker( + logger, stateStore, state, store, + eventBus, genDoc, proTxHash, cfg.Consensus.AppHashSize, ) - proxyApp := proxy.NewAppConns(clientCreator, proxy.NopMetrics()) - if err := proxyApp.Start(); err != nil { - t.Fatalf("Error starting proxy app connections: %v", err) - } - t.Cleanup(func() { - if err := proxyApp.Stop(); err != nil { - t.Error(err) - } - }) - if _, err := handshaker.Handshake(proxyApp); err != nil { - t.Fatalf("Error on abci handshake: %v", err) - } + proxyApp := proxy.New(client, logger, proxy.NopMetrics()) + require.NoError(t, proxyApp.Start(ctx), "Error starting proxy app connections") + + _, err = handshaker.Handshake(ctx, proxyApp) + require.NoError(t, err, "error on abci handshake") + // reload the state, check the validator set was updated state, err = stateStore.Load() require.NoError(t, err) @@ -1734,52 +1604,51 @@ func TestHandshakeUpdatesValidators(t *testing.T) { } func TestHandshakeInitialCoreLockHeight(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + const InitialCoreHeight uint32 = 12345 - config, err := ResetConfig("handshake_test_initial_core_lock_height") + logger := log.NewNopLogger() + conf, err := ResetConfig(t.TempDir(), "handshake_test_initial_core_lock_height") require.NoError(t, err) - defer os.RemoveAll(config.RootDir) + t.Cleanup(func() { _ = os.RemoveAll(conf.RootDir) }) - privVal, err := privval.LoadFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile()) + privVal, err := privval.LoadFilePV(conf.PrivValidator.KeyFile(), conf.PrivValidator.StateFile()) require.NoError(t, err) - randQuorumHash, err := privVal.GetFirstQuorumHash(context.TODO()) + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) + + randQuorumHash, err := privVal.GetFirstQuorumHash(ctx) require.NoError(t, err) app := &initChainApp{initialCoreHeight: InitialCoreHeight} - clientCreator := abciclient.NewLocalCreator(app) - require.NotNil(t, clientCreator) - proxyApp := proxy.NewAppConns(clientCreator, proxy.NopMetrics()) - require.NotNil(t, proxyApp) + client := abciclient.NewLocalClient(logger, app) - pubKey, err := privVal.GetPubKey(context.TODO(), randQuorumHash) + pubKey, err := privVal.GetPubKey(ctx, randQuorumHash) require.NoError(t, err) - proTxHash, err := privVal.GetProTxHash(context.TODO()) + proTxHash, err := privVal.GetProTxHash(ctx) require.NoError(t, err) - stateDB, state, store := stateAndStore(config, pubKey, 
0x0) + stateDB, state, store := stateAndStore(t, conf, pubKey, 0x0) stateStore := sm.NewStore(stateDB) // now start the app using the handshake - it should sync - genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile()) + genDoc, _ := sm.MakeGenesisDocFromFile(conf.GenesisFile()) handshaker := NewHandshaker( + logger, stateStore, state, store, + eventBus, genDoc, proTxHash, - config.Consensus.AppHashSize, + conf.Consensus.AppHashSize, ) - if err := proxyApp.Start(); err != nil { - t.Fatalf("Error starting proxy app connections: %v", err) - } - t.Cleanup(func() { - if err := proxyApp.Stop(); err != nil { - t.Error(err) - } - }) - if _, err := handshaker.Handshake(proxyApp); err != nil { - t.Fatalf("Error on abci handshake: %v", err) - } + proxyApp := proxy.New(client, logger, proxy.NopMetrics()) + require.NoError(t, proxyApp.Start(ctx), "Error starting proxy app connections") + _, err = handshaker.Handshake(ctx, proxyApp) + require.NoError(t, err, "error on abci handshake") // reload the state, check the validator set was updated state, err = stateStore.Load() @@ -1795,14 +1664,14 @@ type initChainApp struct { initialCoreHeight uint32 } -func (ica *initChainApp) InitChain(req abci.RequestInitChain) abci.ResponseInitChain { +func (ica *initChainApp) InitChain(_ context.Context, req *abci.RequestInitChain) (*abci.ResponseInitChain, error) { resp := abci.ResponseInitChain{ InitialCoreHeight: ica.initialCoreHeight, } if ica.vals != nil { resp.ValidatorSetUpdate = *ica.vals } - return resp + return &resp, nil } func randValidator() (*types.Validator, types.PrivValidator) { diff --git a/internal/consensus/state.go b/internal/consensus/state.go index 1119164c49..e37ce96d79 100644 --- a/internal/consensus/state.go +++ b/internal/consensus/state.go @@ -3,11 +3,13 @@ package consensus import ( "bytes" "context" + "encoding/json" "errors" "fmt" "io/ioutil" "os" "runtime/debug" + "sync" "time" "github.com/gogo/protobuf/proto" @@ -15,12 +17,12 @@ import ( "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" cstypes "github.com/tendermint/tendermint/internal/consensus/types" - "github.com/tendermint/tendermint/internal/libs/fail" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/internal/eventbus" + "github.com/tendermint/tendermint/internal/jsontypes" + "github.com/tendermint/tendermint/internal/libs/autofile" sm "github.com/tendermint/tendermint/internal/state" tmbytes "github.com/tendermint/tendermint/libs/bytes" tmevents "github.com/tendermint/tendermint/libs/events" - tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" tmmath "github.com/tendermint/tendermint/libs/math" tmos "github.com/tendermint/tendermint/libs/os" @@ -41,7 +43,6 @@ var ( ErrInvalidProposalCoreHeight = errors.New("error invalid proposal core height") ErrInvalidProposalPOLRound = errors.New("error invalid proposal POL round") ErrAddingVote = errors.New("error adding vote") - ErrAddingCommit = errors.New("error adding commit") ErrSignatureFoundInPastBlocks = errors.New("found signature from the same key") errProTxHashIsNotSet = errors.New("protxhash is not set. 
Look for \"Can't get private validator protxhash\" errors") @@ -51,18 +52,49 @@ var msgQueueSize = 1000 // msgs from the reactor which may update the state type msgInfo struct { - Msg Message `json:"msg"` - PeerID types.NodeID `json:"peer_key"` + Msg Message + PeerID types.NodeID + ReceiveTime time.Time +} + +func (msgInfo) TypeTag() string { return "tendermint/wal/MsgInfo" } + +type msgInfoJSON struct { + Msg json.RawMessage `json:"msg"` + PeerID types.NodeID `json:"peer_key"` + ReceiveTime time.Time `json:"receive_time"` +} + +func (m msgInfo) MarshalJSON() ([]byte, error) { + msg, err := jsontypes.Marshal(m.Msg) + if err != nil { + return nil, err + } + return json.Marshal(msgInfoJSON{Msg: msg, PeerID: m.PeerID, ReceiveTime: m.ReceiveTime}) +} + +func (m *msgInfo) UnmarshalJSON(data []byte) error { + var msg msgInfoJSON + if err := json.Unmarshal(data, &msg); err != nil { + return err + } + if err := jsontypes.Unmarshal(msg.Msg, &m.Msg); err != nil { + return err + } + m.PeerID = msg.PeerID + return nil } // internally generated messages which may update the state type timeoutInfo struct { - Duration time.Duration `json:"duration"` - Height int64 `json:"height"` + Duration time.Duration `json:"duration,string"` + Height int64 `json:"height,string"` Round int32 `json:"round"` Step cstypes.RoundStepType `json:"step"` } +func (timeoutInfo) TypeTag() string { return "tendermint/wal/TimeoutInfo" } + func (ti *timeoutInfo) String() string { return fmt.Sprintf("%v ; %d/%d %v", ti.Duration, ti.Height, ti.Round, ti.Step) } @@ -84,6 +116,7 @@ type evidencePool interface { // The internal state machine receives input from peers, the internal validator, and from a timer. type State struct { service.BaseService + logger log.Logger // config details config *config.ConsensusConfig @@ -93,6 +126,10 @@ type State struct { // store blocks and commits blockStore sm.BlockStore + stateStore sm.Store + initialStatePopulated bool + skipBootstrapping bool + // create and execute blocks blockExec *sm.BlockExecutor @@ -104,7 +141,7 @@ type State struct { evpool evidencePool // internal state - mtx tmsync.RWMutex + mtx sync.RWMutex cstypes.RoundState state sm.State // State until height-1. @@ -124,7 +161,7 @@ type State struct { // we use eventBus to trigger msg broadcasts in the reactor, // and to notify external subscribers, eg. through a websocket - eventBus *types.EventBus + eventBus *eventbus.EventBus // a Write-Ahead Log ensures we can recover from any kind of crash // and helps us avoid signing conflicting votes @@ -136,12 +173,9 @@ type State struct { nSteps int // some functions can be overwritten for testing - decideProposal func(height int64, round int32) - doPrevote func(height int64, round int32, allowOldBlocks bool) - setProposal func(proposal *types.Proposal) error - - // closed when we finish shutting down - done chan struct{} + decideProposal func(ctx context.Context, height int64, round int32) + doPrevote func(ctx context.Context, height int64, round int32, allowOldBlocks bool) + setProposal func(proposal *types.Proposal, t time.Time) error // synchronous pubsub between consensus state and reactor. // state only emits EventNewRoundStep, EventValidBlock, and EventVote @@ -160,48 +194,42 @@ type State struct { // StateOption sets an optional parameter on the State. type StateOption func(*State) -// NewState returns a new State without a logger set. 
-func NewState( - cfg *config.ConsensusConfig, - state sm.State, - blockExec *sm.BlockExecutor, - blockStore sm.BlockStore, - txNotifier txNotifier, - evpool evidencePool, - options ...StateOption, -) *State { - return NewStateWithLogger(cfg, state, blockExec, blockStore, txNotifier, evpool, nil, 0, options...) +// SkipStateStoreBootstrap is a state option forces the constructor to +// skip state bootstrapping during construction. +func SkipStateStoreBootstrap(sm *State) { + sm.skipBootstrapping = true } -// NewStateWithLogger returns a new State with the logger set. -func NewStateWithLogger( +// NewState returns a new State. +func NewState( + logger log.Logger, cfg *config.ConsensusConfig, - state sm.State, + store sm.Store, blockExec *sm.BlockExecutor, blockStore sm.BlockStore, txNotifier txNotifier, evpool evidencePool, - logger log.Logger, - proposedAppVersion uint64, + eventBus *eventbus.EventBus, options ...StateOption, -) *State { +) (*State, error) { cs := &State{ - config: cfg, - blockExec: blockExec, - blockStore: blockStore, - txNotifier: txNotifier, - peerMsgQueue: make(chan msgInfo, msgQueueSize), - internalMsgQueue: make(chan msgInfo, msgQueueSize), - timeoutTicker: NewTimeoutTicker(), - statsMsgQueue: make(chan msgInfo, msgQueueSize), - done: make(chan struct{}), - doWALCatchup: true, - wal: nilWAL{}, - evpool: evpool, - evsw: tmevents.NewEventSwitch(), - metrics: NopMetrics(), - proposedAppVersion: proposedAppVersion, - onStopCh: make(chan *cstypes.RoundState), + eventBus: eventBus, + logger: logger, + config: cfg, + blockExec: blockExec, + blockStore: blockStore, + stateStore: store, + txNotifier: txNotifier, + peerMsgQueue: make(chan msgInfo, msgQueueSize), + internalMsgQueue: make(chan msgInfo, msgQueueSize), + timeoutTicker: NewTimeoutTicker(logger), + statsMsgQueue: make(chan msgInfo, msgQueueSize), + doWALCatchup: true, + wal: nilWAL{}, + evpool: evpool, + evsw: tmevents.NewEventSwitch(), + metrics: NopMetrics(), + onStopCh: make(chan *cstypes.RoundState), } // set function defaults (may be overwritten before calling Start) @@ -209,33 +237,49 @@ func NewStateWithLogger( cs.doPrevote = cs.defaultDoPrevote cs.setProposal = cs.defaultSetProposal - // We have no votes, so reconstruct LastPrecommits from SeenCommit. - if state.LastBlockHeight > 0 { - cs.reconstructLastCommit(state) - } - - cs.updateToState(state, nil) - // NOTE: we do not call scheduleRound0 yet, we do that upon Start() cs.BaseService = *service.NewBaseService(logger, "State", cs) - for _, option := range options { option(cs) } - return cs + // this is not ideal, but it lets the consensus tests start + // node-fragments gracefully while letting the nodes + // themselves avoid this. + if !cs.skipBootstrapping { + if err := cs.updateStateFromStore(); err != nil { + return nil, err + } + } + + return cs, nil } -// SetLogger implements Service. -func (cs *State) SetLogger(l log.Logger) { - cs.BaseService.Logger = l - cs.timeoutTicker.SetLogger(l) +func (cs *State) SetProposedAppVersion(ver uint64) { + cs.proposedAppVersion = ver } -// SetEventBus sets event bus. -func (cs *State) SetEventBus(b *types.EventBus) { - cs.eventBus = b - cs.blockExec.SetEventBus(b) +func (cs *State) updateStateFromStore() error { + if cs.initialStatePopulated { + return nil + } + state, err := cs.stateStore.Load() + if err != nil { + return fmt.Errorf("loading state: %w", err) + } + if state.IsEmpty() { + return nil + } + + // We have no votes, so reconstruct LastPrecommits from SeenCommit. 
+	if state.LastBlockHeight > 0 {
+		cs.reconstructLastCommit(state)
+	}
+
+	cs.updateToState(state, nil)
+
+	cs.initialStatePopulated = true
+	return nil
 }

 // StateMetrics sets the metrics.
@@ -280,14 +324,14 @@ func (cs *State) GetRoundState() *cstypes.RoundState {
 func (cs *State) GetRoundStateJSON() ([]byte, error) {
 	cs.mtx.RLock()
 	defer cs.mtx.RUnlock()
-	return tmjson.Marshal(cs.RoundState)
+	return json.Marshal(cs.RoundState)
 }

 // GetRoundStateSimpleJSON returns a json of RoundStateSimple
 func (cs *State) GetRoundStateSimpleJSON() ([]byte, error) {
 	cs.mtx.RLock()
 	defer cs.mtx.RUnlock()
-	return tmjson.Marshal(cs.RoundState.RoundStateSimple())
+	return json.Marshal(cs.RoundState.RoundStateSimple())
 }

 // GetValidators returns a copy of the current validators.
@@ -306,12 +350,12 @@ func (cs *State) GetValidatorSet() (int64, *types.ValidatorSet) {

 // SetPrivValidator sets the private validator account for signing votes. It
 // immediately requests pubkey and caches it.
-func (cs *State) SetPrivValidator(priv types.PrivValidator) {
+func (cs *State) SetPrivValidator(ctx context.Context, priv types.PrivValidator) {
 	cs.mtx.Lock()
 	defer cs.mtx.Unlock()

 	if priv == nil {
-		cs.Logger.Error("attempting to set private validator to nil")
+		cs.logger.Error("attempting to set private validator to nil")
 	}

 	cs.privValidator = priv
@@ -333,13 +377,13 @@ func (cs *State) SetPrivValidator(priv types.PrivValidator) {
 		case *privval.DashCoreSignerClient:
 			cs.privValidatorType = types.DashCoreRPCClient
 		default:
-			cs.Logger.Error("unsupported priv validator type", "err",
+			cs.logger.Error("unsupported priv validator type", "err",
 				fmt.Errorf("error privValidatorType %s", t))
 		}
 	}

-	if err := cs.updatePrivValidatorProTxHash(); err != nil {
-		cs.Logger.Error("failed to get private validator protxhash", "err", err)
+	if err := cs.updatePrivValidatorProTxHash(ctx); err != nil {
+		cs.logger.Error("failed to get private validator protxhash", "err", err)
 	}
 }
@@ -372,15 +416,28 @@ func (cs *State) LoadCommit(height int64) *types.Commit {

 // OnStart loads the latest state via the WAL, and starts the timeout and
 // receive routines.
-func (cs *State) OnStart() error {
+func (cs *State) OnStart(ctx context.Context) error {
+	if err := cs.updateStateFromStore(); err != nil {
+		return err
+	}
+
 	// We may set the WAL in testing before calling Start, so only OpenWAL if it's
 	// still the nilWAL.
 	if _, ok := cs.wal.(nilWAL); ok {
-		if err := cs.loadWalFile(); err != nil {
+		if err := cs.loadWalFile(ctx); err != nil {
 			return err
 		}
 	}

+	// we need the timeoutRoutine for replay so
+	// we don't block on the tick chan.
+	// NOTE: we will get a buildup of garbage goroutines
+	// firing on the tockChan until the receiveRoutine is started
+	// to deal with them (by that point, at most one will be valid)
+	if err := cs.timeoutTicker.Start(ctx); err != nil {
+		return err
+	}
+
 	// We may have lost some votes if the process crashed; reload from the consensus
 	// log to catch up.
 	if cs.doWALCatchup {
@@ -388,25 +445,23 @@ func (cs *State) OnStart() error {
 	LOOP:
 		for {
-			err := cs.catchupReplay(cs.Height)
+			err := cs.catchupReplay(ctx, cs.Height)
 			switch {
 			case err == nil:
 				break LOOP
 			case !IsDataCorruptionError(err):
-				cs.Logger.Error("error on catchup replay; proceeding to start state anyway", "err", err)
+				cs.logger.Error("error on catchup replay; proceeding to start state anyway", "err", err)
 				break LOOP
 			case repairAttempted:
 				return err
 			}

-			cs.Logger.Error("the WAL file is corrupted; attempting repair", "err", err)
+			cs.logger.Error("the WAL file is corrupted; attempting repair", "err", err)

 			// 1) prep work
-			if err := cs.wal.Stop(); err != nil {
-				return err
-			}
+			cs.wal.Stop()

 			repairAttempted = true
@@ -416,38 +471,25 @@ func (cs *State) OnStart() error {
 				return err
 			}

-			cs.Logger.Debug("backed up WAL file", "src", cs.config.WalFile(), "dst", corruptedFile)
+			cs.logger.Debug("backed up WAL file", "src", cs.config.WalFile(), "dst", corruptedFile)

 			// 3) try to repair (WAL file will be overwritten!)
 			if err := repairWalFile(corruptedFile, cs.config.WalFile()); err != nil {
-				cs.Logger.Error("the WAL repair failed", "err", err)
+				cs.logger.Error("the WAL repair failed", "err", err)
 				return err
 			}

-			cs.Logger.Info("successful WAL repair")
+			cs.logger.Info("successful WAL repair")

 			// reload WAL file
-			if err := cs.loadWalFile(); err != nil {
+			if err := cs.loadWalFile(ctx); err != nil {
 				return err
 			}
 		}
 	}

-	if err := cs.evsw.Start(); err != nil {
-		return err
-	}
-
-	// we need the timeoutRoutine for replay so
-	// we don't block on the tick chan.
-	// NOTE: we will get a build up of garbage go routines
-	// firing on the tockChan until the receiveRoutine is started
-	// to deal with them (by that point, at most one will be valid)
-	if err := cs.timeoutTicker.Start(); err != nil {
-		return err
-	}
-
 	// now start the receiveRoutine
-	go cs.receiveRoutine(0)
+	go cs.receiveRoutine(ctx, 0)

 	// schedule the first round!
 	// use GetRoundState so we don't race the receiveRoutine for access
@@ -458,21 +500,23 @@ func (cs *State) OnStart() error {

 // timeoutRoutine: receive requests for timeouts on tickChan and fire timeouts on tockChan
 // receiveRoutine: serializes processing of proposals, block parts, votes; coordinates state transitions
-func (cs *State) startRoutines(maxSteps int) {
-	err := cs.timeoutTicker.Start()
+//
+// this is only used in tests.
+func (cs *State) startRoutines(ctx context.Context, maxSteps int) {
+	err := cs.timeoutTicker.Start(ctx)
 	if err != nil {
-		cs.Logger.Error("failed to start timeout ticker", "err", err)
+		cs.logger.Error("failed to start timeout ticker", "err", err)
 		return
 	}

-	go cs.receiveRoutine(maxSteps)
+	go cs.receiveRoutine(ctx, maxSteps)
 }

 // loadWalFile loads WAL data from file. It overwrites cs.wal.
-func (cs *State) loadWalFile() error {
-	wal, err := cs.OpenWAL(cs.config.WalFile())
+func (cs *State) loadWalFile(ctx context.Context) error {
+	wal, err := cs.OpenWAL(ctx, cs.config.WalFile())
 	if err != nil {
-		cs.Logger.Error("failed to load state WAL", "err", err)
+		cs.logger.Error("failed to load state WAL", "err", err)
 		return err
 	}
@@ -480,50 +524,41 @@ func (cs *State) loadWalFile() error {
 	return nil
 }

+func (cs *State) getOnStopCh() chan *cstypes.RoundState {
+	cs.mtx.RLock()
+	defer cs.mtx.RUnlock()
+
+	return cs.onStopCh
+}
+
 // OnStop implements service.Service.
 func (cs *State) OnStop() {
-	// If the node is committing a new block, wait until it is finished!
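
The repair sequence above stops the WAL, backs up the corrupted file, rewrites it in place via repairWalFile, and reloads it. As a rough illustration only (a hedged sketch, not the patch's implementation), and assuming the NewWALDecoder/NewWALEncoder pair and TimedWALMessage type that wal.go provides in this package, a salvage pass can copy every entry that still decodes and drop everything from the first corrupted record onward:

// Sketch: salvage a corrupted WAL by copying intact entries from src to dst.
// Assumes package consensus plus the standard "errors", "io", and "os" imports.
func repairCorruptedWAL(src, dst string) error {
	in, err := os.Open(src)
	if err != nil {
		return err
	}
	defer in.Close()

	out, err := os.Create(dst)
	if err != nil {
		return err
	}
	defer out.Close()

	dec := NewWALDecoder(in)
	enc := NewWALEncoder(out)
	for {
		msg, err := dec.Decode()
		if errors.Is(err, io.EOF) {
			return nil // clean end of file: nothing was lost
		} else if err != nil {
			return nil // first corrupted entry: keep only what was salvaged
		}
		if err := enc.Encode(msg); err != nil {
			return err
		}
	}
}

Overwriting the live WAL only after a salvage pass like this succeeds is what makes the "WAL file will be overwritten!" step above safe to retry.
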
if cs.GetRoundState().Step == cstypes.RoundStepApplyCommit { select { - case <-cs.onStopCh: - case <-time.After(cs.config.TimeoutCommit): - cs.Logger.Error("OnStop: timeout waiting for commit to finish", "time", cs.config.TimeoutCommit) + case <-cs.getOnStopCh(): + case <-time.After(cs.state.ConsensusParams.Timeout.Commit): + cs.logger.Error("OnStop: timeout waiting for commit to finish", "time", cs.state.ConsensusParams.Timeout.Commit) } } - close(cs.onStopCh) - - if err := cs.evsw.Stop(); err != nil { - cs.Logger.Error("failed trying to stop eventSwitch", "error", err) - } - - if err := cs.timeoutTicker.Stop(); err != nil { - cs.Logger.Error("failed trying to stop timeoutTicket", "error", err) + if cs.timeoutTicker.IsRunning() { + cs.timeoutTicker.Stop() } // WAL is stopped in receiveRoutine. } -// Wait waits for the the main routine to return. -// NOTE: be sure to Stop() the event switch and drain -// any event channels or this may deadlock -func (cs *State) Wait() { - <-cs.done -} - // OpenWAL opens a file to log all consensus messages and timeouts for // deterministic accountability. -func (cs *State) OpenWAL(walFile string) (WAL, error) { - wal, err := NewWAL(walFile) +func (cs *State) OpenWAL(ctx context.Context, walFile string) (WAL, error) { + wal, err := NewWAL(ctx, cs.logger.With("wal", walFile), walFile) if err != nil { - cs.Logger.Error("failed to open WAL", "file", walFile, "err", err) + cs.logger.Error("failed to open WAL", "file", walFile, "err", err) return nil, err } - wal.SetLogger(cs.Logger.With("wal", walFile)) - - if err := wal.Start(); err != nil { - cs.Logger.Error("failed to start WAL", "err", err) + if err := wal.Start(ctx); err != nil { + cs.logger.Error("failed to start WAL", "err", err) return nil, err } @@ -538,58 +573,85 @@ func (cs *State) OpenWAL(walFile string) (WAL, error) { // TODO: should these return anything or let callers just use events? // AddVote inputs a vote. -func (cs *State) AddVote(vote *types.Vote, peerID types.NodeID) (added bool, err error) { +func (cs *State) AddVote(ctx context.Context, vote *types.Vote, peerID types.NodeID) error { if peerID == "" { - cs.internalMsgQueue <- msgInfo{&VoteMessage{vote}, ""} + select { + case <-ctx.Done(): + return ctx.Err() + case cs.internalMsgQueue <- msgInfo{&VoteMessage{vote}, "", tmtime.Now()}: + return nil + } } else { - cs.peerMsgQueue <- msgInfo{&VoteMessage{vote}, peerID} + select { + case <-ctx.Done(): + return ctx.Err() + case cs.peerMsgQueue <- msgInfo{&VoteMessage{vote}, peerID, tmtime.Now()}: + return nil + } } // TODO: wait for event?! - return false, nil } // SetProposal inputs a proposal. -func (cs *State) SetProposal(proposal *types.Proposal, peerID types.NodeID) error { +func (cs *State) SetProposal(ctx context.Context, proposal *types.Proposal, peerID types.NodeID) error { if peerID == "" { - cs.internalMsgQueue <- msgInfo{&ProposalMessage{proposal}, ""} + select { + case <-ctx.Done(): + return ctx.Err() + case cs.internalMsgQueue <- msgInfo{&ProposalMessage{proposal}, "", tmtime.Now()}: + return nil + } } else { - cs.peerMsgQueue <- msgInfo{&ProposalMessage{proposal}, peerID} + select { + case <-ctx.Done(): + return ctx.Err() + case cs.peerMsgQueue <- msgInfo{&ProposalMessage{proposal}, peerID, tmtime.Now()}: + return nil + } } // TODO: wait for event?! - return nil } // AddProposalBlockPart inputs a part of the proposal block. 
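
AddVote and SetProposal above, and AddProposalBlockPart just below, all repeat the same two-armed select. A hypothetical helper (not part of the patch, assuming this package's msgInfo type and queues) makes the shared pattern explicit: block until the queue accepts the message or the caller's context is canceled, stamping the receive time on the way in.

// Sketch: a context-aware enqueue that the three senders could share.
// The caller picks cs.internalMsgQueue (peerID == "") or cs.peerMsgQueue.
func (cs *State) queueMsg(ctx context.Context, q chan<- msgInfo, msg Message, peerID types.NodeID) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	case q <- msgInfo{msg, peerID, tmtime.Now()}:
		return nil
	}
}

With such a helper, SetProposal reduces to a single call whose queue argument is chosen by whether peerID is empty.
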
-func (cs *State) AddProposalBlockPart(height int64, round int32, part *types.Part, peerID types.NodeID) error { - +func (cs *State) AddProposalBlockPart(ctx context.Context, height int64, round int32, part *types.Part, peerID types.NodeID) error { if peerID == "" { - cs.internalMsgQueue <- msgInfo{&BlockPartMessage{height, round, part}, ""} + select { + case <-ctx.Done(): + return ctx.Err() + case cs.internalMsgQueue <- msgInfo{&BlockPartMessage{height, round, part}, "", tmtime.Now()}: + return nil + } } else { - cs.peerMsgQueue <- msgInfo{&BlockPartMessage{height, round, part}, peerID} + select { + case <-ctx.Done(): + return ctx.Err() + case cs.peerMsgQueue <- msgInfo{&BlockPartMessage{height, round, part}, peerID, tmtime.Now()}: + return nil + } } // TODO: wait for event?! - return nil } // SetProposalAndBlock inputs the proposal and all block parts. func (cs *State) SetProposalAndBlock( + ctx context.Context, proposal *types.Proposal, block *types.Block, parts *types.PartSet, peerID types.NodeID, ) error { - if err := cs.SetProposal(proposal, peerID); err != nil { + if err := cs.SetProposal(ctx, proposal, peerID); err != nil { return err } for i := 0; i < int(parts.Total()); i++ { part := parts.GetPart(i) - if err := cs.AddProposalBlockPart(proposal.Height, proposal.Round, part, peerID); err != nil { + if err := cs.AddProposalBlockPart(ctx, proposal.Height, proposal.Round, part, peerID); err != nil { return err } } @@ -648,7 +710,7 @@ func (cs *State) updateRoundStep(round int32, step cstypes.RoundStepType) { // enterNewRound(height, 0) at cs.StartTime. func (cs *State) scheduleRound0(rs *cstypes.RoundState) { - // cs.Logger.Info("scheduleRound0", "now", tmtime.Now(), "startTime", cs.StartTime) + // cs.logger.Info("scheduleRound0", "now", tmtime.Now(), "startTime", cs.StartTime) sleepDuration := rs.StartTime.Sub(tmtime.Now()) cs.scheduleTimeout(sleepDuration, rs.Height, 0, cstypes.RoundStepNewHeight) } @@ -659,16 +721,22 @@ func (cs *State) scheduleTimeout(duration time.Duration, height int64, round int } // send a msg into the receiveRoutine regarding our own proposal, block part, or vote -func (cs *State) sendInternalMessage(mi msgInfo) { +func (cs *State) sendInternalMessage(ctx context.Context, mi msgInfo) { select { + case <-ctx.Done(): case cs.internalMsgQueue <- mi: default: // NOTE: using the go-routine means our votes can // be processed out of order. // TODO: use CList here for strict determinism and // attempt push to internalMsgQueue in receiveRoutine - cs.Logger.Debug("internal msg queue is full; using a go-routine") - go func() { cs.internalMsgQueue <- mi }() + cs.logger.Debug("internal msg queue is full; using a go-routine") + go func() { + select { + case <-ctx.Done(): + case cs.internalMsgQueue <- mi: + } + }() } } @@ -722,13 +790,11 @@ func (cs *State) updateToState(state sm.State, commit *types.Commit) { // signal the new round step, because other services (eg. txNotifier) // depend on having an up-to-date peer state! 
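
The default branch of sendInternalMessage above hands a full-queue send to a goroutine, and its comment warns that our own votes can then be processed out of order. A self-contained toy model (hypothetical, not code from the patch) shows the hazard: a message delegated to a goroutine can be overtaken by a later direct send once a slot frees up.

package main

import "fmt"

// send mimics sendInternalMessage: try a non-blocking send, fall back to a goroutine.
func send(q chan int, v int) {
	select {
	case q <- v:
	default:
		go func() { q <- v }() // queue full: deliver asynchronously
	}
}

func main() {
	q := make(chan int, 1)
	send(q, 1)            // direct: the buffer now holds 1
	send(q, 2)            // buffer full: 2 rides a goroutine
	fmt.Println(<-q)      // prints 1 and frees the slot
	send(q, 3)            // this direct send races the goroutine carrying 2
	fmt.Println(<-q, <-q) // nondeterministic: may print "3 2"
}

This is exactly why the TODO above suggests a CList (or a similar ordered buffer) if strict determinism of the internal queue ever becomes a requirement.
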
if state.LastBlockHeight <= cs.state.LastBlockHeight { - if cs.Logger != nil { - cs.Logger.Debug( - "ignoring updateToState()", - "new_height", state.LastBlockHeight+1, - "old_height", cs.state.LastBlockHeight+1, - ) - } + cs.logger.Debug( + "ignoring updateToState()", + "new_height", state.LastBlockHeight+1, + "old_height", cs.state.LastBlockHeight+1, + ) cs.newStep() return } @@ -772,9 +838,7 @@ func (cs *State) updateToState(state sm.State, commit *types.Commit) { height = state.InitialHeight } - if cs.Logger != nil { - cs.Logger.Debug("updating state height", "newHeight", height) - } + cs.logger.Debug("updating state height", "newHeight", height) // RoundState fields cs.updateHeight(height) @@ -786,22 +850,21 @@ func (cs *State) updateToState(state sm.State, commit *types.Commit) { // to be gathered for the first block. // And alternative solution that relies on clocks: // cs.StartTime = state.LastBlockTime.Add(timeoutCommit) - cs.StartTime = cs.config.Commit(tmtime.Now()) + cs.StartTime = cs.commitTime(tmtime.Now()) } else { - cs.StartTime = cs.config.Commit(cs.CommitTime) + cs.StartTime = cs.commitTime(cs.CommitTime) } if cs.Validators == nil || !bytes.Equal(cs.Validators.QuorumHash, validators.QuorumHash) { - if cs.Logger != nil { - cs.Logger.Info("Updating validators", "from", cs.Validators.BasicInfoString(), - "to", validators.BasicInfoString()) - } + cs.logger.Info("Updating validators", "from", cs.Validators.BasicInfoString(), + "to", validators.BasicInfoString()) } stateID := state.StateID() cs.Validators = validators cs.Proposal = nil + cs.ProposalReceiveTime = time.Time{} cs.ProposalBlock = nil cs.ProposalBlockParts = nil cs.LockedRound = -1 @@ -825,7 +888,7 @@ func (cs *State) updateToState(state sm.State, commit *types.Commit) { func (cs *State) newStep() { rs := cs.RoundStateEvent() if err := cs.wal.Write(rs); err != nil { - cs.Logger.Error("failed writing to WAL", "err", err) + cs.logger.Error("failed writing to WAL", "err", err) } cs.nSteps++ @@ -833,7 +896,7 @@ func (cs *State) newStep() { // newStep is called by updateToState in NewState before the eventBus is set! if cs.eventBus != nil { if err := cs.eventBus.PublishEventNewRoundStep(rs); err != nil { - cs.Logger.Error("failed publishing new round step", "err", err) + cs.logger.Error("failed publishing new round step", "err", err) } cs.evsw.FireEvent(types.EventNewRoundStepValue, &cs.RoundState) @@ -848,102 +911,117 @@ func (cs *State) newStep() { // It keeps the RoundState and is the only thing that updates it. // Updates (state transitions) happen on timeouts, complete proposals, and 2/3 majorities. // State must be locked before any internal state is updated. -func (cs *State) receiveRoutine(maxSteps int) { +func (cs *State) receiveRoutine(ctx context.Context, maxSteps int) { onExit := func(cs *State) { // NOTE: the internalMsgQueue may have signed messages from our // priv_val that haven't hit the WAL, but its ok because // priv_val tracks LastSig // close wal now that we're done writing to it - if err := cs.wal.Stop(); err != nil { - cs.Logger.Error("failed trying to stop WAL", "error", err) - } - + cs.wal.Stop() cs.wal.Wait() - close(cs.done) } defer func() { if r := recover(); r != nil { - cs.Logger.Error("CONSENSUS FAILURE!!!", "err", r, "stack", string(debug.Stack())) - // stop gracefully - // - // NOTE: We most probably shouldn't be running any further when there is - // some unexpected panic. 
Some unknown error happened, and so we don't - // know if that will result in the validator signing an invalid thing. It - // might be worthwhile to explore a mechanism for manual resuming via - // some console or secure RPC system, but for now, halting the chain upon - // unexpected consensus bugs sounds like the better option. + cs.logger.Error("CONSENSUS FAILURE!!!", "err", r, "stack", string(debug.Stack())) + + // Make a best-effort attempt to close the WAL, but otherwise do not + // attempt to gracefully terminate. Once consensus has irrecoverably + // failed, any additional progress we permit the node to make may + // complicate diagnosing and recovering from the failure. onExit(cs) + + // There are a couple of cases where the we + // panic with an error from deeper within the + // state machine and in these cases, typically + // during a normal shutdown, we can continue + // with normal shutdown with safety. These + // cases are: + if err, ok := r.(error); ok { + // TODO(creachadair): In ordinary operation, the WAL autofile should + // never be closed. This only happens during shutdown and production + // nodes usually halt by panicking. Many existing tests, however, + // assume a clean shutdown is possible. Prior to #8111, we were + // swallowing the panic in receiveRoutine, making that appear to + // work. Filtering this specific error is slightly risky, but should + // affect only unit tests. In any case, not re-panicking here only + // preserves the pre-existing behavior for this one error type. + if errors.Is(err, autofile.ErrAutoFileClosed) { + return + } + + // don't re-panic if the panic is just an + // error and we're already trying to shut down + if ctx.Err() != nil { + return + + } + } + + // Re-panic to ensure the node terminates. + // + panic(r) } }() for { if maxSteps > 0 { if cs.nSteps >= maxSteps { - cs.Logger.Debug("reached max steps; exiting receive routine") + cs.logger.Debug("reached max steps; exiting receive routine") cs.nSteps = 0 return } } rs := cs.GetRoundState() - var mi msgInfo select { case <-cs.txNotifier.TxsAvailable(): - cs.handleTxsAvailable() + cs.handleTxsAvailable(ctx) - case mi = <-cs.peerMsgQueue: + case mi := <-cs.peerMsgQueue: if err := cs.wal.Write(mi); err != nil { - cs.Logger.Error("failed writing to WAL", "err", err) + cs.logger.Error("failed writing to WAL", "err", err) } - // handles proposals, block parts, votes // may generate internal events (votes, complete proposals, 2/3 majorities) - cs.handleMsg(mi, false) + cs.handleMsg(ctx, mi, false) - case mi = <-cs.internalMsgQueue: + case mi := <-cs.internalMsgQueue: err := cs.wal.WriteSync(mi) // NOTE: fsync if err != nil { - panic(fmt.Sprintf( - "failed to write %v msg to consensus WAL due to %v; check your file system and restart the node", + panic(fmt.Errorf( + "failed to write %v msg to consensus WAL due to %w; check your file system and restart the node", mi, err, )) } - if _, ok := mi.Msg.(*VoteMessage); ok { - // we actually want to simulate failing during - // the previous WriteSync, but this isn't easy to do. - // Equivalent would be to fail here and manually remove - // some bytes from the end of the wal. 
- fail.Fail() // XXX - } - // handles proposals, block parts, votes - cs.handleMsg(mi, false) + cs.handleMsg(ctx, mi, false) case ti := <-cs.timeoutTicker.Chan(): // tockChan: if err := cs.wal.Write(ti); err != nil { - cs.Logger.Error("failed writing to WAL", "err", err) + cs.logger.Error("failed writing to WAL", "err", err) } // if the timeout is relevant to the rs // go to the next step - cs.handleTimeout(ti, *rs) + cs.handleTimeout(ctx, ti, *rs) - case <-cs.Quit(): + case <-ctx.Done(): onExit(cs) return + } + // TODO should we handle context cancels here? } } // state transitions on complete-proposal, 2/3-any, 2/3-one -func (cs *State) handleMsg(mi msgInfo, fromReplay bool) { +func (cs *State) handleMsg(ctx context.Context, mi msgInfo, fromReplay bool) { cs.mtx.Lock() defer cs.mtx.Unlock() - var ( added bool err error @@ -954,17 +1032,41 @@ func (cs *State) handleMsg(mi msgInfo, fromReplay bool) { case *ProposalMessage: // will not cause transition. // once proposal is set, we can receive block parts - err = cs.setProposal(msg.Proposal) + err = cs.setProposal(msg.Proposal, mi.ReceiveTime) case *BlockPartMessage: + commitNotExist := cs.Commit == nil + // if the proposal is complete, we'll enterPrevote or tryFinalizeCommit - added, err = cs.addProposalBlockPart(msg, peerID, fromReplay) + added, err = cs.addProposalBlockPart(ctx, msg, peerID) + + // We unlock here to yield to any routines that need to read the RoundState. + // Previously, this code held the lock from the point at which the final block + // part was received until the block executed against the application. + // This prevented the reactor from being able to retrieve the most updated + // version of the RoundState. The reactor needs the updated RoundState to + // gossip the now completed block. + // + // This code can be further improved by either always operating on a copy + // of RoundState and only locking when switching out State's copy of + // RoundState with the updated copy or by emitting RoundState events in + // more places for routines depending on it to listen for. + cs.mtx.Unlock() + + cs.mtx.Lock() + if added && commitNotExist && cs.ProposalBlockParts.IsComplete() { + cs.handleCompleteProposal(ctx, msg.Height, fromReplay) + } if added { - cs.statsMsgQueue <- mi + select { + case cs.statsMsgQueue <- mi: + case <-ctx.Done(): + return + } } if err != nil && msg.Round != cs.Round { - cs.Logger.Debug( + cs.logger.Debug( "received block part from wrong round", "height", cs.Height, "cs_round", cs.Round, @@ -974,7 +1076,7 @@ func (cs *State) handleMsg(mi msgInfo, fromReplay bool) { err = nil } - cs.Logger.Debug( + cs.logger.Debug( "received block part", "height", cs.Height, "round", cs.Round, @@ -989,18 +1091,20 @@ func (cs *State) handleMsg(mi msgInfo, fromReplay bool) { case *VoteMessage: // attempt to add the vote and dupeout the validator if it's a duplicate signature // if the vote gives us a 2/3-any or 2/3-one, we transition - added, err = cs.tryAddVote(msg.Vote, peerID) + added, err = cs.tryAddVote(ctx, msg.Vote, peerID) if added { - cs.statsMsgQueue <- mi + select { + case cs.statsMsgQueue <- mi: + case <-ctx.Done(): + return + } } - // if err == ErrAddingVote { // TODO: punish peer // We probably don't want to stop the peer here. The vote does not // necessarily come from a malicious peer but can be just broadcast by // a typical peer.
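The select on statsMsgQueue above is the standard Go idiom for a blocking channel send that aborts on shutdown; a tiny stand-alone sketch (sendOrQuit is an illustrative name, not part of this package):

package main

import (
	"context"
	"fmt"
	"time"
)

// sendOrQuit blocks until the value is accepted or the context is canceled,
// so a slow or absent consumer can never wedge the sending routine during
// shutdown.
func sendOrQuit(ctx context.Context, ch chan<- int, v int) bool {
	select {
	case ch <- v:
		return true
	case <-ctx.Done():
		return false
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	stuck := make(chan int) // unbuffered and never drained: a plain send would block forever
	fmt.Println(sendOrQuit(ctx, stuck, 1)) // false: the context fires instead of deadlocking
}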
// https://github.com/tendermint/tendermint/issues/1281 - // } // NOTE: the vote is broadcast to peers by the reactor listening // for vote events @@ -1008,7 +1112,7 @@ // TODO: If rs.Height == vote.Height && rs.Round < vote.Round, // the peer is sending us CatchupCommit precommits. // We could make note of this and help filter in broadcastHasVoteMessage(). - cs.Logger.Debug( + cs.logger.Debug( "received vote", "height", cs.Height, "cs_round", cs.Round, @@ -1021,11 +1125,11 @@ case *CommitMessage: // attempt to add the commit and dupeout the validator if it's a duplicate signature // if the vote gives us a 2/3-any or 2/3-one, we transition - added, err = cs.tryAddCommit(msg.Commit, peerID) + added, err = cs.tryAddCommit(ctx, msg.Commit, peerID) if added { cs.statsMsgQueue <- mi } - cs.Logger.Debug( + cs.logger.Debug( "received commit", "height", cs.Height, "cs_round", cs.Round, @@ -1036,12 +1140,12 @@ "error", err, ) default: - cs.Logger.Error("unknown msg type", "type", fmt.Sprintf("%T", msg)) + cs.logger.Error("unknown msg type", "type", fmt.Sprintf("%T", msg)) return } if err != nil { - cs.Logger.Error( + cs.logger.Error( "failed to process message", "height", cs.Height, "round", cs.Round, @@ -1052,12 +1156,16 @@ } } -func (cs *State) handleTimeout(ti timeoutInfo, rs cstypes.RoundState) { - cs.Logger.Debug("received tock", "timeout", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step.String()) +func (cs *State) handleTimeout( + ctx context.Context, + ti timeoutInfo, + rs cstypes.RoundState, +) { + cs.logger.Debug("received tock", "timeout", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step) // timeouts must be for current height, round, step if ti.Height != rs.Height || ti.Round < rs.Round || (ti.Round == rs.Round && ti.Step < rs.Step) { - cs.Logger.Debug("ignoring tock because we are ahead", "height", rs.Height, "round", rs.Round, "step", rs.Step.String()) + cs.logger.Debug("ignoring tock because we are ahead", "height", rs.Height, "round", rs.Round, "step", rs.Step.String()) return } @@ -1069,32 +1177,32 @@ func (cs *State) handleTimeout(ti timeoutInfo, rs cstypes.RoundState) { case cstypes.RoundStepNewHeight: // NewRound event fired from enterNewRound. // XXX: should we fire timeout here (for timeout commit)?
- cs.enterNewRound(ti.Height, 0) + cs.enterNewRound(ctx, ti.Height, 0) case cstypes.RoundStepNewRound: - cs.enterPropose(ti.Height, 0) + cs.enterPropose(ctx, ti.Height, 0) case cstypes.RoundStepPropose: if err := cs.eventBus.PublishEventTimeoutPropose(cs.RoundStateEvent()); err != nil { - cs.Logger.Error("failed publishing timeout propose", "err", err) + cs.logger.Error("failed publishing timeout propose", "err", err) } - cs.enterPrevote(ti.Height, ti.Round, false) + cs.enterPrevote(ctx, ti.Height, ti.Round, false) case cstypes.RoundStepPrevoteWait: if err := cs.eventBus.PublishEventTimeoutWait(cs.RoundStateEvent()); err != nil { - cs.Logger.Error("failed publishing timeout wait", "err", err) + cs.logger.Error("failed publishing timeout wait", "err", err) } - cs.enterPrecommit(ti.Height, ti.Round) + cs.enterPrecommit(ctx, ti.Height, ti.Round) case cstypes.RoundStepPrecommitWait: if err := cs.eventBus.PublishEventTimeoutWait(cs.RoundStateEvent()); err != nil { - cs.Logger.Error("failed publishing timeout wait", "err", err) + cs.logger.Error("failed publishing timeout wait", "err", err) } - cs.enterPrecommit(ti.Height, ti.Round) - cs.enterNewRound(ti.Height, ti.Round+1) + cs.enterPrecommit(ctx, ti.Height, ti.Round) + cs.enterNewRound(ctx, ti.Height, ti.Round+1) default: panic(fmt.Sprintf("invalid timeout step: %v", ti.Step)) @@ -1102,7 +1210,7 @@ func (cs *State) handleTimeout(ti timeoutInfo, rs cstypes.RoundState) { } -func (cs *State) handleTxsAvailable() { +func (cs *State) handleTxsAvailable(ctx context.Context) { cs.mtx.Lock() defer cs.mtx.Unlock() @@ -1123,7 +1231,7 @@ func (cs *State) handleTxsAvailable() { cs.scheduleTimeout(timeoutCommit, cs.Height, 0, cstypes.RoundStepNewRound) case cstypes.RoundStepNewRound: // after timeoutCommit - cs.enterPropose(cs.Height, 0) + cs.enterPropose(ctx, cs.Height, 0) } } @@ -1132,14 +1240,16 @@ func (cs *State) handleTxsAvailable() { // Used internally by handleTimeout and handleMsg to make state transitions // Enter: `timeoutNewHeight` by startTime (commitTime+timeoutCommit), -// or, if SkipTimeoutCommit==true, after receiving all precommits from (height,round-1) +// or, if SkipTimeoutCommit==true, after receiving all precommits from (height,round-1) // Enter: `timeoutPrecommits` after any +2/3 precommits from (height,round-1) // Enter: +2/3 precommits for nil at (height,round-1) // Enter: +2/3 prevotes any or +2/3 precommits for block or any from (height, round) // Enter: A valid commit came in from a future round // NOTE: cs.StartTime was already set for height. 
-func (cs *State) enterNewRound(height int64, round int32) { - logger := cs.Logger.With("height", height, "round", round) +func (cs *State) enterNewRound(ctx context.Context, height int64, round int32) { + // TODO: remove panics in this function and return an error + + logger := cs.logger.With("height", height, "round", round) if cs.Height != height || round < cs.Round || (cs.Round == round && cs.Step != cstypes.RoundStepNewHeight) { logger.Debug( @@ -1159,7 +1269,11 @@ func (cs *State) enterNewRound(height int64, round int32) { validators := cs.Validators if cs.Round < round { validators = validators.Copy() - validators.IncrementProposerPriority(tmmath.SafeSubInt32(round, cs.Round)) + r, err := tmmath.SafeSubInt32(round, cs.Round) + if err != nil { + panic(err) + } + validators.IncrementProposerPriority(r) } // Setup new round @@ -1174,15 +1288,21 @@ func (cs *State) enterNewRound(height int64, round int32) { } else { logger.Debug("resetting proposal info") cs.Proposal = nil + cs.ProposalReceiveTime = time.Time{} cs.ProposalBlock = nil cs.ProposalBlockParts = nil } - cs.Votes.SetRound(tmmath.SafeAddInt32(round, 1)) // also track next round (round+1) to allow round-skipping + r, err := tmmath.SafeAddInt32(round, 1) + if err != nil { + panic(err) + } + + cs.Votes.SetRound(r) // also track next round (round+1) to allow round-skipping cs.TriggeredTimeoutPrecommit = false if err := cs.eventBus.PublishEventNewRound(cs.NewRoundEvent()); err != nil { - cs.Logger.Error("failed publishing new round", "err", err) + cs.logger.Error("failed publishing new round", "err", err) } // Wait for txs to be available in the mempool // before we enterPropose in round 0. If the last block changed the app hash, @@ -1196,7 +1316,7 @@ func (cs *State) enterNewRound(height int64, round int32) { } else if !cs.config.DontAutoPropose { // DontAutoPropose should always be false, except for // specific tests where proposals are created manually - cs.enterPropose(height, round) + cs.enterPropose(ctx, height, round) } } @@ -1217,7 +1337,7 @@ func (cs *State) needProofBlock(height int64) bool { panic(fmt.Sprintf("needProofBlock (height=%d): last block meta for height %d not found", height, blockHeight)) } if !bytes.Equal(cs.state.AppHash, blockMeta.Header.AppHash) { - cs.Logger.Debug( + cs.logger.Debug( "needProofBlock: proof block needed", "height", height, "modified_height", blockHeight, @@ -1233,11 +1353,11 @@ func (cs *State) needProofBlock(height int64) bool { // Enter (CreateEmptyBlocks): from enterNewRound(height,round) // Enter (CreateEmptyBlocks, CreateEmptyBlocksInterval > 0 ): -// after enterNewRound(height,round), after timeout of CreateEmptyBlocksInterval +// after enterNewRound(height,round), after timeout of CreateEmptyBlocksInterval // Enter (!CreateEmptyBlocks) : after enterNewRound(height,round), once txs are in the mempool // Caller should hold cs.mtx lock -func (cs *State) enterPropose(height int64, round int32) { - logger := cs.Logger.With("height", height, "round", round) +func (cs *State) enterPropose(ctx context.Context, height int64, round int32) { + logger := cs.logger.With("height", height, "round", round) if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPropose <= cs.Step) { logger.Debug( @@ -1247,6 +1367,16 @@ func (cs *State) enterPropose(height int64, round int32) { return } + // If this validator is the proposer of this round, and the previous block time is later than + // our local clock time, wait to propose until our local clock time has passed the block 
time. + if cs.privValidatorProTxHash != nil && cs.isProposer(cs.privValidatorProTxHash) { + proposerWaitTime := proposerWaitTime(tmtime.DefaultSource{}, cs.state.LastBlockTime) + if proposerWaitTime > 0 { + cs.scheduleTimeout(proposerWaitTime, height, round, cstypes.RoundStepNewRound) + return + } + } + logger.Debug("entering propose step", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step)) defer func() { @@ -1258,16 +1388,16 @@ func (cs *State) enterPropose(height int64, round int32) { // else, we'll enterPrevote when the rest of the proposal is received (in AddProposalBlockPart), // or else after timeoutPropose if cs.isProposalComplete() { - cs.enterPrevote(height, cs.Round, false) + cs.enterPrevote(ctx, height, cs.Round, false) } }() // If we don't get the proposal and all block parts quick enough, enterPrevote - cs.scheduleTimeout(cs.config.Propose(round), height, round, cstypes.RoundStepPropose) + cs.scheduleTimeout(cs.proposeTimeout(round), height, round, cstypes.RoundStepPropose) // Nothing more to do if we're not a validator if cs.privValidator == nil { - logger.Debug("node is not a validator") + logger.Debug("propose step; not proposing since node is not a validator") return } @@ -1281,14 +1411,18 @@ func (cs *State) enterPropose(height int64, round int32) { // if not a validator, we're done if !cs.Validators.HasProTxHash(proTxHash) { - logger.Debug("propose step; this node is not a validator", "proTxHash", proTxHash.ShortString(), "vals", cs.Validators) + logger.Debug("propose step; not proposing since node is not in the validator set", + "proTxHash", proTxHash.ShortString(), + "vals", cs.Validators) return } if cs.isProposer(proTxHash) { - logger.Debug("propose step; our turn to propose", "proposer", proTxHash.ShortString(), "privValidator", - cs.privValidator) - cs.decideProposal(height, round) + logger.Debug("propose step; our turn to propose", + "proposer", proTxHash.ShortString(), + "privValidator", cs.privValidator, + ) + cs.decideProposal(ctx, height, round) } else { logger.Debug("propose step; not our turn to propose", "proposer", @@ -1308,7 +1442,7 @@ func (cs *State) checkValidBlock() bool { return false } if err := cs.blockExec.ValidateBlockTime(cs.config.ProposedBlockTimeWindow, cs.state, cs.ValidBlock); err != nil { - cs.Logger.Debug( + cs.logger.Debug( "proposal block is outdated", "height", cs.Height, "round", cs.Round, @@ -1321,7 +1455,7 @@ func (cs *State) checkValidBlock() bool { return true } -func (cs *State) defaultDecideProposal(height int64, round int32) { +func (cs *State) defaultDecideProposal(ctx context.Context, height int64, round int32) { var block *types.Block var blockParts *types.PartSet @@ -1331,8 +1465,17 @@ func (cs *State) defaultDecideProposal(height int64, round int32) { block, blockParts = cs.ValidBlock, cs.ValidBlockParts } else { // Create a new proposal block from state/txs from the mempool. - block, blockParts = cs.createProposalBlock() - if block == nil { + var err error + block, err = cs.createProposalBlock(ctx) + if err != nil { + cs.logger.Error("unable to create proposal block", "error", err) + return + } else if block == nil { + return + } + blockParts, err = block.MakePartSet(types.BlockPartSizeBytes) + if err != nil { + cs.logger.Error("unable to create proposal block part set", "error", err) return } } @@ -1340,24 +1483,23 @@ func (cs *State) defaultDecideProposal(height int64, round int32) { // Flush the WAL. 
Otherwise, we may not recompute the same proposal to sign, // and the privValidator will refuse to sign anything. if err := cs.wal.FlushAndSync(); err != nil { - cs.Logger.Error("failed flushing WAL to disk") + cs.logger.Error("failed flushing WAL to disk") } // Make proposal - propBlockID := block.BlockID() - propBlockID.PartSetHeader = blockParts.Header() + propBlockID := types.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()} proposedChainLockHeight := cs.state.LastCoreChainLockedBlockHeight if cs.blockExec.NextCoreChainLock != nil && cs.blockExec.NextCoreChainLock.CoreBlockHeight > proposedChainLockHeight { proposedChainLockHeight = cs.blockExec.NextCoreChainLock.CoreBlockHeight } - proposal := types.NewProposal(height, proposedChainLockHeight, round, cs.ValidRound, propBlockID) + proposal := types.NewProposal(height, proposedChainLockHeight, round, cs.ValidRound, propBlockID, block.Header.Time) p := proposal.ToProto() validatorsAtProposalHeight := cs.state.ValidatorsAtHeight(p.Height) quorumHash := validatorsAtProposalHeight.QuorumHash - proTxHash, err := cs.privValidator.GetProTxHash(context.Background()) + proTxHash, err := cs.privValidator.GetProTxHash(ctx) if err != nil { - cs.Logger.Error( + cs.logger.Error( "propose step; failed signing proposal; couldn't get proTxHash", "height", height, "round", round, @@ -1365,9 +1507,9 @@ func (cs *State) defaultDecideProposal(height int64, round int32) { ) return } - pubKey, err := cs.privValidator.GetPubKey(context.Background(), quorumHash) + pubKey, err := cs.privValidator.GetPubKey(ctx, quorumHash) if err != nil { - cs.Logger.Error( + cs.logger.Error( "propose step; failed signing proposal; couldn't get pubKey", "height", height, "round", round, @@ -1376,7 +1518,7 @@ func (cs *State) defaultDecideProposal(height int64, round int32) { return } messageBytes := types.ProposalBlockSignBytes(cs.state.ChainID, p) - cs.Logger.Debug( + cs.logger.Debug( "signing proposal", "height", proposal.Height, "round", proposal.Round, @@ -1387,9 +1529,9 @@ func (cs *State) defaultDecideProposal(height int64, round int32) { "quorumHash", quorumHash.ShortString(), ) // wait the max amount we would wait for a proposal - ctx, cancel := context.WithTimeout(context.TODO(), cs.config.TimeoutPropose) + ctxto, cancel := context.WithTimeout(ctx, cs.state.ConsensusParams.Timeout.Propose) defer cancel() - if _, err := cs.privValidator.SignProposal(ctx, + if _, err := cs.privValidator.SignProposal(ctxto, cs.state.ChainID, validatorsAtProposalHeight.QuorumType, quorumHash, @@ -1398,16 +1540,16 @@ func (cs *State) defaultDecideProposal(height int64, round int32) { proposal.Signature = p.Signature // send proposal and block parts on internal msg queue - cs.sendInternalMessage(msgInfo{&ProposalMessage{proposal}, ""}) + cs.sendInternalMessage(ctx, msgInfo{&ProposalMessage{proposal}, "", tmtime.Now()}) for i := 0; i < int(blockParts.Total()); i++ { part := blockParts.GetPart(i) - cs.sendInternalMessage(msgInfo{&BlockPartMessage{cs.Height, cs.Round, part}, ""}) + cs.sendInternalMessage(ctx, msgInfo{&BlockPartMessage{cs.Height, cs.Round, part}, "", tmtime.Now()}) } - cs.Logger.Debug("signed proposal", "height", height, "round", round, "proposal", proposal, "pubKey", pubKey.HexString()) + cs.logger.Debug("signed proposal", "height", height, "round", round, "proposal", proposal, "pubKey", pubKey.HexString()) } else if !cs.replayMode { - cs.Logger.Error("propose step; failed signing proposal", "height", height, "round", round, "err", err) + cs.logger.Error("propose 
step; failed signing proposal", "height", height, "round", round, "err", err) } } @@ -1434,9 +1576,9 @@ func (cs *State) isProposalComplete() bool { // // NOTE: keep it side-effect free for clarity. // CONTRACT: cs.privValidator is not nil. -func (cs *State) createProposalBlock() (block *types.Block, blockParts *types.PartSet) { +func (cs *State) createProposalBlock(ctx context.Context) (*types.Block, error) { if cs.privValidator == nil { - panic("entered createProposalBlock with privValidator being nil") + return nil, errors.New("entered createProposalBlock with privValidator being nil") } var commit *types.Commit @@ -1450,27 +1592,36 @@ func (cs *State) createProposalBlock() (block *types.Block, blockParts *types.Pa commit = cs.LastCommit default: // This shouldn't happen. - cs.Logger.Error("propose step; cannot propose anything without commit for the previous block") - return + cs.logger.Error("propose step; cannot propose anything without commit for the previous block") + return nil, nil } if cs.privValidatorProTxHash == nil { // If this node is a validator & proposer in the current round, it will // miss the opportunity to create a block. - cs.Logger.Error("propose step; empty priv validator pro tx hash", "err", errProTxHashIsNotSet) - return + cs.logger.Error("propose step; empty priv validator pro tx hash", "err", errProTxHashIsNotSet) + return nil, nil } proposerProTxHash := cs.privValidatorProTxHash - return cs.blockExec.CreateProposalBlock(cs.Height, cs.state, commit, proposerProTxHash, cs.proposedAppVersion) + votes := cs.LastPrecommits.GetVotes() + + ret, err := cs.blockExec.CreateProposalBlock(ctx, cs.Height, cs.state, commit, proposerProTxHash, cs.proposedAppVersion, votes) + if err != nil { + panic(err) + } + return ret, nil } // Enter: `timeoutPropose` after entering Propose. // Enter: proposal block and POL is ready. -// Prevote for LockedBlock if we're locked, or ProposalBlock if valid. -// Otherwise vote nil. +// If we received a valid proposal within this round and we are not locked on a block, +// we will prevote for the block. +// Otherwise, if we receive a valid proposal that matches the block we are +// locked on or matches a block that received a POL in a round later than our +// locked round, prevote for the proposal; otherwise, vote nil.
+func (cs *State) enterPrevote(ctx context.Context, height int64, round int32, allowOldBlocks bool) { + logger := cs.logger.With("height", height, "round", round) if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevote <= cs.Step) { logger.Debug( @@ -1488,45 +1639,152 @@ func (cs *State) enterPrevote(height int64, round int32, allowOldBlocks bool) { logger.Debug("entering prevote step", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step)) - // SignDigest and broadcast vote as necessary - cs.doPrevote(height, round, allowOldBlocks) + // Sign and broadcast vote as necessary + cs.doPrevote(ctx, height, round, allowOldBlocks) // Once `addVote` hits any +2/3 prevotes, we will go to PrevoteWait // (so we have more time to try and collect +2/3 prevotes for a single block) } -func (cs *State) defaultDoPrevote(height int64, round int32, allowOldBlocks bool) { - logger := cs.Logger.With("height", height, "round", round) +func (cs *State) proposalIsTimely() bool { + sp := cs.state.ConsensusParams.Synchrony.SynchronyParamsOrDefaults() + return cs.Proposal.IsTimely(cs.ProposalReceiveTime, sp, cs.Round) +} - // If a block is locked, prevote that. - if cs.LockedBlock != nil { - logger.Debug("prevote step; already locked on a block; prevoting locked block") - cs.signAddVote(tmproto.PrevoteType, cs.LockedBlock.Hash(), cs.LockedBlockParts.Header()) +func (cs *State) defaultDoPrevote(ctx context.Context, height int64, round int32, allowOldBlocks bool) { + logger := cs.logger.With("height", height, "round", round) + + // If no proposal block was received within this round, we are executing this from a timeout; prevote nil. + if cs.ProposalBlock == nil { + logger.Debug("prevote step: ProposalBlock is nil; prevoting nil") + cs.signAddVote(ctx, tmproto.PrevoteType, nil, types.PartSetHeader{}) return } - // If ProposalBlock is nil, prevote nil. - if cs.ProposalBlock == nil { - logger.Debug("prevote step: ProposalBlock is nil") - cs.signAddVote(tmproto.PrevoteType, nil, types.PartSetHeader{}) + if cs.Proposal == nil { + logger.Debug("prevote step: did not receive proposal; prevoting nil") + cs.signAddVote(ctx, tmproto.PrevoteType, nil, types.PartSetHeader{}) return } - // Validate proposal block - err := cs.blockExec.ValidateBlock(cs.state, cs.ProposalBlock) + if !cs.Proposal.Timestamp.Equal(cs.ProposalBlock.Header.Time) { + logger.Debug("prevote step: proposal timestamp not equal; prevoting nil") + cs.signAddVote(ctx, tmproto.PrevoteType, nil, types.PartSetHeader{}) + return + } + + sp := cs.state.ConsensusParams.Synchrony.SynchronyParamsOrDefaults() + if cs.Proposal.POLRound == -1 && cs.LockedRound == -1 && !cs.proposalIsTimely() { + logger.Debug("prevote step: Proposal is not timely; prevoting nil", + "proposed", + tmtime.Canonical(cs.Proposal.Timestamp).Format(time.RFC3339Nano), + "received", + tmtime.Canonical(cs.ProposalReceiveTime).Format(time.RFC3339Nano), + "msg_delay", + sp.MessageDelay, + "precision", + sp.Precision) + cs.signAddVote(ctx, tmproto.PrevoteType, nil, types.PartSetHeader{}) + return + } + + // Validate proposal block, from Tendermint's perspective + err := cs.blockExec.ValidateBlock(ctx, cs.state, cs.ProposalBlock) if err != nil { // ProposalBlock is invalid, prevote nil.
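The timeliness gate above implements proposer-based timestamps: a first-time proposal is prevote-eligible only if it arrived within a synchrony window around its own timestamp, and (per the enterPropose hunk earlier) a proposer first waits until its local clock passes the previous block time. A stand-alone sketch of both rules, assuming the simple round-0 form of the window; the real checks live in Proposal.IsTimely and proposerWaitTime and may scale MessageDelay with the round:

package main

import (
	"fmt"
	"time"
)

// isTimely: a proposal stamped ts and received at recv is timely when
// ts - precision <= recv <= ts + msgDelay + precision.
func isTimely(ts, recv time.Time, msgDelay, precision time.Duration) bool {
	earliest := ts.Add(-precision)
	latest := ts.Add(msgDelay + precision)
	return !recv.Before(earliest) && !recv.After(latest)
}

// proposerWait: how long a proposer sleeps so that its local clock is past
// the previous block's timestamp before proposing.
func proposerWait(now, lastBlockTime time.Time) time.Duration {
	if lastBlockTime.After(now) {
		return lastBlockTime.Sub(now)
	}
	return 0
}

func main() {
	ts := time.Now()
	fmt.Println(isTimely(ts, ts.Add(100*time.Millisecond), 500*time.Millisecond, 10*time.Millisecond)) // true
	fmt.Println(isTimely(ts, ts.Add(time.Second), 500*time.Millisecond, 10*time.Millisecond))          // false: arrived too late
	fmt.Println(proposerWait(ts, ts.Add(250*time.Millisecond)))                                        // 250ms
}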
- logger.Error("prevote step: ProposalBlock is invalid", "err", err) - cs.signAddVote(tmproto.PrevoteType, nil, types.PartSetHeader{}) + logger.Error("prevote step: consensus deems this block invalid; prevoting nil", + "err", err) + cs.signAddVote(ctx, tmproto.PrevoteType, nil, types.PartSetHeader{}) + return + } + + /* + The block has now passed Tendermint's validation rules. + Before prevoting the block received from the proposer for the current round and height, + we request the Application, via the ProcessProposal, ABCI call to confirm that the block is + valid. If the Application does not accept the block, Tendermint prevotes nil. + + WARNING: misuse of block rejection by the Application can seriously compromise Tendermint's + liveness properties. Please see PrepareProposal-ProcessProposal coherence and determinism + properties in the ABCI++ specification. + */ + isAppValid, err := cs.blockExec.ProcessProposal(ctx, cs.ProposalBlock, cs.state) + if err != nil { + panic(fmt.Sprintf("ProcessProposal: %v", err)) + } + + // Vote nil if the Application rejected the block + if !isAppValid { + logger.Error("prevote step: state machine rejected a proposed block; this should not happen:"+ + "the proposer may be misbehaving; prevoting nil", "err", err) + cs.signAddVote(ctx, tmproto.PrevoteType, nil, types.PartSetHeader{}) return } + /* + 22: upon from proposer(h_p, round_p) while step_p = propose do + 23: if valid(v) && (lockedRound_p = −1 || lockedValue_p = v) then + 24: broadcast + + Here, cs.Proposal.POLRound corresponds to the -1 in the above algorithm rule. + This means that the proposer is producing a new proposal that has not previously + seen a 2/3 majority by the network. + + If we have already locked on a different value that is different from the proposed value, + we prevote nil since we are locked on a different value. Otherwise, if we're not locked on a block + or the proposal matches our locked block, we prevote the proposal. + */ + if cs.Proposal.POLRound == -1 { + if cs.LockedRound == -1 { + logger.Debug("prevote step: ProposalBlock is valid and there is no locked block; prevoting the proposal") + cs.signAddVote(ctx, tmproto.PrevoteType, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header()) + return + } + if cs.ProposalBlock.HashesTo(cs.LockedBlock.Hash()) { + logger.Debug("prevote step: ProposalBlock is valid and matches our locked block; prevoting the proposal") + cs.signAddVote(ctx, tmproto.PrevoteType, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header()) + return + } + } + + /* + 28: upon from proposer(h_p, round_p) AND 2f + 1 while + step_p = propose && (v_r ≥ 0 && v_r < round_p) do + 29: if valid(v) && (lockedRound_p ≤ v_r || lockedValue_p = v) then + 30: broadcast + + This rule is a bit confusing but breaks down as follows: + + If we see a proposal in the current round for value 'v' that lists its valid round as 'v_r' + AND this validator saw a 2/3 majority of the voting power prevote 'v' in round 'v_r', then we will + issue a prevote for 'v' in this round if 'v' is valid and either matches our locked value OR + 'v_r' is a round greater than or equal to our current locked round. + + 'v_r' can be a round greater than to our current locked round if a 2/3 majority of + the network prevoted a value in round 'v_r' but we did not lock on it, possibly because we + missed the proposal in round 'v_r'. 
+ */ + blockID, ok := cs.Votes.Prevotes(cs.Proposal.POLRound).TwoThirdsMajority() + if ok && cs.ProposalBlock.HashesTo(blockID.Hash) && cs.Proposal.POLRound >= 0 && cs.Proposal.POLRound < cs.Round { + if cs.LockedRound <= cs.Proposal.POLRound { + logger.Debug("prevote step: ProposalBlock is valid and received a 2/3 " + + "majority in a round later than the locked round; prevoting the proposal") + cs.signAddVote(ctx, tmproto.PrevoteType, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header()) + return + } + if cs.ProposalBlock.HashesTo(cs.LockedBlock.Hash()) { + logger.Debug("prevote step: ProposalBlock is valid and matches our locked block; prevoting the proposal") + cs.signAddVote(ctx, tmproto.PrevoteType, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header()) + return + } + } + // Validate proposal block - err = cs.blockExec.ValidateBlockChainLock(cs.state, cs.ProposalBlock) + err = cs.blockExec.ValidateBlockChainLock(ctx, cs.state, cs.ProposalBlock) if err != nil { // ProposalBlock is invalid, prevote nil. logger.Error("enterPrevote: ProposalBlock chain lock is invalid", "err", err) - cs.signAddVote(tmproto.PrevoteType, nil, types.PartSetHeader{}) + cs.signAddVote(ctx, tmproto.PrevoteType, nil, types.PartSetHeader{}) return } @@ -1536,21 +1794,19 @@ if err != nil { // ProposalBlock is invalid, prevote nil. logger.Error("enterPrevote: ProposalBlock time is invalid", "err", err) - cs.signAddVote(tmproto.PrevoteType, nil, types.PartSetHeader{}) + cs.signAddVote(ctx, tmproto.PrevoteType, nil, types.PartSetHeader{}) return } } - // Prevote cs.ProposalBlock - // NOTE: the proposal signature is validated when it is received, - // and the proposal block parts are validated as they are received (against the merkle hash in the proposal) - logger.Debug("prevote step: ProposalBlock is valid") - cs.signAddVote(tmproto.PrevoteType, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header()) + logger.Debug("prevote step: ProposalBlock is valid but was not our locked block or " + + "did not receive a more recent majority; prevoting nil") + cs.signAddVote(ctx, tmproto.PrevoteType, nil, types.PartSetHeader{}) } // Enter: any +2/3 prevotes at next round. func (cs *State) enterPrevoteWait(height int64, round int32) { - logger := cs.Logger.With("height", height, "round", round) + logger := cs.logger.With("height", height, "round", round) if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevoteWait <= cs.Step) { logger.Debug( @@ -1576,17 +1832,16 @@ }() // Wait for some more prevotes; enterPrecommit - cs.scheduleTimeout(cs.config.Prevote(round), height, round, cstypes.RoundStepPrevoteWait) + cs.scheduleTimeout(cs.voteTimeout(round), height, round, cstypes.RoundStepPrevoteWait) } // Enter: `timeoutPrevote` after any +2/3 prevotes. // Enter: `timeoutPrecommit` after any +2/3 precommits. // Enter: +2/3 precommits for block or nil. // Lock & precommit the ProposalBlock if we have enough prevotes for it (a POL in this round) -// else, unlock an existing lock and precommit nil if +2/3 of prevotes were nil, // else, precommit nil.
-func (cs *State) enterPrecommit(height int64, round int32) { - logger := cs.Logger.With("height", height, "round", round) +func (cs *State) enterPrecommit(ctx context.Context, height int64, round int32) { + logger := cs.logger.With("height", height, "round", round) if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrecommit <= cs.Step) { logger.Debug( @@ -1616,7 +1871,7 @@ logger.Debug("precommit step; no +2/3 prevotes during enterPrecommit; precommitting nil") } - cs.signAddVote(tmproto.PrecommitType, nil, types.PartSetHeader{}) + cs.signAddVote(ctx, tmproto.PrecommitType, nil, types.PartSetHeader{}) return } @@ -1631,47 +1886,50 @@ panic(fmt.Sprintf("this POLRound should be %v but got %v", round, polRound)) } - // +2/3 prevoted nil. Unlock and precommit nil. - if len(blockID.Hash) == 0 { - if cs.LockedBlock == nil { - logger.Debug("precommit step; +2/3 prevoted for nil") - } else { - logger.Debug("precommit step; +2/3 prevoted for nil; unlocking") - cs.LockedRound = -1 - cs.LockedBlock = nil - cs.LockedBlockParts = nil - - if err := cs.eventBus.PublishEventUnlock(cs.RoundStateEvent()); err != nil { - logger.Error("failed publishing event unlock", "err", err) - } - } + // +2/3 prevoted nil. Precommit nil. + if blockID.IsNil() { + logger.Debug("precommit step: +2/3 prevoted for nil; precommitting nil") + cs.signAddVote(ctx, tmproto.PrecommitType, nil, types.PartSetHeader{}) + return + } + // At this point, +2/3 prevoted for a particular block. - cs.signAddVote(tmproto.PrecommitType, nil, types.PartSetHeader{}) + // If we never received a proposal for this block, we must precommit nil + if cs.Proposal == nil || cs.ProposalBlock == nil { + logger.Debug("precommit step: did not receive proposal; precommitting nil") + cs.signAddVote(ctx, tmproto.PrecommitType, nil, types.PartSetHeader{}) return } - // At this point, +2/3 prevoted for a particular block. + // If the proposal time does not match the block time, precommit nil. + if !cs.Proposal.Timestamp.Equal(cs.ProposalBlock.Header.Time) { + logger.Debug("precommit step: proposal timestamp not equal; precommitting nil") + cs.signAddVote(ctx, tmproto.PrecommitType, nil, types.PartSetHeader{}) + return + } // If we're already locked on that block, precommit it, and update the LockedRound if cs.LockedBlock.HashesTo(blockID.Hash) { - logger.Debug("precommit step; +2/3 prevoted locked block; relocking") + logger.Debug("precommit step: +2/3 prevoted locked block; relocking") cs.LockedRound = round if err := cs.eventBus.PublishEventRelock(cs.RoundStateEvent()); err != nil { - logger.Error("failed publishing event relock", "err", err) + logger.Error("precommit step: failed publishing event relock", "err", err) } - cs.signAddVote(tmproto.PrecommitType, blockID.Hash, blockID.PartSetHeader) + cs.signAddVote(ctx, tmproto.PrecommitType, blockID.Hash, blockID.PartSetHeader) return } - // If +2/3 prevoted for proposal block, stage and precommit it + // If greater than 2/3 of the voting power on the network prevoted for + // the proposed block, update our locked block to this block and issue a + // precommit vote for it. if cs.ProposalBlock.HashesTo(blockID.Hash) { - logger.Debug("precommit step; +2/3 prevoted proposal block; locking", "hash", blockID.Hash) + logger.Debug("precommit step: +2/3 prevoted proposal block; locking", "hash", blockID.Hash) // Validate the block.
- if err := cs.blockExec.ValidateBlock(cs.state, cs.ProposalBlock); err != nil { - panic(fmt.Sprintf("precommit step; +2/3 prevoted for an invalid block: %v", err)) + if err := cs.blockExec.ValidateBlock(ctx, cs.state, cs.ProposalBlock); err != nil { + panic(fmt.Sprintf("precommit step: +2/3 prevoted for an invalid block: %v", err)) } cs.LockedRound = round @@ -1679,21 +1937,16 @@ cs.LockedBlockParts = cs.ProposalBlockParts if err := cs.eventBus.PublishEventLock(cs.RoundStateEvent()); err != nil { - logger.Error("failed publishing event lock", "err", err) + logger.Error("precommit step: failed publishing event lock", "err", err) } - cs.signAddVote(tmproto.PrecommitType, blockID.Hash, blockID.PartSetHeader) + cs.signAddVote(ctx, tmproto.PrecommitType, blockID.Hash, blockID.PartSetHeader) return } // There was a polka in this round for a block we don't have. - // Fetch that block, unlock, and precommit nil. - // The +2/3 prevotes for this round is the POL for our unlock. - logger.Debug("precommit step; +2/3 prevotes for a block we do not have; voting nil", "block_id", blockID) - - cs.LockedRound = -1 - cs.LockedBlock = nil - cs.LockedBlockParts = nil + // Fetch that block, and precommit nil. + logger.Debug("precommit step: +2/3 prevotes for a block we do not have; voting nil", "block_id", blockID) if !cs.ProposalBlockParts.HasHeader(blockID.PartSetHeader) { cs.ProposalBlock = nil @@ -1701,16 +1954,12 @@ cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartSetHeader) } - if err := cs.eventBus.PublishEventUnlock(cs.RoundStateEvent()); err != nil { - logger.Error("failed publishing event unlock", "err", err) - } - - cs.signAddVote(tmproto.PrecommitType, nil, types.PartSetHeader{}) + cs.signAddVote(ctx, tmproto.PrecommitType, nil, types.PartSetHeader{}) } // Enter: any +2/3 precommits for next round. func (cs *State) enterPrecommitWait(height int64, round int32) { - logger := cs.Logger.With("height", height, "round", round) + logger := cs.logger.With("height", height, "round", round) if cs.Height != height || round < cs.Round || (cs.Round == round && cs.TriggeredTimeoutPrecommit) { logger.Debug( @@ -1737,12 +1986,12 @@ }() // wait for some more precommits; enterNewRound - cs.scheduleTimeout(cs.config.Precommit(round), height, round, cstypes.RoundStepPrecommitWait) + cs.scheduleTimeout(cs.voteTimeout(round), height, round, cstypes.RoundStepPrecommitWait) } // Enter: +2/3 precommits for block -func (cs *State) enterCommit(height int64, commitRound int32) { - logger := cs.Logger.With("height", height, "commit_round", commitRound) +func (cs *State) enterCommit(ctx context.Context, height int64, commitRound int32) { + logger := cs.logger.With("height", height, "commit_round", commitRound) if cs.Height != height || cstypes.RoundStepApplyCommit <= cs.Step { logger.Debug( @@ -1763,7 +2012,7 @@ cs.newStep() // Maybe finalize immediately. - cs.tryFinalizeCommit(height) + cs.tryFinalizeCommit(ctx, height) }() blockID, ok := cs.Votes.Precommits(commitRound).TwoThirdsMajority() @@ -1809,15 +2058,15 @@ } // If we have the block AND +2/3 commits for it, finalize.
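The precommit branches above reduce to a small decision table; the following pure-function sketch distills them (illustrative types, not the State API; the proposal-timestamp check is folded into haveProposal):

package main

import "fmt"

// precommitTarget distills enterPrecommit: given the polka (the block hash
// with +2/3 prevotes this round; "" means nil or no polka), the locked block
// and the proposal we hold, return the hash to precommit ("" = precommit nil).
func precommitTarget(polka, locked, proposal string, haveProposal bool) string {
	switch {
	case polka == "":
		return "" // +2/3 prevoted nil, or there is no +2/3 at all
	case !haveProposal:
		return "" // polka for a block we never received a proposal for
	case polka == locked:
		return polka // relock and precommit our locked block
	case polka == proposal:
		return polka // lock the proposal and precommit it
	default:
		return "" // polka for a block we don't have: precommit nil
	}
}

func main() {
	fmt.Println(precommitTarget("B1", "B1", "B1", true) == "B1") // relock
	fmt.Println(precommitTarget("B2", "", "B2", true) == "B2")   // lock the new proposal
	fmt.Println(precommitTarget("B3", "B1", "B2", true) == "")   // unknown block: nil
}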
-func (cs *State) tryFinalizeCommit(height int64) { - logger := cs.Logger.With("height", height) +func (cs *State) tryFinalizeCommit(ctx context.Context, height int64) { + logger := cs.logger.With("height", height) if cs.Height != height { panic(fmt.Sprintf("tryFinalizeCommit() cs.Height: %v vs height: %v", cs.Height, height)) } blockID, ok := cs.Votes.Precommits(cs.CommitRound).TwoThirdsMajority() - if !ok || len(blockID.Hash) == 0 { + if !ok || blockID.IsNil() { logger.Error("failed attempt to finalize commit; there was no +2/3 majority or +2/3 was for nil") return } @@ -1833,12 +2082,12 @@ return } - cs.finalizeCommit(height) + cs.finalizeCommit(ctx, height) } // Increment height and goto cstypes.RoundStepNewHeight -func (cs *State) finalizeCommit(height int64) { - logger := cs.Logger.With("height", height) +func (cs *State) finalizeCommit(ctx context.Context, height int64) { + logger := cs.logger.With("height", height) if cs.Height != height || cs.Step != cstypes.RoundStepApplyCommit { logger.Debug( @@ -1861,7 +2110,7 @@ panic("cannot finalize commit; proposal block does not hash to commit hash") } - if err := cs.blockExec.ValidateBlock(cs.state, block); err != nil { + if err := cs.blockExec.ValidateBlock(ctx, cs.state, block); err != nil { panic(fmt.Errorf("+2/3 committed an invalid block: %w", err)) } @@ -1873,24 +2122,23 @@ ) logger.Debug(fmt.Sprintf("%v", block)) - fail.Fail() // XXX - + // Save to blockStore. if cs.blockStore.Height() < block.Height { // NOTE: the seenCommit is local justification to commit this block, // but may differ from the LastPrecommits included in the next block precommits := cs.Votes.Precommits(cs.CommitRound) seenCommit := precommits.MakeCommit() - cs.applyCommit(seenCommit, logger) + cs.applyCommit(ctx, seenCommit, logger) } else { // Happens during replay if we already saved the block but didn't commit logger.Debug("calling finalizeCommit on already stored block", "height", block.Height) // TODO: do we need this? - cs.applyCommit(nil, logger) + cs.applyCommit(ctx, nil, logger) } } // If we received a commit message from an external source, try to add it and then finalize it. -func (cs *State) tryAddCommit(commit *types.Commit, peerID types.NodeID) (bool, error) { +func (cs *State) tryAddCommit(ctx context.Context, commit *types.Commit, peerID types.NodeID) (bool, error) { // Let's only add one remote commit if cs.Commit != nil { return false, nil } @@ -1901,14 +2149,14 @@ // We need to first verify that the commit received wasn't for a future round, // If it was, then we must go to the next round if commit.Height == rs.Height && commit.Round > rs.Round { - cs.Logger.Debug("Commit received for a later round", "height", commit.Height, "our round", + cs.logger.Debug("Commit received for a later round", "height", commit.Height, "our round", rs.Round, "commit round", commit.Round) - verified, err := cs.verifyCommit(commit, peerID, true) + verified, err := cs.verifyCommit(ctx, commit, peerID, true) if err != nil { return false, err } if verified { - cs.enterNewRound(cs.Height, commit.Round) + cs.enterNewRound(ctx, cs.Height, commit.Round) // We are now going to receive the block, so initialize the block parts.
if cs.ProposalBlockParts == nil { cs.ProposalBlockParts = types.NewPartSetFromHeader(commit.BlockID.PartSetHeader) @@ -1919,7 +2167,7 @@ } // First let's verify that the commit is what we are expecting - verified, err := cs.verifyCommit(commit, peerID, false) + verified, err := cs.verifyCommit(ctx, commit, peerID, false) if !verified || err != nil { return verified, err } @@ -1932,16 +2180,10 @@ return false, nil } - added, err := cs.addCommit(commit) - if err != nil { - return added, ErrAddingCommit - } - return added, nil + return cs.addCommit(ctx, commit) } -func (cs *State) verifyCommit( - commit *types.Commit, peerID types.NodeID, ignoreProposalBlock bool, -) (verified bool, err error) { +func (cs *State) verifyCommit(ctx context.Context, commit *types.Commit, peerID types.NodeID, ignoreProposalBlock bool) (verified bool, err error) { // Let's first do some basic commit validation before more complicated commit verification if err := commit.ValidateBasic(); err != nil { return false, fmt.Errorf("error validating commit: %v", err) } @@ -1953,11 +2195,11 @@ // A commit for the previous height? // These come in while we wait for timeoutCommit if commit.Height+1 == stateHeight { - cs.Logger.Debug("old commit ignored", "commit", commit) + cs.logger.Debug("old commit ignored", "commit", commit) return false, nil } - cs.Logger.Debug( + cs.logger.Debug( "verifying commit from remote", "commit_height", commit.Height, "cs_height", cs.Height, ) // Height mismatch is ignored. // Not necessarily a bad peer, but not favorable behavior.
if commit.Height != stateHeight { - cs.Logger.Debug( + cs.logger.Debug( "commit ignored and not added", "commit_height", commit.Height, @@ -1980,7 +2222,7 @@ if commit.BlockID.Hash != nil && !bytes.Equal(commit.StateID.LastAppHash, cs.state.AppHash) { err = errors.New("commit state last app hash does not match the known state app hash") - cs.Logger.Error("commit ignored because sending wrong app hash", "voteHeight", commit.Height, + cs.logger.Error("commit ignored because sending wrong app hash", "voteHeight", commit.Height, "csHeight", cs.Height, "peerID", peerID) return false, err } @@ -1989,9 +2231,9 @@ if rs.Proposal == nil || ignoreProposalBlock { if ignoreProposalBlock { - cs.Logger.Info("Commit verified for future round", "height", commit.Height, "round", commit.Round) + cs.logger.Info("Commit verified for future round", "height", commit.Height, "round", commit.Round) } else { - cs.Logger.Info("Commit came in before proposal", "height", commit.Height, "round", commit.Round) + cs.logger.Info("Commit came in before proposal", "height", commit.Height, "round", commit.Round) } // We need to verify that it was properly signed @@ -2001,7 +2243,7 @@ } if !cs.ProposalBlockParts.HasHeader(commit.BlockID.PartSetHeader) { - cs.Logger.Info("setting proposal block parts from commit", "partSetHeader", commit.BlockID.PartSetHeader) + cs.logger.Info("setting proposal block parts from commit", "partSetHeader", commit.BlockID.PartSetHeader) cs.ProposalBlockParts = types.NewPartSetFromHeader(commit.BlockID.PartSetHeader) } @@ -2028,7 +2270,7 @@ return false, fmt.Errorf("expected ProposalBlockParts header to match the commit header") } if !block.HashesTo(commit.BlockID.Hash) { - cs.Logger.Error("proposal block does not hash to commit hash", + cs.logger.Error("proposal block does not hash to commit hash", "block", block, "commit", commit, "complete_proposal", cs.isProposalComplete(), ) return false, fmt.Errorf("cannot finalize commit; proposal block does not hash to commit hash") } - if err := cs.blockExec.ValidateBlock(cs.state, block); err != nil { + if err := cs.blockExec.ValidateBlock(ctx, cs.state, block); err != nil { return false, fmt.Errorf("+2/3 committed an invalid block: %w", err) } return true, nil } -func (cs *State) addCommit(commit *types.Commit) (added bool, err error) { +func (cs *State) addCommit(ctx context.Context, commit *types.Commit) (added bool, err error) { // The commit is all good, let's apply it to the state - cs.updateProposalBlockAndPartsBeforeCommit(commit.BlockID, cs.Logger) + cs.updateProposalBlockAndPartsBeforeCommit(commit.BlockID, cs.logger) cs.updateRoundStep(cs.Round, cstypes.RoundStepApplyCommit) cs.CommitRound = commit.Round @@ -2052,23 +2294,23 @@ cs.newStep() // The commit is all good, let's apply it to the state - cs.applyCommit(commit, cs.Logger) + cs.applyCommit(ctx, commit, cs.logger) // This will relay the commit to peers if err := cs.PublishCommitEvent(commit); err != nil { - return added, err + return false, fmt.Errorf("error adding commit: %w", err) } - if cs.config.SkipTimeoutCommit { - cs.enterNewRound(cs.Height, 0) + if cs.bypassCommitTimeout() { + cs.enterNewRound(ctx, cs.Height, 0) } - return added, err + return true, nil } // PublishCommitEvent publishes a commit event to the event bus.
func (cs *State) PublishCommitEvent(commit *types.Commit) error { - cs.Logger.Debug("publish commit event", "commit", commit) + cs.logger.Debug("publish commit event", "commit", commit) if err := cs.eventBus.PublishEventCommit(types.EventDataCommit{Commit: commit}); err != nil { return err } @@ -2076,7 +2318,7 @@ return nil } -func (cs *State) applyCommit(commit *types.Commit, logger log.Logger) { +func (cs *State) applyCommit(ctx context.Context, commit *types.Commit, logger log.Logger) { logger.Info("applying commit", "commit", commit) var height int64 @@ -2090,8 +2332,6 @@ height = cs.Height } - fail.Fail() // XXX - // Write EndHeightMessage{} for this height, implying that the blockstore // has saved the block. // @@ -2107,20 +2347,19 @@ // restart). endMsg := EndHeightMessage{height} if err := cs.wal.WriteSync(endMsg); err != nil { // NOTE: fsync - panic(fmt.Sprintf( - "failed to write %v msg to consensus WAL due to %v; check your file system and restart the node", + panic(fmt.Errorf( + "failed to write %v msg to consensus WAL due to %w; check your file system and restart the node", endMsg, err, )) } - fail.Fail() // XXX - // Create a copy of the state for staging and an event cache for txs. stateCopy := cs.state.Copy() // Execute and commit the block, update and save the state, and update the mempool. // NOTE: The block.AppHash won't reflect these txs until the next block. - stateCopy, err := cs.blockExec.ApplyBlockWithLogger( + stateCopy, err := cs.blockExec.ApplyBlock( + ctx, stateCopy, cs.privValidatorProTxHash, types.BlockID{ @@ -2128,22 +2367,22 @@ PartSetHeader: blockParts.Header(), }, block, - logger, ) if err != nil { logger.Error("failed to apply block", "err", err) return } - fail.Fail() // XXX - // must be called before we update state cs.RecordMetrics(height, block) // NewHeightStep! cs.updateToState(stateCopy, commit) - fail.Fail() // XXX + // Private validator might have changed its key pair => refetch pubkey. + if err := cs.updatePrivValidatorProTxHash(ctx); err != nil { + logger.Error("failed to get private validator pubkey", "err", err) + } // cs.StartTime is already set. // Schedule Round0 to start soon. @@ -2171,7 +2410,7 @@ if cs.privValidator != nil { if cs.privValidatorProTxHash == nil { // Metrics won't be updated, but it's not critical.
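applyCommit above fsyncs an EndHeightMessage to the WAL before executing the block, so replay after a crash can tell a fully finished height apart from one whose block was saved but not yet applied. A toy sketch of that ordering, with a stand-in WAL interface rather than the real one:

package main

import "fmt"

// wal is a stand-in for the consensus write-ahead log.
type wal interface {
	WriteSync(msg string) error // fsync before returning
}

type memWAL struct{ entries []string }

func (m *memWAL) WriteSync(msg string) error {
	m.entries = append(m.entries, msg)
	return nil
}

// commitHeight sketches the ordering in applyCommit: durably record the
// end-of-height marker first, then execute the block. A WAL write failure
// is fatal, mirroring the panic above.
func commitHeight(w wal, height int64, apply func() error) error {
	if err := w.WriteSync(fmt.Sprintf("EndHeightMessage{%d}", height)); err != nil {
		panic(fmt.Errorf("failed to write WAL: %w", err))
	}
	return apply()
}

func main() {
	w := &memWAL{}
	_ = commitHeight(w, 42, func() error { return nil })
	fmt.Println(w.entries) // [EndHeightMessage{42}]
}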
- cs.Logger.Error(fmt.Sprintf("recordMetrics: %v", errProTxHashIsNotSet)) + cs.logger.Error(fmt.Sprintf("recordMetrics: %v", errProTxHashIsNotSet)) } } } @@ -2184,7 +2423,7 @@ func (cs *State) RecordMetrics(height int64, block *types.Block) { byzantineValidatorsCount int64 ) - for _, ev := range block.Evidence.Evidence { + for _, ev := range block.Evidence { if dve, ok := ev.(*types.DuplicateVoteEvidence); ok { if _, val := cs.Validators.GetByProTxHash(dve.VoteA.ValidatorProTxHash); val != nil { byzantineValidatorsCount++ @@ -2212,10 +2451,10 @@ func (cs *State) RecordMetrics(height int64, block *types.Block) { //----------------------------------------------------------------------------- -func (cs *State) defaultSetProposal(proposal *types.Proposal) error { +func (cs *State) defaultSetProposal(proposal *types.Proposal, recvTime time.Time) error { // Already have one // TODO: possibly catch double proposals - if cs.Proposal != nil { + if cs.Proposal != nil || proposal == nil { return nil } @@ -2261,7 +2500,7 @@ func (cs *State) defaultSetProposal(proposal *types.Proposal) error { case proposer.PubKey != nil: // We are part of the validator set if !proposer.PubKey.VerifySignatureDigest(proposalBlockSignID, proposal.Signature) { - cs.Logger.Debug( + cs.logger.Debug( "error verifying signature", "height", proposal.Height, "cs_height", height, @@ -2279,7 +2518,7 @@ func (cs *State) defaultSetProposal(proposal *types.Proposal) error { // We might have a commit already for the Round State // We need to verify that the commit block id is equal to the proposal block id if !proposal.BlockID.Equals(cs.Commit.BlockID) { - cs.Logger.Debug("proposal blockId isn't the same as the commit blockId", "height", proposal.Height, + cs.logger.Debug("proposal blockId isn't the same as the commit blockId", "height", proposal.Height, "round", proposal.Round, "proposer", proposer.ProTxHash.ShortString()) return ErrInvalidProposalForCommit } @@ -2290,6 +2529,8 @@ func (cs *State) defaultSetProposal(proposal *types.Proposal) error { proposal.Signature = p.Signature cs.Proposal = proposal + cs.ProposalReceiveTime = recvTime + cs.calculateProposalTimestampDifferenceMetric() // We don't update cs.ProposalBlockParts if it is already set. // This happens if we're already in cstypes.RoundStepApplyCommit or if there is a valid block in the current round. // TODO: We can check if Proposal is for a different block as this is a sign of misbehavior! @@ -2298,7 +2539,7 @@ func (cs *State) defaultSetProposal(proposal *types.Proposal) error { cs.ProposalBlockParts = types.NewPartSetFromHeader(proposal.BlockID.PartSetHeader) } - cs.Logger.Info("received proposal", "proposal", proposal) + cs.logger.Info("received proposal", "proposal", proposal) return nil } @@ -2306,15 +2547,15 @@ func (cs *State) defaultSetProposal(proposal *types.Proposal) error { // Asynchronously triggers either enterPrevote (before we timeout of propose) or tryFinalizeCommit, // once we have the full block. 
func (cs *State) addProposalBlockPart( + ctx context.Context, msg *BlockPartMessage, peerID types.NodeID, - fromReplay bool, ) (added bool, err error) { height, round, part := msg.Height, msg.Round, msg.Part // Blocks might be reused, so round mismatch is OK if cs.Height != height { - cs.Logger.Debug( + cs.logger.Debug( "received block part from wrong height", "height", cs.Height, "round", cs.Round, @@ -2329,7 +2570,7 @@ func (cs *State) addProposalBlockPart( cs.metrics.BlockGossipPartsReceived.With("matches_current", "false").Add(1) // NOTE: this can happen when we've gone to a higher round and // then receive parts from the previous round - not necessarily a bad peer. - cs.Logger.Debug( + cs.logger.Debug( "received a block part when we are not expecting any", "height", cs.Height, "round", cs.Round, @@ -2383,67 +2624,19 @@ func (cs *State) addProposalBlockPart( cs.ProposalBlock = block // NOTE: it's possible to receive complete proposal blocks for future rounds without having the proposal - cs.Logger.Info("received complete proposal block", "height", cs.ProposalBlock.Height, - "hash", cs.ProposalBlock.Hash()) + cs.logger.Info("received complete proposal block", "height", cs.ProposalBlock.Height, "hash", cs.ProposalBlock.Hash()) if err := cs.eventBus.PublishEventCompleteProposal(cs.CompleteProposalEvent()); err != nil { - cs.Logger.Error("failed publishing event complete proposal", "err", err) + cs.logger.Error("failed publishing event complete proposal", "err", err) } - if cs.Commit == nil { - // No commit has come in yet allowing the fast forwarding of these steps - // Update Valid* if we can. - prevotes := cs.Votes.Prevotes(cs.Round) - blockID, hasThreshold := prevotes.TwoThirdsMajority() - - if hasThreshold && !blockID.IsZero() && (cs.ValidRound < cs.Round) { - if cs.ProposalBlock.HashesTo(blockID.Hash) { - cs.Logger.Debug( - "updating valid block to new proposal block", - "valid_round", cs.Round, - "valid_block_hash", cs.ProposalBlock.Hash(), - ) - - cs.ValidRound = cs.Round - cs.ValidBlock = cs.ProposalBlock - cs.ValidBlockParts = cs.ProposalBlockParts - } - } - - if cs.Step <= cstypes.RoundStepPropose && cs.isProposalComplete() { - // Move onto the next step - // We should allow old blocks if we are recovering from replay - allowOldBlocks := fromReplay - cs.Logger.Debug("entering prevote after complete proposal", "height", cs.ProposalBlock.Height, - "hash", cs.ProposalBlock.Hash()) - cs.enterPrevote(height, cs.Round, allowOldBlocks) - if hasThreshold { // this is optimisation as this will be triggered when prevote is added - cs.Logger.Debug( - "entering precommit after complete proposal with threshold received", - "height", - cs.ProposalBlock.Height, - "hash", - cs.ProposalBlock.Hash(), - ) - cs.enterPrecommit(height, cs.Round) - } - } else if cs.Step == cstypes.RoundStepApplyCommit { - // If we're waiting on the proposal block... 
- cs.Logger.Debug("trying to finalize commit after complete proposal", "height", cs.ProposalBlock.Height, - "hash", cs.ProposalBlock.Hash()) - cs.tryFinalizeCommit(height) - } - } else { - cs.Logger.Info("Proposal block fully received", "proposal", cs.ProposalBlock) - cs.Logger.Info("Commit already present", "commit", cs.Commit) - cs.Logger.Debug("adding commit after complete proposal", "height", cs.ProposalBlock.Height, + if cs.Commit != nil { + cs.logger.Info("Proposal block fully received", "proposal", cs.ProposalBlock) + cs.logger.Info("Commit already present", "commit", cs.Commit) + cs.logger.Debug("adding commit after complete proposal", "height", cs.ProposalBlock.Height, "hash", cs.ProposalBlock.Hash()) // We received a commit before the block - added, err := cs.addCommit(cs.Commit) - if err != nil { - return added, ErrAddingCommit - } - return added, nil + return cs.addCommit(ctx, cs.Commit) } return added, nil @@ -2452,9 +2645,57 @@ func (cs *State) addProposalBlockPart( return added, nil } +func (cs *State) handleCompleteProposal(ctx context.Context, height int64, fromReplay bool) { + // Update Valid* if we can. + prevotes := cs.Votes.Prevotes(cs.Round) + blockID, hasTwoThirds := prevotes.TwoThirdsMajority() + if hasTwoThirds && !blockID.IsNil() && (cs.ValidRound < cs.Round) { + if cs.ProposalBlock.HashesTo(blockID.Hash) { + cs.logger.Debug( + "updating valid block to new proposal block", + "valid_round", cs.Round, + "valid_block_hash", cs.ProposalBlock.Hash(), + ) + + cs.ValidRound = cs.Round + cs.ValidBlock = cs.ProposalBlock + cs.ValidBlockParts = cs.ProposalBlockParts + } + // TODO: In case there is +2/3 majority in Prevotes set for some + // block and cs.ProposalBlock contains different block, either + // proposer is faulty or voting power of faulty processes is more + // than 1/3. We should trigger in the future accountability + // procedure at this point. + } + + if cs.Step <= cstypes.RoundStepPropose && cs.isProposalComplete() { + // Move onto the next step + // We should allow old blocks if we are recovering from replay + allowOldBlocks := fromReplay + cs.logger.Debug("entering prevote after complete proposal", "height", cs.ProposalBlock.Height, + "hash", cs.ProposalBlock.Hash()) + cs.enterPrevote(ctx, height, cs.Round, allowOldBlocks) + if hasTwoThirds { // this is optimisation as this will be triggered when prevote is added + cs.logger.Debug( + "entering precommit after complete proposal with threshold received", + "height", + cs.ProposalBlock.Height, + "hash", + cs.ProposalBlock.Hash(), + ) + cs.enterPrecommit(ctx, height, cs.Round) + } + } else if cs.Step == cstypes.RoundStepApplyCommit { + // If we're waiting on the proposal block... + cs.logger.Debug("trying to finalize commit after complete proposal", "height", cs.ProposalBlock.Height, + "hash", cs.ProposalBlock.Hash()) + cs.tryFinalizeCommit(ctx, height) + } +} + // Attempt to add the vote. if its a duplicate signature, dupeout the validator -func (cs *State) tryAddVote(vote *types.Vote, peerID types.NodeID) (bool, error) { - added, err := cs.addVote(vote, peerID) +func (cs *State) tryAddVote(ctx context.Context, vote *types.Vote, peerID types.NodeID) (bool, error) { + added, err := cs.addVote(ctx, vote, peerID) if err != nil { // If the vote height is off, we'll just ignore it, // But if it's a conflicting sig, add it to the cs.evpool. 
@@ -2465,7 +2706,7 @@ func (cs *State) tryAddVote(vote *types.Vote, peerID types.NodeID) (bool, error) } if bytes.Equal(vote.ValidatorProTxHash, cs.privValidatorProTxHash) { - cs.Logger.Error( + cs.logger.Error( "found conflicting vote from ourselves; did you unsafe_reset a validator?", "height", vote.Height, "round", vote.Round, @@ -2477,21 +2718,22 @@ func (cs *State) tryAddVote(vote *types.Vote, peerID types.NodeID) (bool, error) // report conflicting votes to the evidence pool cs.evpool.ReportConflictingVotes(voteErr.VoteA, voteErr.VoteB) - cs.Logger.Debug("found and sent conflicting votes to the evidence pool", + cs.logger.Debug( + "found and sent conflicting votes to the evidence pool", "vote_a", voteErr.VoteA, "vote_b", voteErr.VoteB, ) return added, err } else if errors.Is(err, types.ErrVoteNonDeterministicSignature) { - cs.Logger.Debug("vote has non-deterministic signature", "err", err) + cs.logger.Debug("vote has non-deterministic signature", "err", err) } else { // Either // 1) bad peer OR // 2) not a bad peer? this can also err sometimes with "Unexpected step" OR // 3) tmkms use with multiple validators connecting to a single tmkms instance - // (https://github.com/tendermint/tendermint/issues/3839). - cs.Logger.Info("failed attempting to add vote", "err", err) + // (https://github.com/tendermint/tendermint/issues/3839). + cs.logger.Info("failed attempting to add vote", "err", err) return added, ErrAddingVote } } @@ -2499,8 +2741,12 @@ func (cs *State) tryAddVote(vote *types.Vote, peerID types.NodeID) (bool, error) return added, nil } -func (cs *State) addVote(vote *types.Vote, peerID types.NodeID) (added bool, err error) { - cs.Logger.Debug( +func (cs *State) addVote( + ctx context.Context, + vote *types.Vote, + peerID types.NodeID, +) (added bool, err error) { + cs.logger.Debug( "adding vote", "vote", vote, "height", cs.Height, @@ -2512,17 +2758,17 @@ func (cs *State) addVote(vote *types.Vote, peerID types.NodeID) (added bool, err if vote.Height+1 == cs.Height && vote.Type == tmproto.PrecommitType { if cs.Step != cstypes.RoundStepNewHeight { // Late precommit at prior height is ignored - cs.Logger.Debug("precommit vote came in after commit timeout and has been ignored", "vote", vote) + cs.logger.Debug("precommit vote came in after commit timeout and has been ignored", "vote", vote) return } if cs.LastPrecommits == nil { - cs.Logger.Debug("no last round precommits on node", "vote", vote) + cs.logger.Debug("no last round precommits on node", "vote", vote) return } added, err = cs.LastPrecommits.AddVote(vote) if !added { - cs.Logger.Debug( + cs.logger.Debug( "vote not added", "height", vote.Height, "vote_type", vote.Type, @@ -2533,7 +2779,7 @@ func (cs *State) addVote(vote *types.Vote, peerID types.NodeID) (added bool, err return } - cs.Logger.Debug("added vote to last precommits", "last_precommits", cs.LastPrecommits.StringShort()) + cs.logger.Debug("added vote to last precommits", "last_precommits", cs.LastPrecommits.StringShort()) if err := cs.eventBus.PublishEventVote(types.EventDataVote{Vote: vote}); err != nil { return added, err } @@ -2541,10 +2787,10 @@ func (cs *State) addVote(vote *types.Vote, peerID types.NodeID) (added bool, err cs.evsw.FireEvent(types.EventVoteValue, vote) // if we can skip timeoutCommit and have all the votes now, - if cs.config.SkipTimeoutCommit && cs.LastPrecommits.HasAll() { + if cs.bypassCommitTimeout() && cs.LastPrecommits.HasAll() { // go straight to new round (skip timeout commit) // cs.scheduleTimeout(time.Duration(0), cs.Height, 0, 
cstypes.RoundStepNewHeight) - cs.enterNewRound(cs.Height, 0) + cs.enterNewRound(ctx, cs.Height, 0) } return @@ -2554,19 +2800,26 @@ func (cs *State) addVote(vote *types.Vote, peerID types.NodeID) (added bool, err // Not necessarily a bad peer, but not favorable behavior. if vote.Height != cs.Height { added = false - cs.Logger.Debug("vote ignored and not added", "vote_height", vote.Height, "cs_height", cs.Height, "peer", peerID) + cs.logger.Debug("vote ignored and not added", "vote_height", vote.Height, "cs_height", cs.Height, "peer", peerID) return } + // Verify VoteExtension if precommit + if vote.Type == tmproto.PrecommitType { + if err = cs.blockExec.VerifyVoteExtension(ctx, vote); err != nil { + return false, err + } + } + // Ignore vote if we do not have public keys to verify votes if !cs.Validators.HasPublicKeys { added = false - cs.Logger.Debug("vote received on non-validator, ignoring it", "vote", vote, + cs.logger.Debug("vote received on non-validator, ignoring it", "vote", vote, "cs_height", cs.Height, "peer", peerID) return } - cs.Logger.Debug( + cs.logger.Debug( "adding vote to vote set", "height", cs.Height, "round", cs.Round, @@ -2577,7 +2830,7 @@ func (cs *State) addVote(vote *types.Vote, peerID types.NodeID) (added bool, err added, err = cs.Votes.AddVote(vote, peerID) if !added { if err != nil { - cs.Logger.Error( + cs.logger.Error( "error adding vote", "vote", vote, "cs_height", cs.Height, @@ -2596,44 +2849,22 @@ func (cs *State) addVote(vote *types.Vote, peerID types.NodeID) (added bool, err switch vote.Type { case tmproto.PrevoteType: prevotes := cs.Votes.Prevotes(vote.Round) - cs.Logger.Debug("added vote to prevote", "vote", vote, "prevotes", prevotes.LogString()) - - // If +2/3 prevotes for a block or nil for *any* round: - if blockID, ok := prevotes.TwoThirdsMajority(); ok { - // There was a polka! - // If we're locked but this is a recent polka, unlock. - // If it matches our ProposalBlock, update the ValidBlock + cs.logger.Debug("added vote to prevote", "vote", vote, "prevotes", prevotes.StringShort()) - // Unlock if `cs.LockedRound < vote.Round <= cs.Round` - // NOTE: If vote.Round > cs.Round, we'll deal with it when we get to vote.Round - if (cs.LockedBlock != nil) && - (cs.LockedRound < vote.Round) && - (vote.Round <= cs.Round) && - !cs.LockedBlock.HashesTo(blockID.Hash) { - - cs.Logger.Debug("unlocking because of POL", "locked_round", cs.LockedRound, - "pol_round", vote.Round) - - cs.LockedRound = -1 - cs.LockedBlock = nil - cs.LockedBlockParts = nil - - if err := cs.eventBus.PublishEventUnlock(cs.RoundStateEvent()); err != nil { - return added, err - } - } + // Check to see if >2/3 of the voting power on the network voted for any non-nil block. + if blockID, ok := prevotes.TwoThirdsMajority(); ok && !blockID.IsNil() { + // Greater than 2/3 of the voting power on the network voted for some + // non-nil block // Update Valid* if we can. - // NOTE: our proposal block may be nil or not what received a polka.. 
-			if len(blockID.Hash) != 0 && (cs.ValidRound < vote.Round) && (vote.Round == cs.Round) {
+			if cs.ValidRound < vote.Round && vote.Round == cs.Round {
 				if cs.ProposalBlock.HashesTo(blockID.Hash) {
-					cs.Logger.Debug("updating valid block because of POL", "valid_round", cs.ValidRound,
-						"pol_round", vote.Round)
+					cs.logger.Debug("updating valid block because of POL", "valid_round", cs.ValidRound, "pol_round", vote.Round)
 					cs.ValidRound = vote.Round
 					cs.ValidBlock = cs.ProposalBlock
 					cs.ValidBlockParts = cs.ProposalBlockParts
 				} else {
-					cs.Logger.Debug(
+					cs.logger.Debug(
 						"valid block we do not know about; set ProposalBlock=nil",
 						"proposal", cs.ProposalBlock.Hash(),
 						"block_id", blockID.Hash,
@@ -2659,12 +2890,12 @@ func (cs *State) addVote(vote *types.Vote, peerID types.NodeID) (added bool, err
 		switch {
 		case cs.Round < vote.Round && prevotes.HasTwoThirdsAny():
 			// Round-skip if there is any 2/3+ of votes ahead of us
-			cs.enterNewRound(height, vote.Round)
+			cs.enterNewRound(ctx, height, vote.Round)
 		case cs.Round == vote.Round && cstypes.RoundStepPrevote <= cs.Step: // current round
 			blockID, ok := prevotes.TwoThirdsMajority()
-			if ok && (cs.isProposalComplete() || len(blockID.Hash) == 0) {
-				cs.enterPrecommit(height, vote.Round)
+			if ok && (cs.isProposalComplete() || blockID.IsNil()) {
+				cs.enterPrecommit(ctx, height, vote.Round)
 			} else if prevotes.HasTwoThirdsAny() {
 				cs.enterPrevoteWait(height, vote.Round)
 			}
 		case cs.Proposal != nil && 0 <= cs.Proposal.POLRound && cs.Proposal.POLRound == vote.Round:
 			// If the proposal is now complete, enter prevote of cs.Round.
 			if cs.isProposalComplete() {
-				cs.enterPrevote(height, cs.Round, false)
+				cs.enterPrevote(ctx, height, cs.Round, false)
 			}
 		}
 
 	case tmproto.PrecommitType:
 		precommits := cs.Votes.Precommits(vote.Round)
-		cs.Logger.Debug("added vote to precommit",
+		cs.logger.Debug("added vote to precommit",
 			"height", vote.Height,
 			"round", vote.Round,
 			"validator", vote.ValidatorProTxHash.String(),
@@ -2688,19 +2919,19 @@ func (cs *State) addVote(vote *types.Vote, peerID types.NodeID) (added bool, err
 		blockID, ok := precommits.TwoThirdsMajority()
 		if ok {
 			// Executed as TwoThirdsMajority could be from a higher round
-			cs.enterNewRound(height, vote.Round)
-			cs.enterPrecommit(height, vote.Round)
+			cs.enterNewRound(ctx, height, vote.Round)
+			cs.enterPrecommit(ctx, height, vote.Round)
 
-			if len(blockID.Hash) != 0 {
-				cs.enterCommit(height, vote.Round)
-				if cs.config.SkipTimeoutCommit && precommits.HasAll() {
-					cs.enterNewRound(cs.Height, 0)
+			if !blockID.IsNil() {
+				cs.enterCommit(ctx, height, vote.Round)
+				if cs.bypassCommitTimeout() && precommits.HasAll() {
+					cs.enterNewRound(ctx, cs.Height, 0)
 				}
 			} else {
 				cs.enterPrecommitWait(height, vote.Round)
 			}
 		} else if cs.Round <= vote.Round && precommits.HasTwoThirdsAny() {
-			cs.enterNewRound(height, vote.Round)
+			cs.enterNewRound(ctx, height, vote.Round)
 			cs.enterPrecommitWait(height, vote.Round)
 		}
 
@@ -2713,6 +2944,7 @@ func (cs *State) addVote(vote *types.Vote, peerID types.NodeID) (added bool, err
 // CONTRACT: cs.privValidator is not nil.
 func (cs *State) signVote(
+	ctx context.Context,
 	msgType tmproto.SignedMsgType,
 	hash []byte,
 	header types.PartSetHeader,
@@ -2741,78 +2973,81 @@ func (cs *State) signVote(
 
 	stateID := cs.state.StateID()
 
-	protoVote := vote.ToProto()
-
 	// If the signedMessageType is for a precommit,
 	// use our local precommit timeout as the max wait time for getting a signed commit. The same goes for a prevote.
-	var timeout time.Duration
+	timeout := cs.voteTimeout(cs.Round)
 
 	switch msgType {
 	case tmproto.PrecommitType:
-		timeout = cs.config.TimeoutPrecommit
-	case tmproto.PrevoteType:
-		timeout = cs.config.TimeoutPrevote
+		// if the signed message type is for a precommit, add the VoteExtension
+		ext, err := cs.blockExec.ExtendVote(ctx, vote)
+		if err != nil {
+			return nil, err
+		}
+		vote.Extension = ext
 	default:
 		timeout = time.Second
 	}
 
-	ctx, cancel := context.WithTimeout(context.TODO(), timeout)
+	v := vote.ToProto()
+
+	ctxto, cancel := context.WithTimeout(ctx, timeout)
 	defer cancel()
 
-	err := cs.privValidator.SignVote(
-		ctx, cs.state.ChainID, cs.state.Validators.QuorumType, cs.state.Validators.QuorumHash,
-		protoVote, stateID, cs.Logger)
-	vote.BlockSignature = protoVote.BlockSignature
-	vote.StateSignature = protoVote.StateSignature
+	err := cs.privValidator.SignVote(ctxto, cs.state.ChainID, cs.state.Validators.QuorumType, cs.state.Validators.QuorumHash,
+		v, stateID, cs.logger)
+	vote.BlockSignature = v.BlockSignature
+	vote.StateSignature = v.StateSignature
+	vote.ExtensionSignature = v.ExtensionSignature
 
 	return vote, err
 }
 
 // sign the vote and publish on internalMsgQueue
-func (cs *State) signAddVote(msgType tmproto.SignedMsgType, hash []byte, header types.PartSetHeader) *types.Vote {
+func (cs *State) signAddVote(
+	ctx context.Context,
+	msgType tmproto.SignedMsgType,
+	hash []byte,
+	header types.PartSetHeader,
+) *types.Vote {
 	if cs.privValidator == nil { // the node does not have a key
 		return nil
 	}
 
 	if cs.privValidatorProTxHash == nil {
 		// Vote won't be signed, but it's not critical.
-		cs.Logger.Error(fmt.Sprintf("signAddVote: %v", errProTxHashIsNotSet))
+		cs.logger.Error("signAddVote", "err", errProTxHashIsNotSet)
 		return nil
 	}
 
 	// If the node is not in the validator set, do nothing.
 	if !cs.Validators.HasProTxHash(cs.privValidatorProTxHash) {
-		cs.Logger.Debug("do nothing, node is not a part of validator set")
+		cs.logger.Debug("do nothing, node is not a part of validator set")
 		return nil
 	}
 
 	// TODO: pass pubKey to signVote
 	start := time.Now()
-	vote, err := cs.signVote(msgType, hash, header)
+	vote, err := cs.signVote(ctx, msgType, hash, header)
 	if err == nil {
-		cs.sendInternalMessage(msgInfo{&VoteMessage{vote}, ""})
-		cs.Logger.Debug("signed and pushed vote", "height", cs.Height, "round", cs.Round, "vote", vote, "took", time.Since(start).String())
+		cs.sendInternalMessage(ctx, msgInfo{&VoteMessage{vote}, "", tmtime.Now()})
+		cs.logger.Debug("signed and pushed vote", "height", cs.Height, "round", cs.Round, "vote", vote, "took", time.Since(start).String())
 		return vote
 	}
 
-	cs.Logger.Error("failed signing vote", "height", cs.Height, "round", cs.Round, "vote", vote, "err", err)
+	cs.logger.Error("failed signing vote", "height", cs.Height, "round", cs.Round, "vote", vote, "err", err)
 	return nil
 }
 
 // updatePrivValidatorProTxHash gets the private validator proTxHash and
 // memoizes it. This func returns an error if the private validator is not
 // responding or responds with an error.
-func (cs *State) updatePrivValidatorProTxHash() error {
+func (cs *State) updatePrivValidatorProTxHash(ctx context.Context) error {
 	if cs.privValidator == nil {
 		return nil
 	}
 
-	var timeout time.Duration
-	if cs.config.TimeoutPrecommit > cs.config.TimeoutPrevote {
-		timeout = cs.config.TimeoutPrecommit
-	} else {
-		timeout = cs.config.TimeoutPrevote
-	}
+	timeout := cs.voteTimeout(cs.Round)
 
 	// no GetPubKey retry beyond the proposal/voting in RetrySignerClient
 	if cs.Step >= cstypes.RoundStepPrecommit && cs.privValidatorType == types.RetrySignerClient {
@@ -2821,17 +3056,13 @@ func (cs *State) updatePrivValidatorProTxHash() error {
 
 	// set context timeout depending on the configuration and the State step,
 	// this helps in avoiding blocking of the remote signer connection.
-	ctx, cancel := context.WithTimeout(context.TODO(), timeout)
+	ctxto, cancel := context.WithTimeout(ctx, timeout)
 	defer cancel()
-	proTxHash, err := cs.privValidator.GetProTxHash(ctx)
+	proTxHash, err := cs.privValidator.GetProTxHash(ctxto)
 	if err != nil {
 		return err
 	}
 	cs.privValidatorProTxHash = proTxHash
 
-	if len(proTxHash.Bytes()) != crypto.ProTxHashSize {
-		return fmt.Errorf("proTxHash must be 32 bytes")
-	}
-
 	return nil
 }
@@ -2891,3 +3122,71 @@ func repairWalFile(src, dst string) error {
 
 	return nil
 }
+
+func (cs *State) proposeTimeout(round int32) time.Duration {
+	tp := cs.state.ConsensusParams.Timeout.TimeoutParamsOrDefaults()
+	p := tp.Propose
+	if cs.config.UnsafeProposeTimeoutOverride != 0 {
+		p = cs.config.UnsafeProposeTimeoutOverride
+	}
+	pd := tp.ProposeDelta
+	if cs.config.UnsafeProposeTimeoutDeltaOverride != 0 {
+		pd = cs.config.UnsafeProposeTimeoutDeltaOverride
+	}
+	return time.Duration(
+		p.Nanoseconds()+pd.Nanoseconds()*int64(round),
+	) * time.Nanosecond
+}
+
+func (cs *State) voteTimeout(round int32) time.Duration {
+	tp := cs.state.ConsensusParams.Timeout.TimeoutParamsOrDefaults()
+	v := tp.Vote
+	if cs.config.UnsafeVoteTimeoutOverride != 0 {
+		v = cs.config.UnsafeVoteTimeoutOverride
+	}
+	vd := tp.VoteDelta
+	if cs.config.UnsafeVoteTimeoutDeltaOverride != 0 {
+		vd = cs.config.UnsafeVoteTimeoutDeltaOverride
+	}
+	return time.Duration(
+		v.Nanoseconds()+vd.Nanoseconds()*int64(round),
+	) * time.Nanosecond
+}
+
+func (cs *State) commitTime(t time.Time) time.Time {
+	c := cs.state.ConsensusParams.Timeout.Commit
+	if cs.config.UnsafeCommitTimeoutOverride != 0 {
+		c = cs.config.UnsafeCommitTimeoutOverride
+	}
+	return t.Add(c)
+}
+
+func (cs *State) bypassCommitTimeout() bool {
+	if cs.config.UnsafeBypassCommitTimeoutOverride != nil {
+		return *cs.config.UnsafeBypassCommitTimeoutOverride
+	}
+	return cs.state.ConsensusParams.Timeout.BypassCommitTimeout
+}
+
+func (cs *State) calculateProposalTimestampDifferenceMetric() {
+	if cs.Proposal != nil && cs.Proposal.POLRound == -1 {
+		sp := cs.state.ConsensusParams.Synchrony.SynchronyParamsOrDefaults()
+		isTimely := cs.Proposal.IsTimely(cs.ProposalReceiveTime, sp, cs.Round)
+		cs.metrics.ProposalTimestampDifference.With("is_timely", fmt.Sprintf("%t", isTimely)).
+			Observe(cs.ProposalReceiveTime.Sub(cs.Proposal.Timestamp).Seconds())
+	}
+}
+
+// proposerWaitTime determines how long the proposer should wait to propose its next block.
+// If the result is zero, a block can be proposed immediately.
+//
+// Block times must be monotonically increasing, so if the block time of the previous
+// block is larger than the proposer's current time, then the proposer will sleep
+// until its local clock exceeds the previous block time.
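+//
+// For example (illustrative numbers only): if the previous block time is
+// 12:00:00.500 and the proposer's local clock reads 12:00:00.200,
+// proposerWaitTime returns 300ms; once the local clock has passed the
+// previous block time, it returns zero and the proposer may propose at once.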
+func proposerWaitTime(lt tmtime.Source, bt time.Time) time.Duration {
+	t := lt.Now()
+	if bt.After(t) {
+		return bt.Sub(t)
+	}
+	return 0
+}
diff --git a/internal/consensus/state_test.go b/internal/consensus/state_test.go
index 7df137c052..be8e4c4933 100644
--- a/internal/consensus/state_test.go
+++ b/internal/consensus/state_test.go
@@ -3,22 +3,26 @@ package consensus
 import (
 	"bytes"
 	"context"
-	"fmt"
 	"strconv"
 	"testing"
 	"time"
 
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
 	"github.com/stretchr/testify/require"
 
 	"github.com/tendermint/tendermint/abci/example/kvstore"
-	"github.com/tendermint/tendermint/crypto/tmhash"
+	abci "github.com/tendermint/tendermint/abci/types"
+	abcimocks "github.com/tendermint/tendermint/abci/types/mocks"
+	"github.com/tendermint/tendermint/crypto"
 	cstypes "github.com/tendermint/tendermint/internal/consensus/types"
-	p2pmock "github.com/tendermint/tendermint/internal/p2p/mock"
+	"github.com/tendermint/tendermint/internal/eventbus"
+	tmpubsub "github.com/tendermint/tendermint/internal/pubsub"
+	tmquery "github.com/tendermint/tendermint/internal/pubsub/query"
 	tmbytes "github.com/tendermint/tendermint/libs/bytes"
 	"github.com/tendermint/tendermint/libs/log"
-	tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
 	tmrand "github.com/tendermint/tendermint/libs/rand"
+	tmtime "github.com/tendermint/tendermint/libs/time"
 	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
 	"github.com/tendermint/tendermint/types"
 )
@@ -37,16 +41,23 @@ x * TestFullRound1 - 1 val, full successful round
 x * TestFullRoundNil - 1 val, full round of nil
 x * TestFullRound2 - 2 vals, both required for full round
 LockSuite
-x * TestLockNoPOL - 2 vals, 4 rounds. one val locked, precommits nil every round except first.
-x * TestLockPOLRelock - 4 vals, one precommits, other 3 polka at next round, so we unlock and precomit the polka
-x * TestLockPOLUnlock - 4 vals, one precommits, other 3 polka nil at next round, so we unlock and precomit nil
-x * TestLockPOLSafety1 - 4 vals. We shouldn't change lock based on polka at earlier round
-x * TestLockPOLSafety2 - 4 vals. After unlocking, we shouldn't relock based on polka at earlier round
+x * TestStateLock_NoPOL - 2 vals, 4 rounds. one val locked, precommits nil every round except first.
+x * TestStateLock_POLUpdateLock - 4 vals, one precommits,
+other 3 polka at next round, so we unlock and precommit the polka
+x * TestStateLock_POLRelock - 4 vals, polka in round 1 and polka in round 2.
+Ensure validator updates locked round.
+x * TestStateLock_POLDoesNotUnlock - 4 vals, one precommits, other 3 polka nil at
+next round, so we precommit nil but maintain lock
+x * TestStateLock_MissingProposalWhenPOLSeenDoesNotUpdateLock - 4 vals, 1 misses proposal but sees POL.
+x * TestStateLock_MissingProposalWhenPOLSeenDoesNotUnlock - 4 vals, 1 misses proposal but sees POL.
+x * TestStateLock_POLSafety1 - 4 vals. We shouldn't change lock based on polka at earlier round
+x * TestStateLock_POLSafety2 - 4 vals. After unlocking, we shouldn't relock based on polka at earlier round
+x * TestState_PrevotePOLFromPreviousRound - 4 vals, prevote a proposal if a POL was seen for it in a previous round.
 * TestNetworkLock - once +1/3 precommits, network should be locked
 * TestNetworkLockPOL - once +1/3 precommits, the block with more recent polka is committed
 SlashingSuite
-x * TestSlashingPrevotes - a validator prevoting twice in a round gets slashed
-x * TestSlashingPrecommits - a validator precomitting twice in a round gets slashed
+x * TestStateSlashing_Prevotes - a validator prevoting twice in a round gets slashed
+x * TestStateSlashing_Precommits - a validator precommitting twice in a round gets slashed
 CatchupSuite
 * TestCatchup - if we might be behind and we've seen any 2/3 prevotes, round skip to new round, precommit, or prevote
 HaltSuite
@@ -58,81 +69,77 @@ x * TestHalt1 - if we see +2/3 precommits after timing out into new round, we sh
 
 // ProposeSuite
 func TestStateProposerSelection0(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 	config := configSetup(t)
 
-	cs1, vss, err := randState(config, 4)
-	require.NoError(t, err)
-
+	cs1, vss := makeState(ctx, t, makeStateArgs{config: config})
 	height, round := cs1.Height, cs1.Round
 
-	newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
-	proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
+	newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound)
+	proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal)
 
-	startTestRound(cs1, height, round)
+	startTestRound(ctx, cs1, height, round)
 
 	// Wait for new round so proposer is set.
-	ensureNewRound(newRoundCh, height, round)
+	ensureNewRound(t, newRoundCh, height, round)
 
 	// Commit a block and ensure proposer for the next height is correct.
 	prop := cs1.GetRoundState().Validators.GetProposer()
-	proTxHash, err := cs1.privValidator.GetProTxHash(context.Background())
+	proTxHash, err := cs1.privValidator.GetProTxHash(ctx)
 	require.NoError(t, err)
-	if !bytes.Equal(prop.ProTxHash, proTxHash) {
-		t.Fatalf("expected proposer to be validator %d. Got %X", 0, prop.ProTxHash)
-	}
+	require.Truef(t, bytes.Equal(prop.ProTxHash, proTxHash), "expected proposer to be validator %d. Got %X", 0, prop.ProTxHash.ShortString())
 
 	// Wait for complete proposal.
-	ensureNewProposal(proposalCh, height, round)
+	ensureNewProposal(t, proposalCh, height, round)
 
 	rs := cs1.GetRoundState()
-	signAddVotes(config, cs1, tmproto.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:]...)
+	signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{
+		Hash:          rs.ProposalBlock.Hash(),
+		PartSetHeader: rs.ProposalBlockParts.Header(),
+	}, vss[1:]...)
 
 	// Wait for new round so next validator is set.
-	ensureNewRound(newRoundCh, height+1, 0)
+	ensureNewRound(t, newRoundCh, height+1, 0)
 
 	prop = cs1.GetRoundState().Validators.GetProposer()
-	proTxHash, err = vss[1].GetProTxHash(context.Background())
+	proTxHash, err = vss[1].GetProTxHash(ctx)
 	require.NoError(t, err)
-	if !bytes.Equal(prop.ProTxHash, proTxHash) {
-		panic(fmt.Sprintf("expected proposer to be validator %d. Got %X", 1, prop.ProTxHash))
-	}
+	require.True(t, bytes.Equal(prop.ProTxHash, proTxHash), "expected proposer to be validator %d.
Got %X", 1, prop.ProTxHash.ShortString()) } // Now let's do it all again, but starting from round 2 instead of 0 func TestStateProposerSelection2(t *testing.T) { config := configSetup(t) - cs1, vss, err := randState(config, 4) // test needs more work for more than 3 validators - require.NoError(t, err) - + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cs1, vss := makeState(ctx, t, makeStateArgs{config: config}) // test needs more work for more than 3 validators height := cs1.Height - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) // this time we jump in at round 2 incrementRound(vss[1:]...) incrementRound(vss[1:]...) var round int32 = 2 - startTestRound(cs1, height, round) + startTestRound(ctx, cs1, height, round) - ensureNewRound(newRoundCh, height, round) // wait for the new round + ensureNewRound(t, newRoundCh, height, round) // wait for the new round // everyone just votes nil. we get a new proposer each round for i := int32(0); int(i) < len(vss); i++ { prop := cs1.GetRoundState().Validators.GetProposer() - proTxHash, err := vss[int(i+round)%len(vss)].GetProTxHash(context.Background()) + proTxHash, err := vss[int(i+round)%len(vss)].GetProTxHash(ctx) require.NoError(t, err) correctProposer := proTxHash - if !bytes.Equal(prop.ProTxHash, correctProposer) { - panic(fmt.Sprintf( - "expected RoundState.Validators.GetProposer() to be validator %d. Got %X", - int(i+2)%len(vss), - prop.ProTxHash)) - } + require.True(t, bytes.Equal(prop.ProTxHash, correctProposer), + "expected RoundState.Validators.GetProposer() to be validator %d. Got %X", + int(i+2)%len(vss), + prop.ProTxHash) - rs := cs1.GetRoundState() - signAddVotes(config, cs1, tmproto.PrecommitType, nil, rs.ProposalBlockParts.Header(), vss[1:]...) - ensureNewRound(newRoundCh, height, i+round+1) // wait for the new round event each round + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vss[1:]...) + ensureNewRound(t, newRoundCh, height, i+round+1) // wait for the new round event each round incrementRound(vss[1:]...) } @@ -141,19 +148,20 @@ func TestStateProposerSelection2(t *testing.T) { // a non-validator should timeout into the prevote round func TestStateEnterProposeNoPrivValidator(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs, _, err := randState(config, 1) - require.NoError(t, err) - cs.SetPrivValidator(nil) + cs, _ := makeState(ctx, t, makeStateArgs{config: config, validators: 1}) + cs.SetPrivValidator(ctx, nil) height, round := cs.Height, cs.Round // Listen for propose timeout event - timeoutCh := subscribe(cs.eventBus, types.EventQueryTimeoutPropose) + timeoutCh := subscribe(ctx, t, cs.eventBus, types.EventQueryTimeoutPropose) - startTestRound(cs, height, round) + startTestRound(ctx, cs, height, round) // if we're not a validator, EnterPropose should timeout - ensureNewTimeout(timeoutCh, height, round, cs.config.TimeoutPropose.Nanoseconds()) + ensureNewTimeout(t, timeoutCh, height, round, cs.state.ConsensusParams.Timeout.ProposeTimeout(round).Nanoseconds()) if cs.GetRoundState().Proposal != nil { t.Error("Expected to make no proposal, since no privValidator") @@ -163,20 +171,21 @@ func TestStateEnterProposeNoPrivValidator(t *testing.T) { // a validator should not timeout of the prevote round (TODO: unless the block is really big!) 
func TestStateEnterProposeYesPrivValidator(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs, _, err := randState(config, 1) - require.NoError(t, err) + cs, _ := makeState(ctx, t, makeStateArgs{config: config, validators: 1}) height, round := cs.Height, cs.Round // Listen for propose timeout event - timeoutCh := subscribe(cs.eventBus, types.EventQueryTimeoutPropose) - proposalCh := subscribe(cs.eventBus, types.EventQueryCompleteProposal) + timeoutCh := subscribe(ctx, t, cs.eventBus, types.EventQueryTimeoutPropose) + proposalCh := subscribe(ctx, t, cs.eventBus, types.EventQueryCompleteProposal) - cs.enterNewRound(height, round) - cs.startRoutines(3) + cs.enterNewRound(ctx, height, round) + cs.startRoutines(ctx, 3) - ensureNewProposal(proposalCh, height, round) + ensureNewProposal(t, proposalCh, height, round) // Check that Proposal, ProposalBlock, ProposalBlockParts are set. rs := cs.GetRoundState() @@ -191,23 +200,25 @@ func TestStateEnterProposeYesPrivValidator(t *testing.T) { } // if we're a validator, enterPropose should not timeout - ensureNoNewTimeout(timeoutCh, cs.config.TimeoutPropose.Nanoseconds()) + ensureNoNewTimeout(t, timeoutCh, cs.state.ConsensusParams.Timeout.ProposeTimeout(round).Nanoseconds()) } func TestStateBadProposal(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss, err := randState(config, 2) - require.NoError(t, err) + cs1, vss := makeState(ctx, t, makeStateArgs{config: config, validators: 2}) height, round := cs1.Height, cs1.Round vs2 := vss[1] partSize := types.BlockPartSizeBytes - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - voteCh := subscribe(cs1.eventBus, types.EventQueryVote) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + voteCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryVote) - propBlock, _ := cs1.createProposalBlock() // changeProposer(t, cs1, vs2) + propBlock, err := cs1.createProposalBlock(ctx) // changeProposer(t, cs1, vs2) + require.NoError(t, err) // make the second validator the proposer by incrementing round round++ @@ -221,59 +232,60 @@ func TestStateBadProposal(t *testing.T) { copy(stateHash, propBlock.AppHash) stateHash[0] = (stateHash[0] + 1) % 255 propBlock.AppHash = stateHash - propBlockParts := propBlock.MakePartSet(partSize) + propBlockParts, err := propBlock.MakePartSet(partSize) + require.NoError(t, err) blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} - proposal := types.NewProposal(vs2.Height, 1, round, -1, blockID) + proposal := types.NewProposal(vs2.Height, 1, round, -1, blockID, propBlock.Header.Time) p := proposal.ToProto() - if _, err := vs2.SignProposal(context.Background(), config.ChainID(), cs1.Validators.QuorumType, cs1.Validators.QuorumHash, p); err != nil { - t.Fatal("failed to sign bad proposal", err) - } + _, err = vs2.SignProposal(ctx, config.ChainID(), cs1.Validators.QuorumType, cs1.Validators.QuorumHash, p) + require.NoError(t, err) proposal.Signature = p.Signature // set the proposal block - if err := cs1.SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { - t.Fatal(err) - } + err = cs1.SetProposalAndBlock(ctx, proposal, propBlock, propBlockParts, "some peer") + require.NoError(t, err) // start the machine - startTestRound(cs1, height, round) + startTestRound(ctx, cs1, height, round) // wait for proposal - ensureProposal(proposalCh, 
height, round, blockID) + ensureProposal(t, proposalCh, height, round, blockID) // wait for prevote - ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], nil) + ensurePrevoteMatch(t, voteCh, height, round, nil) // add bad prevote from vs2 and wait for it - signAddVotes(config, cs1, tmproto.PrevoteType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) - ensurePrevote(voteCh, height, round) + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2) + ensurePrevote(t, voteCh, height, round) // wait for precommit - ensurePrecommit(voteCh, height, round) - validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) - signAddVotes(config, cs1, tmproto.PrecommitType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) + ensurePrecommit(t, voteCh, height, round) + validatePrecommit(ctx, t, cs1, round, -1, vss[0], nil, nil) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vs2) } // TestStateProposalTime tries to sign and vote on proposal with invalid time. func TestStateProposalTime(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + config := configSetup(t) - cs1, _, _ := randState(config, 1) + cs1, _ := makeState(ctx, t, makeStateArgs{config: config, validators: 1}) height, round := cs1.Height, cs1.Round cs1.config.ProposedBlockTimeWindow = 1 * time.Second cs1.config.DontAutoPropose = true cs1.config.CreateEmptyBlocksInterval = 0 - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) cs1.mtx.Lock() - startTestRound(cs1, height, round) + startTestRound(ctx, cs1, height, round) cs1.mtx.Unlock() // Wait for new round so proposer is set. - ensureNewRound(newRoundCh, height, round) + ensureNewRound(t, newRoundCh, height, round) testCases := []struct { blockTimeFunc func(*State) time.Time @@ -304,23 +316,25 @@ func TestStateProposalTime(t *testing.T) { cs := cs1 // Generate proposal block cs.mtx.Lock() - propBlock, propBlockParts := cs.createProposalBlock() + propBlock, err := cs.createProposalBlock(ctx) + require.NoError(t, err) if tc.blockTimeFunc != nil { propBlock.Time = tc.blockTimeFunc(cs) } - blockID := propBlock.BlockID() - + parSet, err := propBlock.MakePartSet(types.BlockPartSizeBytes) + blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: parSet.Header()} + require.NoError(t, err) cs.ValidBlock = propBlock - cs.ValidBlockParts = propBlockParts + cs.ValidBlockParts = parSet // sleep if needed time.Sleep(tc.sleep) // Wait for complete proposal. 
- cs.enterPropose(height, round) + cs.enterPropose(ctx, height, round) cs.mtx.Unlock() - ensureNewRound(newRoundCh, height+1, 0) + ensureNewRound(t, newRoundCh, height+1, 0) if tc.expectNewBlock { assert.NotEqual(t, blockID, cs.LastCommit.BlockID, "expected that block will be regenerated") @@ -333,19 +347,21 @@ func TestStateProposalTime(t *testing.T) { func TestStateOversizedBlock(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss, err := randState(config, 2) - require.NoError(t, err) + cs1, vss := makeState(ctx, t, makeStateArgs{config: config, validators: 2}) cs1.state.ConsensusParams.Block.MaxBytes = 2000 height, round := cs1.Height, cs1.Round vs2 := vss[1] partSize := types.BlockPartSizeBytes - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - voteCh := subscribe(cs1.eventBus, types.EventQueryVote) + timeoutProposeCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutPropose) + voteCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryVote) - propBlock, _ := cs1.createProposalBlock() + propBlock, err := cs1.createProposalBlock(ctx) + require.NoError(t, err) propBlock.Data.Txs = []types.Tx{tmrand.Bytes(2001)} propBlock.Header.DataHash = propBlock.Data.Hash() @@ -353,14 +369,14 @@ func TestStateOversizedBlock(t *testing.T) { round++ incrementRound(vss[1:]...) - propBlockParts := propBlock.MakePartSet(partSize) + propBlockParts, err := propBlock.MakePartSet(partSize) + require.NoError(t, err) blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} - proposal := types.NewProposal(height, 1, round, -1, blockID) + proposal := types.NewProposal(height, 1, round, -1, blockID, propBlock.Header.Time) p := proposal.ToProto() - if _, err := vs2.SignProposal(context.Background(), config.ChainID(), cs1.Validators.QuorumType, - cs1.Validators.QuorumHash, p); err != nil { - t.Fatal("failed to sign bad proposal", err) - } + _, err = vs2.SignProposal(ctx, config.ChainID(), cs1.Validators.QuorumType, + cs1.Validators.QuorumHash, p) + require.NoError(t, err) proposal.Signature = p.Signature totalBytes := 0 @@ -369,28 +385,24 @@ func TestStateOversizedBlock(t *testing.T) { totalBytes += len(part.Bytes) } - if err := cs1.SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { - t.Fatal(err) - } + err = cs1.SetProposalAndBlock(ctx, proposal, propBlock, propBlockParts, "some peer") + require.NoError(t, err) // start the machine - startTestRound(cs1, height, round) - - t.Log("Block Sizes", "Limit", cs1.state.ConsensusParams.Block.MaxBytes, "Current", totalBytes) + startTestRound(ctx, cs1, height, round) // c1 should log an error with the block part message as it exceeds the consensus params. The // block is not added to cs.ProposalBlock so the node timeouts. 
- ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) + ensureNewTimeout(t, timeoutProposeCh, height, round, cs1.proposeTimeout(round).Nanoseconds()) // and then should send nil prevote and precommit regardless of whether other validators prevote and // precommit on it - ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], nil) - signAddVotes(config, cs1, tmproto.PrevoteType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) - ensurePrevote(voteCh, height, round) - ensurePrecommit(voteCh, height, round) - validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) - signAddVotes(config, cs1, tmproto.PrecommitType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) + ensurePrevoteMatch(t, voteCh, height, round, nil) + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2) + ensurePrevote(t, voteCh, height, round) + ensurePrecommit(t, voteCh, height, round) + validatePrecommit(ctx, t, cs1, round, -1, vss[0], nil, nil) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vs2) } //---------------------------------------------------------------------------------------------------- @@ -399,104 +411,90 @@ func TestStateOversizedBlock(t *testing.T) { // propose, prevote, and precommit a block func TestStateFullRound1(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs, vss, err := randState(config, 1) - require.NoError(t, err) + cs, vss := makeState(ctx, t, makeStateArgs{config: config, validators: 1}) height, round := cs.Height, cs.Round - // NOTE: buffer capacity of 0 ensures we can validate prevote and last commit - // before consensus can move to the next height (and cause a race condition) - if err := cs.eventBus.Stop(); err != nil { - t.Error(err) - } - eventBus := types.NewEventBusWithBufferCapacity(0) - eventBus.SetLogger(log.TestingLogger().With("module", "events")) - cs.SetEventBus(eventBus) - if err := eventBus.Start(); err != nil { - t.Error(err) - } - - voteCh := subscribe(cs.eventBus, types.EventQueryVote) - propCh := subscribe(cs.eventBus, types.EventQueryCompleteProposal) - newRoundCh := subscribe(cs.eventBus, types.EventQueryNewRound) + voteCh := subscribe(ctx, t, cs.eventBus, types.EventQueryVote) + propCh := subscribe(ctx, t, cs.eventBus, types.EventQueryCompleteProposal) + newRoundCh := subscribe(ctx, t, cs.eventBus, types.EventQueryNewRound) // Maybe it would be better to call explicitly startRoutines(4) - startTestRound(cs, height, round) + startTestRound(ctx, cs, height, round) - ensureNewRound(newRoundCh, height, round) + ensureNewRound(t, newRoundCh, height, round) - ensureNewProposal(propCh, height, round) - propBlockHash := cs.GetRoundState().ProposalBlock.Hash() + propBlock := ensureNewProposal(t, propCh, height, round) - ensurePrevote(voteCh, height, round) // wait for prevote - validatePrevote(t, cs, round, vss[0], propBlockHash) + ensurePrevoteMatch(t, voteCh, height, round, propBlock.Hash) // wait for prevote - ensurePrecommit(voteCh, height, round) // wait for precommit + ensurePrecommit(t, voteCh, height, round) // wait for precommit // we're going to roll right into new height - ensureNewRound(newRoundCh, height+1, 0) + ensureNewRound(t, newRoundCh, height+1, 0) - validateLastCommit(t, cs, vss[0], propBlockHash) + validateLastCommit(ctx, t, cs, vss[0], propBlock.Hash) } // nil is proposed, so prevote and precommit nil func TestStateFullRoundNil(t *testing.T) { 
config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs, vss, err := randState(config, 1) - require.NoError(t, err) + cs, _ := makeState(ctx, t, makeStateArgs{config: config, validators: 1}) height, round := cs.Height, cs.Round - voteCh := subscribe(cs.eventBus, types.EventQueryVote) + voteCh := subscribe(ctx, t, cs.eventBus, types.EventQueryVote) - cs.enterPrevote(height, round, false) - cs.startRoutines(4) + cs.enterPrevote(ctx, height, round, false) + cs.startRoutines(ctx, 4) - ensurePrevote(voteCh, height, round) // prevote - ensurePrecommit(voteCh, height, round) // precommit - - // should prevote and precommit nil - validatePrevoteAndPrecommit(t, cs, round, -1, vss[0], nil, nil) + ensurePrevoteMatch(t, voteCh, height, round, nil) // prevote + ensurePrecommitMatch(t, voteCh, height, round, nil) // precommit } // run through propose, prevote, precommit commit with two validators // where the first validator has to wait for votes from the second func TestStateFullRound2(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss, err := randState(config, 2) - require.NoError(t, err) + cs1, vss := makeState(ctx, t, makeStateArgs{config: config, validators: 2}) vs2 := vss[1] height, round := cs1.Height, cs1.Round - voteCh := subscribe(cs1.eventBus, types.EventQueryVote) - newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlock) + voteCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryVote) + newBlockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewBlock) // start round and wait for propose and prevote - startTestRound(cs1, height, round) + startTestRound(ctx, cs1, height, round) - ensurePrevote(voteCh, height, round) // prevote + ensurePrevote(t, voteCh, height, round) // prevote // we should be stuck in limbo waiting for more prevotes rs := cs1.GetRoundState() - propBlockHash, propPartSetHeader := rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header() + blockID := types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()} // prevote arrives from vs2: - signAddVotes(config, cs1, tmproto.PrevoteType, propBlockHash, propPartSetHeader, vs2) - ensurePrevote(voteCh, height, round) // prevote + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2) + ensurePrevote(t, voteCh, height, round) // prevote - ensurePrecommit(voteCh, height, round) // precommit + ensurePrecommit(t, voteCh, height, round) // precommit // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, 0, 0, vss[0], propBlockHash, propBlockHash) + validatePrecommit(ctx, t, cs1, 0, 0, vss[0], blockID.Hash, blockID.Hash) // we should be stuck in limbo waiting for more precommits // precommit arrives from vs2: - signAddVotes(config, cs1, tmproto.PrecommitType, propBlockHash, propPartSetHeader, vs2) - ensurePrecommit(voteCh, height, round) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vs2) + ensurePrecommit(t, voteCh, height, round) // wait to finish commit, propose in next height - ensureNewBlock(newBlockCh, height) + ensureNewBlock(t, newBlockCh, height) } //------------------------------------------------------------------------------------------ @@ -504,65 +502,71 @@ func TestStateFullRound2(t *testing.T) { // two validators, 4 rounds. // two vals take turns proposing. 
val1 locks on first one, precommits nil on everything else -func TestStateLockNoPOL(t *testing.T) { +func TestStateLock_NoPOL(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss, err := randState(config, 2) - require.NoError(t, err) + cs1, vss := makeState(ctx, t, makeStateArgs{config: config, validators: 2}) vs2 := vss[1] height, round := cs1.Height, cs1.Round partSize := types.BlockPartSizeBytes - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - voteCh := subscribe(cs1.eventBus, types.EventQueryVote) - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + timeoutProposeCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutPropose) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + voteCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryVote) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) /* Round1 (cs1, B) // B B // B B2 */ // start round and wait for prevote - cs1.enterNewRound(height, round) - cs1.startRoutines(0) + cs1.enterNewRound(ctx, height, round) + cs1.startRoutines(ctx, 0) - ensureNewRound(newRoundCh, height, round) + ensureNewRound(t, newRoundCh, height, round) - ensureNewProposal(proposalCh, height, round) + ensureNewProposal(t, proposalCh, height, round) roundState := cs1.GetRoundState() - theBlockHash := roundState.ProposalBlock.Hash() - thePartSetHeader := roundState.ProposalBlockParts.Header() + initialBlockID := types.BlockID{ + Hash: roundState.ProposalBlock.Hash(), + PartSetHeader: roundState.ProposalBlockParts.Header(), + } - ensurePrevote(voteCh, height, round) // prevote + ensurePrevote(t, voteCh, height, round) // prevote // we should now be stuck in limbo forever, waiting for more prevotes // prevote arrives from vs2: - signAddVotes(config, cs1, tmproto.PrevoteType, theBlockHash, thePartSetHeader, vs2) - ensurePrevote(voteCh, height, round) // prevote + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), initialBlockID, vs2) + ensurePrevote(t, voteCh, height, round) // prevote + validatePrevote(ctx, t, cs1, round, vss[0], initialBlockID.Hash) - ensurePrecommit(voteCh, height, round) // precommit // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash) + ensurePrecommit(t, voteCh, height, round) + validatePrecommit(ctx, t, cs1, round, round, vss[0], initialBlockID.Hash, initialBlockID.Hash) // we should now be stuck in limbo forever, waiting for more precommits // lets add one for a different block - hash := make([]byte, len(theBlockHash)) - copy(hash, theBlockHash) + hash := make([]byte, len(initialBlockID.Hash)) + copy(hash, initialBlockID.Hash) hash[0] = (hash[0] + 1) % 255 - signAddVotes(config, cs1, tmproto.PrecommitType, hash, thePartSetHeader, vs2) - ensurePrecommit(voteCh, height, round) // precommit + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{ + Hash: hash, + PartSetHeader: initialBlockID.PartSetHeader, + }, vs2) + ensurePrecommit(t, voteCh, height, round) // precommit // (note we're entering precommit for a second time this round) // but with invalid args. 
then we enterPrecommitWait, and the timeout to new round - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.voteTimeout(round).Nanoseconds()) /// round++ // moving to the next round - ensureNewRound(newRoundCh, height, round) - t.Log("#### ONTO ROUND 1") + ensureNewRound(t, newRoundCh, height, round) /* Round2 (cs1, B) // B B2 */ @@ -570,576 +574,932 @@ func TestStateLockNoPOL(t *testing.T) { incrementRound(vs2) // now we're on a new round and not the proposer, so wait for timeout - ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) + ensureNewTimeout(t, timeoutProposeCh, height, round, cs1.proposeTimeout(round).Nanoseconds()) rs := cs1.GetRoundState() - if rs.ProposalBlock != nil { - panic("Expected proposal block to be nil") - } + require.Nil(t, rs.ProposalBlock, "Expected proposal block to be nil") - // wait to finish prevote - ensurePrevote(voteCh, height, round) - // we should have prevoted our locked block - validatePrevote(t, cs1, round, vss[0], rs.LockedBlock.Hash()) + // we should have prevoted nil since we did not see a proposal in the round. + ensurePrevote(t, voteCh, height, round) + validatePrevote(ctx, t, cs1, round, vss[0], nil) // add a conflicting prevote from the other validator - signAddVotes(config, cs1, tmproto.PrevoteType, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2) - ensurePrevote(voteCh, height, round) + partSet, err := rs.LockedBlock.MakePartSet(partSize) + require.NoError(t, err) + conflictingBlockID := types.BlockID{Hash: hash, PartSetHeader: partSet.Header()} + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), conflictingBlockID, vs2) + ensurePrevote(t, voteCh, height, round) // now we're going to enter prevote again, but with invalid args // and then prevote wait, which should timeout. then wait for precommit - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds()) - - ensurePrecommit(voteCh, height, round) // precommit - // the proposed block should still be locked and our precommit added - // we should precommit nil and be locked on the proposal - validatePrecommit(t, cs1, round, 0, vss[0], nil, theBlockHash) + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.voteTimeout(round).Nanoseconds()) + // the proposed block should still be locked block. + // we should precommit nil and be locked on the proposal. 
+ ensurePrecommit(t, voteCh, height, round) + validatePrecommit(ctx, t, cs1, round, 0, vss[0], nil, initialBlockID.Hash) // add conflicting precommit from vs2 - signAddVotes(config, cs1, tmproto.PrecommitType, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2) - ensurePrecommit(voteCh, height, round) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), conflictingBlockID, vs2) + ensurePrecommit(t, voteCh, height, round) // (note we're entering precommit for a second time this round, but with invalid args // then we enterPrecommitWait and timeout into NewRound - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.voteTimeout(round).Nanoseconds()) round++ // entering new round - ensureNewRound(newRoundCh, height, round) - t.Log("#### ONTO ROUND 2") + ensureNewRound(t, newRoundCh, height, round) /* Round3 (vs2, _) // B, B2 */ incrementRound(vs2) - ensureNewProposal(proposalCh, height, round) + ensureNewProposal(t, proposalCh, height, round) rs = cs1.GetRoundState() // now we're on a new round and are the proposer - if !bytes.Equal(rs.ProposalBlock.Hash(), rs.LockedBlock.Hash()) { - panic(fmt.Sprintf( - "Expected proposal block to be locked block. Got %v, Expected %v", - rs.ProposalBlock, - rs.LockedBlock)) - } - - ensurePrevote(voteCh, height, round) // prevote - validatePrevote(t, cs1, round, vss[0], rs.LockedBlock.Hash()) - - signAddVotes(config, cs1, tmproto.PrevoteType, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) - ensurePrevote(voteCh, height, round) + require.True(t, bytes.Equal(rs.ProposalBlock.Hash(), rs.LockedBlock.Hash()), + "Expected proposal block to be locked block. Got %v, Expected %v", + rs.ProposalBlock, + rs.LockedBlock) + + ensurePrevote(t, voteCh, height, round) // prevote + validatePrevote(ctx, t, cs1, round, vss[0], rs.LockedBlock.Hash()) + partSet, err = rs.ProposalBlock.MakePartSet(partSize) + require.NoError(t, err) + newBlockID := types.BlockID{Hash: hash, PartSetHeader: partSet.Header()} + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), newBlockID, vs2) + ensurePrevote(t, voteCh, height, round) - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds()) - ensurePrecommit(voteCh, height, round) // precommit + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.voteTimeout(round).Nanoseconds()) + ensurePrecommit(t, voteCh, height, round) // precommit - validatePrecommit(t, cs1, round, 0, vss[0], nil, theBlockHash) // precommit nil but be locked on proposal + validatePrecommit(ctx, t, cs1, round, 0, vss[0], nil, initialBlockID.Hash) // precommit nil but be locked on proposal signAddVotes( - config, + ctx, + t, cs1, tmproto.PrecommitType, - hash, - rs.ProposalBlock.MakePartSet(partSize).Header(), + config.ChainID(), + newBlockID, vs2) // NOTE: conflicting precommits at same height - ensurePrecommit(voteCh, height, round) + ensurePrecommit(t, voteCh, height, round) - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.voteTimeout(round).Nanoseconds()) + + // cs1 is locked on a block at this point, so we must generate a new consensus + // state to force a new proposal block to be generated. 
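+	// (A second, fresh consensus state proposes its own block, which differs
+	// from cs1's locked block and so gives the next round a competing proposal.)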
+ cs2, _ := makeState(ctx, t, makeStateArgs{config: config, validators: 2}) - cs2, _, err := randState(config, 2) // needed so generated block is different than locked block - require.NoError(t, err) // Since the quorum hash is also part of the sign ID we must make sure it's the same cs2.LastValidators.QuorumHash = cs1.LastValidators.QuorumHash - _, valSet := cs1.GetValidatorSet() cs2.Validators.QuorumHash = valSet.QuorumHash + // before we time out into new round, set next proposal block - prop, propBlock := decideProposal(cs2, vs2, vs2.Height, vs2.Round+1) - if prop == nil || propBlock == nil { - t.Fatal("Failed to create proposal block with vs2") + prop, propBlock := decideProposal(ctx, t, cs2, vs2, vs2.Height, vs2.Round+1) + require.NotNil(t, propBlock, "Failed to create proposal block with vs2") + require.NotNil(t, prop, "Failed to create proposal block with vs2") + propBlockID := types.BlockID{ + Hash: propBlock.Hash(), + PartSetHeader: partSet.Header(), } incrementRound(vs2) round++ // entering new round - ensureNewRound(newRoundCh, height, round) - t.Log("#### ONTO ROUND 3") + ensureNewRound(t, newRoundCh, height, round) /* Round4 (vs2, C) // B C // B C */ // now we're on a new round and not the proposer // so set the proposal block - if err := cs1.SetProposalAndBlock(prop, propBlock, propBlock.MakePartSet(partSize), ""); err != nil { - t.Fatal(err) - } + bps3, err := propBlock.MakePartSet(partSize) + require.NoError(t, err) + err = cs1.SetProposalAndBlock(ctx, prop, propBlock, bps3, "") + require.NoError(t, err) + + ensureNewProposal(t, proposalCh, height, round) - ensureNewProposal(proposalCh, height, round) - ensurePrevote(voteCh, height, round) // prevote - // prevote for locked block (not proposal) - validatePrevote(t, cs1, 3, vss[0], cs1.LockedBlock.Hash()) + // prevote for nil since we did not see a proposal for our locked block in the round. + ensurePrevote(t, voteCh, height, round) + validatePrevote(ctx, t, cs1, 3, vss[0], nil) // prevote for proposed block - signAddVotes(config, cs1, tmproto.PrevoteType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) - ensurePrevote(voteCh, height, round) + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), propBlockID, vs2) + ensurePrevote(t, voteCh, height, round) - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds()) - ensurePrecommit(voteCh, height, round) - validatePrecommit(t, cs1, round, 0, vss[0], nil, theBlockHash) // precommit nil but locked on proposal + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.voteTimeout(round).Nanoseconds()) + ensurePrecommit(t, voteCh, height, round) + validatePrecommit(ctx, t, cs1, round, 0, vss[0], nil, initialBlockID.Hash) // precommit nil but locked on proposal signAddVotes( - config, + ctx, + t, cs1, tmproto.PrecommitType, - propBlock.Hash(), - propBlock.MakePartSet(partSize).Header(), + config.ChainID(), + propBlockID, vs2) // NOTE: conflicting precommits at same height - ensurePrecommit(voteCh, height, round) + ensurePrecommit(t, voteCh, height, round) } -// 4 vals in two rounds, -// in round one: v1 precommits, other 3 only prevote so the block isn't committed -// in round two: v1 prevotes the same block that the node is locked on -// the others prevote a new block hence v1 changes lock and precommits the new block with the others -func TestStateLockPOLRelock(t *testing.T) { +// TestStateLock_POLUpdateLock tests that a validator updates its locked +// block if the following conditions are met within a round: +// 1. 
The validator received a valid proposal for the block +// 2. The validator received prevotes representing greater than 2/3 of the voting +// power on the network for the block. +func TestStateLock_POLUpdateLock(t *testing.T) { config := configSetup(t) + logger := log.NewNopLogger() - cs1, vss, err := randState(config, 4) - require.NoError(t, err) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cs1, vss := makeState(ctx, t, makeStateArgs{config: config, logger: logger}) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round partSize := types.BlockPartSizeBytes - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - proTxHash1, err := cs1.privValidator.GetProTxHash(context.Background()) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + proTxHash, err := cs1.privValidator.GetProTxHash(ctx) require.NoError(t, err) - voteCh := subscribeToVoter(cs1, proTxHash1) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlockHeader) - - // everything done from perspective of cs1 + voteCh := subscribeToVoter(ctx, t, cs1, proTxHash) + lockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryLock) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) /* - Round1 (cs1, B) // B B B B// B nil B nil + Round 0: + cs1 creates a proposal for block B. + Send a prevote for B from each of the validators to cs1. + Send a precommit for nil from all of the validators to cs1. - eg. vs2 and vs4 didn't see the 2/3 prevotes + This ensures that cs1 will lock on B in this round but not precommit it. */ // start round and wait for propose and prevote - startTestRound(cs1, height, round) + startTestRound(ctx, cs1, height, round) - ensureNewRound(newRoundCh, height, round) - ensureNewProposal(proposalCh, height, round) + ensureNewRound(t, newRoundCh, height, round) + ensureNewProposal(t, proposalCh, height, round) rs := cs1.GetRoundState() - theBlockHash := rs.ProposalBlock.Hash() - theBlockParts := rs.ProposalBlockParts.Header() + initialBlockID := types.BlockID{ + Hash: rs.ProposalBlock.Hash(), + PartSetHeader: rs.ProposalBlockParts.Header(), + } - ensurePrevote(voteCh, height, round) // prevote + ensurePrevote(t, voteCh, height, round) - signAddVotes(config, cs1, tmproto.PrevoteType, theBlockHash, theBlockParts, vs2, vs3, vs4) + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), initialBlockID, vs2, vs3, vs4) - ensurePrecommit(voteCh, height, round) // our precommit - // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash) + // check that the validator generates a Lock event. + ensureLock(t, lockCh, height, round) - // add precommits from the rest - signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + // the proposed block should now be locked and our precommit added. + ensurePrecommit(t, voteCh, height, round) + validatePrecommit(ctx, t, cs1, round, round, vss[0], initialBlockID.Hash, initialBlockID.Hash) - // before we timeout to the new round set the new proposal - cs2, err := newState(cs1.state, vs2, kvstore.NewApplication()) - require.NoError(t, err) + // add precommits from the rest of the validators. 
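+	// (A zero-valued types.BlockID{} denotes a vote for nil here, replacing the
+	// earlier nil hash plus empty types.PartSetHeader{} arguments.)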
+ signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) - prop, propBlock := decideProposal(cs2, vs2, vs2.Height, vs2.Round+1) - if prop == nil || propBlock == nil { - t.Fatal("Failed to create proposal block with vs2") - } - propBlockParts := propBlock.MakePartSet(partSize) - propBlockHash := propBlock.Hash() - require.NotEqual(t, propBlockHash, theBlockHash) + // timeout to new round. + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.voteTimeout(round).Nanoseconds()) + + /* + Round 1: + Create a block D and send a proposal for it to cs1. + Send a prevote for D from each of the validators to cs1. + Send a precommit for nil from all of the validators to cs1. + Check that cs1 is now locked on the new block D and no longer on the old block. + */ incrementRound(vs2, vs3, vs4) + round++ - // timeout to new round - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) + // Generate a new proposal block. + cs2 := newState(ctx, t, logger, cs1.state, vs2, kvstore.NewApplication()) + propR1, propBlockR1 := decideProposal(ctx, t, cs2, vs2, vs2.Height, vs2.Round) + propBlockR1Parts, err := propBlockR1.MakePartSet(partSize) + require.NoError(t, err) + propBlockR1Hash := propBlockR1.Hash() + r1BlockID := types.BlockID{ + Hash: propBlockR1Hash, + PartSetHeader: propBlockR1Parts.Header(), + } + require.NotEqual(t, propBlockR1Hash, initialBlockID.Hash) + err = cs1.SetProposalAndBlock(ctx, propR1, propBlockR1, propBlockR1Parts, "some peer") + require.NoError(t, err) - round++ // moving to the next round - //XXX: this isnt guaranteed to get there before the timeoutPropose ... - if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { - t.Fatal(err) + ensureNewRound(t, newRoundCh, height, round) + + // ensure that the validator receives the proposal. + ensureNewProposal(t, proposalCh, height, round) + + // Prevote nil since the proposal does not match our locked block. + ensurePrevoteMatch(t, voteCh, height, round, nil) + + // Add prevotes from the remainder of the validators for the new block. + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), r1BlockID, vs2, vs3, vs4) + + // Check that we lock on a new block. + ensureLock(t, lockCh, height, round) + + ensurePrecommit(t, voteCh, height, round) + + // We should now be locked on the new block and precommit it since we saw a sufficient amount + // of voting power prevote for the block. + validatePrecommit(ctx, t, cs1, round, round, vss[0], propBlockR1Hash, propBlockR1Hash) +} + +// TestStateLock_POLRelock tests that a validator updates its locked round if +// it receives votes representing over 2/3 of the voting power on the network +// for a block that it is already locked on.
+func TestStateLock_POLRelock(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + config := configSetup(t) + + cs1, vss := makeState(ctx, t, makeStateArgs{config: config}) + vs2, vs3, vs4 := vss[1], vss[2], vss[3] + height, round := cs1.Height, cs1.Round + + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + proTxHash, err := cs1.privValidator.GetProTxHash(ctx) + require.NoError(t, err) + voteCh := subscribeToVoter(ctx, t, cs1, proTxHash) + lockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryLock) + relockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryRelock) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + + /* + Round 0: + cs1 creates a proposal for block B. + Send a prevote for B from each of the validators to cs1. + Send a precommit for nil from all of the validators to cs1. + This ensures that cs1 will lock on B in this round but not precommit it. + */ + + startTestRound(ctx, cs1, height, round) + + ensureNewRound(t, newRoundCh, height, round) + ensureNewProposal(t, proposalCh, height, round) + rs := cs1.GetRoundState() + theBlock := rs.ProposalBlock + theBlockParts := rs.ProposalBlockParts + blockID := types.BlockID{ + Hash: rs.ProposalBlock.Hash(), + PartSetHeader: rs.ProposalBlockParts.Header(), } - ensureNewRound(newRoundCh, height, round) - t.Log("### ONTO ROUND 1") + ensurePrevote(t, voteCh, height, round) + + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2, vs3, vs4) + + // check that the validator generates a Lock event. + ensureLock(t, lockCh, height, round) + + // the proposed block should now be locked and our precommit added. + ensurePrecommit(t, voteCh, height, round) + validatePrecommit(ctx, t, cs1, round, round, vss[0], blockID.Hash, blockID.Hash) + + // add precommits from the rest of the validators. + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) + + // timeout to new round. + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.voteTimeout(round).Nanoseconds()) /* - Round2 (vs2, C) // B C C C // C C C _) + Round 1: + Create a proposal for block B, the same block from round 0. + Send a prevote for B from each of the validators to cs1. + Send a precommit for nil from all of the validators to cs1. - cs1 changes lock! + Check that cs1 updates its 'locked round' value to the current round. */ + incrementRound(vs2, vs3, vs4) + round++ + propR1 := types.NewProposal(height, 1, round, cs1.ValidRound, blockID, theBlock.Header.Time) + p := propR1.ToProto() + _, valSet := cs1.GetValidatorSet() + _, err = vs2.SignProposal(ctx, cs1.state.ChainID, valSet.QuorumType, valSet.QuorumHash, p) + require.NoError(t, err) + propR1.Signature = p.Signature + err = cs1.SetProposalAndBlock(ctx, propR1, theBlock, theBlockParts, "") + require.NoError(t, err) - // now we're on a new round and not the proposer - // but we should receive the proposal - ensureNewProposal(proposalCh, height, round) + ensureNewRound(t, newRoundCh, height, round) - // go to prevote, node should prevote for locked block (not the new proposal) - this is relocking - ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], theBlockHash) + // ensure that the validator receives the proposal.
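+ // The proposal carries the same block B from round 0, so it matches cs1's locked block.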
+ ensureNewProposal(t, proposalCh, height, round) - // now lets add prevotes from everyone else for the new block - signAddVotes(config, cs1, tmproto.PrevoteType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4) + // Prevote our locked block since it matches the proposal seen in this round. + ensurePrevote(t, voteCh, height, round) + validatePrevote(ctx, t, cs1, round, vss[0], blockID.Hash) - ensurePrecommit(voteCh, height, round) - // we should have unlocked and locked on the new block, sending a precommit for this new block - validatePrecommit(t, cs1, round, round, vss[0], propBlockHash, propBlockHash) + // Add prevotes from the remainder of the validators for the locked block. + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2, vs3, vs4) - // more prevote creating a majority on the new block and this is then committed - signAddVotes(config, cs1, tmproto.PrecommitType, propBlockHash, propBlockParts.Header(), vs2, vs3) - ensureNewBlockHeader(newBlockCh, height, propBlockHash) + // Check that we relock. + ensureRelock(t, relockCh, height, round) - ensureNewRound(newRoundCh, height+1, 0) + ensurePrecommit(t, voteCh, height, round) + + // We should now be locked on the same block but with an updated locked round. + validatePrecommit(ctx, t, cs1, round, round, vss[0], blockID.Hash, blockID.Hash) } -// 4 vals, one precommits, other 3 polka at next round, so we unlock and precomit the polka -func TestStateLockPOLUnlock(t *testing.T) { +// TestStateLock_PrevoteNilWhenLockedAndMissProposal tests that a validator prevotes nil +// if it is locked on a block and misses the proposal in a round. +func TestStateLock_PrevoteNilWhenLockedAndMissProposal(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() config := configSetup(t) - cs1, vss, err := randState(config, 4) - require.NoError(t, err) + cs1, vss := makeState(ctx, t, makeStateArgs{config: config}) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round - partSize := types.BlockPartSizeBytes - - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock) - proTxHash1, err := cs1.privValidator.GetProTxHash(context.Background()) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + proTxHash, err := cs1.privValidator.GetProTxHash(ctx) require.NoError(t, err) - voteCh := subscribeToVoter(cs1, proTxHash1) - - // everything done from perspective of cs1 + voteCh := subscribeToVoter(ctx, t, cs1, proTxHash) + lockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryLock) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) /* - Round1 (cs1, B) // B B B B // B nil B nil - eg. didn't see the 2/3 prevotes + Round 0: + cs1 creates a proposal for block B. + Send a prevote for B from each of the validators to cs1. + Send a precommit for nil from all of the validators to cs1. + + This ensures that cs1 will lock on B in this round but not precommit it.
*/ - // start round and wait for propose and prevote - startTestRound(cs1, height, round) - ensureNewRound(newRoundCh, height, round) + startTestRound(ctx, cs1, height, round) - ensureNewProposal(proposalCh, height, round) + ensureNewRound(t, newRoundCh, height, round) + ensureNewProposal(t, proposalCh, height, round) rs := cs1.GetRoundState() - theBlockHash := rs.ProposalBlock.Hash() - theBlockParts := rs.ProposalBlockParts.Header() + blockID := types.BlockID{ + Hash: rs.ProposalBlock.Hash(), + PartSetHeader: rs.ProposalBlockParts.Header(), + } - ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], theBlockHash) + ensurePrevote(t, voteCh, height, round) - signAddVotes(config, cs1, tmproto.PrevoteType, theBlockHash, theBlockParts, vs2, vs3, vs4) + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2, vs3, vs4) - ensurePrecommit(voteCh, height, round) - // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash) + // check that the validator generates a Lock event. + ensureLock(t, lockCh, height, round) - // add precommits from the rest - signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs4) - signAddVotes(config, cs1, tmproto.PrecommitType, theBlockHash, theBlockParts, vs3) + // the proposed block should now be locked and our precommit added. + ensurePrecommit(t, voteCh, height, round) + validatePrecommit(ctx, t, cs1, round, round, vss[0], blockID.Hash, blockID.Hash) - // before we time out into new round, set next proposal block - prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1) - propBlockParts := propBlock.MakePartSet(partSize) + // add precommits from the rest of the validators. + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) - // timeout to new round - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) - rs = cs1.GetRoundState() - lockedBlockHash := rs.LockedBlock.Hash() + // timeout to new round. + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.voteTimeout(round).Nanoseconds()) + + /* + Round 1: + Send a prevote for nil from each of the validators to cs1. + Send a precommit for nil from all of the validators to cs1. + Check that cs1 prevotes nil instead of its locked block, but ensure + that it maintains its locked block. + */ incrementRound(vs2, vs3, vs4) - round++ // moving to the next round + round++ + + ensureNewRound(t, newRoundCh, height, round) + + // Prevote nil. + ensurePrevote(t, voteCh, height, round) + validatePrevote(ctx, t, cs1, round, vss[0], nil) - ensureNewRound(newRoundCh, height, round) - t.Log("#### ONTO ROUND 1") + // Add nil prevotes from the remainder of the validators. + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) + ensurePrecommit(t, voteCh, height, round) + // We should still be locked on the original block and precommit nil; the locked round is unchanged. + validatePrecommit(ctx, t, cs1, round, 0, vss[0], nil, blockID.Hash) +} + +// TestStateLock_PrevoteNilWhenLockedAndDifferentProposal tests that a validator prevotes nil +// if it is locked on a block and sees a different proposal in a round.
+func TestStateLock_PrevoteNilWhenLockedAndDifferentProposal(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + logger := log.NewNopLogger() + config := configSetup(t) /* - Round2 (vs2, C) // B nil nil nil // nil nil nil _ - cs1 unlocks! + All of the assertions in this test occur on the `cs1` validator. + The test sends signed votes from the other validators to cs1 and + cs1's state is then examined to verify that it now matches the expected + state. */ - //XXX: this isnt guaranteed to get there before the timeoutPropose ... - if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { - t.Fatal(err) + + cs1, vss := makeState(ctx, t, makeStateArgs{config: config, logger: logger}) + vs2, vs3, vs4 := vss[1], vss[2], vss[3] + height, round := cs1.Height, cs1.Round + + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + proTxHash, err := cs1.privValidator.GetProTxHash(ctx) + require.NoError(t, err) + voteCh := subscribeToVoter(ctx, t, cs1, proTxHash) + lockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryLock) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + + /* + Round 0: + cs1 creates a proposal for block B. + Send a prevote for B from each of the validators to cs1. + Send a precommit for nil from all of the validators to cs1. + + This ensures that cs1 will lock on B in this round but not precommit it. + */ + startTestRound(ctx, cs1, height, round) + + ensureNewRound(t, newRoundCh, height, round) + ensureNewProposal(t, proposalCh, height, round) + rs := cs1.GetRoundState() + blockID := types.BlockID{ + Hash: rs.ProposalBlock.Hash(), + PartSetHeader: rs.ProposalBlockParts.Header(), } - ensureNewProposal(proposalCh, height, round) + ensurePrevote(t, voteCh, height, round) + + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2, vs3, vs4) + + // check that the validator generates a Lock event. + ensureLock(t, lockCh, height, round) + + // the proposed block should now be locked and our precommit added. + ensurePrecommit(t, voteCh, height, round) + validatePrecommit(ctx, t, cs1, round, round, vss[0], blockID.Hash, blockID.Hash) + + // add precommits from the rest of the validators. + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) + + // timeout to new round. + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.voteTimeout(round).Nanoseconds()) - // go to prevote, prevote for locked block (not proposal) - ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], lockedBlockHash) - // now lets add prevotes from everyone else for nil (a polka!) - signAddVotes(config, cs1, tmproto.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + /* + Round 1: + Create a proposal for a new block. + Send a prevote for nil from each of the validators to cs1. + Send a precommit for nil from all of the validators to cs1. + + Check that cs1 prevotes nil instead of its locked block, but ensure + that it maintains its locked block.
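+ (Under the locking rule, cs1 may prevote only its locked block or nil, so the mismatched proposal must draw a nil prevote.)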
+ */ + incrementRound(vs2, vs3, vs4) + round++ + cs2 := newState(ctx, t, logger, cs1.state, vs2, kvstore.NewApplication()) + propR1, propBlockR1 := decideProposal(ctx, t, cs2, vs2, vs2.Height, vs2.Round) + propBlockR1Parts, err := propBlockR1.MakePartSet(types.BlockPartSizeBytes) + require.NoError(t, err) + propBlockR1Hash := propBlockR1.Hash() + require.NotEqual(t, propBlockR1Hash, blockID.Hash) + err = cs1.SetProposalAndBlock(ctx, propR1, propBlockR1, propBlockR1Parts, "some peer") + require.NoError(t, err) + + ensureNewRound(t, newRoundCh, height, round) + ensureNewProposal(t, proposalCh, height, round) - // the polka makes us unlock and precommit nil - ensureNewUnlock(unlockCh, height, round) - ensurePrecommit(voteCh, height, round) + // Prevote nil. + ensurePrevote(t, voteCh, height, round) + validatePrevote(ctx, t, cs1, round, vss[0], nil) - // we should have unlocked and committed nil - // NOTE: since we don't relock on nil, the lock round is -1 - validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) + // Add prevotes from the remainder of the validators for nil. + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) - signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3) - ensureNewRound(newRoundCh, height, round+1) + // We should remain locked on the same block but precommit nil. + ensurePrecommit(t, voteCh, height, round) + validatePrecommit(ctx, t, cs1, round, 0, vss[0], nil, blockID.Hash) } -// 4 vals, v1 locks on proposed block in the first round but the other validators only prevote -// In the second round, v1 misses the proposal but sees a majority prevote an unknown block so -// v1 should unlock and precommit nil. In the third round another block is proposed, all vals -// prevote and now v1 can lock onto the third block and precommit that -func TestStateLockPOLUnlockOnUnknownBlock(t *testing.T) { +// TestStateLock_POLDoesNotUnlock tests that a validator maintains its locked block +// despite receiving +2/3 nil prevotes and nil precommits from other validators. +// Tendermint used to 'unlock' its locked block when greater than 2/3 prevotes +// for a nil block were seen. This behavior has been removed; this test ensures +// that it stays removed. +func TestStateLock_POLDoesNotUnlock(t *testing.T) { config := configSetup(t) + logger := log.NewNopLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + /* + All of the assertions in this test occur on the `cs1` validator. + The test sends signed votes from the other validators to cs1 and + cs1's state is then examined to verify that it now matches the expected + state.
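+ Earlier Tendermint releases would unlock here; the current behavior must keep the lock in place.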
+ */ - cs1, vss, err := randState(config, 4) - require.NoError(t, err) + cs1, vss := makeState(ctx, t, makeStateArgs{config: config, logger: logger}) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round - partSize := types.BlockPartSizeBytes - - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - proTxHash1, err := cs1.privValidator.GetProTxHash(context.Background()) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + lockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryLock) + proTxHash, err := cs1.privValidator.GetProTxHash(ctx) require.NoError(t, err) - voteCh := subscribeToVoter(cs1, proTxHash1) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - // everything done from perspective of cs1 + voteCh := subscribeToVoter(ctx, t, cs1, proTxHash) /* - Round0 (cs1, A) // A A A A// A nil nil nil + Round 0: + Create a block B. + Send a prevote for B from each of the validators to `cs1`. + Send a precommit for B from one of the validators to `cs1`. + + This ensures that cs1 will lock on B in this round. */ // start round and wait for propose and prevote - startTestRound(cs1, height, round) + startTestRound(ctx, cs1, height, round) + ensureNewRound(t, newRoundCh, height, round) - ensureNewRound(newRoundCh, height, round) - ensureNewProposal(proposalCh, height, round) + ensureNewProposal(t, proposalCh, height, round) rs := cs1.GetRoundState() - firstBlockHash := rs.ProposalBlock.Hash() - firstBlockParts := rs.ProposalBlockParts.Header() + blockID := types.BlockID{ + Hash: rs.ProposalBlock.Hash(), + PartSetHeader: rs.ProposalBlockParts.Header(), + } - ensurePrevote(voteCh, height, round) // prevote + ensurePrevoteMatch(t, voteCh, height, round, blockID.Hash) - signAddVotes(config, cs1, tmproto.PrevoteType, firstBlockHash, firstBlockParts, vs2, vs3, vs4) + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2, vs3, vs4) - ensurePrecommit(voteCh, height, round) // our precommit - // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, round, round, vss[0], firstBlockHash, firstBlockHash) + // the validator should have locked a block in this round. + ensureLock(t, lockCh, height, round) - // add precommits from the rest - signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + ensurePrecommit(t, voteCh, height, round) + // the proposed block should now be locked and our precommit should be for this locked block. - // before we timeout to the new round set the new proposal - cs2, err := newState(cs1.state, vs2, kvstore.NewApplication()) - require.NoError(t, err) - prop, propBlock := decideProposal(cs2, vs2, vs2.Height, vs2.Round+1) - if prop == nil || propBlock == nil { - t.Fatal("Failed to create proposal block with vs2") - } - secondBlockParts := propBlock.MakePartSet(partSize) - secondBlockHash := propBlock.Hash() - require.NotEqual(t, secondBlockHash, firstBlockHash) + validatePrecommit(ctx, t, cs1, round, round, vss[0], blockID.Hash, blockID.Hash) - incrementRound(vs2, vs3, vs4) + // Add precommits from the other validators. + // We only issue 1/2 Precommits for the block in this round. + // This ensures that the validator being tested does not commit the block.
+ // We do not want the validator to commit the block because we want the test + // to proceed to the next consensus round. + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs4) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vs3) // timeout to new round - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.voteTimeout(round).Nanoseconds()) - round++ // moving to the next round + /* + Round 1: + Send a prevote for nil from >2/3 of the validators to `cs1`. + Check that cs1 maintains its lock on B but precommits nil. + Send a precommit for nil from >2/3 of the validators to `cs1`. + */ + round++ + incrementRound(vs2, vs3, vs4) + cs2 := newState(ctx, t, logger, cs1.state, vs2, kvstore.NewApplication()) + prop, propBlock := decideProposal(ctx, t, cs2, vs2, vs2.Height, vs2.Round) + propBlockParts, err := propBlock.MakePartSet(types.BlockPartSizeBytes) + require.NoError(t, err) + require.NotEqual(t, propBlock.Hash(), blockID.Hash) + err = cs1.SetProposalAndBlock(ctx, prop, propBlock, propBlockParts, "") + require.NoError(t, err) + + ensureNewRound(t, newRoundCh, height, round) + + ensureNewProposal(t, proposalCh, height, round) + + // Prevote for nil since the proposed block does not match our locked block. + ensurePrevoteMatch(t, voteCh, height, round, nil) + + // add >2/3 prevotes for nil from all other validators + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) + + ensurePrecommit(t, voteCh, height, round) - ensureNewRound(newRoundCh, height, round) - t.Log("### ONTO ROUND 1") + // verify that we haven't updated our locked block since the first round + validatePrecommit(ctx, t, cs1, round, 0, vss[0], nil, blockID.Hash) + + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.voteTimeout(round).Nanoseconds()) /* - Round1 (vs2, B) // A B B B // nil nil nil nil) + Round 2: + The validator cs1 saw >2/3 precommits for nil in the previous round. + Send the validator >2/3 prevotes for nil and ensure that it did not + unlock its block at the end of the previous round. */ + round++ + incrementRound(vs2, vs3, vs4) + cs3 := newState(ctx, t, logger, cs1.state, vs3, kvstore.NewApplication()) + prop, propBlock = decideProposal(ctx, t, cs3, vs3, vs3.Height, vs3.Round) + propBlockParts, err = propBlock.MakePartSet(types.BlockPartSizeBytes) + require.NoError(t, err) + err = cs1.SetProposalAndBlock(ctx, prop, propBlock, propBlockParts, "") + require.NoError(t, err) - // now we're on a new round but v1 misses the proposal + ensureNewRound(t, newRoundCh, height, round) - // go to prevote, node should prevote for locked block (not the new proposal) - this is relocking - ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], firstBlockHash) + ensureNewProposal(t, proposalCh, height, round) - // now lets add prevotes from everyone else for the new block - signAddVotes(config, cs1, tmproto.PrevoteType, secondBlockHash, secondBlockParts.Header(), vs2, vs3, vs4) + // Prevote for nil since the proposal does not match our locked block.
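+ // (The lock is sticky: only a valid proposal paired with a newer POL, as in TestStateLock_POLUpdateLock above, could replace it.)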
+ ensurePrevote(t, voteCh, height, round) + validatePrevote(ctx, t, cs1, round, vss[0], nil) - ensurePrecommit(voteCh, height, round) - // we should have unlocked and locked on the new block, sending a precommit for this new block - validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) - if err := cs1.SetProposalAndBlock(prop, propBlock, secondBlockParts, "some peer"); err != nil { - t.Fatal(err) - } + ensurePrecommit(t, voteCh, height, round) + + // verify that we haven't updated our locked block since the first round + validatePrecommit(ctx, t, cs1, round, 0, vss[0], nil, blockID.Hash) - // more prevote creating a majority on the new block and this is then committed - signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) +} + +// TestStateLock_MissingProposalWhenPOLSeenDoesNotUpdateLock tests that observing +// a two thirds majority for a block does not cause a validator to update its lock on the +// new block if a proposal was not seen for that block. +func TestStateLock_MissingProposalWhenPOLSeenDoesNotUpdateLock(t *testing.T) { + config := configSetup(t) + logger := log.NewNopLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cs1, vss := makeState(ctx, t, makeStateArgs{config: config, logger: logger}) + vs2, vs3, vs4 := vss[1], vss[2], vss[3] + height, round := cs1.Height, cs1.Round + + partSize := types.BlockPartSizeBytes - // before we timeout to the new round set the new proposal - cs3, err := newState(cs1.state, vs3, kvstore.NewApplication()) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + proTxHash, err := cs1.privValidator.GetProTxHash(ctx) require.NoError(t, err) - prop, propBlock = decideProposal(cs3, vs3, vs3.Height, vs3.Round+1) - if prop == nil || propBlock == nil { - t.Fatal("Failed to create proposal block with vs2") + voteCh := subscribeToVoter(ctx, t, cs1, proTxHash) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + /* + Round 0: + cs1 creates a proposal for block B. + Send a prevote for B from each of the validators to cs1. + Send a precommit for nil from all of the validators to cs1. + + This ensures that cs1 will lock on B in this round but not precommit it.
+ */ + startTestRound(ctx, cs1, height, round) + + ensureNewRound(t, newRoundCh, height, round) + ensureNewProposal(t, proposalCh, height, round) + rs := cs1.GetRoundState() + firstBlockID := types.BlockID{ + Hash: rs.ProposalBlock.Hash(), + PartSetHeader: rs.ProposalBlockParts.Header(), } - thirdPropBlockParts := propBlock.MakePartSet(partSize) - thirdPropBlockHash := propBlock.Hash() - require.NotEqual(t, secondBlockHash, thirdPropBlockHash) - incrementRound(vs2, vs3, vs4) + ensurePrevote(t, voteCh, height, round) // prevote + + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), firstBlockID, vs2, vs3, vs4) + + ensurePrecommit(t, voteCh, height, round) // our precommit + // the proposed block should now be locked and our precommit added + validatePrecommit(ctx, t, cs1, round, round, vss[0], firstBlockID.Hash, firstBlockID.Hash) + + // add precommits from the rest + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) // timeout to new round - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.voteTimeout(round).Nanoseconds()) - round++ // moving to the next round - ensureNewRound(newRoundCh, height, round) - t.Log("### ONTO ROUND 2") + /* + Round 1: + Create a new block D but do not send it to cs1. + Send a prevote for D from each of the validators to cs1. + + Check that cs1 does not update its locked block to this missed block D. + */ + incrementRound(vs2, vs3, vs4) + round++ + cs2 := newState(ctx, t, logger, cs1.state, vs2, kvstore.NewApplication()) + prop, propBlock := decideProposal(ctx, t, cs2, vs2, vs2.Height, vs2.Round) + require.NotNil(t, propBlock, "Failed to create proposal block with vs2") + require.NotNil(t, prop, "Failed to create proposal block with vs2") + partSet, err := propBlock.MakePartSet(partSize) + require.NoError(t, err) + secondBlockID := types.BlockID{ + Hash: propBlock.Hash(), + PartSetHeader: partSet.Header(), + } + require.NotEqual(t, secondBlockID.Hash, firstBlockID.Hash) + + ensureNewRound(t, newRoundCh, height, round) + + // prevote for nil since the proposal was not seen. + ensurePrevoteMatch(t, voteCh, height, round, nil) + + // now let's add prevotes from everyone else for the new block + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), secondBlockID, vs2, vs3, vs4) + ensurePrecommit(t, voteCh, height, round) + validatePrecommit(ctx, t, cs1, round, 0, vss[0], nil, firstBlockID.Hash) +} + +// TestStateLock_DoesNotLockOnOldProposal tests that observing +// a two thirds majority for a block does not cause a validator to lock on the +// block if a proposal was not seen for that block in the current round, but +// was seen in a previous round.
+func TestStateLock_DoesNotLockOnOldProposal(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + config := configSetup(t) + + cs1, vss := makeState(ctx, t, makeStateArgs{config: config}) + vs2, vs3, vs4 := vss[1], vss[2], vss[3] + height, round := cs1.Height, cs1.Round + + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + proTxHash, err := cs1.privValidator.GetProTxHash(ctx) + require.NoError(t, err) + voteCh := subscribeToVoter(ctx, t, cs1, proTxHash) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) /* - Round2 (vs3, C) // C C C C // C nil nil nil) + Round 0: + cs1 creates a proposal for block B. + Send a prevote for nil from each of the validators to cs1. + Send a precommit for nil from all of the validators to cs1. + + This ensures that cs1 will not lock on B. */ + startTestRound(ctx, cs1, height, round) - if err := cs1.SetProposalAndBlock(prop, propBlock, thirdPropBlockParts, "some peer"); err != nil { - t.Fatal(err) + ensureNewRound(t, newRoundCh, height, round) + ensureNewProposal(t, proposalCh, height, round) + rs := cs1.GetRoundState() + firstBlockID := types.BlockID{ + Hash: rs.ProposalBlock.Hash(), + PartSetHeader: rs.ProposalBlockParts.Header(), } - ensurePrevote(voteCh, height, round) - // we are no longer locked to the first block so we should be able to prevote - validatePrevote(t, cs1, round, vss[0], thirdPropBlockHash) + ensurePrevote(t, voteCh, height, round) + + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) + + // The proposed block should not have been locked. + ensurePrecommit(t, voteCh, height, round) + validatePrecommit(ctx, t, cs1, round, -1, vss[0], nil, nil) + + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) + + incrementRound(vs2, vs3, vs4) + + // timeout to new round + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.voteTimeout(round).Nanoseconds()) + + /* + Round 1: + No new proposal is created. + Send a prevote for B, the block from round 0, from each of the validators to cs1. + Send a precommit for nil from all of the validators to cs1. + cs1 saw a POL for the block it saw in round 0. We ensure that it does not + lock on this block, since it did not see a proposal for it in this round. + */ + round++ + ensureNewRound(t, newRoundCh, height, round) + + ensurePrevote(t, voteCh, height, round) + validatePrevote(ctx, t, cs1, round, vss[0], nil) - signAddVotes(config, cs1, tmproto.PrevoteType, thirdPropBlockHash, thirdPropBlockParts.Header(), vs2, vs3, vs4) + // All validators prevote for the old block. + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), firstBlockID, vs2, vs3, vs4) - ensurePrecommit(voteCh, height, round) - // we have a majority, now vs1 can change lock to the third block - validatePrecommit(t, cs1, round, round, vss[0], thirdPropBlockHash, thirdPropBlockHash) + // Make sure that cs1 did not lock on the block since it did not receive a proposal for it.
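+ // A locked round of -1 and a nil locked block indicate that no lock was taken.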
+ ensurePrecommit(t, voteCh, height, round) + validatePrecommit(ctx, t, cs1, round, -1, vss[0], nil, nil) } // 4 vals // a polka at round 1 but we miss it // then a polka at round 2 that we lock on // then we see the polka from round 1 but shouldn't unlock -func TestStateLockPOLSafety1(t *testing.T) { +func TestStateLock_POLSafety1(t *testing.T) { config := configSetup(t) + logger := log.NewNopLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss, err := randState(config, 4) - require.NoError(t, err) + cs1, vss := makeState(ctx, t, makeStateArgs{config: config, logger: logger}) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round partSize := types.BlockPartSizeBytes - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - proTxHash1, err := cs1.privValidator.GetProTxHash(context.Background()) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + timeoutProposeCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutPropose) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + proTxHash, err := cs1.privValidator.GetProTxHash(ctx) require.NoError(t, err) - voteCh := subscribeToVoter(cs1, proTxHash1) + voteCh := subscribeToVoter(ctx, t, cs1, proTxHash) // start round and wait for propose and prevote - startTestRound(cs1, cs1.Height, round) - ensureNewRound(newRoundCh, height, round) + startTestRound(ctx, cs1, cs1.Height, round) + ensureNewRound(t, newRoundCh, height, round) - ensureNewProposal(proposalCh, height, round) + ensureNewProposal(t, proposalCh, height, round) rs := cs1.GetRoundState() propBlock := rs.ProposalBlock - ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], propBlock.Hash()) - + ensurePrevoteMatch(t, voteCh, height, round, propBlock.Hash()) + partSet, err := propBlock.MakePartSet(partSize) + require.NoError(t, err) + blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: partSet.Header()} // the others sign a polka but we don't see it - prevotes := signVotes(config, tmproto.PrevoteType, propBlock.Hash(), cs1.state.AppHash, cs1.Validators.QuorumType, - cs1.Validators.QuorumHash, - propBlock.MakePartSet(partSize).Header(), vs2, vs3, vs4) - - t.Logf("old prop hash %v", fmt.Sprintf("%X", propBlock.Hash())) + prevotes := signVotes(ctx, t, tmproto.PrevoteType, config.ChainID(), blockID, cs1.state.AppHash, + cs1.Validators.QuorumType, cs1.Validators.QuorumHash, + vs2, vs3, vs4) // we do see them precommit nil - signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) // cs1 precommit nil - ensurePrecommit(voteCh, height, round) - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) - - t.Log("### ONTO ROUND 1") - - prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1) - propBlockHash := propBlock.Hash() - propBlockParts := propBlock.MakePartSet(partSize) + ensurePrecommit(t, voteCh, height, round) + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.voteTimeout(round).Nanoseconds()) incrementRound(vs2, vs3, vs4) - round++ // 
moving to the next round - ensureNewRound(newRoundCh, height, round) + cs2 := newState(ctx, t, logger, cs1.state, vs2, kvstore.NewApplication()) + prop, propBlock := decideProposal(ctx, t, cs2, vs2, vs2.Height, vs2.Round) + propBlockParts, err := propBlock.MakePartSet(partSize) + require.NoError(t, err) + r2BlockID := types.BlockID{ + Hash: propBlock.Hash(), + PartSetHeader: propBlockParts.Header(), + } + + ensureNewRound(t, newRoundCh, height, round) //XXX: this isnt guaranteed to get there before the timeoutPropose ... - if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { - t.Fatal(err) - } + err = cs1.SetProposalAndBlock(ctx, prop, propBlock, propBlockParts, "some peer") + require.NoError(t, err) /*Round2 // we timeout and prevote our lock // a polka happened but we didn't see it! */ - ensureNewProposal(proposalCh, height, round) + ensureNewProposal(t, proposalCh, height, round) rs = cs1.GetRoundState() - if rs.LockedBlock != nil { - panic("we should not be locked!") - } - t.Logf("new prop hash %v", fmt.Sprintf("%X", propBlockHash)) + require.Nil(t, rs.LockedBlock, "we should not be locked!") // go to prevote, prevote for proposal block - ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], propBlockHash) + ensurePrevoteMatch(t, voteCh, height, round, r2BlockID.Hash) // now we see the others prevote for it, so we should lock on it - signAddVotes(config, cs1, tmproto.PrevoteType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4) + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), r2BlockID, vs2, vs3, vs4) - ensurePrecommit(voteCh, height, round) + ensurePrecommit(t, voteCh, height, round) // we should have precommitted - validatePrecommit(t, cs1, round, round, vss[0], propBlockHash, propBlockHash) + validatePrecommit(ctx, t, cs1, round, round, vss[0], r2BlockID.Hash, r2BlockID.Hash) - signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.voteTimeout(round).Nanoseconds()) incrementRound(vs2, vs3, vs4) round++ // moving to the next round - ensureNewRound(newRoundCh, height, round) + ensureNewRound(t, newRoundCh, height, round) - t.Log("### ONTO ROUND 2") /*Round3 we see the polka from round 1 but we shouldn't unlock! */ // timeout of propose - ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) + ensureNewTimeout(t, timeoutProposeCh, height, round, cs1.proposeTimeout(round).Nanoseconds()) // finish prevote - ensurePrevote(voteCh, height, round) - // we should prevote what we're locked on - validatePrevote(t, cs1, round, vss[0], propBlockHash) + ensurePrevoteMatch(t, voteCh, height, round, nil) - newStepCh := subscribe(cs1.eventBus, types.EventQueryNewRoundStep) + newStepCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRoundStep) // before prevotes from the previous round are added // add prevotes from the earlier round addVotes(cs1, prevotes...) - t.Log("Done adding prevotes!") - - ensureNoNewRoundStep(newStepCh) + ensureNoNewRoundStep(t, newStepCh) } // 4 vals. 
@@ -1149,193 +1509,330 @@ func TestStateLockPOLSafety1(t *testing.T) { // What we want: // dont see P0, lock on P1 at R1, dont unlock using P0 at R2 -func TestStateLockPOLSafety2(t *testing.T) { +func TestStateLock_POLSafety2(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss, err := randState(config, 4) - require.NoError(t, err) + cs1, vss := makeState(ctx, t, makeStateArgs{config: config}) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round partSize := types.BlockPartSizeBytes - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock) - proTxHash1, err := cs1.privValidator.GetProTxHash(context.Background()) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + proTxHash, err := cs1.privValidator.GetProTxHash(ctx) require.NoError(t, err) - voteCh := subscribeToVoter(cs1, proTxHash1) + voteCh := subscribeToVoter(ctx, t, cs1, proTxHash) // the block for R0: gets polkad but we miss it // (even though we signed it, shhh) - _, propBlock0 := decideProposal(cs1, vss[0], height, round) + _, propBlock0 := decideProposal(ctx, t, cs1, vss[0], height, round) propBlockHash0 := propBlock0.Hash() - propBlockParts0 := propBlock0.MakePartSet(partSize) + propBlockParts0, err := propBlock0.MakePartSet(partSize) + require.NoError(t, err) propBlockID0 := types.BlockID{Hash: propBlockHash0, PartSetHeader: propBlockParts0.Header()} // the others sign a polka but we don't see it - prevotes := signVotes(config, tmproto.PrevoteType, propBlockHash0, cs1.state.AppHash, cs1.Validators.QuorumType, - cs1.Validators.QuorumHash, - propBlockParts0.Header(), vs2, vs3, vs4) + prevotes := signVotes(ctx, t, tmproto.PrevoteType, config.ChainID(), propBlockID0, cs1.state.AppHash, + cs1.Validators.QuorumType, cs1.Validators.QuorumHash, + vs2, vs3, vs4) // the block for round 1 - prop1, propBlock1 := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1) - propBlockHash1 := propBlock1.Hash() - propBlockParts1 := propBlock1.MakePartSet(partSize) + prop1, propBlock1 := decideProposal(ctx, t, cs1, vs2, vs2.Height, vs2.Round+1) + propBlockParts1, err := propBlock1.MakePartSet(partSize) + require.NoError(t, err) + propBlockID1 := types.BlockID{Hash: propBlock1.Hash(), PartSetHeader: propBlockParts1.Header()} incrementRound(vs2, vs3, vs4) round++ // moving to the next round - t.Log("### ONTO Round 1") + // jump in at round 1 - startTestRound(cs1, height, round) - ensureNewRound(newRoundCh, height, round) + startTestRound(ctx, cs1, height, round) + ensureNewRound(t, newRoundCh, height, round) - if err := cs1.SetProposalAndBlock(prop1, propBlock1, propBlockParts1, "some peer"); err != nil { - t.Fatal(err) - } - ensureNewProposal(proposalCh, height, round) + err = cs1.SetProposalAndBlock(ctx, prop1, propBlock1, propBlockParts1, "some peer") + require.NoError(t, err) + ensureNewProposal(t, proposalCh, height, round) - ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], propBlockHash1) + ensurePrevoteMatch(t, voteCh, height, round, propBlockID1.Hash) - signAddVotes(config, cs1, tmproto.PrevoteType, propBlockHash1, 
propBlockParts1.Header(), vs2, vs3, vs4) + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), propBlockID1, vs2, vs3, vs4) - ensurePrecommit(voteCh, height, round) + ensurePrecommit(t, voteCh, height, round) // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, round, round, vss[0], propBlockHash1, propBlockHash1) + validatePrecommit(ctx, t, cs1, round, round, vss[0], propBlockID1.Hash, propBlockID1.Hash) // add precommits from the rest - signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs4) - signAddVotes(config, cs1, tmproto.PrecommitType, propBlockHash1, propBlockParts1.Header(), vs3) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs4) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), propBlockID1, vs3) incrementRound(vs2, vs3, vs4) // timeout of precommit wait to new round - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.voteTimeout(round).Nanoseconds()) round++ // moving to the next round // in round 2 we see the polkad block from round 0 - newProp := types.NewProposal(height, 1, round, 0, propBlockID0) + newProp := types.NewProposal(height, 1, round, 0, propBlockID0, propBlock0.Header.Time) p := newProp.ToProto() _, valSet := cs1.GetValidatorSet() - if _, err := vs3.SignProposal(context.Background(), config.ChainID(), valSet.QuorumType, valSet.QuorumHash, p); err != nil { - t.Fatal(err) - } + _, err = vs3.SignProposal(ctx, config.ChainID(), valSet.QuorumType, valSet.QuorumHash, p) + require.NoError(t, err) newProp.Signature = p.Signature - if err := cs1.SetProposalAndBlock(newProp, propBlock0, propBlockParts0, "some peer"); err != nil { - t.Fatal(err) - } + err = cs1.SetProposalAndBlock(ctx, newProp, propBlock0, propBlockParts0, "some peer") + require.NoError(t, err) // Add the pol votes addVotes(cs1, prevotes...) - ensureNewRound(newRoundCh, height, round) - t.Log("### ONTO Round 2") + ensureNewRound(t, newRoundCh, height, round) + /*Round2 // now we see the polka from round 1, but we shouldnt unlock */ - ensureNewProposal(proposalCh, height, round) + ensureNewProposal(t, proposalCh, height, round) - ensureNoNewUnlock(unlockCh) - ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], propBlockHash1) + ensurePrevote(t, voteCh, height, round) + validatePrevote(ctx, t, cs1, round, vss[0], nil) } +// TestState_PrevotePOLFromPreviousRound tests that a validator will prevote +// for a block if it is locked on a different block but saw a POL for the block +// it is not locked on in a previous round. 
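+// This mirrors the proposal rule of the Tendermint consensus algorithm: a proposal +// carrying a POLRound at or after the validator's locked round may be prevoted +// even though it differs from the locked block.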
+func TestState_PrevotePOLFromPreviousRound(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + config := configSetup(t) + logger := log.NewNopLogger() + + cs1, vss := makeState(ctx, t, makeStateArgs{config: config, logger: logger}) + vs2, vs3, vs4 := vss[1], vss[2], vss[3] + height, round := cs1.Height, cs1.Round + + partSize := types.BlockPartSizeBytes + + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + proTxHash, err := cs1.privValidator.GetProTxHash(ctx) + require.NoError(t, err) + voteCh := subscribeToVoter(ctx, t, cs1, proTxHash) + lockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryLock) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + + /* + Round 0: + cs1 creates a proposal for block B. + Send a prevote for B from each of the validators to cs1. + Send a precommit for nil from all of the validators to cs1. + + This ensures that cs1 will lock on B in this round but not precommit it. + */ + + startTestRound(ctx, cs1, height, round) + + ensureNewRound(t, newRoundCh, height, round) + ensureNewProposal(t, proposalCh, height, round) + rs := cs1.GetRoundState() + r0BlockID := types.BlockID{ + Hash: rs.ProposalBlock.Hash(), + PartSetHeader: rs.ProposalBlockParts.Header(), + } + + ensurePrevote(t, voteCh, height, round) + + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), r0BlockID, vs2, vs3, vs4) + + // check that the validator generates a Lock event. + ensureLock(t, lockCh, height, round) + + // the proposed block should now be locked and our precommit added. + ensurePrecommit(t, voteCh, height, round) + validatePrecommit(ctx, t, cs1, round, round, vss[0], r0BlockID.Hash, r0BlockID.Hash) + + // add precommits from the rest of the validators. + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) + + // timeout to new round. + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.voteTimeout(round).Nanoseconds()) + + /* + Round 1: + Create a block D but do not send a proposal for it to cs1. + Send a prevote for D from each of the validators to cs1 so that cs1 sees a POL. + Send a precommit for nil from all of the validators to cs1. + + cs1 has now seen greater than 2/3 of the voting power prevote D in this round + but cs1 did not see the proposal for D in this round so it will not prevote or precommit it. + */ + + incrementRound(vs2, vs3, vs4) + round++ + // Generate a new proposal block. + cs2 := newState(ctx, t, logger, cs1.state, vs2, kvstore.NewApplication()) + cs2.ValidRound = 1 + propR1, propBlockR1 := decideProposal(ctx, t, cs2, vs2, vs2.Height, round) + + assert.EqualValues(t, 1, propR1.POLRound) + + propBlockR1Parts, err := propBlockR1.MakePartSet(partSize) + require.NoError(t, err) + r1BlockID := types.BlockID{ + Hash: propBlockR1.Hash(), + PartSetHeader: propBlockR1Parts.Header(), + } + require.NotEqual(t, r1BlockID.Hash, r0BlockID.Hash) + + ensureNewRound(t, newRoundCh, height, round) + + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), r1BlockID, vs2, vs3, vs4) + + ensurePrevote(t, voteCh, height, round) + validatePrevote(ctx, t, cs1, round, vss[0], nil) + + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) + + ensurePrecommit(t, voteCh, height, round) + + // timeout to new round.
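+ // cs1 precommitted nil, so the round advances only once the precommit timeout fires.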
+ ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.voteTimeout(round).Nanoseconds()) + + /* + Create a new proposal for D, the same block from Round 1. + cs1 already saw greater than 2/3 of the voting power on the network vote for + D in a previous round, so it should prevote D once it receives a proposal for it. + + cs1 does not need to receive prevotes from other validators before the proposal + in this round. It will still prevote the block. + + Send cs1 prevotes for nil and check that it precommits nil while remaining + locked on the block from round 0, not the block that it just prevoted. + */ + incrementRound(vs2, vs3, vs4) + round++ + propR2 := types.NewProposal(height, 1, round, 1, r1BlockID, propBlockR1.Header.Time) + p := propR2.ToProto() + _, valSet := cs1.GetValidatorSet() + _, err = vs3.SignProposal(ctx, cs1.state.ChainID, valSet.QuorumType, valSet.QuorumHash, p) + require.NoError(t, err) + propR2.Signature = p.Signature + + // cs1 receives a proposal for D, the block that received a POL in round 1. + err = cs1.SetProposalAndBlock(ctx, propR2, propBlockR1, propBlockR1Parts, "") + require.NoError(t, err) + + ensureNewRound(t, newRoundCh, height, round) + + ensureNewProposal(t, proposalCh, height, round) + + // We should now prevote this block, despite being locked on the block from + // round 0. + ensurePrevote(t, voteCh, height, round) + validatePrevote(ctx, t, cs1, round, vss[0], r1BlockID.Hash) + + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) + + // cs1 did not receive a POL within this round, so it should remain locked + // on the block from round 0. + ensurePrecommit(t, voteCh, height, round) + validatePrecommit(ctx, t, cs1, round, 0, vss[0], nil, r0BlockID.Hash) +} + // 4 vals. -// polka P0 at R0 for B0. We lock B0 on P0 at R0. P0 unlocks value at R1. +// polka P0 at R0 for B0. We lock B0 on P0 at R0. // What we want: // P0 proposes B0 at R3.
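+// (cs1 retains B0 as its valid block through the intervening nil rounds, so when +// it becomes proposer again it re-proposes B0 with POLRound equal to its ValidRound.)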
func TestProposeValidBlock(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss, err := randState(config, 4) - require.NoError(t, err) + cs1, vss := makeState(ctx, t, makeStateArgs{config: config}) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round partSize := types.BlockPartSizeBytes - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock) - proTxHash1, err := cs1.privValidator.GetProTxHash(context.Background()) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + timeoutProposeCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutPropose) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + proTxHash, err := cs1.privValidator.GetProTxHash(ctx) require.NoError(t, err) - voteCh := subscribeToVoter(cs1, proTxHash1) + voteCh := subscribeToVoter(ctx, t, cs1, proTxHash) // start round and wait for propose and prevote - startTestRound(cs1, cs1.Height, round) - ensureNewRound(newRoundCh, height, round) + startTestRound(ctx, cs1, cs1.Height, round) + ensureNewRound(t, newRoundCh, height, round) - ensureNewProposal(proposalCh, height, round) + ensureNewProposal(t, proposalCh, height, round) rs := cs1.GetRoundState() propBlock := rs.ProposalBlock - propBlockHash := propBlock.Hash() + partSet, err := propBlock.MakePartSet(partSize) + require.NoError(t, err) + blockID := types.BlockID{ + Hash: propBlock.Hash(), + PartSetHeader: partSet.Header(), + } - ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], propBlockHash) + ensurePrevoteMatch(t, voteCh, height, round, blockID.Hash) // the others sign a polka - signAddVotes(config, cs1, tmproto.PrevoteType, propBlockHash, propBlock.MakePartSet(partSize).Header(), vs2, vs3, vs4) + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2, vs3, vs4) - ensurePrecommit(voteCh, height, round) - // we should have precommitted - validatePrecommit(t, cs1, round, round, vss[0], propBlockHash, propBlockHash) + ensurePrecommit(t, voteCh, height, round) + // we should have precommitted the proposed block in this round. 
- signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + validatePrecommit(ctx, t, cs1, round, round, vss[0], blockID.Hash, blockID.Hash) - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) + + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.voteTimeout(round).Nanoseconds()) incrementRound(vs2, vs3, vs4) round++ // moving to the next round - ensureNewRound(newRoundCh, height, round) - - t.Log("### ONTO ROUND 2") + ensureNewRound(t, newRoundCh, height, round) // timeout of propose - ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) + ensureNewTimeout(t, timeoutProposeCh, height, round, cs1.proposeTimeout(round).Nanoseconds()) - ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], propBlockHash) + // We did not see a valid proposal within this round, so prevote nil. + ensurePrevoteMatch(t, voteCh, height, round, nil) - signAddVotes(config, cs1, tmproto.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) - ensureNewUnlock(unlockCh, height, round) - - ensurePrecommit(voteCh, height, round) - // we should have precommitted - validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) + ensurePrecommit(t, voteCh, height, round) + // we should have precommitted nil during this round because we received + // >2/3 precommits for nil from the other validators. + validatePrecommit(ctx, t, cs1, round, 0, vss[0], nil, blockID.Hash) incrementRound(vs2, vs3, vs4) incrementRound(vs2, vs3, vs4) - signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) - round += 2 // moving to the next round + round += 2 // increment by multiple rounds - ensureNewRound(newRoundCh, height, round) - t.Log("### ONTO ROUND 3") + ensureNewRound(t, newRoundCh, height, round) - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.voteTimeout(round).Nanoseconds()) round++ // moving to the next round - ensureNewRound(newRoundCh, height, round) - - t.Log("### ONTO ROUND 4") + ensureNewRound(t, newRoundCh, height, round) - ensureNewProposal(proposalCh, height, round) + ensureNewProposal(t, proposalCh, height, round) rs = cs1.GetRoundState() - assert.True(t, bytes.Equal(rs.ProposalBlock.Hash(), propBlockHash)) + assert.True(t, bytes.Equal(rs.ProposalBlock.Hash(), blockID.Hash)) assert.True(t, bytes.Equal(rs.ProposalBlock.Hash(), rs.ValidBlock.Hash())) assert.True(t, rs.Proposal.POLRound == rs.ValidRound) assert.True(t, bytes.Equal(rs.Proposal.BlockID.Hash, rs.ValidBlock.Hash())) @@ -1345,46 +1842,50 @@ func TestProposeValidBlock(t *testing.T) { // P0 miss to lock B but set valid block to B after receiving delayed prevote. 
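+// (Valid-block tracking is independent of locking: a polka completed by a delayed +// prevote still sets ValidBlock and ValidRound even though cs1 already precommitted nil.)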
func TestSetValidBlockOnDelayedPrevote(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss, err := randState(config, 4) - require.NoError(t, err) + cs1, vss := makeState(ctx, t, makeStateArgs{config: config}) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round partSize := types.BlockPartSizeBytes - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - validBlockCh := subscribe(cs1.eventBus, types.EventQueryValidBlock) - proTxHash1, err := cs1.privValidator.GetProTxHash(context.Background()) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + validBlockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryValidBlock) + proTxHash, err := cs1.privValidator.GetProTxHash(ctx) require.NoError(t, err) - voteCh := subscribeToVoter(cs1, proTxHash1) + voteCh := subscribeToVoter(ctx, t, cs1, proTxHash) // start round and wait for propose and prevote - startTestRound(cs1, cs1.Height, round) - ensureNewRound(newRoundCh, height, round) + startTestRound(ctx, cs1, cs1.Height, round) + ensureNewRound(t, newRoundCh, height, round) - ensureNewProposal(proposalCh, height, round) + ensureNewProposal(t, proposalCh, height, round) rs := cs1.GetRoundState() propBlock := rs.ProposalBlock - propBlockHash := propBlock.Hash() - propBlockParts := propBlock.MakePartSet(partSize) + partSet, err := propBlock.MakePartSet(partSize) + require.NoError(t, err) + blockID := types.BlockID{ + Hash: propBlock.Hash(), + PartSetHeader: partSet.Header(), + } - ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], propBlockHash) + ensurePrevoteMatch(t, voteCh, height, round, blockID.Hash) // vs2 sends a prevote for propBlock - signAddVotes(config, cs1, tmproto.PrevoteType, propBlockHash, propBlockParts.Header(), vs2) + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2) // vs3 sends a prevote for nil - signAddVotes(config, cs1, tmproto.PrevoteType, nil, types.PartSetHeader{}, vs3) + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), types.BlockID{}, vs3) - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds()) + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.voteTimeout(round).Nanoseconds()) - ensurePrecommit(voteCh, height, round) + ensurePrecommit(t, voteCh, height, round) // we should have precommitted - validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) + validatePrecommit(ctx, t, cs1, round, -1, vss[0], nil, nil) rs = cs1.GetRoundState() @@ -1393,14 +1894,14 @@ func TestSetValidBlockOnDelayedPrevote(t *testing.T) { assert.True(t, rs.ValidRound == -1) // vs4 sends a (delayed) prevote for propBlock - signAddVotes(config, cs1, tmproto.PrevoteType, propBlockHash, propBlockParts.Header(), vs4) + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs4) - ensureNewValidBlock(validBlockCh, height, round) + ensureNewValidBlock(t, validBlockCh, height, round) rs = cs1.GetRoundState() - assert.True(t, bytes.Equal(rs.ValidBlock.Hash(),
blockID.Hash)) + assert.True(t, rs.ValidBlockParts.Header().Equals(blockID.PartSetHeader)) assert.True(t, rs.ValidRound == round) } @@ -1409,81 +1910,451 @@ func TestSetValidBlockOnDelayedPrevote(t *testing.T) { // receiving delayed Block Proposal. func TestSetValidBlockOnDelayedProposal(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss, err := randState(config, 4) - require.NoError(t, err) + cs1, vss := makeState(ctx, t, makeStateArgs{config: config}) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round partSize := types.BlockPartSizeBytes - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - validBlockCh := subscribe(cs1.eventBus, types.EventQueryValidBlock) - proTxHash1, err := cs1.privValidator.GetProTxHash(context.Background()) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + timeoutProposeCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutPropose) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + validBlockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryValidBlock) + proTxHash, err := cs1.privValidator.GetProTxHash(ctx) require.NoError(t, err) - voteCh := subscribeToVoter(cs1, proTxHash1) - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + voteCh := subscribeToVoter(ctx, t, cs1, proTxHash) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) round++ // move to round in which P0 is not proposer incrementRound(vs2, vs3, vs4) - startTestRound(cs1, cs1.Height, round) - ensureNewRound(newRoundCh, height, round) + startTestRound(ctx, cs1, cs1.Height, round) + ensureNewRound(t, newRoundCh, height, round) - ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) + ensureNewTimeout(t, timeoutProposeCh, height, round, cs1.proposeTimeout(round).Nanoseconds()) - ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], nil) + ensurePrevoteMatch(t, voteCh, height, round, nil) - prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1) - propBlockHash := propBlock.Hash() - propBlockParts := propBlock.MakePartSet(partSize) + prop, propBlock := decideProposal(ctx, t, cs1, vs2, vs2.Height, vs2.Round+1) + partSet, err := propBlock.MakePartSet(partSize) + require.NoError(t, err) + blockID := types.BlockID{ + Hash: propBlock.Hash(), + PartSetHeader: partSet.Header(), + } // vs2, vs3 and vs4 send prevote for propBlock - signAddVotes(config, cs1, tmproto.PrevoteType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4) - ensureNewValidBlock(validBlockCh, height, round) + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2, vs3, vs4) + ensureNewValidBlock(t, validBlockCh, height, round) - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds()) + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.voteTimeout(round).Nanoseconds()) - ensurePrecommit(voteCh, height, round) - validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) + ensurePrecommit(t, voteCh, height, round) + validatePrecommit(ctx, t, cs1, round, -1, vss[0], nil, nil) - if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { - t.Fatal(err) - } + partSet, err = propBlock.MakePartSet(partSize) + 
require.NoError(t, err) + err = cs1.SetProposalAndBlock(ctx, prop, propBlock, partSet, "some peer") + require.NoError(t, err) - ensureNewProposal(proposalCh, height, round) + ensureNewProposal(t, proposalCh, height, round) rs := cs1.GetRoundState() - assert.True(t, bytes.Equal(rs.ValidBlock.Hash(), propBlockHash)) - assert.True(t, rs.ValidBlockParts.Header().Equals(propBlockParts.Header())) + assert.True(t, bytes.Equal(rs.ValidBlock.Hash(), blockID.Hash)) + assert.True(t, rs.ValidBlockParts.Header().Equals(blockID.PartSetHeader)) assert.True(t, rs.ValidRound == round) } +func TestProcessProposalAccept(t *testing.T) { + for _, testCase := range []struct { + name string + accept bool + expectedNilPrevote bool + }{ + { + name: "accepted block is prevoted", + accept: true, + expectedNilPrevote: false, + }, + { + name: "rejected block is not prevoted", + accept: false, + expectedNilPrevote: true, + }, + } { + t.Run(testCase.name, func(t *testing.T) { + config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + m := abcimocks.NewApplication(t) + status := abci.ResponseProcessProposal_REJECT + if testCase.accept { + status = abci.ResponseProcessProposal_ACCEPT + } + m.On("ProcessProposal", mock.Anything, mock.Anything).Return(&abci.ResponseProcessProposal{Status: status}, nil) + m.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{}, nil).Maybe() + cs1, _ := makeState(ctx, t, makeStateArgs{config: config, application: m}) + height, round := cs1.Height, cs1.Round + + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + proTxHash, err := cs1.privValidator.GetProTxHash(ctx) + require.NoError(t, err) + voteCh := subscribeToVoter(ctx, t, cs1, proTxHash) + + startTestRound(ctx, cs1, cs1.Height, round) + ensureNewRound(t, newRoundCh, height, round) + + ensureNewProposal(t, proposalCh, height, round) + rs := cs1.GetRoundState() + var prevoteHash tmbytes.HexBytes + if !testCase.expectedNilPrevote { + prevoteHash = rs.ProposalBlock.Hash() + } + ensurePrevoteMatch(t, voteCh, height, round, prevoteHash) + }) + } +} + +func TestFinalizeBlockCalled(t *testing.T) { + for _, testCase := range []struct { + name string + voteNil bool + expectCalled bool + }{ + { + name: "finalize block called when block committed", + voteNil: false, + expectCalled: true, + }, + { + name: "not called when block not committed", + voteNil: true, + expectCalled: false, + }, + } { + t.Run(testCase.name, func(t *testing.T) { + config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + m := abcimocks.NewApplication(t) + m.On("ProcessProposal", mock.Anything, mock.Anything).Return(&abci.ResponseProcessProposal{ + Status: abci.ResponseProcessProposal_ACCEPT, + }, nil) + m.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{}, nil) + m.On("VerifyVoteExtension", mock.Anything, mock.Anything).Return(&abci.ResponseVerifyVoteExtension{ + Status: abci.ResponseVerifyVoteExtension_ACCEPT, + }, nil) + m.On("FinalizeBlock", mock.Anything, mock.Anything).Return(&abci.ResponseFinalizeBlock{}, nil).Maybe() + m.On("ExtendVote", mock.Anything, mock.Anything).Return(&abci.ResponseExtendVote{}, nil) + m.On("Commit", mock.Anything).Return(&abci.ResponseCommit{}, nil).Maybe() + + cs1, vss := makeState(ctx, t, makeStateArgs{config: config, application: m}) + height, round := cs1.Height, cs1.Round
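+ // Drive a single height of consensus; the voteNil flag above determines whether
+ // the validators precommit the proposed block or nil, and therefore whether
+ // FinalizeBlock should reach the mocked application.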
+ + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + proTxHash, err := cs1.privValidator.GetProTxHash(ctx) + require.NoError(t, err) + voteCh := subscribeToVoter(ctx, t, cs1, proTxHash) + + startTestRound(ctx, cs1, cs1.Height, round) + ensureNewRound(t, newRoundCh, height, round) + ensureNewProposal(t, proposalCh, height, round) + rs := cs1.GetRoundState() + + blockID := types.BlockID{} + nextRound := round + 1 + nextHeight := height + if !testCase.voteNil { + nextRound = 0 + nextHeight = height + 1 + blockID = types.BlockID{ + Hash: rs.ProposalBlock.Hash(), + PartSetHeader: rs.ProposalBlockParts.Header(), + } + } + + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vss[1:]...) + ensurePrevoteMatch(t, voteCh, height, round, rs.ProposalBlock.Hash()) + + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vss[1:]...) + ensurePrecommit(t, voteCh, height, round) + + ensureNewRound(t, newRoundCh, nextHeight, nextRound) + m.AssertExpectations(t) + + if !testCase.expectCalled { + m.AssertNotCalled(t, "FinalizeBlock", ctx, mock.Anything) + } else { + m.AssertCalled(t, "FinalizeBlock", ctx, mock.Anything) + } + }) + } +} + +// TestExtendVoteCalled tests that the vote extension methods are called at the +// correct point in the consensus algorithm. +func TestExtendVoteCalled(t *testing.T) { + config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + m := abcimocks.NewApplication(t) + m.On("ProcessProposal", mock.Anything, mock.Anything).Return(&abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_ACCEPT}, nil) + m.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{}, nil) + m.On("ExtendVote", mock.Anything, mock.Anything).Return(&abci.ResponseExtendVote{ + VoteExtension: []byte("extension"), + }, nil) + m.On("VerifyVoteExtension", mock.Anything, mock.Anything).Return(&abci.ResponseVerifyVoteExtension{ + Status: abci.ResponseVerifyVoteExtension_ACCEPT, + }, nil) + m.On("Commit", mock.Anything).Return(&abci.ResponseCommit{}, nil).Maybe() + m.On("FinalizeBlock", mock.Anything, mock.Anything).Return(&abci.ResponseFinalizeBlock{}, nil).Maybe() + cs1, vss := makeState(ctx, t, makeStateArgs{config: config, application: m}) + height, round := cs1.Height, cs1.Round + + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + proTxHash, err := cs1.privValidator.GetProTxHash(ctx) + require.NoError(t, err) + voteCh := subscribeToVoter(ctx, t, cs1, proTxHash) + + startTestRound(ctx, cs1, cs1.Height, round) + ensureNewRound(t, newRoundCh, height, round) + ensureNewProposal(t, proposalCh, height, round) + + m.AssertNotCalled(t, "ExtendVote", mock.Anything, mock.Anything) + + rs := cs1.GetRoundState() + + blockID := types.BlockID{ + Hash: rs.ProposalBlock.Hash(), + PartSetHeader: rs.ProposalBlockParts.Header(), + } + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vss[1:]...) 
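+ // With the other validators' prevotes delivered, the polka for the proposed block
+ // completes; once cs1 signs its own precommit, the consensus engine should invoke
+ // ExtendVote, which the assertions below verify.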
+ ensurePrevoteMatch(t, voteCh, height, round, blockID.Hash) + + ensurePrecommit(t, voteCh, height, round) + + m.AssertCalled(t, "ExtendVote", ctx, &abci.RequestExtendVote{ + Height: height, + Hash: blockID.Hash, + }) + + m.AssertCalled(t, "VerifyVoteExtension", ctx, &abci.RequestVerifyVoteExtension{ + Hash: blockID.Hash, + ValidatorProTxHash: proTxHash, + Height: height, + VoteExtension: []byte("extension"), + }) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vss[1:]...) + ensureNewRound(t, newRoundCh, height+1, 0) + m.AssertExpectations(t) + + // Only 3 of the vote extensions are seen, as consensus proceeds as soon as the +2/3 threshold + // is observed by the consensus engine. + for _, pv := range vss[:3] { + proTxHash, err := pv.GetProTxHash(ctx) + require.NoError(t, err) + m.AssertCalled(t, "VerifyVoteExtension", ctx, &abci.RequestVerifyVoteExtension{ + Hash: blockID.Hash, + ValidatorProTxHash: proTxHash, + Height: height, + VoteExtension: []byte("extension"), + }) + } + +} + +// TestVerifyVoteExtensionNotCalledOnAbsentPrecommit tests that the VerifyVoteExtension +// method is not called for a validator's vote that is never delivered. +func TestVerifyVoteExtensionNotCalledOnAbsentPrecommit(t *testing.T) { + config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + m := abcimocks.NewApplication(t) + m.On("ProcessProposal", mock.Anything, mock.Anything).Return(&abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_ACCEPT}, nil) + m.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{}, nil) + m.On("ExtendVote", mock.Anything, mock.Anything).Return(&abci.ResponseExtendVote{ + VoteExtension: []byte("extension"), + }, nil) + m.On("VerifyVoteExtension", mock.Anything, mock.Anything).Return(&abci.ResponseVerifyVoteExtension{ + Status: abci.ResponseVerifyVoteExtension_ACCEPT, + }, nil) + m.On("FinalizeBlock", mock.Anything, mock.Anything).Return(&abci.ResponseFinalizeBlock{}, nil).Maybe() + cs1, vss := makeState(ctx, t, makeStateArgs{config: config, application: m}) + height, round := cs1.Height, cs1.Round + + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + proTxHash, err := cs1.privValidator.GetProTxHash(ctx) + require.NoError(t, err) + voteCh := subscribeToVoter(ctx, t, cs1, proTxHash) + + startTestRound(ctx, cs1, cs1.Height, round) + ensureNewRound(t, newRoundCh, height, round) + ensureNewProposal(t, proposalCh, height, round) + rs := cs1.GetRoundState() + + blockID := types.BlockID{ + Hash: rs.ProposalBlock.Hash(), + PartSetHeader: rs.ProposalBlockParts.Header(), + } + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vss[2:]...) + ensurePrevoteMatch(t, voteCh, height, round, blockID.Hash) + + ensurePrecommit(t, voteCh, height, round) + + m.AssertCalled(t, "ExtendVote", mock.Anything, &abci.RequestExtendVote{ + Height: height, + Hash: blockID.Hash, + }) + + m.AssertCalled(t, "VerifyVoteExtension", mock.Anything, &abci.RequestVerifyVoteExtension{ + Hash: blockID.Hash, + ValidatorProTxHash: proTxHash, + Height: height, + VoteExtension: []byte("extension"), + }) + + m.On("Commit", mock.Anything).Return(&abci.ResponseCommit{}, nil).Maybe() + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vss[2:]...) 
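+ // Only vss[2:] deliver precommits here; vss[1] stays silent, so no vote extension
+ // for it should ever be passed to VerifyVoteExtension (checked below).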
+ ensureNewRound(t, newRoundCh, height+1, 0) + m.AssertExpectations(t) + + // vss[1] did not issue a precommit for the block, ensure that a vote extension + // for its address was not sent to the application. + proTxHash, err = vss[1].GetProTxHash(ctx) + require.NoError(t, err) + m.AssertNotCalled(t, "VerifyVoteExtension", ctx, &abci.RequestVerifyVoteExtension{ + Hash: blockID.Hash, + ValidatorProTxHash: proTxHash, + Height: height, + VoteExtension: []byte("extension"), + }) + +} + +// TestPrepareProposalReceivesVoteExtensions tests that the PrepareProposal method +// is called with the vote extensions from the previous height. The test works +// by completing a consensus height with a mock application as the proposer. The +// test then proceeds to fail several rounds of consensus until the mock application +// is the proposer again and ensures that the mock application receives the set of +// vote extensions from the previous consensus instance. +func TestPrepareProposalReceivesVoteExtensions(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + config := configSetup(t) + + // create a list of vote extensions, one for each validator. + voteExtensions := [][]byte{ + []byte("extension 0"), + []byte("extension 1"), + []byte("extension 2"), + []byte("extension 3"), + } + + // m := abcimocks.NewApplication(t) + m := &abcimocks.Application{} + m.On("ExtendVote", mock.Anything, mock.Anything).Return(&abci.ResponseExtendVote{ + VoteExtension: voteExtensions[0], + }, nil) + m.On("ProcessProposal", mock.Anything, mock.Anything).Return(&abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_ACCEPT}, nil) + + // capture the prepare proposal request. + rpp := &abci.RequestPrepareProposal{} + m.On("PrepareProposal", mock.Anything, mock.MatchedBy(func(r *abci.RequestPrepareProposal) bool { + rpp = r + return true + })).Return(&abci.ResponsePrepareProposal{}, nil) + + m.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{}, nil).Once() + m.On("VerifyVoteExtension", mock.Anything, mock.Anything).Return(&abci.ResponseVerifyVoteExtension{Status: abci.ResponseVerifyVoteExtension_ACCEPT}, nil) + m.On("Commit", mock.Anything).Return(&abci.ResponseCommit{}, nil).Maybe() + m.On("FinalizeBlock", mock.Anything, mock.Anything).Return(&abci.ResponseFinalizeBlock{}, nil) + + cs1, vss := makeState(ctx, t, makeStateArgs{config: config, application: m}) + height, round := cs1.Height, cs1.Round + + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + proTxHash, err := cs1.privValidator.GetProTxHash(ctx) + require.NoError(t, err) + voteCh := subscribeToVoter(ctx, t, cs1, proTxHash) + + startTestRound(ctx, cs1, height, round) + ensureNewRound(t, newRoundCh, height, round) + ensureNewProposal(t, proposalCh, height, round) + + rs := cs1.GetRoundState() + blockID := types.BlockID{ + Hash: rs.ProposalBlock.Hash(), + PartSetHeader: rs.ProposalBlockParts.Header(), + } + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vss[1:]...) + + // create a precommit for each validator with the associated vote extension. + for i, vs := range vss[1:] { + signAddPrecommitWithExtension(ctx, t, cs1, config.ChainID(), blockID, voteExtensions[i+1], vs) + } + + ensurePrevote(t, voteCh, height, round) + + // ensure that the height is committed.
+ ensurePrecommitMatch(t, voteCh, height, round, blockID.Hash) + incrementHeight(vss[1:]...) + + height++ + round = 0 + ensureNewRound(t, newRoundCh, height, round) + incrementRound(vss[1:]...) + incrementRound(vss[1:]...) + incrementRound(vss[1:]...) + round = 3 + + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vss[1:]...) + ensureNewRound(t, newRoundCh, height, round) + ensureNewProposal(t, proposalCh, height, round) + + // ensure that the proposer received the list of vote extensions from the + // previous height. + require.Len(t, rpp.LocalLastCommit.Votes, len(vss)) + for i := range vss { + require.Equal(t, rpp.LocalLastCommit.Votes[i].VoteExtension, voteExtensions[i]) + } +} + // 4 vals, 3 Nil Precommits at P0 // What we want: // P0 waits for timeoutPrecommit before starting next round func TestWaitingTimeoutOnNilPolka(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() config := configSetup(t) - cs1, vss, err := randState(config, 4) - require.NoError(t, err) + cs1, vss := makeState(ctx, t, makeStateArgs{config: config}) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) // start round - startTestRound(cs1, height, round) - ensureNewRound(newRoundCh, height, round) + startTestRound(ctx, cs1, height, round) + ensureNewRound(t, newRoundCh, height, round) - signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) - ensureNewRound(newRoundCh, height, round+1) + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.voteTimeout(round).Nanoseconds()) + ensureNewRound(t, newRoundCh, height, round+1) } // 4 vals, 3 Prevotes for nil from the higher round. 
@@ -1491,37 +2362,37 @@ func TestWaitingTimeoutOnNilPolka(t *testing.T) { // P0 waits for timeoutPropose in the next round before entering prevote func TestWaitingTimeoutProposeOnNewRound(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss, err := randState(config, 4) - require.NoError(t, err) + cs1, vss := makeState(ctx, t, makeStateArgs{config: config}) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - proTxHash1, err := cs1.privValidator.GetProTxHash(context.Background()) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutPropose) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + proTxHash, err := cs1.privValidator.GetProTxHash(ctx) require.NoError(t, err) - voteCh := subscribeToVoter(cs1, proTxHash1) + voteCh := subscribeToVoter(ctx, t, cs1, proTxHash) // start round - startTestRound(cs1, height, round) - ensureNewRound(newRoundCh, height, round) + startTestRound(ctx, cs1, height, round) + ensureNewRound(t, newRoundCh, height, round) - ensurePrevote(voteCh, height, round) + ensurePrevote(t, voteCh, height, round) incrementRound(vss[1:]...) - signAddVotes(config, cs1, tmproto.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) round++ // moving to the next round - ensureNewRound(newRoundCh, height, round) + ensureNewRound(t, newRoundCh, height, round) rs := cs1.GetRoundState() assert.True(t, rs.Step == cstypes.RoundStepPropose) // P0 does not prevote before timeoutPropose expires - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Propose(round).Nanoseconds()) + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.proposeTimeout(round).Nanoseconds()) - ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], nil) + ensurePrevoteMatch(t, voteCh, height, round, nil) } // 4 vals, 3 Precommits for nil from the higher round. @@ -1529,37 +2400,38 @@ func TestWaitingTimeoutProposeOnNewRound(t *testing.T) { // P0 jump to higher round, precommit and start precommit wait func TestRoundSkipOnNilPolkaFromHigherRound(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss, err := randState(config, 4) - require.NoError(t, err) + cs1, vss := makeState(ctx, t, makeStateArgs{config: config}) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - proTxHash1, err := cs1.privValidator.GetProTxHash(context.Background()) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + proTxHash, err := cs1.privValidator.GetProTxHash(ctx) require.NoError(t, err) - voteCh := subscribeToVoter(cs1, proTxHash1) + voteCh := subscribeToVoter(ctx, t, cs1, proTxHash) // start round - startTestRound(cs1, height, round) - ensureNewRound(newRoundCh, height, round) + startTestRound(ctx, cs1, height, round) + ensureNewRound(t, newRoundCh, height, round) - ensurePrevote(voteCh, height, round) + ensurePrevote(t, voteCh, height, round) incrementRound(vss[1:]...) 
- signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) round++ // moving to the next round - ensureNewRound(newRoundCh, height, round) + ensureNewRound(t, newRoundCh, height, round) - ensurePrecommit(voteCh, height, round) - validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) + ensurePrecommit(t, voteCh, height, round) + validatePrecommit(ctx, t, cs1, round, -1, vss[0], nil, nil) - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.voteTimeout(round).Nanoseconds()) round++ // moving to the next round - ensureNewRound(newRoundCh, height, round) + ensureNewRound(t, newRoundCh, height, round) } // 4 vals, 3 Prevotes for nil in the current round. @@ -1567,38 +2439,39 @@ func TestRoundSkipOnNilPolkaFromHigherRound(t *testing.T) { // P0 wait for timeoutPropose to expire before sending prevote. func TestWaitTimeoutProposeOnNilPolkaForTheCurrentRound(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss, err := randState(config, 4) - require.NoError(t, err) + cs1, vss := makeState(ctx, t, makeStateArgs{config: config}) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, int32(1) - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - proTxHash1, err := cs1.privValidator.GetProTxHash(context.Background()) + timeoutProposeCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutPropose) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + proTxHash, err := cs1.privValidator.GetProTxHash(ctx) require.NoError(t, err) - voteCh := subscribeToVoter(cs1, proTxHash1) + voteCh := subscribeToVoter(ctx, t, cs1, proTxHash) // start round in which PO is not proposer - startTestRound(cs1, height, round) - ensureNewRound(newRoundCh, height, round) + startTestRound(ctx, cs1, height, round) + ensureNewRound(t, newRoundCh, height, round) incrementRound(vss[1:]...) 
- signAddVotes(config, cs1, tmproto.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), types.BlockID{}, vs2, vs3, vs4) - ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) + ensureNewTimeout(t, timeoutProposeCh, height, round, cs1.proposeTimeout(round).Nanoseconds()) - ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], nil) + ensurePrevoteMatch(t, voteCh, height, round, nil) } // What we want: // P0 emit NewValidBlock event upon receiving 2/3+ Precommit for B but hasn't received block B yet func TestEmitNewValidBlockEventOnCommitWithoutBlock(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss, err := randState(config, 4) - require.NoError(t, err) + cs1, vss := makeState(ctx, t, makeStateArgs{config: config}) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, int32(1) @@ -1606,25 +2479,29 @@ func TestEmitNewValidBlockEventOnCommitWithoutBlock(t *testing.T) { partSize := types.BlockPartSizeBytes - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - validBlockCh := subscribe(cs1.eventBus, types.EventQueryValidBlock) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + validBlockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryValidBlock) - _, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round) - propBlockHash := propBlock.Hash() - propBlockParts := propBlock.MakePartSet(partSize) + _, propBlock := decideProposal(ctx, t, cs1, vs2, vs2.Height, vs2.Round) + partSet, err := propBlock.MakePartSet(partSize) + require.NoError(t, err) + blockID := types.BlockID{ + Hash: propBlock.Hash(), + PartSetHeader: partSet.Header(), + } // start round in which PO is not proposer - startTestRound(cs1, height, round) - ensureNewRound(newRoundCh, height, round) + startTestRound(ctx, cs1, height, round) + ensureNewRound(t, newRoundCh, height, round) // vs2, vs3 and vs4 send precommit for propBlock - signAddVotes(config, cs1, tmproto.PrecommitType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4) - ensureNewValidBlock(validBlockCh, height, round) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vs2, vs3, vs4) + ensureNewValidBlock(t, validBlockCh, height, round) rs := cs1.GetRoundState() assert.True(t, rs.Step == cstypes.RoundStepApplyCommit) assert.True(t, rs.ProposalBlock == nil) - assert.True(t, rs.ProposalBlockParts.Header().Equals(propBlockParts.Header())) + assert.True(t, rs.ProposalBlockParts.Header().Equals(blockID.PartSetHeader)) } @@ -1633,43 +2510,48 @@ func TestEmitNewValidBlockEventOnCommitWithoutBlock(t *testing.T) { // After receiving block, it executes block and moves to the next height. 
func TestCommitFromPreviousRound(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss, err := randState(config, 4) - require.NoError(t, err) + cs1, vss := makeState(ctx, t, makeStateArgs{config: config}) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, int32(1) partSize := types.BlockPartSizeBytes - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - validBlockCh := subscribe(cs1.eventBus, types.EventQueryValidBlock) - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + validBlockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryValidBlock) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) - prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round) - propBlockHash := propBlock.Hash() - propBlockParts := propBlock.MakePartSet(partSize) + prop, propBlock := decideProposal(ctx, t, cs1, vs2, vs2.Height, vs2.Round) + partSet, err := propBlock.MakePartSet(partSize) + require.NoError(t, err) + blockID := types.BlockID{ + Hash: propBlock.Hash(), + PartSetHeader: partSet.Header(), + } // start round in which PO is not proposer - startTestRound(cs1, height, round) - ensureNewRound(newRoundCh, height, round) + startTestRound(ctx, cs1, height, round) + ensureNewRound(t, newRoundCh, height, round) // vs2, vs3 and vs4 send precommit for propBlock for the previous round - signAddVotes(config, cs1, tmproto.PrecommitType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vs2, vs3, vs4) - ensureNewValidBlock(validBlockCh, height, round) + ensureNewValidBlock(t, validBlockCh, height, round) rs := cs1.GetRoundState() assert.True(t, rs.Step == cstypes.RoundStepApplyCommit) assert.True(t, rs.CommitRound == vs2.Round) assert.True(t, rs.ProposalBlock == nil) - assert.True(t, rs.ProposalBlockParts.Header().Equals(propBlockParts.Header())) - - if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { - t.Fatal(err) - } + assert.True(t, rs.ProposalBlockParts.Header().Equals(blockID.PartSetHeader)) + partSet, err = propBlock.MakePartSet(partSize) + require.NoError(t, err) + err = cs1.SetProposalAndBlock(ctx, prop, propBlock, partSet, "some peer") + require.NoError(t, err) - ensureNewProposal(proposalCh, height, round) - ensureNewRound(newRoundCh, height+1, 0) + ensureNewProposal(t, proposalCh, height, round) + ensureNewRound(t, newRoundCh, height+1, 0) } type fakeTxNotifier struct { @@ -1689,60 +2571,62 @@ func (n *fakeTxNotifier) Notify() { // start of the next round func TestStartNextHeightCorrectlyAfterTimeout(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - config.Consensus.SkipTimeoutCommit = false - cs1, vss, err := randState(config, 4) - require.NoError(t, err) + cs1, vss := makeState(ctx, t, makeStateArgs{config: config}) + cs1.state.ConsensusParams.Timeout.BypassCommitTimeout = false cs1.txNotifier = &fakeTxNotifier{ch: make(chan struct{})} vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - precommitTimeoutCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) + proposalCh := subscribe(ctx, t, 
cs1.eventBus, types.EventQueryCompleteProposal) + timeoutProposeCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutPropose) + precommitTimeoutCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - newBlockHeader := subscribe(cs1.eventBus, types.EventQueryNewBlockHeader) - proTxHash1, err := cs1.privValidator.GetProTxHash(context.Background()) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + newBlockHeader := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewBlockHeader) + proTxHash, err := cs1.privValidator.GetProTxHash(ctx) require.NoError(t, err) - voteCh := subscribeToVoter(cs1, proTxHash1) + voteCh := subscribeToVoter(ctx, t, cs1, proTxHash) // start round and wait for propose and prevote - startTestRound(cs1, height, round) - ensureNewRound(newRoundCh, height, round) + startTestRound(ctx, cs1, height, round) + ensureNewRound(t, newRoundCh, height, round) - ensureNewProposal(proposalCh, height, round) + ensureNewProposal(t, proposalCh, height, round) rs := cs1.GetRoundState() - theBlockHash := rs.ProposalBlock.Hash() - theBlockParts := rs.ProposalBlockParts.Header() + blockID := types.BlockID{ + Hash: rs.ProposalBlock.Hash(), + PartSetHeader: rs.ProposalBlockParts.Header(), + } - ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], theBlockHash) + ensurePrevoteMatch(t, voteCh, height, round, blockID.Hash) - signAddVotes(config, cs1, tmproto.PrevoteType, theBlockHash, theBlockParts, vs2, vs3, vs4) + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2, vs3, vs4) - ensurePrecommit(voteCh, height, round) + ensurePrecommit(t, voteCh, height, round) // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash) + validatePrecommit(ctx, t, cs1, round, round, vss[0], blockID.Hash, blockID.Hash) // add precommits - signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2) - signAddVotes(config, cs1, tmproto.PrecommitType, theBlockHash, theBlockParts, vs3) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vs3) // wait till timeout occurs - ensurePrecommitTimeout(precommitTimeoutCh) + ensureNewTimeout(t, precommitTimeoutCh, height, round, cs1.voteTimeout(round).Nanoseconds()) - ensureNewRound(newRoundCh, height, round+1) + ensureNewRound(t, newRoundCh, height, round+1) // majority is now reached - signAddVotes(config, cs1, tmproto.PrecommitType, theBlockHash, theBlockParts, vs4) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vs4) - ensureNewBlockHeader(newBlockHeader, height, theBlockHash) + ensureNewBlockHeader(t, newBlockHeader, height, blockID.Hash) cs1.txNotifier.(*fakeTxNotifier).Notify() - ensureNewTimeout(timeoutProposeCh, height+1, round, cs1.config.Propose(round).Nanoseconds()) + ensureNewTimeout(t, timeoutProposeCh, height+1, round, cs1.proposeTimeout(round).Nanoseconds()) rs = cs1.GetRoundState() assert.False( t, @@ -1752,55 +2636,57 @@ func TestStartNextHeightCorrectlyAfterTimeout(t *testing.T) { func TestResetTimeoutPrecommitUponNewHeight(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - config.Consensus.SkipTimeoutCommit = false - cs1, vss, err := randState(config, 4) - require.NoError(t, err) + 
cs1, vss := makeState(ctx, t, makeStateArgs{config: config}) + cs1.state.ConsensusParams.Timeout.BypassCommitTimeout = false vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round partSize := types.BlockPartSizeBytes - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - newBlockHeader := subscribe(cs1.eventBus, types.EventQueryNewBlockHeader) - proTxHash1, err := cs1.privValidator.GetProTxHash(context.Background()) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + newBlockHeader := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewBlockHeader) + proTxHash, err := cs1.privValidator.GetProTxHash(ctx) require.NoError(t, err) - voteCh := subscribeToVoter(cs1, proTxHash1) + voteCh := subscribeToVoter(ctx, t, cs1, proTxHash) // start round and wait for propose and prevote - startTestRound(cs1, height, round) - ensureNewRound(newRoundCh, height, round) + startTestRound(ctx, cs1, height, round) + ensureNewRound(t, newRoundCh, height, round) - ensureNewProposal(proposalCh, height, round) + ensureNewProposal(t, proposalCh, height, round) rs := cs1.GetRoundState() - theBlockHash := rs.ProposalBlock.Hash() - theBlockParts := rs.ProposalBlockParts.Header() + blockID := types.BlockID{ + Hash: rs.ProposalBlock.Hash(), + PartSetHeader: rs.ProposalBlockParts.Header(), + } - ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], theBlockHash) + ensurePrevoteMatch(t, voteCh, height, round, blockID.Hash) - signAddVotes(config, cs1, tmproto.PrevoteType, theBlockHash, theBlockParts, vs2, vs3, vs4) + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2, vs3, vs4) - ensurePrecommit(voteCh, height, round) - validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash) + ensurePrecommit(t, voteCh, height, round) + validatePrecommit(ctx, t, cs1, round, round, vss[0], blockID.Hash, blockID.Hash) // add precommits - signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2) - signAddVotes(config, cs1, tmproto.PrecommitType, theBlockHash, theBlockParts, vs3) - signAddVotes(config, cs1, tmproto.PrecommitType, theBlockHash, theBlockParts, vs4) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vs3) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vs4) - ensureNewBlockHeader(newBlockHeader, height, theBlockHash) + ensureNewBlockHeader(t, newBlockHeader, height, blockID.Hash) - prop, propBlock := decideProposal(cs1, vs2, height+1, 0) - propBlockParts := propBlock.MakePartSet(partSize) + prop, propBlock := decideProposal(ctx, t, cs1, vs2, height+1, 0) + propBlockParts, err := propBlock.MakePartSet(partSize) + require.NoError(t, err) - if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { - t.Fatal(err) - } - ensureNewProposal(proposalCh, height+1, 0) + err = cs1.SetProposalAndBlock(ctx, prop, propBlock, propBlockParts, "some peer") + require.NoError(t, err) + ensureNewProposal(t, proposalCh, height+1, 0) rs = cs1.GetRoundState() assert.False( @@ -1809,83 +2695,6 @@ func TestResetTimeoutPrecommitUponNewHeight(t *testing.T) { "triggeredTimeoutPrecommit should be false at the beginning of each height") } 
-//------------------------------------------------------------------------------------------ -// SlashingSuite -// TODO: Slashing - -/* -func TestStateSlashingPrevotes(t *testing.T) { - cs1, vss := randState(2) - vs2 := vss[1] - - - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress()) - - // start round and wait for propose and prevote - startTestRound(cs1, cs1.Height, 0) - <-newRoundCh - re := <-proposalCh - <-voteCh // prevote - - rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) - - // we should now be stuck in limbo forever, waiting for more prevotes - // add one for a different block should cause us to go into prevote wait - hash := rs.ProposalBlock.Hash() - hash[0] = byte(hash[0]+1) % 255 - signAddVotes(cs1, tmproto.PrevoteType, hash, rs.ProposalBlockParts.Header(), vs2) - - <-timeoutWaitCh - - // NOTE: we have to send the vote for different block first so we don't just go into precommit round right - // away and ignore more prevotes (and thus fail to slash!) - - // add the conflicting vote - signAddVotes(cs1, tmproto.PrevoteType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2) - - // XXX: Check for existence of Dupeout info -} - -func TestStateSlashingPrecommits(t *testing.T) { - cs1, vss := randState(2) - vs2 := vss[1] - - - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress()) - - // start round and wait for propose and prevote - startTestRound(cs1, cs1.Height, 0) - <-newRoundCh - re := <-proposalCh - <-voteCh // prevote - - // add prevote from vs2 - signAddVotes(cs1, tmproto.PrevoteType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2) - - <-voteCh // precommit - - // we should now be stuck in limbo forever, waiting for more prevotes - // add one for a different block should cause us to go into prevote wait - hash := rs.ProposalBlock.Hash() - hash[0] = byte(hash[0]+1) % 255 - signAddVotes(cs1, tmproto.PrecommitType, hash, rs.ProposalBlockParts.Header(), vs2) - - // NOTE: we have to send the vote for different block first so we don't just go into precommit round right - // away and ignore more prevotes (and thus fail to slash!) - - // add precommit from vs2 - signAddVotes(cs1, tmproto.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2) - - // XXX: Check for existence of Dupeout info -} -*/ - //------------------------------------------------------------------------------------------ // CatchupSuite @@ -1896,84 +2705,88 @@ func TestStateSlashingPrecommits(t *testing.T) { // we receive a final precommit after going into next round, but others might have gone to commit already! 
func TestStateHalt1(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs1, vss, err := randState(config, 4) - require.NoError(t, err) + cs1, vss := makeState(ctx, t, makeStateArgs{config: config}) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round partSize := types.BlockPartSizeBytes - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlock) - proTxHash1, err := cs1.privValidator.GetProTxHash(context.Background()) + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + timeoutWaitCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewRound) + newBlockCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryNewBlock) + proTxHash, err := cs1.privValidator.GetProTxHash(ctx) require.NoError(t, err) - voteCh := subscribeToVoter(cs1, proTxHash1) + voteCh := subscribeToVoter(ctx, t, cs1, proTxHash) // start round and wait for propose and prevote - startTestRound(cs1, height, round) - ensureNewRound(newRoundCh, height, round) + startTestRound(ctx, cs1, height, round) + ensureNewRound(t, newRoundCh, height, round) - ensureNewProposal(proposalCh, height, round) + ensureNewProposal(t, proposalCh, height, round) rs := cs1.GetRoundState() propBlock := rs.ProposalBlock - propBlockParts := propBlock.MakePartSet(partSize) + partSet, err := propBlock.MakePartSet(partSize) + require.NoError(t, err) + blockID := types.BlockID{ + Hash: propBlock.Hash(), + PartSetHeader: partSet.Header(), + } - ensurePrevote(voteCh, height, round) + ensurePrevote(t, voteCh, height, round) - signAddVotes(config, cs1, tmproto.PrevoteType, propBlock.Hash(), propBlockParts.Header(), vs2, vs3, vs4) + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2, vs3, vs4) - ensurePrecommit(voteCh, height, round) + ensurePrecommit(t, voteCh, height, round) // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, round, round, vss[0], propBlock.Hash(), propBlock.Hash()) + validatePrecommit(ctx, t, cs1, round, round, vss[0], propBlock.Hash(), propBlock.Hash()) // add precommits from the rest - signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2) // didnt receive proposal - signAddVotes(config, cs1, tmproto.PrecommitType, propBlock.Hash(), propBlockParts.Header(), vs3) + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), types.BlockID{}, vs2) // didn't receive proposal + signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vs3) // we receive this later, but vs3 might receive it earlier and with ours will go to commit!
- precommit4 := signVote( - vs4, config, tmproto.PrecommitType, propBlock.Hash(), + precommit4 := signVote(ctx, t, vs4, tmproto.PrecommitType, config.ChainID(), blockID, cs1.state.AppHash, cs1.state.Validators.QuorumType, - cs1.state.Validators.QuorumHash, propBlockParts.Header(), + cs1.state.Validators.QuorumHash, ) incrementRound(vs2, vs3, vs4) // timeout to new round - ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) + ensureNewTimeout(t, timeoutWaitCh, height, round, cs1.voteTimeout(round).Nanoseconds()) round++ // moving to the next round - ensureNewRound(newRoundCh, height, round) - rs = cs1.GetRoundState() + ensureNewRound(t, newRoundCh, height, round) - t.Log("### ONTO ROUND 1") /*Round2 - // we timeout and prevote our lock + // we timeout and prevote // a polka happened but we didn't see it! */ - // go to prevote, prevote for locked block - ensurePrevote(voteCh, height, round) - validatePrevote(t, cs1, round, vss[0], rs.LockedBlock.Hash()) + // prevote for nil since we did not receive a proposal in this round. + ensurePrevoteMatch(t, voteCh, height, round, rs.LockedBlock.Hash()) // now we receive the precommit from the previous round addVotes(cs1, precommit4) // receiving that precommit should take us straight to commit - ensureNewBlock(newBlockCh, height) + ensureNewBlock(t, newBlockCh, height) - ensureNewRound(newRoundCh, height+1, 0) + ensureNewRound(t, newRoundCh, height+1, 0) } func TestStateOutputsBlockPartsStats(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // create dummy peer - cs, _, err := randState(config, 1) + cs, _ := makeState(ctx, t, makeStateArgs{config: config, validators: 1}) + peerID, err := types.NewNodeID("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA") require.NoError(t, err) - peer := p2pmock.NewPeer(nil) // 1) new block part parts := types.NewPartSetFromData(tmrand.Bytes(100), 10) @@ -1984,26 +2797,26 @@ func TestStateOutputsBlockPartsStats(t *testing.T) { } cs.ProposalBlockParts = types.NewPartSetFromHeader(parts.Header()) - cs.handleMsg(msgInfo{msg, peer.ID()}, false) + cs.handleMsg(ctx, msgInfo{msg, peerID, tmtime.Now()}, false) statsMessage := <-cs.statsMsgQueue require.Equal(t, msg, statsMessage.Msg, "") - require.Equal(t, peer.ID(), statsMessage.PeerID, "") + require.Equal(t, peerID, statsMessage.PeerID, "") // sending the same part from different peer - cs.handleMsg(msgInfo{msg, "peer2"}, false) + cs.handleMsg(ctx, msgInfo{msg, "peer2", tmtime.Now()}, false) // sending the part with the same height, but different round msg.Round = 1 - cs.handleMsg(msgInfo{msg, peer.ID()}, false) + cs.handleMsg(ctx, msgInfo{msg, peerID, tmtime.Now()}, false) // sending the part from the smaller height msg.Height = 0 - cs.handleMsg(msgInfo{msg, peer.ID()}, false) + cs.handleMsg(ctx, msgInfo{msg, peerID, tmtime.Now()}, false) // sending the part from the bigger height msg.Height = 3 - cs.handleMsg(msgInfo{msg, peer.ID()}, false) + cs.handleMsg(ctx, msgInfo{msg, peerID, tmtime.Now()}, false) select { case <-cs.statsMsgQueue: @@ -2015,33 +2828,40 @@ func TestStateOutputsBlockPartsStats(t *testing.T) { func TestStateOutputVoteStats(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs, vss, err := randState(config, 2) - require.NoError(t, err) + cs, vss := makeState(ctx, t, makeStateArgs{config: config, validators: 2}) // create dummy peer - peer := p2pmock.NewPeer(nil) + peerID, err := 
types.NewNodeID("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA") + require.NoError(t, err) - randBytes := tmrand.Bytes(tmhash.Size) + randBytes := tmrand.Bytes(crypto.HashSize) + blockID := types.BlockID{ + Hash: randBytes, + } - vote := signVote(vss[1], config, tmproto.PrecommitType, randBytes, cs.state.AppHash, cs.state.Validators.QuorumType, - cs.state.Validators.QuorumHash, types.PartSetHeader{}) + vote := signVote(ctx, t, vss[1], tmproto.PrecommitType, config.ChainID(), blockID, cs.state.AppHash, + cs.state.Validators.QuorumType, cs.state.Validators.QuorumHash, + ) voteMessage := &VoteMessage{vote} - cs.handleMsg(msgInfo{voteMessage, peer.ID()}, false) + cs.handleMsg(ctx, msgInfo{voteMessage, peerID, tmtime.Now()}, false) statsMessage := <-cs.statsMsgQueue require.Equal(t, voteMessage, statsMessage.Msg, "") - require.Equal(t, peer.ID(), statsMessage.PeerID, "") + require.Equal(t, peerID, statsMessage.PeerID, "") // sending the same vote from a different peer - cs.handleMsg(msgInfo{&VoteMessage{vote}, "peer2"}, false) + cs.handleMsg(ctx, msgInfo{&VoteMessage{vote}, "peer2", tmtime.Now()}, false) // sending the vote for the bigger height incrementHeight(vss[1]) - vote = signVote(vss[1], config, tmproto.PrecommitType, randBytes, cs.state.AppHash, cs.state.Validators.QuorumType, - cs.state.Validators.QuorumHash, types.PartSetHeader{}) + vote = signVote(ctx, t, vss[1], tmproto.PrecommitType, config.ChainID(), blockID, cs.state.AppHash, + cs.state.Validators.QuorumType, cs.state.Validators.QuorumHash, + ) - cs.handleMsg(msgInfo{&VoteMessage{vote}, peer.ID()}, false) + cs.handleMsg(ctx, msgInfo{&VoteMessage{vote}, peerID, tmtime.Now()}, false) select { case <-cs.statsMsgQueue: @@ -2053,40 +2873,185 @@ func TestStateOutputVoteStats(t *testing.T) { func TestSignSameVoteTwice(t *testing.T) { config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - cs, vss, err := randState(config, 2) - require.NoError(t, err) + cs, vss := makeState(ctx, t, makeStateArgs{config: config, validators: 2}) - randBytes := tmrand.Bytes(tmhash.Size) + randBytes := tmrand.Bytes(crypto.HashSize) - vote := signVote(vss[1], - config, + vote := signVote( + ctx, + t, + vss[1], tmproto.PrecommitType, - randBytes, + config.ChainID(), + types.BlockID{ + Hash: randBytes, + PartSetHeader: types.PartSetHeader{Total: 10, Hash: randBytes}, + }, cs.state.AppHash, cs.state.Validators.QuorumType, cs.state.Validators.QuorumHash, - types.PartSetHeader{Total: 10, Hash: randBytes}, ) - - vote2 := signVote(vss[1], - config, + vote2 := signVote( + ctx, + t, + vss[1], tmproto.PrecommitType, - randBytes, + config.ChainID(), + types.BlockID{ + Hash: randBytes, + PartSetHeader: types.PartSetHeader{Total: 10, Hash: randBytes}, + }, cs.state.AppHash, cs.state.Validators.QuorumType, cs.state.Validators.QuorumHash, - types.PartSetHeader{Total: 10, Hash: randBytes}, ) require.Equal(t, vote, vote2) } +// TestStateTimestamp_ProposalNotMatch tests that a validator does not prevote a +// proposed block if the timestamp in the block does not match the timestamp in the +// corresponding proposal message.
+func TestStateTimestamp_ProposalNotMatch(t *testing.T) { + config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cs1, vss := makeState(ctx, t, makeStateArgs{config: config}) + height, round := cs1.Height, cs1.Round + vs2, vs3, vs4 := vss[1], vss[2], vss[3] + + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + proTxHash, err := cs1.privValidator.GetProTxHash(ctx) + require.NoError(t, err) + voteCh := subscribeToVoter(ctx, t, cs1, proTxHash) + + propBlock, err := cs1.createProposalBlock(ctx) + require.NoError(t, err) + round++ + incrementRound(vss[1:]...) + + propBlockParts, err := propBlock.MakePartSet(types.BlockPartSizeBytes) + require.NoError(t, err) + blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} + + // Create a proposal with a timestamp that does not match the timestamp of the block. + proposal := types.NewProposal(vs2.Height, 1, round, -1, blockID, propBlock.Header.Time.Add(time.Millisecond)) + p := proposal.ToProto() + _, valSet := cs1.GetValidatorSet() + _, err = vs2.SignProposal(ctx, config.ChainID(), valSet.QuorumType, valSet.QuorumHash, p) + require.NoError(t, err) + proposal.Signature = p.Signature + require.NoError(t, cs1.SetProposalAndBlock(ctx, proposal, propBlock, propBlockParts, "some peer")) + + startTestRound(ctx, cs1, height, round) + ensureProposal(t, proposalCh, height, round, blockID) + + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2, vs3, vs4) + + // ensure that the validator prevotes nil. + ensurePrevote(t, voteCh, height, round) + validatePrevote(ctx, t, cs1, round, vss[0], nil) + + ensurePrecommit(t, voteCh, height, round) + validatePrecommit(ctx, t, cs1, round, -1, vss[0], nil, nil) +} + +// TestStateTimestamp_ProposalMatch tests that a validator prevotes a +// proposed block if the timestamp in the block matches the timestamp in the +// corresponding proposal message. +func TestStateTimestamp_ProposalMatch(t *testing.T) { + config := configSetup(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cs1, vss := makeState(ctx, t, makeStateArgs{config: config}) + height, round := cs1.Height, cs1.Round + vs2, vs3, vs4 := vss[1], vss[2], vss[3] + + proposalCh := subscribe(ctx, t, cs1.eventBus, types.EventQueryCompleteProposal) + proTxHash, err := cs1.privValidator.GetProTxHash(ctx) + require.NoError(t, err) + voteCh := subscribeToVoter(ctx, t, cs1, proTxHash) + + propBlock, err := cs1.createProposalBlock(ctx) + require.NoError(t, err) + round++ + incrementRound(vss[1:]...) + + propBlockParts, err := propBlock.MakePartSet(types.BlockPartSizeBytes) + require.NoError(t, err) + blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} + + // Create a proposal with a timestamp that matches the timestamp of the block. + proposal := types.NewProposal(vs2.Height, 1, round, -1, blockID, propBlock.Header.Time) + p := proposal.ToProto() + _, valSet := cs1.GetValidatorSet() + _, err = vs2.SignProposal(ctx, config.ChainID(), valSet.QuorumType, valSet.QuorumHash, p) + require.NoError(t, err) + proposal.Signature = p.Signature + require.NoError(t, cs1.SetProposalAndBlock(ctx, proposal, propBlock, propBlockParts, "some peer")) + + startTestRound(ctx, cs1, height, round) + ensureProposal(t, proposalCh, height, round, blockID) + + signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vs2, vs3, vs4) + + // ensure that the validator prevotes the block. 
+ ensurePrevote(t, voteCh, height, round) + validatePrevote(ctx, t, cs1, round, vss[0], propBlock.Hash()) + + ensurePrecommit(t, voteCh, height, round) + validatePrecommit(ctx, t, cs1, round, 1, vss[0], propBlock.Hash(), propBlock.Hash()) +} + // subscribe subscribes test client to the given query and returns a channel with cap = 1. -func subscribe(eventBus *types.EventBus, q tmpubsub.Query) <-chan tmpubsub.Message { - sub, err := eventBus.Subscribe(context.Background(), testSubscriber, q) - if err != nil { - panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, q)) - } - return sub.Out() +func subscribe( + ctx context.Context, + t *testing.T, + eventBus *eventbus.EventBus, + q *tmquery.Query, +) <-chan tmpubsub.Message { + t.Helper() + sub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: testSubscriber, + Query: q, + }) + require.NoErrorf(t, err, "Failed to subscribe %q to %v: %v", testSubscriber, q, err) + ch := make(chan tmpubsub.Message) + go func() { + for { + next, err := sub.Next(ctx) + if err != nil { + if ctx.Err() != nil { + return + } + t.Errorf("Subscription for %v unexpectedly terminated: %v", q, err) + return + } + select { + case ch <- next: + case <-ctx.Done(): + return + } + } + }() + return ch +} + +func signAddPrecommitWithExtension(ctx context.Context, + t *testing.T, + cs *State, + chainID string, + blockID types.BlockID, + extension []byte, + stub *validatorStub) { + _, valSet := cs.GetValidatorSet() + v, err := stub.signVote(ctx, tmproto.PrecommitType, chainID, blockID, cs.state.AppHash, valSet.QuorumType, + valSet.QuorumHash, extension) + require.NoError(t, err, "failed to sign vote") + addVotes(cs, v) } diff --git a/internal/consensus/ticker.go b/internal/consensus/ticker.go index 7cc7198f58..9d7a94fefc 100644 --- a/internal/consensus/ticker.go +++ b/internal/consensus/ticker.go @@ -1,6 +1,7 @@ package consensus import ( + "context" "time" "github.com/tendermint/tendermint/libs/log" @@ -15,12 +16,11 @@ var ( // conditional on the height/round/step in the timeoutInfo. // The timeoutInfo.Duration may be non-positive. type TimeoutTicker interface { - Start() error - Stop() error + Start(context.Context) error + Stop() + IsRunning() bool Chan() <-chan timeoutInfo // on which to receive a timeout ScheduleTimeout(ti timeoutInfo) // reset the timer - - SetLogger(log.Logger) } // timeoutTicker wraps time.Timer, @@ -30,6 +30,7 @@ type TimeoutTicker interface { // and fired on the tockChan. type timeoutTicker struct { service.BaseService + logger log.Logger timer *time.Timer tickChan chan timeoutInfo // for scheduling timeouts @@ -37,30 +38,27 @@ type timeoutTicker struct { } // NewTimeoutTicker returns a new TimeoutTicker. -func NewTimeoutTicker() TimeoutTicker { +func NewTimeoutTicker(logger log.Logger) TimeoutTicker { tt := &timeoutTicker{ + logger: logger, timer: time.NewTimer(0), tickChan: make(chan timeoutInfo, tickTockBufferSize), tockChan: make(chan timeoutInfo, tickTockBufferSize), } - tt.BaseService = *service.NewBaseService(nil, "TimeoutTicker", tt) + tt.BaseService = *service.NewBaseService(logger, "TimeoutTicker", tt) tt.stopTimer() // don't want to fire until the first scheduled timeout return tt } // OnStart implements service.Service. It starts the timeout routine. -func (t *timeoutTicker) OnStart() error { - - go t.timeoutRoutine() +func (t *timeoutTicker) OnStart(ctx context.Context) error { + go t.timeoutRoutine(ctx) return nil } // OnStop implements service.Service. It stops the timeout routine. 
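+// Note: the ticker's lifecycle is now driven by the context passed to OnStart, +// so OnStop only needs to stop the timer; timeoutRoutine exits once its +// context is canceled.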
-func (t *timeoutTicker) OnStop() { - t.BaseService.OnStop() - t.stopTimer() -} +func (t *timeoutTicker) OnStop() { t.stopTimer() } // Chan returns a channel on which timeouts are sent. func (t *timeoutTicker) Chan() <-chan timeoutInfo { @@ -83,7 +81,6 @@ func (t *timeoutTicker) stopTimer() { select { case <-t.timer.C: default: - t.Logger.Debug("Timer already stopped") } } } @@ -91,13 +88,12 @@ func (t *timeoutTicker) stopTimer() { // send on tickChan to start a new timer. // timers are interrupted and replaced by new ticks from later steps // timeouts of 0 on the tickChan will be immediately relayed to the tockChan -func (t *timeoutTicker) timeoutRoutine() { - t.Logger.Debug("Starting timeout routine") +func (t *timeoutTicker) timeoutRoutine(ctx context.Context) { var ti timeoutInfo for { select { case newti := <-t.tickChan: - t.Logger.Debug("Received tick", "old_ti", ti, "new_ti", newti) + t.logger.Debug("Received tick", "old_ti", ti, "new_ti", newti) // ignore tickers for old height/round/step if newti.Height < ti.Height { @@ -119,15 +115,20 @@ func (t *timeoutTicker) timeoutRoutine() { // NOTE time.Timer allows duration to be non-positive ti = newti t.timer.Reset(ti.Duration) - t.Logger.Debug("Scheduled timeout", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step.String()) + t.logger.Debug("Scheduled timeout", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step.String()) case <-t.timer.C: - t.Logger.Info("Timed out", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step.String()) + t.logger.Info("Timed out", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step.String()) // goroutine here guarantees timeoutRoutine doesn't block. // Determinism comes from playback in the receiveRoutine.
// We can eliminate it by merging the timeoutRoutine into receiveRoutine // and managing the timeouts ourselves with a millisecond ticker - go func(toi timeoutInfo) { t.tockChan <- toi }(ti) - case <-t.Quit(): + go func(toi timeoutInfo) { + select { + case t.tockChan <- toi: + case <-ctx.Done(): + } + }(ti) + case <-ctx.Done(): return } } diff --git a/internal/consensus/types/height_vote_set.go b/internal/consensus/types/height_vote_set.go index 2b9015f1f9..29e159f6bd 100644 --- a/internal/consensus/types/height_vote_set.go +++ b/internal/consensus/types/height_vote_set.go @@ -1,12 +1,12 @@ package types import ( + "encoding/json" "errors" "fmt" "strings" "sync" - tmjson "github.com/tendermint/tendermint/libs/json" tmmath "github.com/tendermint/tendermint/libs/math" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" @@ -92,7 +92,11 @@ func (hvs *HeightVoteSet) Round() int32 { func (hvs *HeightVoteSet) SetRound(round int32) { hvs.mtx.Lock() defer hvs.mtx.Unlock() - newRound := tmmath.SafeSubInt32(hvs.round, 1) + newRound, err := tmmath.SafeSubInt32(hvs.round, 1) + if err != nil { + panic(err) + } + if hvs.round != 0 && (round < newRound) { panic("SetRound() must increment hvs.round") } @@ -210,7 +214,7 @@ func (hvs *HeightVoteSet) SetPeerMaj23( if voteSet == nil { return nil // something we don't know about yet } - return voteSet.SetPeerMaj23(types.P2PID(peerID), blockID) + return voteSet.SetPeerMaj23(string(peerID), blockID) } //--------------------------------------------------------- @@ -252,7 +256,7 @@ func (hvs *HeightVoteSet) StringIndented(indent string) string { func (hvs *HeightVoteSet) MarshalJSON() ([]byte, error) { hvs.mtx.Lock() defer hvs.mtx.Unlock() - return tmjson.Marshal(hvs.toAllRoundVotes()) + return json.Marshal(hvs.toAllRoundVotes()) } func (hvs *HeightVoteSet) toAllRoundVotes() []roundVotes { diff --git a/internal/consensus/types/height_vote_set_test.go b/internal/consensus/types/height_vote_set_test.go index 4cb653a7bd..a69a48f148 100644 --- a/internal/consensus/types/height_vote_set_test.go +++ b/internal/consensus/types/height_vote_set_test.go @@ -2,8 +2,6 @@ package types import ( "context" - "fmt" - "os" "testing" "github.com/dashevo/dashd-go/btcjson" @@ -11,45 +9,40 @@ import ( "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/crypto/tmhash" tmrand "github.com/tendermint/tendermint/libs/rand" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" ) -var cfg *config.Config // NOTE: must be reset for each _test.go file - -func TestMain(m *testing.M) { - var err error - cfg, err = config.ResetTestRoot("consensus_height_vote_set_test") +func TestPeerCatchupRounds(t *testing.T) { + cfg, err := config.ResetTestRoot(t.TempDir(), "consensus_height_vote_set_test") if err != nil { - panic(err) + t.Fatal(err) } - code := m.Run() - os.RemoveAll(cfg.RootDir) - os.Exit(code) -} -func TestPeerCatchupRounds(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + valSet, privVals := types.RandValidatorSet(10) stateID := types.StateID{} - hvs := NewHeightVoteSet(cfg.ChainID(), 1, stateID, valSet) + chainID := cfg.ChainID() + hvs := NewHeightVoteSet(chainID, 1, stateID, valSet) - vote999_0 := makeVoteHR(t, 1, 0, 999, privVals, valSet.QuorumType, valSet.QuorumHash, stateID) + vote999_0 := makeVoteHR(ctx, t, 1, 0, 999, privVals, chainID, valSet.QuorumType, 
valSet.QuorumHash, stateID) added, err := hvs.AddVote(vote999_0, "peer1") if !added || err != nil { t.Error("Expected to successfully add vote from peer", added, err) } - vote1000_0 := makeVoteHR(t, 1, 0, 1000, privVals, valSet.QuorumType, valSet.QuorumHash, stateID) + vote1000_0 := makeVoteHR(ctx, t, 1, 0, 1000, privVals, chainID, valSet.QuorumType, valSet.QuorumHash, stateID) added, err = hvs.AddVote(vote1000_0, "peer1") if !added || err != nil { t.Error("Expected to successfully add vote from peer", added, err) } - vote1001_0 := makeVoteHR(t, 1, 0, 1001, privVals, valSet.QuorumType, valSet.QuorumHash, stateID) + vote1001_0 := makeVoteHR(ctx, t, 1, 0, 1001, privVals, chainID, valSet.QuorumType, valSet.QuorumHash, stateID) added, err = hvs.AddVote(vote1001_0, "peer1") if err != ErrGotVoteFromUnwantedRound { t.Errorf("expected GotVoteFromUnwantedRoundError, but got %v", err) @@ -65,13 +58,22 @@ func TestPeerCatchupRounds(t *testing.T) { } -func makeVoteHR(t *testing.T, height int64, valIndex, round int32, privVals []types.PrivValidator, - quorumType btcjson.LLMQType, quorumHash crypto.QuorumHash, stateID types.StateID) *types.Vote { +func makeVoteHR( + ctx context.Context, + t *testing.T, + height int64, + valIndex, round int32, + privVals []types.PrivValidator, + chainID string, + quorumType btcjson.LLMQType, + quorumHash crypto.QuorumHash, + stateID types.StateID, +) *types.Vote { privVal := privVals[valIndex] - proTxHash, err := privVal.GetProTxHash(context.Background()) + proTxHash, err := privVal.GetProTxHash(ctx) require.NoError(t, err) - randBytes1 := tmrand.Bytes(tmhash.Size) + randBytes := tmrand.Bytes(crypto.HashSize) vote := &types.Vote{ ValidatorProTxHash: proTxHash, @@ -79,18 +81,16 @@ func makeVoteHR(t *testing.T, height int64, valIndex, round int32, privVals []ty Height: height, Round: round, Type: tmproto.PrecommitType, - BlockID: types.BlockID{Hash: randBytes1, PartSetHeader: types.PartSetHeader{}}, + BlockID: types.BlockID{Hash: randBytes, PartSetHeader: types.PartSetHeader{}}, } - chainID := cfg.ChainID() v := vote.ToProto() - err = privVal.SignVote(context.Background(), chainID, quorumType, quorumHash, v, stateID, nil) - if err != nil { - panic(fmt.Sprintf("Error signing vote: %v", err)) - } + err = privVal.SignVote(ctx, chainID, quorumType, quorumHash, v, stateID, nil) + require.NoError(t, err, "Error signing vote") vote.BlockSignature = v.BlockSignature vote.StateSignature = v.StateSignature + vote.ExtensionSignature = v.ExtensionSignature return vote } diff --git a/internal/consensus/types/peer_round_state_test.go b/internal/consensus/types/peer_round_state_test.go index 393fd20565..6d76750a76 100644 --- a/internal/consensus/types/peer_round_state_test.go +++ b/internal/consensus/types/peer_round_state_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/libs/bits" ) diff --git a/internal/consensus/types/round_state.go b/internal/consensus/types/round_state.go index 5f638ca279..f33d8f7537 100644 --- a/internal/consensus/types/round_state.go +++ b/internal/consensus/types/round_state.go @@ -74,15 +74,25 @@ type RoundState struct { StartTime time.Time `json:"start_time"` // Subjective time when +2/3 precommits for Block at Round were found - CommitTime time.Time `json:"commit_time"` - Validators *types.ValidatorSet `json:"validators"` - Proposal *types.Proposal `json:"proposal"` - ProposalBlock *types.Block `json:"proposal_block"` - ProposalBlockParts *types.PartSet `json:"proposal_block_parts"` - 
LockedRound int32 `json:"locked_round"` - LockedBlock *types.Block `json:"locked_block"` - LockedBlockParts *types.PartSet `json:"locked_block_parts"` - Commit *types.Commit `json:"commit"` + CommitTime time.Time `json:"commit_time"` + Validators *types.ValidatorSet `json:"validators"` + Proposal *types.Proposal `json:"proposal"` + ProposalReceiveTime time.Time `json:"proposal_receive_time"` + ProposalBlock *types.Block `json:"proposal_block"` + ProposalBlockParts *types.PartSet `json:"proposal_block_parts"` + LockedRound int32 `json:"locked_round"` + LockedBlock *types.Block `json:"locked_block"` + LockedBlockParts *types.PartSet `json:"locked_block_parts"` + Commit *types.Commit `json:"commit"` + + // The variables below starting with "Valid..." take their name from the + // algorithm presented in the paper + // [The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938). + // Here, "Valid..." means that the block or round the variable refers to has + // received 2/3+ non-`nil` prevotes (a.k.a. a *polka*); it says nothing about + // whether the Application returned "Accept" or "Reject" in its response to + // `ProcessProposal`. // Last known round with POL for non-nil valid block. ValidRound int32 `json:"valid_round"` diff --git a/internal/consensus/wal.go b/internal/consensus/wal.go index 7d22c4b5fe..68a11f80f3 100644 --- a/internal/consensus/wal.go +++ b/internal/consensus/wal.go @@ -1,6 +1,7 @@ package consensus import ( + "context" "encoding/binary" "errors" "fmt" @@ -11,8 +12,8 @@ import ( "github.com/gogo/protobuf/proto" + "github.com/tendermint/tendermint/internal/jsontypes" auto "github.com/tendermint/tendermint/internal/libs/autofile" - tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" tmos "github.com/tendermint/tendermint/libs/os" "github.com/tendermint/tendermint/libs/service" @@ -40,15 +41,17 @@ type TimedWALMessage struct { // EndHeightMessage marks the end of the given height inside WAL. // @internal used by scripts/wal2json util. type EndHeightMessage struct { - Height int64 `json:"height"` + Height int64 `json:"height,string"` } +func (EndHeightMessage) TypeTag() string { return "tendermint/wal/EndHeightMessage" } + type WALMessage interface{} func init() { - tmjson.RegisterType(msgInfo{}, "tendermint/wal/MsgInfo") - tmjson.RegisterType(timeoutInfo{}, "tendermint/wal/TimeoutInfo") - tmjson.RegisterType(EndHeightMessage{}, "tendermint/wal/EndHeightMessage") + jsontypes.MustRegister(msgInfo{}) + jsontypes.MustRegister(timeoutInfo{}) + jsontypes.MustRegister(EndHeightMessage{}) } //-------------------------------------------------------- @@ -63,8 +66,8 @@ type WAL interface { SearchForEndHeight(height int64, options *WALSearchOptions) (rd io.ReadCloser, found bool, err error) // service methods - Start() error - Stop() error + Start(context.Context) error + Stop() Wait() } @@ -75,6 +78,7 @@ type WAL interface { // again. type BaseWAL struct { service.BaseService + logger log.Logger group *auto.Group @@ -88,22 +92,23 @@ var _ WAL = &BaseWAL{} // NewWAL returns a new write-ahead logger based on `baseWAL`, which implements // WAL. It's flushed and synced to disk every 2s and once when stopped.
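+// +// A minimal usage sketch (assuming a context, logger, and WAL path are in +// scope; error handling elided): +// +//	wal, err := NewWAL(ctx, logger, walFile) +//	err = wal.Start(ctx) +//	defer func() { wal.Stop(); wal.Wait() }() +//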
-func NewWAL(walFile string, groupOptions ...func(*auto.Group)) (*BaseWAL, error) { +func NewWAL(ctx context.Context, logger log.Logger, walFile string, groupOptions ...func(*auto.Group)) (*BaseWAL, error) { err := tmos.EnsureDir(filepath.Dir(walFile), 0700) if err != nil { return nil, fmt.Errorf("failed to ensure WAL directory is in place: %w", err) } - group, err := auto.OpenGroup(walFile, groupOptions...) + group, err := auto.OpenGroup(ctx, logger, walFile, groupOptions...) if err != nil { return nil, err } wal := &BaseWAL{ + logger: logger, group: group, enc: NewWALEncoder(group), flushInterval: walDefaultFlushInterval, } - wal.BaseService = *service.NewBaseService(nil, "baseWAL", wal) + wal.BaseService = *service.NewBaseService(logger, "baseWAL", wal) return wal, nil } @@ -116,12 +121,7 @@ func (wal *BaseWAL) Group() *auto.Group { return wal.group } -func (wal *BaseWAL) SetLogger(l log.Logger) { - wal.BaseService.Logger = l - wal.group.SetLogger(l) -} - -func (wal *BaseWAL) OnStart() error { +func (wal *BaseWAL) OnStart(ctx context.Context) error { size, err := wal.group.Head.Size() if err != nil { return err @@ -130,23 +130,23 @@ func (wal *BaseWAL) OnStart() error { return err } } - err = wal.group.Start() + err = wal.group.Start(ctx) if err != nil { return err } wal.flushTicker = time.NewTicker(wal.flushInterval) - go wal.processFlushTicks() + go wal.processFlushTicks(ctx) return nil } -func (wal *BaseWAL) processFlushTicks() { +func (wal *BaseWAL) processFlushTicks(ctx context.Context) { for { select { case <-wal.flushTicker.C: if err := wal.FlushAndSync(); err != nil { - wal.Logger.Error("Periodic WAL flush failed", "err", err) + wal.logger.Error("Periodic WAL flush failed", "err", err) } - case <-wal.Quit(): + case <-ctx.Done(): return } } @@ -164,18 +164,21 @@ func (wal *BaseWAL) FlushAndSync() error { func (wal *BaseWAL) OnStop() { wal.flushTicker.Stop() if err := wal.FlushAndSync(); err != nil { - wal.Logger.Error("error on flush data to disk", "error", err) - } - if err := wal.group.Stop(); err != nil { - wal.Logger.Error("error trying to stop wal", "error", err) + wal.logger.Error("error on flush data to disk", "error", err) } + wal.group.Stop() wal.group.Close() } // Wait for the underlying autofile group to finish shutting down // so it's safe to cleanup files. func (wal *BaseWAL) Wait() { - wal.group.Wait() + if wal.IsRunning() { + wal.BaseService.Wait() + } + if wal.group.IsRunning() { + wal.group.Wait() + } } // Write is called in newStep and for each receive on the @@ -187,7 +190,7 @@ func (wal *BaseWAL) Write(msg WALMessage) error { } if err := wal.enc.Encode(&TimedWALMessage{tmtime.Now(), msg}); err != nil { - wal.Logger.Error("Error writing msg to consensus wal. WARNING: recover may not be possible for the current height", + wal.logger.Error("error writing msg to consensus wal. WARNING: recover may not be possible for the current height", "err", err, "msg", msg) return err } @@ -208,7 +211,7 @@ func (wal *BaseWAL) WriteSync(msg WALMessage) error { } if err := wal.FlushAndSync(); err != nil { - wal.Logger.Error(`WriteSync failed to flush consensus wal. + wal.logger.Error(`WriteSync failed to flush consensus wal. WARNING: may result in creating alternative proposals / votes for the current height if the node restarted`, "err", err) return err @@ -240,7 +243,7 @@ func (wal *BaseWAL) SearchForEndHeight( // NOTE: starting from the last file in the group because we're usually // searching for the last height. 
See replay.go min, max := wal.group.MinIndex(), wal.group.MaxIndex() - wal.Logger.Info("Searching for height", "height", height, "min", min, "max", max) + wal.logger.Info("Searching for height", "height", height, "min", min, "max", max) for index := max; index >= min; index-- { gr, err = wal.group.NewReader(index) if err != nil { @@ -260,7 +263,7 @@ func (wal *BaseWAL) SearchForEndHeight( break } if options.IgnoreDataCorruptionErrors && IsDataCorruptionError(err) { - wal.Logger.Error("Corrupted entry. Skipping...", "err", err) + wal.logger.Error("Corrupted entry. Skipping...", "err", err) // do nothing continue } else if err != nil { @@ -271,7 +274,7 @@ func (wal *BaseWAL) SearchForEndHeight( if m, ok := msg.Msg.(EndHeightMessage); ok { lastHeightFound = m.Height if m.Height == height { // found - wal.Logger.Info("Found", "height", height, "index", index) + wal.logger.Info("Found", "height", height, "index", index) return gr, true, nil } } @@ -370,14 +373,14 @@ func (dec *WALDecoder) Decode() (*TimedWALMessage, error) { return nil, err } if err != nil { - return nil, DataCorruptionError{fmt.Errorf("failed to read checksum: %v", err)} + return nil, DataCorruptionError{fmt.Errorf("failed to read checksum: %w", err)} } crc := binary.BigEndian.Uint32(b) b = make([]byte, 4) _, err = dec.rd.Read(b) if err != nil { - return nil, DataCorruptionError{fmt.Errorf("failed to read length: %v", err)} + return nil, DataCorruptionError{fmt.Errorf("failed to read length: %w", err)} } length := binary.BigEndian.Uint32(b) @@ -403,7 +406,7 @@ func (dec *WALDecoder) Decode() (*TimedWALMessage, error) { var res = new(tmcons.TimedWALMessage) err = proto.Unmarshal(data, res) if err != nil { - return nil, DataCorruptionError{fmt.Errorf("failed to decode data: %v", err)} + return nil, DataCorruptionError{fmt.Errorf("failed to decode data: %w", err)} } walMsg, err := WALFromProto(res.Msg) @@ -428,6 +431,6 @@ func (nilWAL) FlushAndSync() error { return nil } func (nilWAL) SearchForEndHeight(height int64, options *WALSearchOptions) (rd io.ReadCloser, found bool, err error) { return nil, false, nil } -func (nilWAL) Start() error { return nil } -func (nilWAL) Stop() error { return nil } -func (nilWAL) Wait() {} +func (nilWAL) Start(context.Context) error { return nil } +func (nilWAL) Stop() {} +func (nilWAL) Wait() {} diff --git a/internal/consensus/wal_generator.go b/internal/consensus/wal_generator.go index 147d6be2fb..e5cb1c64eb 100644 --- a/internal/consensus/wal_generator.go +++ b/internal/consensus/wal_generator.go @@ -3,10 +3,10 @@ package consensus import ( "bufio" "bytes" + "context" "fmt" "io" mrand "math/rand" - "path/filepath" "testing" "time" @@ -16,6 +16,7 @@ import ( abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/proxy" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/store" @@ -24,17 +25,18 @@ import ( "github.com/tendermint/tendermint/types" ) -// WALGenerateNBlocks generates a consensus WAL. It does this by spinning up a -// stripped down version of node (proxy app, event bus, consensus state) with a -// persistent kvstore application and special consensus wal instance -// (byteBufferWAL) and waits until numBlocks are created. -// If the node fails to produce given numBlocks, it returns an error. 
-func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { +// WALGenerateNBlocks generates a consensus WAL. It does this by +// spinning up a stripped-down version of a node (proxy app, event bus, +// consensus state) with a kvstore application and a special consensus +// WAL instance (byteBufferWAL), and waits until numBlocks are created. +// If the node fails to produce the given numBlocks, it fails the test. +func WALGenerateNBlocks(ctx context.Context, t *testing.T, logger log.Logger, wr io.Writer, numBlocks int) { + t.Helper() + cfg := getConfig(t) - app := kvstore.NewPersistentKVStoreApplication(filepath.Join(cfg.DBDir(), "wal_generator")) - t.Cleanup(func() { require.NoError(t, app.Close()) }) - logger := log.TestingLogger().With("wal_generator", "wal_generator") + app := kvstore.NewApplication() + logger.Info("generating WAL (last height msg excluded)", "numBlocks", numBlocks) // COPY PASTE FROM node.go WITH A FEW MODIFICATIONS @@ -44,72 +46,67 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { privValidatorStateFile := cfg.PrivValidator.StateFile() privValidator, err := privval.LoadOrGenFilePV(privValidatorKeyFile, privValidatorStateFile) if err != nil { - return err + t.Fatal(err) } genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile()) if err != nil { - return fmt.Errorf("failed to read genesis file: %w", err) + t.Fatal(fmt.Errorf("failed to read genesis file: %w", err)) } blockStoreDB := dbm.NewMemDB() stateDB := blockStoreDB stateStore := sm.NewStore(stateDB) state, err := sm.MakeGenesisState(genDoc) if err != nil { - return fmt.Errorf("failed to make genesis state: %w", err) + t.Fatal(fmt.Errorf("failed to make genesis state: %w", err)) } state.Version.Consensus.App = kvstore.ProtocolVersion if err = stateStore.Save(state); err != nil { - t.Error(err) + t.Fatal(err) } blockStore := store.NewBlockStore(blockStoreDB) - - proxyApp := proxy.NewAppConns(abciclient.NewLocalCreator(app), proxy.NopMetrics()) - proxyApp.SetLogger(logger.With("module", "proxy")) - if err := proxyApp.Start(); err != nil { - return fmt.Errorf("failed to start proxy app connections: %w", err) + proxyLogger := logger.With("module", "proxy") + proxyApp := proxy.New(abciclient.NewLocalClient(logger, app), proxyLogger, proxy.NopMetrics()) + if err := proxyApp.Start(ctx); err != nil { + t.Fatal(fmt.Errorf("failed to start proxy app connections: %w", err)) } - t.Cleanup(func() { - if err := proxyApp.Stop(); err != nil { - t.Error(err) - } - }) + t.Cleanup(proxyApp.Wait) - eventBus := types.NewEventBus() - eventBus.SetLogger(logger.With("module", "events")) - if err := eventBus.Start(); err != nil { - return fmt.Errorf("failed to start event bus: %w", err) + eventBus := eventbus.NewDefault(logger.With("module", "events")) + if err := eventBus.Start(ctx); err != nil { + t.Fatal(fmt.Errorf("failed to start event bus: %w", err)) } - t.Cleanup(func() { - if err := eventBus.Stop(); err != nil { - t.Error(err) - } - }) + t.Cleanup(func() { eventBus.Stop(); eventBus.Wait() }) + mempool := emptyMempool{} evpool := sm.EmptyEvidencePool{} blockExec := sm.NewBlockExecutor( stateStore, - log.TestingLogger(), - proxyApp.Consensus(), - proxyApp.Query(), + log.NewNopLogger(), + proxyApp, mempool, evpool, blockStore, - nil, - sm.BlockExecutorWithAppHashSize(cfg.Consensus.AppHashSize), + eventBus, + sm.NopMetrics(), ) - consensusState := NewState( + blockExec.SetAppHashSize(cfg.Consensus.AppHashSize) + consensusState, err := NewState( + logger, cfg.Consensus, - state.Copy(), +
stateStore, blockExec, blockStore, mempool, evpool, + eventBus, ) - consensusState.SetLogger(logger) - consensusState.SetEventBus(eventBus) + if err != nil { + t.Fatal(err) + } + if privValidator != nil && privValidator != (*privval.FilePV)(nil) { - consensusState.SetPrivValidator(privValidator) + consensusState.SetPrivValidator(ctx, privValidator) } // END OF COPY PASTE @@ -118,40 +115,33 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { wal := newByteBufferWAL(logger, NewWALEncoder(wr), int64(numBlocks), numBlocksWritten) // see wal.go#103 if err := wal.Write(EndHeightMessage{0}); err != nil { - t.Error(err) + t.Fatal(err) } consensusState.wal = wal - if err := consensusState.Start(); err != nil { - return fmt.Errorf("failed to start consensus state: %w", err) + if err := consensusState.Start(ctx); err != nil { + t.Fatal(fmt.Errorf("failed to start consensus state: %w", err)) } + t.Cleanup(consensusState.Wait) + + defer consensusState.Stop() + timer := time.NewTimer(time.Minute) + defer timer.Stop() select { case <-numBlocksWritten: - if err := consensusState.Stop(); err != nil { - t.Error(err) - } - return nil - case <-time.After(1 * time.Minute): - if err := consensusState.Stop(); err != nil { - t.Error(err) - } - return fmt.Errorf( - "waited too long for tendermint to produce %d blocks (grep logs for `wal_generator`)", - numBlocks, - ) + case <-timer.C: + t.Fatal(fmt.Errorf("waited too long for tendermint to produce %d blocks (grep logs for `wal_generator`)", numBlocks)) } } // WALWithNBlocks returns a WAL content with numBlocks. -func WALWithNBlocks(t *testing.T, numBlocks int) (data []byte, err error) { +func WALWithNBlocks(ctx context.Context, t *testing.T, logger log.Logger, numBlocks int) (data []byte, err error) { var b bytes.Buffer wr := bufio.NewWriter(&b) - if err := WALGenerateNBlocks(t, wr, numBlocks); err != nil { - return []byte{}, err - } + WALGenerateNBlocks(ctx, t, logger, wr, numBlocks) wr.Flush() return b.Bytes(), nil @@ -164,23 +154,23 @@ func randPort() int { return base + mrand.Intn(spread) } -func makeAddrs() (string, string, string) { +// makeAddrs constructs local TCP addresses for node services. +// It uses consecutive ports from a random starting point, so that concurrent +// instances are less likely to collide. 
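+// (Only the P2P and RPC addresses are needed now that the test config no +// longer sets a separate gRPC listen address.)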
+func makeAddrs() (p2pAddr, rpcAddr string) { + const addrTemplate = "tcp://127.0.0.1:%d" start := randPort() - return fmt.Sprintf("tcp://127.0.0.1:%d", start), - fmt.Sprintf("tcp://127.0.0.1:%d", start+1), - fmt.Sprintf("tcp://127.0.0.1:%d", start+2) + return fmt.Sprintf(addrTemplate, start), fmt.Sprintf(addrTemplate, start+1) } // getConfig returns a config for test cases func getConfig(t *testing.T) *config.Config { - c, err := config.ResetTestRoot(t.Name()) + c, err := config.ResetTestRoot(t.TempDir(), t.Name()) require.NoError(t, err) - // and we use random ports to run in parallel - tm, rpc, grpc := makeAddrs() - c.P2P.ListenAddress = tm - c.RPC.ListenAddress = rpc - c.RPC.GRPCListenAddress = grpc + p2pAddr, rpcAddr := makeAddrs() + c.P2P.ListenAddress = p2pAddr + c.RPC.ListenAddress = rpcAddr return c } @@ -259,6 +249,6 @@ func (w *byteBufferWAL) SearchForEndHeight( return nil, false, nil } -func (w *byteBufferWAL) Start() error { return nil } -func (w *byteBufferWAL) Stop() error { return nil } -func (w *byteBufferWAL) Wait() {} +func (w *byteBufferWAL) Start(context.Context) error { return nil } +func (w *byteBufferWAL) Stop() {} +func (w *byteBufferWAL) Wait() {} diff --git a/internal/consensus/wal_test.go b/internal/consensus/wal_test.go index 55a32f9b23..62bebb4562 100644 --- a/internal/consensus/wal_test.go +++ b/internal/consensus/wal_test.go @@ -2,21 +2,19 @@ package consensus import ( "bytes" - "crypto/rand" + "context" "io/ioutil" "os" "path/filepath" - - // "sync" "testing" "time" + "github.com/fortytw2/leaktest" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/merkle" - "github.com/tendermint/tendermint/crypto/tmhash" "github.com/tendermint/tendermint/internal/consensus/types" "github.com/tendermint/tendermint/internal/libs/autofile" "github.com/tendermint/tendermint/libs/log" @@ -24,40 +22,37 @@ import ( tmtypes "github.com/tendermint/tendermint/types" ) -const ( - walTestFlushInterval = time.Duration(100) * time.Millisecond -) +const walTestFlushInterval = 100 * time.Millisecond func TestWALTruncate(t *testing.T) { walDir := t.TempDir() walFile := filepath.Join(walDir, "wal") + logger := log.NewNopLogger() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // this magic number 4K truncates the content when RotateFile is called; // defaultHeadSizeLimit (10M) is hard to simulate. // this magic number 1 * time.Millisecond makes RotateFile check frequently; // defaultGroupCheckDuration (5s) is hard to simulate. - wal, err := NewWAL(walFile, + wal, err := NewWAL(ctx, logger, walFile, autofile.GroupHeadSizeLimit(4096), autofile.GroupCheckDuration(1*time.Millisecond), ) require.NoError(t, err) - wal.SetLogger(log.TestingLogger()) - err = wal.Start() + err = wal.Start(ctx) require.NoError(t, err) - t.Cleanup(func() { - if err := wal.Stop(); err != nil { - t.Error(err) - } - // wait for the wal to finish shutting down so we - // can safely remove the directory - wal.Wait() - }) + t.Cleanup(func() { wal.Stop(); wal.Group().Stop(); wal.Group().Wait(); wal.Wait() }) // 60 blocks come to nearly 70K, greater than the group's headBuf size (4096 * 10). // When headBuf is full, truncated content is flushed to the file; at this // point RotateFile is called, so truncated content exists in each file.
- err = WALGenerateNBlocks(t, wal.Group(), 60) - require.NoError(t, err) + WALGenerateNBlocks(ctx, t, logger, wal.Group(), 60) + + // put the leakcheck here so it runs after other cleanup + // functions. + t.Cleanup(leaktest.CheckTimeout(t, 500*time.Millisecond)) time.Sleep(1 * time.Millisecond) // wait groupCheckDuration, make sure RotateFile run @@ -111,18 +106,14 @@ func TestWALWrite(t *testing.T) { walDir := t.TempDir() walFile := filepath.Join(walDir, "wal") - wal, err := NewWAL(walFile) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + wal, err := NewWAL(ctx, log.NewNopLogger(), walFile) require.NoError(t, err) - err = wal.Start() + err = wal.Start(ctx) require.NoError(t, err) - t.Cleanup(func() { - if err := wal.Stop(); err != nil { - t.Error(err) - } - // wait for the wal to finish shutting down so we - // can safely remove the directory - wal.Wait() - }) + t.Cleanup(func() { wal.Stop(); wal.Group().Stop(); wal.Group().Wait(); wal.Wait() }) // 1) Write returns an error if msg is too big msg := &BlockPartMessage{ @@ -148,19 +139,22 @@ func TestWALWrite(t *testing.T) { } func TestWALWriteCommit(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewTestingLogger(t) + walDir, err := ioutil.TempDir("", "wal") require.NoError(t, err) defer os.RemoveAll(walDir) walFile := filepath.Join(walDir, "wal") - wal, err := NewWAL(walFile) + wal, err := NewWAL(ctx, logger, walFile) require.NoError(t, err) - err = wal.Start() + err = wal.Start(ctx) require.NoError(t, err) defer func() { - if err := wal.Stop(); err != nil { - t.Error(err) - } + wal.Stop() // wait for the wal to finish shutting down so we // can safely remove the directory wal.Wait() @@ -169,10 +163,10 @@ func TestWALWriteCommit(t *testing.T) { // Prepare and write commit msg stateID := tmtypes.RandStateID() blockID := tmtypes.BlockID{ - Hash: crypto.CRandBytes(tmhash.Size), + Hash: crypto.CRandBytes(crypto.HashSize), PartSetHeader: tmtypes.PartSetHeader{ Total: 0, - Hash: crypto.CRandBytes(tmhash.Size)}, + Hash: crypto.CRandBytes(crypto.HashSize)}, } msg := &CommitMessage{ Commit: &tmtypes.Commit{ @@ -211,15 +205,19 @@ func TestWALWriteCommit(t *testing.T) { } func TestWALSearchForEndHeight(t *testing.T) { - walBody, err := WALWithNBlocks(t, 6) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + + walBody, err := WALWithNBlocks(ctx, t, logger, 6) if err != nil { t.Fatal(err) } - walFile := tempWALWithData(walBody) + walFile := tempWALWithData(t, walBody) - wal, err := NewWAL(walFile) + wal, err := NewWAL(ctx, logger, walFile) require.NoError(t, err) - wal.SetLogger(log.TestingLogger()) h := int64(3) gr, found, err := wal.SearchForEndHeight(h, &WALSearchOptions{}) @@ -234,34 +232,34 @@ func TestWALSearchForEndHeight(t *testing.T) { rs, ok := msg.Msg.(tmtypes.EventDataRoundState) assert.True(t, ok, "expected message of type EventDataRoundState") assert.Equal(t, rs.Height, h+1, "wrong height") + + t.Cleanup(leaktest.Check(t)) } func TestWALPeriodicSync(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + walDir := t.TempDir() walFile := filepath.Join(walDir, "wal") - wal, err := NewWAL(walFile, autofile.GroupCheckDuration(1*time.Millisecond)) + defer os.RemoveAll(walFile) + wal, err := NewWAL(ctx, log.NewNopLogger(), walFile, autofile.GroupCheckDuration(250*time.Millisecond)) require.NoError(t, err) wal.SetFlushInterval(walTestFlushInterval) - 
wal.SetLogger(log.TestingLogger()) + logger := log.NewNopLogger() // Generate some data - err = WALGenerateNBlocks(t, wal.Group(), 5) - require.NoError(t, err) + WALGenerateNBlocks(ctx, t, logger, wal.Group(), 5) // We should have data in the buffer now assert.NotZero(t, wal.Group().Buffered()) - require.NoError(t, wal.Start()) - t.Cleanup(func() { - if err := wal.Stop(); err != nil { - t.Error(err) - } - wal.Wait() - }) + require.NoError(t, wal.Start(ctx)) + t.Cleanup(func() { wal.Stop(); wal.Group().Stop(); wal.Group().Wait(); wal.Wait() }) - time.Sleep(walTestFlushInterval + (10 * time.Millisecond)) + time.Sleep(walTestFlushInterval + (20 * time.Millisecond)) // The data should have been flushed by the periodic sync assert.Zero(t, wal.Group().Buffered()) @@ -274,70 +272,6 @@ func TestWALPeriodicSync(t *testing.T) { if gr != nil { gr.Close() } -} - -/* -var initOnce sync.Once - -func registerInterfacesOnce() { - initOnce.Do(func() { - var _ = wire.RegisterInterface( - struct{ WALMessage }{}, - wire.ConcreteType{[]byte{}, 0x10}, - ) - }) -} -*/ - -func nBytes(n int) []byte { - buf := make([]byte, n) - n, _ = rand.Read(buf) - return buf[:n] -} - -func benchmarkWalDecode(b *testing.B, n int) { - // registerInterfacesOnce() - buf := new(bytes.Buffer) - enc := NewWALEncoder(buf) - - data := nBytes(n) - if err := enc.Encode(&TimedWALMessage{Msg: data, Time: time.Now().Round(time.Second).UTC()}); err != nil { - b.Error(err) - } - encoded := buf.Bytes() - - b.ResetTimer() - for i := 0; i < b.N; i++ { - buf.Reset() - buf.Write(encoded) - dec := NewWALDecoder(buf) - if _, err := dec.Decode(); err != nil { - b.Fatal(err) - } - } - b.ReportAllocs() -} - -func BenchmarkWalDecode512B(b *testing.B) { - benchmarkWalDecode(b, 512) -} - -func BenchmarkWalDecode10KB(b *testing.B) { - benchmarkWalDecode(b, 10*1024) -} -func BenchmarkWalDecode100KB(b *testing.B) { - benchmarkWalDecode(b, 100*1024) -} -func BenchmarkWalDecode1MB(b *testing.B) { - benchmarkWalDecode(b, 1024*1024) -} -func BenchmarkWalDecode10MB(b *testing.B) { - benchmarkWalDecode(b, 10*1024*1024) -} -func BenchmarkWalDecode100MB(b *testing.B) { - benchmarkWalDecode(b, 100*1024*1024) -} -func BenchmarkWalDecode1GB(b *testing.B) { - benchmarkWalDecode(b, 1024*1024*1024) + t.Cleanup(leaktest.Check(t)) } diff --git a/internal/eventbus/event_bus.go b/internal/eventbus/event_bus.go new file mode 100644 index 0000000000..0743b04b09 --- /dev/null +++ b/internal/eventbus/event_bus.go @@ -0,0 +1,200 @@ +package eventbus + +import ( + "context" + "fmt" + "strings" + + abci "github.com/tendermint/tendermint/abci/types" + tmpubsub "github.com/tendermint/tendermint/internal/pubsub" + tmquery "github.com/tendermint/tendermint/internal/pubsub/query" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/types" +) + +// Subscription is a proxy interface for a pubsub Subscription. +type Subscription interface { + ID() string + Next(context.Context) (tmpubsub.Message, error) +} + +// EventBus is a common bus for all events going through the system. +// It is a type-aware wrapper around an underlying pubsub server. +// All events should be published via the bus. +type EventBus struct { + service.BaseService + pubsub *tmpubsub.Server +} + +// NewDefault returns a new event bus with default options. 
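+// The underlying pubsub server is created with BufferCapacity(0), i.e. without +// an internal event buffer.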
+func NewDefault(l log.Logger) *EventBus { + logger := l.With("module", "eventbus") + pubsub := tmpubsub.NewServer(l, tmpubsub.BufferCapacity(0)) + b := &EventBus{pubsub: pubsub} + b.BaseService = *service.NewBaseService(logger, "EventBus", b) + return b +} + +func (b *EventBus) OnStart(ctx context.Context) error { + return b.pubsub.Start(ctx) +} + +func (b *EventBus) OnStop() {} + +func (b *EventBus) NumClients() int { + return b.pubsub.NumClients() +} + +func (b *EventBus) NumClientSubscriptions(clientID string) int { + return b.pubsub.NumClientSubscriptions(clientID) +} + +func (b *EventBus) SubscribeWithArgs(ctx context.Context, args tmpubsub.SubscribeArgs) (Subscription, error) { + return b.pubsub.SubscribeWithArgs(ctx, args) +} + +func (b *EventBus) Unsubscribe(ctx context.Context, args tmpubsub.UnsubscribeArgs) error { + return b.pubsub.Unsubscribe(ctx, args) +} + +func (b *EventBus) UnsubscribeAll(ctx context.Context, subscriber string) error { + return b.pubsub.UnsubscribeAll(ctx, subscriber) +} + +func (b *EventBus) Observe(ctx context.Context, observe func(tmpubsub.Message) error, queries ...*tmquery.Query) error { + return b.pubsub.Observe(ctx, observe, queries...) +} + +func (b *EventBus) Publish(eventValue string, eventData types.EventData) error { + tokens := strings.Split(types.EventTypeKey, ".") + event := abci.Event{ + Type: tokens[0], + Attributes: []abci.EventAttribute{ + { + Key: tokens[1], + Value: eventValue, + }, + }, + } + + return b.pubsub.PublishWithEvents(eventData, []abci.Event{event}) +} + +func (b *EventBus) PublishEventNewBlock(data types.EventDataNewBlock) error { + events := data.ResultFinalizeBlock.Events + + // add Tendermint-reserved new block event + events = append(events, types.EventNewBlock) + + return b.pubsub.PublishWithEvents(data, events) +} + +func (b *EventBus) PublishEventNewBlockHeader(data types.EventDataNewBlockHeader) error { + // no explicit deadline for publishing events + + events := data.ResultFinalizeBlock.Events + + // add Tendermint-reserved new block header event + events = append(events, types.EventNewBlockHeader) + + return b.pubsub.PublishWithEvents(data, events) +} + +func (b *EventBus) PublishEventNewEvidence(evidence types.EventDataNewEvidence) error { + return b.Publish(types.EventNewEvidenceValue, evidence) +} + +func (b *EventBus) PublishEventVote(data types.EventDataVote) error { + return b.Publish(types.EventVoteValue, data) +} + +func (b *EventBus) PublishEventValidBlock(data types.EventDataRoundState) error { + return b.Publish(types.EventValidBlockValue, data) +} + +func (b *EventBus) PublishEventBlockSyncStatus(data types.EventDataBlockSyncStatus) error { + return b.Publish(types.EventBlockSyncStatusValue, data) +} + +func (b *EventBus) PublishEventStateSyncStatus(data types.EventDataStateSyncStatus) error { + return b.Publish(types.EventStateSyncStatusValue, data) +} + +// PublishEventTx publishes a tx event with the events from Result. Note it will add +// predefined keys (EventTypeKey, TxHashKey, TxHeightKey). Existing events with the +// same keys will be overwritten.
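+// +// For example (a sketch mirroring the tests below; the hash value is a +// placeholder), a transaction published at height 1 can be matched with a +// subscription query such as: +// +//	tm.event='Tx' AND tx.height=1 AND tx.hash='<HEX-HASH>' +//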
+func (b *EventBus) PublishEventTx(data types.EventDataTx) error { + events := data.Result.Events + + // add Tendermint-reserved events + events = append(events, types.EventTx) + + tokens := strings.Split(types.TxHashKey, ".") + events = append(events, abci.Event{ + Type: tokens[0], + Attributes: []abci.EventAttribute{ + { + Key: tokens[1], + Value: fmt.Sprintf("%X", types.Tx(data.Tx).Hash()), + }, + }, + }) + + tokens = strings.Split(types.TxHeightKey, ".") + events = append(events, abci.Event{ + Type: tokens[0], + Attributes: []abci.EventAttribute{ + { + Key: tokens[1], + Value: fmt.Sprintf("%d", data.Height), + }, + }, + }) + + return b.pubsub.PublishWithEvents(data, events) +} + +func (b *EventBus) PublishEventNewRoundStep(data types.EventDataRoundState) error { + return b.Publish(types.EventNewRoundStepValue, data) +} + +func (b *EventBus) PublishEventTimeoutPropose(data types.EventDataRoundState) error { + return b.Publish(types.EventTimeoutProposeValue, data) +} + +func (b *EventBus) PublishEventTimeoutWait(data types.EventDataRoundState) error { + return b.Publish(types.EventTimeoutWaitValue, data) +} + +func (b *EventBus) PublishEventNewRound(data types.EventDataNewRound) error { + return b.Publish(types.EventNewRoundValue, data) +} + +func (b *EventBus) PublishEventCompleteProposal(data types.EventDataCompleteProposal) error { + return b.Publish(types.EventCompleteProposalValue, data) +} + +func (b *EventBus) PublishEventPolka(data types.EventDataRoundState) error { + return b.Publish(types.EventPolkaValue, data) +} + +func (b *EventBus) PublishEventRelock(data types.EventDataRoundState) error { + return b.Publish(types.EventRelockValue, data) +} + +func (b *EventBus) PublishEventLock(data types.EventDataRoundState) error { + return b.Publish(types.EventLockValue, data) +} + +func (b *EventBus) PublishEventValidatorSetUpdates(data types.EventDataValidatorSetUpdate) error { + return b.Publish(types.EventValidatorSetUpdateValue, data) +} + +func (b *EventBus) PublishEventEvidenceValidated(evidence types.EventDataEvidenceValidated) error { + return b.Publish(types.EventEvidenceValidatedValue, evidence) +} + +func (b *EventBus) PublishEventCommit(data types.EventDataCommit) error { + return b.Publish(types.EventCommitValue, data) +} diff --git a/internal/eventbus/event_bus_test.go b/internal/eventbus/event_bus_test.go new file mode 100644 index 0000000000..5cf1b60492 --- /dev/null +++ b/internal/eventbus/event_bus_test.go @@ -0,0 +1,559 @@ +package eventbus_test + +import ( + "context" + "fmt" + mrand "math/rand" + "testing" + "time" + + "github.com/dashevo/dashd-go/btcjson" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto" + + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/eventbus" + tmpubsub "github.com/tendermint/tendermint/internal/pubsub" + tmquery "github.com/tendermint/tendermint/internal/pubsub/query" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/types" +) + +func TestEventBusPublishEventTx(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + eventBus := eventbus.NewDefault(log.NewNopLogger()) + err := eventBus.Start(ctx) + require.NoError(t, err) + + tx := types.Tx("foo") + result := abci.ExecTxResult{ + Data: []byte("bar"), + Events: []abci.Event{ + {Type: "testType", Attributes: []abci.EventAttribute{{Key: "baz", Value: "1"}}}, + }, + } + + // PublishEventTx adds 3 composite 
keys, so the query below should work + query := fmt.Sprintf("tm.event='Tx' AND tx.height=1 AND tx.hash='%X' AND testType.baz=1", tx.Hash()) + txsSub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: "test", + Query: tmquery.MustCompile(query), + }) + require.NoError(t, err) + + done := make(chan struct{}) + go func() { + defer close(done) + msg, err := txsSub.Next(ctx) + assert.NoError(t, err) + + edt := msg.Data().(types.EventDataTx) + assert.Equal(t, int64(1), edt.Height) + assert.Equal(t, uint32(0), edt.Index) + assert.EqualValues(t, tx, edt.Tx) + assert.Equal(t, result, edt.Result) + }() + + err = eventBus.PublishEventTx(types.EventDataTx{ + TxResult: abci.TxResult{ + Height: 1, + Index: 0, + Tx: tx, + Result: result, + }, + }) + assert.NoError(t, err) + + select { + case <-done: + case <-time.After(1 * time.Second): + t.Fatal("did not receive a transaction after 1 sec.") + } +} + +func TestEventBusPublishEventNewBlock(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + eventBus := eventbus.NewDefault(log.NewNopLogger()) + err := eventBus.Start(ctx) + require.NoError(t, err) + + block := types.MakeBlock(0, 0, nil, []types.Tx{}, nil, []types.Evidence{}, 1) + bps, err := block.MakePartSet(types.BlockPartSizeBytes) + require.NoError(t, err) + blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} + resultFinalizeBlock := abci.ResponseFinalizeBlock{ + Events: []abci.Event{ + {Type: "testType", Attributes: []abci.EventAttribute{ + {Key: "baz", Value: "1"}, + {Key: "foz", Value: "2"}, + }}, + }, + } + + // PublishEventNewBlock adds the tm.event compositeKey, so the query below should work + query := "tm.event='NewBlock' AND testType.baz=1 AND testType.foz=2" + blocksSub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: "test", + Query: tmquery.MustCompile(query), + }) + require.NoError(t, err) + + done := make(chan struct{}) + go func() { + defer close(done) + msg, err := blocksSub.Next(ctx) + assert.NoError(t, err) + + edt := msg.Data().(types.EventDataNewBlock) + assert.Equal(t, block, edt.Block) + assert.Equal(t, blockID, edt.BlockID) + assert.Equal(t, resultFinalizeBlock, edt.ResultFinalizeBlock) + }() + + err = eventBus.PublishEventNewBlock(types.EventDataNewBlock{ + Block: block, + BlockID: blockID, + ResultFinalizeBlock: resultFinalizeBlock, + }) + assert.NoError(t, err) + + select { + case <-done: + case <-time.After(1 * time.Second): + t.Fatal("did not receive a block after 1 sec.") + } +} + +func TestEventBusPublishEventTxDuplicateKeys(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + eventBus := eventbus.NewDefault(log.NewNopLogger()) + err := eventBus.Start(ctx) + require.NoError(t, err) + + tx := types.Tx("foo") + result := abci.ExecTxResult{ + Data: []byte("bar"), + Events: []abci.Event{ + { + Type: "transfer", + Attributes: []abci.EventAttribute{ + {Key: "sender", Value: "foo"}, + {Key: "recipient", Value: "bar"}, + {Key: "amount", Value: "5"}, + }, + }, + { + Type: "transfer", + Attributes: []abci.EventAttribute{ + {Key: "sender", Value: "baz"}, + {Key: "recipient", Value: "cat"}, + {Key: "amount", Value: "13"}, + }, + }, + { + Type: "withdraw.rewards", + Attributes: []abci.EventAttribute{ + {Key: "address", Value: "bar"}, + {Key: "source", Value: "iceman"}, + {Key: "amount", Value: "33"}, + }, + }, + }, + } + + testCases := []struct { + query string + expectResults bool + }{ + { + "tm.event='Tx' AND tx.height=1 AND 
transfer.sender='DoesNotExist'", + false, + }, + { + "tm.event='Tx' AND tx.height=1 AND transfer.sender='foo'", + true, + }, + { + "tm.event='Tx' AND tx.height=1 AND transfer.sender='baz'", + true, + }, + { + "tm.event='Tx' AND tx.height=1 AND transfer.sender='foo' AND transfer.sender='baz'", + true, + }, + { + "tm.event='Tx' AND tx.height=1 AND transfer.sender='foo' AND transfer.sender='DoesNotExist'", + false, + }, + } + + for i, tc := range testCases { + var name string + + if tc.expectResults { + name = fmt.Sprintf("ExpectedResultsCase%d", i) + } else { + name = fmt.Sprintf("NoResultsCase%d", i) + } + + t.Run(name, func(t *testing.T) { + sub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: fmt.Sprintf("client-%d", i), + Query: tmquery.MustCompile(tc.query), + }) + require.NoError(t, err) + + gotResult := make(chan bool, 1) + go func() { + defer close(gotResult) + tctx, cancel := context.WithTimeout(ctx, 1*time.Second) + defer cancel() + msg, err := sub.Next(tctx) + if err == nil { + data := msg.Data().(types.EventDataTx) + assert.Equal(t, int64(1), data.Height) + assert.Equal(t, uint32(0), data.Index) + assert.EqualValues(t, tx, data.Tx) + assert.Equal(t, result, data.Result) + gotResult <- true + } + }() + + assert.NoError(t, eventBus.PublishEventTx(types.EventDataTx{ + TxResult: abci.TxResult{ + Height: 1, + Index: 0, + Tx: tx, + Result: result, + }, + })) + + require.NoError(t, ctx.Err(), "context should not have been canceled") + + if got := <-gotResult; got != tc.expectResults { + require.Failf(t, "Wrong transaction result", + "got a tx: %v, wanted a tx: %v", got, tc.expectResults) + } + }) + + } +} + +func TestEventBusPublishEventNewBlockHeader(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + eventBus := eventbus.NewDefault(log.NewNopLogger()) + err := eventBus.Start(ctx) + require.NoError(t, err) + + block := types.MakeBlock(0, 0, nil, []types.Tx{}, nil, []types.Evidence{}, 1) + resultFinalizeBlock := abci.ResponseFinalizeBlock{ + Events: []abci.Event{ + {Type: "testType", Attributes: []abci.EventAttribute{ + {Key: "baz", Value: "1"}, + {Key: "foz", Value: "2"}, + }}, + }, + } + + // PublishEventNewBlockHeader adds the tm.event compositeKey, so the query below should work + query := "tm.event='NewBlockHeader' AND testType.baz=1 AND testType.foz=2" + headersSub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: "test", + Query: tmquery.MustCompile(query), + }) + require.NoError(t, err) + + done := make(chan struct{}) + go func() { + defer close(done) + msg, err := headersSub.Next(ctx) + assert.NoError(t, err) + + edt := msg.Data().(types.EventDataNewBlockHeader) + assert.Equal(t, block.Header, edt.Header) + assert.Equal(t, resultFinalizeBlock, edt.ResultFinalizeBlock) + }() + + err = eventBus.PublishEventNewBlockHeader(types.EventDataNewBlockHeader{ + Header: block.Header, + ResultFinalizeBlock: resultFinalizeBlock, + }) + assert.NoError(t, err) + + select { + case <-done: + case <-time.After(1 * time.Second): + t.Fatal("did not receive a block header after 1 sec.") + } +} + +func TestEventBusPublishEventEvidenceValidated(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + eventBus := eventbus.NewDefault(log.NewNopLogger()) + err := eventBus.Start(ctx) + require.NoError(t, err) + + quorumHash := crypto.RandQuorumHash() + + ev, err := types.NewMockDuplicateVoteEvidence(ctx, 1, time.Now(), "test-chain-id", btcjson.LLMQType_5_60, quorumHash) +
require.NoError(t, err) + + const query = `tm.event='EvidenceValidated'` + evSub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: "test", + Query: tmquery.MustCompile(query), + }) + require.NoError(t, err) + + done := make(chan struct{}) + go func() { + defer close(done) + msg, err := evSub.Next(ctx) + assert.NoError(t, err) + + edt := msg.Data().(types.EventDataEvidenceValidated) + assert.Equal(t, ev, edt.Evidence) + assert.Equal(t, int64(1), edt.Height) + }() + + err = eventBus.PublishEventEvidenceValidated(types.EventDataEvidenceValidated{ + Evidence: ev, + Height: int64(1), + }) + assert.NoError(t, err) + + select { + case <-done: + case <-time.After(1 * time.Second): + t.Fatal("did not receive the evidence after 1 sec.") + } +} + +func TestEventBusPublishEventNewEvidence(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + eventBus := eventbus.NewDefault(log.NewNopLogger()) + err := eventBus.Start(ctx) + require.NoError(t, err) + + quorumHash := crypto.RandQuorumHash() + + ev, err := types.NewMockDuplicateVoteEvidence(ctx, 1, time.Now(), "test-chain-id", btcjson.LLMQType_5_60, quorumHash) + require.NoError(t, err) + + const query = `tm.event='NewEvidence'` + evSub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: "test", + Query: tmquery.MustCompile(query), + }) + require.NoError(t, err) + + done := make(chan struct{}) + go func() { + defer close(done) + msg, err := evSub.Next(ctx) + assert.NoError(t, err) + + edt := msg.Data().(types.EventDataNewEvidence) + assert.Equal(t, ev, edt.Evidence) + assert.Equal(t, int64(4), edt.Height) + }() + + err = eventBus.PublishEventNewEvidence(types.EventDataNewEvidence{ + Evidence: ev, + Height: 4, + }) + assert.NoError(t, err) + + select { + case <-done: + case <-time.After(1 * time.Second): + t.Fatal("did not receive the evidence after 1 sec.") + } +} + +func TestEventBusPublish(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + eventBus := eventbus.NewDefault(log.NewNopLogger()) + err := eventBus.Start(ctx) + require.NoError(t, err) + + const numEventsExpected = 14 + + sub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: "test", + Query: tmquery.All, + Limit: numEventsExpected, + }) + require.NoError(t, err) + + count := make(chan int, 1) + go func() { + defer close(count) + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + + for n := 0; ; n++ { + if _, err := sub.Next(ctx); err != nil { + count <- n + return + } + } + }() + + require.NoError(t, eventBus.Publish(types.EventNewBlockHeaderValue, + types.EventDataNewBlockHeader{})) + require.NoError(t, eventBus.PublishEventNewBlock(types.EventDataNewBlock{})) + require.NoError(t, eventBus.PublishEventNewBlockHeader(types.EventDataNewBlockHeader{})) + require.NoError(t, eventBus.PublishEventVote(types.EventDataVote{})) + require.NoError(t, eventBus.PublishEventNewRoundStep(types.EventDataRoundState{})) + require.NoError(t, eventBus.PublishEventTimeoutPropose(types.EventDataRoundState{})) + require.NoError(t, eventBus.PublishEventTimeoutWait(types.EventDataRoundState{})) + require.NoError(t, eventBus.PublishEventNewRound(types.EventDataNewRound{})) + require.NoError(t, eventBus.PublishEventCompleteProposal(types.EventDataCompleteProposal{})) + require.NoError(t, eventBus.PublishEventPolka(types.EventDataRoundState{})) + require.NoError(t,
eventBus.PublishEventRelock(types.EventDataRoundState{})) + require.NoError(t, eventBus.PublishEventLock(types.EventDataRoundState{})) + require.NoError(t, eventBus.PublishEventValidatorSetUpdates(types.EventDataValidatorSetUpdate{})) + require.NoError(t, eventBus.PublishEventBlockSyncStatus(types.EventDataBlockSyncStatus{})) + require.NoError(t, eventBus.PublishEventStateSyncStatus(types.EventDataStateSyncStatus{})) + + require.GreaterOrEqual(t, <-count, numEventsExpected) +} + +func BenchmarkEventBus(b *testing.B) { + benchmarks := []struct { + name string + numClients int + randQueries bool + randEvents bool + }{ + {"10Clients1Query1Event", 10, false, false}, + {"100Clients", 100, false, false}, + {"1000Clients", 1000, false, false}, + + {"10ClientsRandQueries1Event", 10, true, false}, + {"100Clients", 100, true, false}, + {"1000Clients", 1000, true, false}, + + {"10ClientsRandQueriesRandEvents", 10, true, true}, + {"100Clients", 100, true, true}, + {"1000Clients", 1000, true, true}, + + {"10Clients1QueryRandEvents", 10, false, true}, + {"100Clients", 100, false, true}, + {"1000Clients", 1000, false, true}, + } + + for _, bm := range benchmarks { + bm := bm + b.Run(bm.name, func(b *testing.B) { + benchmarkEventBus(bm.numClients, bm.randQueries, bm.randEvents, b) + }) + } +} + +func benchmarkEventBus(numClients int, randQueries bool, randEvents bool, b *testing.B) { + // for random* functions + mrand.Seed(time.Now().Unix()) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + eventBus := eventbus.NewDefault(log.NewNopLogger()) // set buffer capacity to 0 so we are not testing cache + err := eventBus.Start(ctx) + if err != nil { + b.Error(err) + } + b.Cleanup(eventBus.Wait) + + q := types.EventQueryNewBlock + + for i := 0; i < numClients; i++ { + if randQueries { + q = randQuery() + } + sub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: fmt.Sprintf("client-%d", i), + Query: q, + }) + if err != nil { + b.Fatal(err) + } + go func() { + for { + if _, err := sub.Next(ctx); err != nil { + return + } + } + }() + } + + eventValue := types.EventNewBlockValue + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + if randEvents { + eventValue = randEventValue() + } + + err := eventBus.Publish(eventValue, types.EventDataString("Gamora")) + if err != nil { + b.Error(err) + } + } +} + +var events = []string{ + types.EventNewBlockValue, + types.EventNewBlockHeaderValue, + types.EventNewRoundValue, + types.EventNewRoundStepValue, + types.EventTimeoutProposeValue, + types.EventCompleteProposalValue, + types.EventPolkaValue, + types.EventLockValue, + types.EventRelockValue, + types.EventTimeoutWaitValue, + types.EventVoteValue, + types.EventBlockSyncStatusValue, + types.EventStateSyncStatusValue, +} + +func randEventValue() string { + return events[mrand.Intn(len(events))] +} + +var queries = []*tmquery.Query{ + types.EventQueryNewBlock, + types.EventQueryNewBlockHeader, + types.EventQueryNewRound, + types.EventQueryNewRoundStep, + types.EventQueryTimeoutPropose, + types.EventQueryCompleteProposal, + types.EventQueryPolka, + types.EventQueryLock, + types.EventQueryRelock, + types.EventQueryTimeoutWait, + types.EventQueryVote, + types.EventQueryBlockSyncStatus, + types.EventQueryStateSyncStatus, +} + +func randQuery() *tmquery.Query { + return queries[mrand.Intn(len(queries))] +} diff --git a/internal/eventlog/cursor/cursor.go b/internal/eventlog/cursor/cursor.go new file mode 100644 index 0000000000..215c797892 --- /dev/null +++ 
b/internal/eventlog/cursor/cursor.go @@ -0,0 +1,100 @@ +// Package cursor implements time-ordered item cursors for an event log. +package cursor + +import ( + "errors" + "fmt" + "strconv" + "strings" + "time" +) + +// A Source produces cursors based on a time index generator and a sequence +// counter. A zero-valued Source is ready for use with defaults as described. +type Source struct { + // This function is called to produce the current time index. + // If nil, it defaults to time.Now().UnixNano(). + TimeIndex func() int64 + + // The current counter value used for sequence number generation. It is + // incremented in-place each time a cursor is generated. + Counter int64 +} + +func (s *Source) timeIndex() int64 { + if s.TimeIndex == nil { + return time.Now().UnixNano() + } + return s.TimeIndex() +} + +func (s *Source) nextCounter() int64 { + s.Counter++ + return s.Counter +} + +// Cursor produces a fresh cursor from s at the current time index and counter. +func (s *Source) Cursor() Cursor { + return Cursor{ + timestamp: uint64(s.timeIndex()), + sequence: uint16(s.nextCounter() & 0xffff), + } +} + +// A Cursor is a unique identifier for an item in a time-ordered event log. +// It is safe to copy and compare cursors by value. +type Cursor struct { + timestamp uint64 // ns since Unix epoch + sequence uint16 // sequence number +} + +// Before reports whether c is prior to o in time ordering. This comparison +// ignores sequence numbers. +func (c Cursor) Before(o Cursor) bool { return c.timestamp < o.timestamp } + +// Diff returns the time duration between c and o. The duration is negative if +// c is before o in time order. +func (c Cursor) Diff(o Cursor) time.Duration { + return time.Duration(c.timestamp) - time.Duration(o.timestamp) +} + +// IsZero reports whether c is the zero cursor. +func (c Cursor) IsZero() bool { return c == Cursor{} } + +// MarshalText implements the encoding.TextMarshaler interface. +// A zero cursor marshals as "", otherwise the format used by the String method. +func (c Cursor) MarshalText() ([]byte, error) { + if c.IsZero() { + return nil, nil + } + return []byte(c.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +// An empty text unmarshals without error to a zero cursor. +func (c *Cursor) UnmarshalText(data []byte) error { + if len(data) == 0 { + *c = Cursor{} // set zero + return nil + } + ps := strings.SplitN(string(data), "-", 2) + if len(ps) != 2 { + return errors.New("invalid cursor format") + } + ts, err := strconv.ParseUint(ps[0], 16, 64) + if err != nil { + return fmt.Errorf("invalid timestamp: %w", err) + } + sn, err := strconv.ParseUint(ps[1], 16, 16) + if err != nil { + return fmt.Errorf("invalid sequence: %w", err) + } + c.timestamp = ts + c.sequence = uint16(sn) + return nil +} + +// String returns a printable text representation of a cursor. 
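To make the cursor API above concrete, here is a minimal usage sketch. It is illustrative only: `internal/...` packages are importable only from within the tendermint module, and the printed values depend on the wall clock.

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/internal/eventlog/cursor"
)

func main() {
	// A zero-valued Source is ready for use: the time index defaults to
	// time.Now().UnixNano() and the sequence counter starts at zero.
	var src cursor.Source

	a := src.Cursor()
	b := src.Cursor()

	// MarshalText renders "<timestamp:16 hex digits>-<sequence:4 hex digits>".
	text, _ := a.MarshalText()
	fmt.Printf("a = %s\n", text)

	// Before compares timestamps only; sequence numbers do not break ties.
	fmt.Println("a before b:", a.Before(b))

	// The encoding round-trips through UnmarshalText.
	var c cursor.Cursor
	if err := c.UnmarshalText(text); err != nil {
		panic(err)
	}
	fmt.Println("round-trip equal:", c == a)
}
```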
+func (c Cursor) String() string { + return fmt.Sprintf("%016x-%04x", c.timestamp, c.sequence) +} diff --git a/internal/eventlog/cursor/cursor_test.go b/internal/eventlog/cursor/cursor_test.go new file mode 100644 index 0000000000..31701ddf79 --- /dev/null +++ b/internal/eventlog/cursor/cursor_test.go @@ -0,0 +1,141 @@ +package cursor_test + +import ( + "fmt" + "testing" + "time" + + "github.com/tendermint/tendermint/internal/eventlog/cursor" +) + +func mustParse(t *testing.T, s string) cursor.Cursor { + t.Helper() + var c cursor.Cursor + if err := c.UnmarshalText([]byte(s)); err != nil { + t.Fatalf("Unmarshal %q: unexpected error: %v", s, err) + } + return c +} + +func TestSource_counter(t *testing.T) { + src := &cursor.Source{ + TimeIndex: func() int64 { return 255 }, + } + for i := 1; i <= 5; i++ { + want := fmt.Sprintf("00000000000000ff-%04x", i) + got := src.Cursor().String() + if got != want { + t.Errorf("Cursor %d: got %q, want %q", i, got, want) + } + } +} + +func TestSource_timeIndex(t *testing.T) { + times := []int64{0, 1, 100, 65535, 0x76543210fecdba98} + src := &cursor.Source{ + TimeIndex: func() int64 { + out := times[0] + times = append(times[1:], out) + return out + }, + Counter: 160, + } + results := []string{ + "0000000000000000-00a1", + "0000000000000001-00a2", + "0000000000000064-00a3", + "000000000000ffff-00a4", + "76543210fecdba98-00a5", + } + for i, want := range results { + if got := src.Cursor().String(); got != want { + t.Errorf("Cursor %d: got %q, want %q", i+1, got, want) + } + } +} + +func TestCursor_roundTrip(t *testing.T) { + const text = `0123456789abcdef-fce9` + + c := mustParse(t, text) + if got := c.String(); got != text { + t.Errorf("Wrong string format: got %q, want %q", got, text) + } + cmp, err := c.MarshalText() + if err != nil { + t.Fatalf("Marshal %+v failed: %v", c, err) + } + if got := string(cmp); got != text { + t.Errorf("Wrong text format: got %q, want %q", got, text) + } +} + +func TestCursor_ordering(t *testing.T) { + // Condition: text1 precedes text2 in time order. + // Condition: text2 has an earlier sequence than text1. + const zero = "" + const text1 = "0000000012345678-0005" + const text2 = "00000000fecdeba9-0002" + + zc := mustParse(t, zero) + c1 := mustParse(t, text1) + c2 := mustParse(t, text2) + + // Confirm for all pairs that string order respects time order. 
+ pairs := []struct { + t1, t2 string + c1, c2 cursor.Cursor + }{ + {zero, zero, zc, zc}, + {zero, text1, zc, c1}, + {zero, text2, zc, c2}, + {text1, zero, c1, zc}, + {text1, text1, c1, c1}, + {text1, text2, c1, c2}, + {text2, zero, c2, zc}, + {text2, text1, c2, c1}, + {text2, text2, c2, c2}, + } + for _, pair := range pairs { + want := pair.t1 < pair.t2 + if got := pair.c1.Before(pair.c2); got != want { + t.Errorf("(%s).Before(%s): got %v, want %v", pair.t1, pair.t2, got, want) + } + } +} + +func TestCursor_IsZero(t *testing.T) { + tests := []struct { + text string + want bool + }{ + {"", true}, + {"0000000000000000-0000", true}, + {"0000000000000001-0000", false}, + {"0000000000000000-0001", false}, + {"0000000000000001-0001", false}, + } + for _, test := range tests { + c := mustParse(t, test.text) + if got := c.IsZero(); got != test.want { + t.Errorf("IsZero(%q): got %v, want %v", test.text, got, test.want) + } + } +} + +func TestCursor_Diff(t *testing.T) { + const time1 = 0x1ac0193001 + const time2 = 0x0ac0193001 + + text1 := fmt.Sprintf("%016x-0001", time1) + text2 := fmt.Sprintf("%016x-0005", time2) + want := time.Duration(time1 - time2) + + c1 := mustParse(t, text1) + c2 := mustParse(t, text2) + + got := c1.Diff(c2) + if got != want { + t.Fatalf("Diff %q - %q: got %v, want %v", text1, text2, got, want) + } +} diff --git a/internal/eventlog/eventlog.go b/internal/eventlog/eventlog.go new file mode 100644 index 0000000000..b507f79bc1 --- /dev/null +++ b/internal/eventlog/eventlog.go @@ -0,0 +1,217 @@ +// Package eventlog defines a reverse time-ordered log of events over a sliding +// window of time before the most recent item in the log. +// +// New items are added to the head of the log (the newest end), and items that +// fall outside the designated window are pruned from its tail (the oldest). +// Items within the log are indexed by lexicographically-ordered cursors. +package eventlog + +import ( + "context" + "errors" + "sync" + "time" + + "github.com/tendermint/tendermint/internal/eventlog/cursor" + "github.com/tendermint/tendermint/types" +) + +// A Log is a reverse time-ordered log of events in a sliding window of time +// before the newest item. Use Add to add new items to the front (head) of the +// log, and Scan or WaitScan to traverse the current contents of the log. +// +// After construction, a *Log is safe for concurrent access by one writer and +// any number of readers. +type Log struct { + // These values do not change after construction. + windowSize time.Duration + maxItems int + numItemsGauge gauge + + // Protects access to the fields below. Lock to modify the values of these + // fields, or to read or snapshot the values. + mu sync.Mutex + + numItems int // total number of items in the log + oldestCursor cursor.Cursor // cursor of the oldest item + head *logEntry // pointer to the newest item + ready chan struct{} // closed when head changes + source cursor.Source // generator of cursors +} + +// New constructs a new empty log with the given settings. +func New(opts LogSettings) (*Log, error) { + if opts.WindowSize <= 0 { + return nil, errors.New("window size must be positive") + } + lg := &Log{ + windowSize: opts.WindowSize, + maxItems: opts.MaxItems, + numItemsGauge: discard{}, + ready: make(chan struct{}), + source: opts.Source, + } + if opts.Metrics != nil { + lg.numItemsGauge = opts.Metrics.numItemsGauge + } + return lg, nil +} + +// Add adds a new item to the front of the log. If necessary, the log is pruned +// to fit its constraints on size and age. 
Add blocks until both steps are done. +// +// Any error reported by Add arises from pruning; the new item was added to the +// log regardless whether an error occurs. +func (lg *Log) Add(etype string, data types.EventData) error { + lg.mu.Lock() + head := &logEntry{ + item: newItem(lg.source.Cursor(), etype, data), + next: lg.head, + } + lg.numItems++ + lg.updateHead(head) + size := lg.numItems + age := head.item.Cursor.Diff(lg.oldestCursor) + + // If the log requires pruning, do the pruning step outside the lock. This + // permits readers to continue to make progress while we're working. + lg.mu.Unlock() + return lg.checkPrune(head, size, age) +} + +// Scan scans the current contents of the log, calling f with each item until +// all items are visited or f reports an error. If f returns ErrStopScan, Scan +// returns nil, otherwise it returns the error reported by f. +// +// The Info value returned is valid even if Scan reports an error. +func (lg *Log) Scan(f func(*Item) error) (Info, error) { + return lg.scanState(lg.state(), f) +} + +// WaitScan blocks until the cursor of the frontmost log item is different from +// c, then executes a Scan on the contents of the log. If ctx ends before the +// head is updated, WaitScan returns an error without calling f. +// +// The Info value returned is valid even if WaitScan reports an error. +func (lg *Log) WaitScan(ctx context.Context, c cursor.Cursor, f func(*Item) error) (Info, error) { + st := lg.state() + for st.head == nil || st.head.item.Cursor == c { + var err error + st, err = lg.waitStateChange(ctx) + if err != nil { + return st.info(), err + } + } + return lg.scanState(st, f) +} + +// Info returns the current state of the log. +func (lg *Log) Info() Info { return lg.state().info() } + +// ErrStopScan is returned by a Scan callback to signal that scanning should be +// terminated without error. +var ErrStopScan = errors.New("stop scanning") + +// ErrLogPruned is returned by Add to signal that at least some events within +// the time window were discarded by pruning in excess of the size limit. +// This error may be wrapped, use errors.Is to test for it. +var ErrLogPruned = errors.New("log pruned") + +// LogSettings configure the construction of an event log. +type LogSettings struct { + // The size of the time window measured in time before the newest item. + // This value must be positive. + WindowSize time.Duration + + // The maximum number of items that will be retained in memory within the + // designated time window. A value ≤ 0 imposes no limit, otherwise items in + // excess of this number will be dropped from the log. + MaxItems int + + // The cursor source to use for log entries. If not set, use wallclock time. + Source cursor.Source + + // If non-nil, exported metrics to update. If nil, metrics are discarded. + Metrics *Metrics +} + +// Info records the current state of the log at the time of a scan operation. +type Info struct { + Oldest cursor.Cursor // the cursor of the oldest item in the log + Newest cursor.Cursor // the cursor of the newest item in the log + Size int // the number of items in the log +} + +// logState is a snapshot of the state of the log. +type logState struct { + oldest cursor.Cursor + newest cursor.Cursor + size int + head *logEntry +} + +func (st logState) info() Info { + return Info{Oldest: st.oldest, Newest: st.newest, Size: st.size} +} + +// state returns a snapshot of the current log contents. 
The caller may freely +// traverse the internal structure of the list without locking, provided it +// does not modify either the entries or their items. +func (lg *Log) state() logState { + lg.mu.Lock() + defer lg.mu.Unlock() + if lg.head == nil { + return logState{} // empty + } + return logState{ + oldest: lg.oldestCursor, + newest: lg.head.item.Cursor, + size: lg.numItems, + head: lg.head, + } +} + +// waitStateChange blocks until either ctx ends or the head of the log is +// modified, then returns the state of the log. An error is reported only if +// ctx terminates before head changes. +func (lg *Log) waitStateChange(ctx context.Context) (logState, error) { + lg.mu.Lock() + ch := lg.ready // capture + lg.mu.Unlock() + select { + case <-ctx.Done(): + return lg.state(), ctx.Err() + case <-ch: + return lg.state(), nil + } +} + +// scanState scans the contents of the log at st. See the Scan method for a +// description of the callback semantics. +func (lg *Log) scanState(st logState, f func(*Item) error) (Info, error) { + info := Info{Oldest: st.oldest, Newest: st.newest, Size: st.size} + for cur := st.head; cur != nil; cur = cur.next { + if err := f(cur.item); err != nil { + if errors.Is(err, ErrStopScan) { + return info, nil + } + return info, err + } + } + return info, nil +} + +// updateHead replaces the current head with newHead, signals any waiters, and +// resets the wait signal. The caller must hold log.mu exclusively. +func (lg *Log) updateHead(newHead *logEntry) { + lg.head = newHead + close(lg.ready) // signal + lg.ready = make(chan struct{}) +} + +// A logEntry is the backbone of the event log queue. Entries are not mutated +// after construction, so it is safe to read item and next without locking. +type logEntry struct { + item *Item + next *logEntry +} diff --git a/internal/eventlog/eventlog_test.go b/internal/eventlog/eventlog_test.go new file mode 100644 index 0000000000..b49ce52e6c --- /dev/null +++ b/internal/eventlog/eventlog_test.go @@ -0,0 +1,222 @@ +package eventlog_test + +import ( + "context" + "errors" + "fmt" + "math/rand" + "strconv" + "sync" + "testing" + "time" + + "github.com/fortytw2/leaktest" + "github.com/google/go-cmp/cmp" + + "github.com/tendermint/tendermint/internal/eventlog" + "github.com/tendermint/tendermint/internal/eventlog/cursor" + "github.com/tendermint/tendermint/types" +) + +// fakeTime is a fake clock to use to control cursor assignment. +// The timeIndex method reports the current "time" and advance manually updates +// the apparent time. +type fakeTime struct{ now int64 } + +func newFakeTime(init int64) *fakeTime { return &fakeTime{now: init} } + +func (f *fakeTime) timeIndex() int64 { return f.now } + +func (f *fakeTime) advance(d time.Duration) { f.now += int64(d) } + +// eventData is a placeholder event data implementation for testing. +type eventData string + +func (eventData) TypeTag() string { return "eventData" } + +func TestNewError(t *testing.T) { + lg, err := eventlog.New(eventlog.LogSettings{}) + if err == nil { + t.Fatalf("New: got %+v, wanted error", lg) + } else { + t.Logf("New: got expected error: %v", err) + } +} + +func TestPruneTime(t *testing.T) { + clk := newFakeTime(0) + + // Construct a log with a 60-second time window. + lg, err := eventlog.New(eventlog.LogSettings{ + WindowSize: 60 * time.Second, + Source: cursor.Source{ + TimeIndex: clk.timeIndex, + }, + }) + if err != nil { + t.Fatalf("New unexpectedly failed: %v", err) + } + + // Add events up to the time window, at seconds 0, 15, 30, 45, 60. 
+ // None of these should be pruned (yet). + var want []string // cursor strings + for i := 1; i <= 5; i++ { + want = append(want, fmt.Sprintf("%016x-%04x", clk.timeIndex(), i)) + mustAdd(t, lg, "test-event", eventData("whatever")) + clk.advance(15 * time.Second) + } + // time now: 75 sec. + + // Verify that all the events we added are present. + got := cursors(t, lg) + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("Cursors before pruning: (-want, +got)\n%s", diff) + } + + // Add an event past the end of the window at second 90, and verify that + // this triggered an age-based prune of the oldest events (0, 15) that are + // outside the 60-second window. + + clk.advance(15 * time.Second) // time now: 90 sec. + want = append(want[2:], fmt.Sprintf("%016x-%04x", clk.timeIndex(), 6)) + + mustAdd(t, lg, "test-event", eventData("extra")) + got = cursors(t, lg) + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("Cursors after pruning: (-want, +got)\n%s", diff) + } +} + +// Run a publisher and concurrent subscribers to tickle the race detector with +// concurrent add and scan operations. +func TestConcurrent(t *testing.T) { + defer leaktest.Check(t) + if testing.Short() { + t.Skip("Skipping concurrency exercise because -short is set") + } + + lg, err := eventlog.New(eventlog.LogSettings{ + WindowSize: 30 * time.Second, + }) + if err != nil { + t.Fatalf("New unexpectedly failed: %v", err) + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var wg sync.WaitGroup + + // Publisher: Add events and handle expirations. + wg.Add(1) + go func() { + defer wg.Done() + + tick := time.NewTimer(0) + defer tick.Stop() + for { + select { + case <-ctx.Done(): + return + case t := <-tick.C: + _ = lg.Add("test-event", eventData(t.Format(time.RFC3339Nano))) + tick.Reset(time.Duration(rand.Intn(50)) * time.Millisecond) + } + } + }() + + // Subscribers: Wait for new events at the head of the queue. This + // simulates the typical operation of a subscriber by waiting for the head + // cursor to change and then scanning down toward the unconsumed item. + const numSubs = 16 + for i := 0; i < numSubs; i++ { + task := i + wg.Add(1) + go func() { + defer wg.Done() + + tick := time.NewTimer(0) + var cur cursor.Cursor + for { + // Simulate the subscriber being busy with other things. + select { + case <-ctx.Done(): + return + case <-tick.C: + tick.Reset(time.Duration(rand.Intn(150)) * time.Millisecond) + } + + // Wait for new data to arrive. + info, err := lg.WaitScan(ctx, cur, func(itm *eventlog.Item) error { + if itm.Cursor == cur { + return eventlog.ErrStopScan + } + return nil + }) + if err != nil { + if !errors.Is(err, context.Canceled) { + t.Errorf("Wait scan for task %d failed: %v", task, err) + } + return + } + cur = info.Newest + } + }() + } + + time.AfterFunc(2*time.Second, cancel) + wg.Wait() +} + +func TestPruneSize(t *testing.T) { + const maxItems = 25 + lg, err := eventlog.New(eventlog.LogSettings{ + WindowSize: 60 * time.Second, + MaxItems: maxItems, + }) + if err != nil { + t.Fatalf("New unexpectedly failed: %v", err) + } + + // Add a lot of items to the log and verify that we never exceed the + // specified cap. + for i := 0; i < 60; i++ { + mustAdd(t, lg, "test-event", eventData(strconv.Itoa(i+1))) + + if got := lg.Info().Size; got > maxItems { + t.Errorf("After add %d: log size is %d, want ≤ %d", i+1, got, maxItems) + } + } +} + +// mustAdd adds a single event to lg. 
If Add reports an error other than for +// pruning, the test fails; otherwise the error is returned. +func mustAdd(t *testing.T, lg *eventlog.Log, etype string, data types.EventData) { + t.Helper() + err := lg.Add(etype, data) + if err != nil && !errors.Is(err, eventlog.ErrLogPruned) { + t.Fatalf("Add %q failed: %v", etype, err) + } +} + +// cursors extracts the cursors from lg in ascending order of time. +func cursors(t *testing.T, lg *eventlog.Log) []string { + t.Helper() + + var cursors []string + if _, err := lg.Scan(func(itm *eventlog.Item) error { + cursors = append(cursors, itm.Cursor.String()) + return nil + }); err != nil { + t.Fatalf("Scan failed: %v", err) + } + reverse(cursors) // put in forward-time order for comparison + return cursors +} + +func reverse(ss []string) { + for i, j := 0, len(ss)-1; i < j; { + ss[i], ss[j] = ss[j], ss[i] + i++ + j-- + } +} diff --git a/internal/eventlog/item.go b/internal/eventlog/item.go new file mode 100644 index 0000000000..f1f43b46d0 --- /dev/null +++ b/internal/eventlog/item.go @@ -0,0 +1,78 @@ +package eventlog + +import ( + "strings" + + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/eventlog/cursor" + "github.com/tendermint/tendermint/types" +) + +// Cached constants for the pieces of reserved event names. +var ( + tmTypeTag string + tmTypeKey string +) + +func init() { + parts := strings.SplitN(types.EventTypeKey, ".", 2) + if len(parts) != 2 { + panic("invalid event type key: " + types.EventTypeKey) + } + tmTypeTag = parts[0] + tmTypeKey = parts[1] +} + +// ABCIEventer is an optional extension interface that may be implemented by +// event data types, to expose ABCI metadata to the event log. If an event item +// does not implement this interface, it is presumed to have no ABCI metadata. +type ABCIEventer interface { + // Return any ABCI events metadata the receiver contains. + // The reported slice must not contain a type (tm.event) record, since some + // events share the same structure among different event types. + ABCIEvents() []abci.Event +} + +// An Item is a single event item. +type Item struct { + Cursor cursor.Cursor + Type string + Data types.EventData + Events []abci.Event +} + +// newItem constructs a new item with the specified cursor, type, and data. +func newItem(cursor cursor.Cursor, etype string, data types.EventData) *Item { + return &Item{Cursor: cursor, Type: etype, Data: data, Events: makeEvents(etype, data)} +} + +// makeEvents returns a slice of ABCI events comprising the type tag along with +// any internal events exported by the data value. +func makeEvents(etype string, data types.EventData) []abci.Event { + base := []abci.Event{{ + Type: tmTypeTag, + Attributes: []abci.EventAttribute{{ + Key: tmTypeKey, Value: etype, + }}, + }} + if evt, ok := data.(ABCIEventer); ok { + return append(base, evt.ABCIEvents()...) + } + return base +} + +// FindType reports whether events contains a tm.event event, and if so returns +// its value, which is the type of the underlying event item. 
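To make the lookup concrete, the sketch below hand-builds the same base record that `makeEvents` synthesizes and recovers the type with `FindType`. It assumes `types.EventTypeKey` is `tm.event`, so the cached tag and key split into `tm` and `event`; the `NewBlock` value is an arbitrary sample.

```go
package main

import (
	"fmt"

	abci "github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/internal/eventlog"
)

func main() {
	// Hand-built equivalent of the base record makeEvents prepends to
	// every item (assuming types.EventTypeKey == "tm.event").
	events := []abci.Event{{
		Type: "tm",
		Attributes: []abci.EventAttribute{{
			Key: "event", Value: "NewBlock",
		}},
	}}

	// FindType scans for the tm.event record and reports its value.
	etype, ok := eventlog.FindType(events)
	fmt.Println(etype, ok) // Output: NewBlock true
}
```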
+func FindType(events []abci.Event) (string, bool) { + for _, evt := range events { + if evt.Type != tmTypeTag { + continue + } + for _, attr := range evt.Attributes { + if attr.Key == tmTypeKey { + return attr.Value, true + } + } + } + return "", false +} diff --git a/internal/eventlog/metrics.go b/internal/eventlog/metrics.go new file mode 100644 index 0000000000..cc319032ee --- /dev/null +++ b/internal/eventlog/metrics.go @@ -0,0 +1,39 @@ +package eventlog + +import ( + "github.com/go-kit/kit/metrics/prometheus" + stdprometheus "github.com/prometheus/client_golang/prometheus" +) + +// gauge is the subset of the Prometheus gauge interface used here. +type gauge interface { + Set(float64) +} + +// Metrics define the metrics exported by the eventlog package. +type Metrics struct { + numItemsGauge gauge +} + +// discard is a no-op implementation of the gauge interface. +type discard struct{} + +func (discard) Set(float64) {} + +const eventlogSubsystem = "eventlog" + +// PrometheusMetrics returns a collection of eventlog metrics for Prometheus. +func PrometheusMetrics(ns string, fields ...string) *Metrics { + var labels []string + for i := 0; i < len(fields); i += 2 { + labels = append(labels, fields[i]) + } + return &Metrics{ + numItemsGauge: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: ns, + Subsystem: eventlogSubsystem, + Name: "num_items", + Help: "Number of items currently resident in the event log.", + }, labels).With(fields...), + } +} diff --git a/internal/eventlog/prune.go b/internal/eventlog/prune.go new file mode 100644 index 0000000000..4c3c1f0d0a --- /dev/null +++ b/internal/eventlog/prune.go @@ -0,0 +1,111 @@ +package eventlog + +import ( + "time" +) + +// checkPrune checks whether the log has exceeded its boundaries of size or +// age, and if so prunes the log and updates the head. +func (lg *Log) checkPrune(head *logEntry, size int, age time.Duration) error { + // To avoid potentially re-pruning for every event, don't trigger an age + // prune until we're at least this far beyond the designated size. + const windowSlop = 30 * time.Second + + if age < (lg.windowSize+windowSlop) && (lg.maxItems <= 0 || size <= lg.maxItems) { + lg.numItemsGauge.Set(float64(lg.numItems)) + return nil // no pruning is needed + } + + var newState logState + var err error + + switch { + case lg.maxItems > 0 && size > lg.maxItems: + // We exceeded the size cap. In this case, age does not matter: count off + // the newest items and drop the unconsumed tail. Note that we prune by a + // fraction rather than an absolute amount so that we only have to prune + // for size occasionally. + + // TODO(creachadair): We may want to spill dropped events to secondary + // storage rather than dropping them. The size cap is meant as a safety + // valve against unexpected extremes, but if a network has "expected" + // spikes that nevertheless exceed any safe buffer size (e.g., Osmosis + // epochs), we may want to have a fallback so that we don't lose events + // that would otherwise fall within the window. + newSize := 3 * size / 4 + newState, err = lg.pruneSize(head, newSize) + + default: + // We did not exceed the size cap, but some items are too old. + newState = lg.pruneAge(head) + } + + // Note that when we update the head after pruning, we do not need to signal + // any waiters; pruning never adds new material to the log so anyone waiting + // should continue doing so until a subsequent Add occurs. 
+ lg.mu.Lock() + defer lg.mu.Unlock() + lg.numItems = newState.size + lg.numItemsGauge.Set(float64(newState.size)) + lg.oldestCursor = newState.oldest + lg.head = newState.head + return err +} + +// pruneSize returns a new log state by pruning head to newSize. +// Precondition: newSize ≤ len(head). +func (lg *Log) pruneSize(head *logEntry, newSize int) (logState, error) { + // Special case for size 0 to simplify the logic below. + if newSize == 0 { + return logState{}, ErrLogPruned // drop everything + } + + // Initialize: New head has the same item as the old head. + first := &logEntry{item: head.item} // new head + last := first // new tail (last copied cons) + + cur := head.next + for i := 1; i < newSize; i++ { + cp := &logEntry{item: cur.item} + last.next = cp + last = cp + + cur = cur.next + } + var err error + if head.item.Cursor.Diff(last.item.Cursor) <= lg.windowSize { + err = ErrLogPruned + } + + return logState{ + oldest: last.item.Cursor, + newest: first.item.Cursor, + size: newSize, + head: first, + }, err +} + +// pruneAge returns a new log state by pruning items older than the window +// prior to the head element. +func (lg *Log) pruneAge(head *logEntry) logState { + first := &logEntry{item: head.item} + last := first + + size := 1 + for cur := head.next; cur != nil; cur = cur.next { + diff := head.item.Cursor.Diff(cur.item.Cursor) + if diff > lg.windowSize { + break // all remaining items are older than the window + } + cp := &logEntry{item: cur.item} + last.next = cp + last = cp + size++ + } + return logState{ + oldest: last.item.Cursor, + newest: first.item.Cursor, + size: size, + head: first, + } +} diff --git a/internal/evidence/doc.go b/internal/evidence/doc.go index d521debd30..01d99ee36b 100644 --- a/internal/evidence/doc.go +++ b/internal/evidence/doc.go @@ -1,7 +1,7 @@ /* Package evidence handles all evidence storage and gossiping from detection to block proposal. For the different types of evidence refer to the `evidence.go` file in the types package -or https://github.com/tendermint/spec/blob/master/spec/consensus/light-client/accountability.md. +or https://github.com/tendermint/tendermint/blob/master/spec/consensus/light-client/accountability.md. Gossiping diff --git a/internal/evidence/metrics.go b/internal/evidence/metrics.go new file mode 100644 index 0000000000..59efc23f91 --- /dev/null +++ b/internal/evidence/metrics.go @@ -0,0 +1,47 @@ +package evidence + +import ( + "github.com/go-kit/kit/metrics" + "github.com/go-kit/kit/metrics/discard" + "github.com/go-kit/kit/metrics/prometheus" + stdprometheus "github.com/prometheus/client_golang/prometheus" +) + +const ( + // MetricsSubsystem is a subsystem shared by all metrics exposed by this + // package. + MetricsSubsystem = "evidence_pool" +) + +// Metrics contains metrics exposed by this package. +// see MetricsProvider for descriptions. +type Metrics struct { + // Number of evidence in the evidence pool + NumEvidence metrics.Gauge +} + +// PrometheusMetrics returns Metrics build using Prometheus client library. +// Optionally, labels can be provided along with their values ("foo", +// "fooValue"). 
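Before the implementation, a usage sketch: the variadic pairs become Prometheus label names bound to fixed values. The namespace and the `chain_id` label pair here are arbitrary examples.

```go
package main

import "github.com/tendermint/tendermint/internal/evidence"

func main() {
	// Exports tendermint_evidence_pool_num_evidence{chain_id="test-chain"}
	// once the default Prometheus registry is scraped.
	m := evidence.PrometheusMetrics("tendermint", "chain_id", "test-chain")
	m.NumEvidence.Set(3)

	// Callers that do not export metrics pass the no-op variant instead.
	_ = evidence.NopMetrics()
}
```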
+func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { + labels := []string{} + for i := 0; i < len(labelsAndValues); i += 2 { + labels = append(labels, labelsAndValues[i]) + } + return &Metrics{ + + NumEvidence: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "num_evidence", + Help: "Number of pending evidence in evidence pool.", + }, labels).With(labelsAndValues...), + } +} + +// NopMetrics returns no-op Metrics. +func NopMetrics() *Metrics { + return &Metrics{ + NumEvidence: discard.NewGauge(), + } +} diff --git a/internal/evidence/mocks/block_store.go b/internal/evidence/mocks/block_store.go index ef3346b2a7..e45b281b90 100644 --- a/internal/evidence/mocks/block_store.go +++ b/internal/evidence/mocks/block_store.go @@ -3,7 +3,10 @@ package mocks import ( + testing "testing" + mock "github.com/stretchr/testify/mock" + types "github.com/tendermint/tendermint/types" ) @@ -57,3 +60,13 @@ func (_m *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta { return r0 } + +// NewBlockStore creates a new instance of BlockStore. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. +func NewBlockStore(t testing.TB) *BlockStore { + mock := &BlockStore{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/internal/evidence/pool.go b/internal/evidence/pool.go index 936c025d71..53d2e54ba1 100644 --- a/internal/evidence/pool.go +++ b/internal/evidence/pool.go @@ -2,6 +2,7 @@ package evidence import ( "bytes" + "context" "errors" "fmt" "sync" @@ -13,6 +14,7 @@ import ( "github.com/google/orderedcode" dbm "github.com/tendermint/tm-db" + "github.com/tendermint/tendermint/internal/eventbus" clist "github.com/tendermint/tendermint/internal/libs/clist" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/libs/log" @@ -34,14 +36,14 @@ type Pool struct { evidenceList *clist.CList // concurrent linked-list of evidence evidenceSize uint32 // amount of pending evidence - // needed to load validators to verify evidence - stateDB sm.Store // needed to load headers and commits to verify evidence blockStore BlockStore + stateDB sm.Store mtx sync.Mutex // latest state - state sm.State + state sm.State + isStarted bool // evidence from consensus is buffered to this slice, awaiting until the next height // before being flushed to the pool. This prevents broadcasting and proposing of // evidence before the height with which the evidence happened is finished. @@ -49,41 +51,28 @@ type Pool struct { pruningHeight int64 pruningTime time.Time + + // Eventbus to emit events when evidence is validated + // Not part of the constructor, use SetEventBus to set it + // The eventBus must be started in order for event publishing not to block + eventBus *eventbus.EventBus + + Metrics *Metrics } // NewPool creates an evidence pool. If using an existing evidence store, // it will add all pending evidence to the concurrent list. 
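The constructor that follows changes shape in this diff; as a preview, here is a sketch (with illustrative names, package name, and metrics namespace) of the new construct-then-start sequence, mirroring the test helpers later in this patch:

```go
package evidencewiring // illustrative

import (
	dbm "github.com/tendermint/tm-db"

	"github.com/tendermint/tendermint/internal/eventbus"
	"github.com/tendermint/tendermint/internal/evidence"
	sm "github.com/tendermint/tendermint/internal/state"
	"github.com/tendermint/tendermint/libs/log"
)

// newEvidencePool sketches the new wiring: construct, then Start with a
// loaded state.
func newEvidencePool(
	logger log.Logger,
	evidenceDB dbm.DB,
	stateStore sm.Store,
	blockStore evidence.BlockStore,
	eventBus *eventbus.EventBus,
) (*evidence.Pool, error) {
	// NewPool can no longer fail: it does not touch the state store.
	pool := evidence.NewPool(logger, evidenceDB, stateStore, blockStore,
		evidence.PrometheusMetrics("tendermint"), eventBus)

	// Pending evidence is recovered in Start, against an explicit state,
	// instead of implicitly inside the constructor.
	state, err := stateStore.Load()
	if err != nil {
		return nil, err
	}
	if err := pool.Start(state); err != nil {
		return nil, err
	}
	return pool, nil
}
```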
-func NewPool(logger log.Logger, evidenceDB dbm.DB, stateDB sm.Store, blockStore BlockStore) (*Pool, error) { - state, err := stateDB.Load() - if err != nil { - return nil, fmt.Errorf("failed to load state: %w", err) - } - - pool := &Pool{ - stateDB: stateDB, +func NewPool(logger log.Logger, evidenceDB dbm.DB, stateStore sm.Store, blockStore BlockStore, metrics *Metrics, eventBus *eventbus.EventBus) *Pool { + return &Pool{ blockStore: blockStore, - state: state, + stateDB: stateStore, logger: logger, evidenceStore: evidenceDB, evidenceList: clist.New(), consensusBuffer: make([]duplicateVoteSet, 0), + Metrics: metrics, + eventBus: eventBus, } - - // If pending evidence already in db, in event of prior failure, then check - // for expiration, update the size and load it back to the evidenceList. - pool.pruningHeight, pool.pruningTime = pool.removeExpiredPendingEvidence() - evList, _, err := pool.listEvidence(prefixPending, -1) - if err != nil { - return nil, err - } - - atomic.StoreUint32(&pool.evidenceSize, uint32(len(evList))) - - for _, ev := range evList { - pool.evidenceList.PushBack(ev) - } - - return pool, nil } // PendingEvidence is used primarily as part of block proposal and returns up to @@ -108,7 +97,7 @@ func (evpool *Pool) PendingEvidence(maxBytes int64) ([]types.Evidence, int64) { // 2. Update the pool's state which contains evidence params relating to expiry. // 3. Moves pending evidence that has now been committed into the committed pool. // 4. Removes any expired evidence based on both height and time. -func (evpool *Pool) Update(state sm.State, ev types.EvidenceList) { +func (evpool *Pool) Update(ctx context.Context, state sm.State, ev types.EvidenceList) { // sanity check if state.LastBlockHeight <= evpool.state.LastBlockHeight { panic(fmt.Sprintf( @@ -126,7 +115,7 @@ func (evpool *Pool) Update(state sm.State, ev types.EvidenceList) { // flush conflicting vote pairs from the buffer, producing DuplicateVoteEvidence and // adding it to the pool - evpool.processConsensusBuffer(state) + evpool.processConsensusBuffer(ctx, state) // update state evpool.updateState(state) @@ -142,7 +131,7 @@ func (evpool *Pool) Update(state sm.State, ev types.EvidenceList) { } // AddEvidence checks the evidence is valid and adds it to the pool. -func (evpool *Pool) AddEvidence(ev types.Evidence) error { +func (evpool *Pool) AddEvidence(ctx context.Context, ev types.Evidence) error { evpool.logger.Debug("attempting to add evidence", "evidence", ev) // We have already verified this piece of evidence - no need to do it again @@ -160,12 +149,12 @@ func (evpool *Pool) AddEvidence(ev types.Evidence) error { } // 1) Verify against state. - if err := evpool.verify(ev); err != nil { + if err := evpool.verify(ctx, ev); err != nil { return err } // 2) Save to store. - if err := evpool.addPendingEvidence(ev); err != nil { + if err := evpool.addPendingEvidence(ctx, ev); err != nil { return fmt.Errorf("failed to add evidence to pending list: %w", err) } @@ -198,7 +187,7 @@ func (evpool *Pool) ReportConflictingVotes(voteA, voteB *types.Vote) { // If it has already verified the evidence then it jumps to the next one. It ensures that no // evidence has already been committed or is being proposed twice. It also adds any // evidence that it doesn't currently have so that it can quickly form ABCI Evidence later. 
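Once added, each accepted piece of evidence is also published on the event bus (see `addPendingEvidence` further down). A consumer sketch, mirroring the subscription pattern used by the tests in this patch; the package name and client ID are illustrative:

```go
package watcher // illustrative

import (
	"context"

	"github.com/tendermint/tendermint/internal/eventbus"
	tmpubsub "github.com/tendermint/tendermint/internal/pubsub"
	tmquery "github.com/tendermint/tendermint/internal/pubsub/query"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/types"
)

func watchValidatedEvidence(ctx context.Context, logger log.Logger, eventBus *eventbus.EventBus) error {
	evSub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{
		ClientID: "evidence-watcher", // illustrative client ID
		Query:    tmquery.MustCompile(`tm.event='EvidenceValidated'`),
	})
	if err != nil {
		return err
	}
	for {
		msg, err := evSub.Next(ctx)
		if err != nil {
			return err // context canceled or subscription closed
		}
		edt := msg.Data().(types.EventDataEvidenceValidated)
		logger.Info("evidence validated", "height", edt.Height)
	}
}
```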
-func (evpool *Pool) CheckEvidence(evList types.EvidenceList) error { +func (evpool *Pool) CheckEvidence(ctx context.Context, evList types.EvidenceList) error { hashes := make([][]byte, len(evList)) for idx, ev := range evList { @@ -210,12 +199,12 @@ func (evpool *Pool) CheckEvidence(evList types.EvidenceList) error { return &types.ErrInvalidEvidence{Evidence: ev, Reason: errors.New("evidence was already committed")} } - err := evpool.verify(ev) + err := evpool.verify(ctx, ev) if err != nil { return err } - if err := evpool.addPendingEvidence(ev); err != nil { + if err := evpool.addPendingEvidence(ctx, ev); err != nil { // Something went wrong with adding the evidence but we already know it is valid // hence we log an error and continue evpool.logger.Error("failed to add evidence to pending list", "err", err, "evidence", ev) @@ -259,6 +248,31 @@ func (evpool *Pool) State() sm.State { return evpool.state } +func (evpool *Pool) Start(state sm.State) error { + if evpool.isStarted { + return errors.New("pool is already running") + } + + evpool.state = state + + // If pending evidence already in db, in event of prior failure, then check + // for expiration, update the size and load it back to the evidenceList. + evpool.pruningHeight, evpool.pruningTime = evpool.removeExpiredPendingEvidence() + evList, _, err := evpool.listEvidence(prefixPending, -1) + if err != nil { + return err + } + + atomic.StoreUint32(&evpool.evidenceSize, uint32(len(evList))) + evpool.Metrics.NumEvidence.Set(float64(evpool.evidenceSize)) + + for _, ev := range evList { + evpool.evidenceList.PushBack(ev) + } + + return nil +} + func (evpool *Pool) Close() error { return evpool.evidenceStore.Close() } @@ -295,7 +309,7 @@ func (evpool *Pool) isPending(evidence types.Evidence) bool { return ok } -func (evpool *Pool) addPendingEvidence(ev types.Evidence) error { +func (evpool *Pool) addPendingEvidence(ctx context.Context, ev types.Evidence) error { evpb, err := types.EvidenceToProto(ev) if err != nil { return fmt.Errorf("failed to convert to proto: %w", err) @@ -314,7 +328,18 @@ func (evpool *Pool) addPendingEvidence(ev types.Evidence) error { } atomic.AddUint32(&evpool.evidenceSize, 1) - return nil + evpool.Metrics.NumEvidence.Set(float64(evpool.evidenceSize)) + + // This should normally never be true + if evpool.eventBus == nil { + evpool.logger.Debug("event bus is not configured") + return nil + + } + return evpool.eventBus.PublishEventEvidenceValidated(types.EventDataEvidenceValidated{ + Evidence: ev, + Height: ev.Height(), + }) } // markEvidenceAsCommitted processes all the evidence in the block, marking it as @@ -366,6 +391,7 @@ func (evpool *Pool) markEvidenceAsCommitted(evidence types.EvidenceList, height // update the evidence size atomic.AddUint32(&evpool.evidenceSize, ^uint32(len(blockEvidenceMap)-1)) + evpool.Metrics.NumEvidence.Set(float64(evpool.evidenceSize)) } // listEvidence retrieves lists evidence from oldest to newest within maxBytes. 
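A side note on the size bookkeeping above: `atomic.AddUint32(&x, ^uint32(n-1))` subtracts `n`, because `^v == -v-1` in two's-complement arithmetic, so `^(n-1) == -n` modulo 2^32. A standalone sketch of the identity:

```go
package main

import "fmt"

func main() {
	var size uint32 = 10
	n := 3 // number of evidence items removed

	// ^uint32(n-1) == -n (mod 2^32), so adding it subtracts n.
	size += ^uint32(n - 1)

	fmt.Println(size) // Output: 7
}
```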
@@ -380,7 +406,7 @@ func (evpool *Pool) listEvidence(prefixKey int64, maxBytes int64) ([]types.Evide iter, err := dbm.IteratePrefix(evpool.evidenceStore, prefixToBytes(prefixKey)) if err != nil { - return nil, totalSize, fmt.Errorf("database error: %v", err) + return nil, totalSize, fmt.Errorf("database error: %w", err) } defer iter.Close() @@ -419,6 +445,7 @@ func (evpool *Pool) listEvidence(prefixKey int64, maxBytes int64) ([]types.Evide } func (evpool *Pool) removeExpiredPendingEvidence() (int64, time.Time) { + batch := evpool.evidenceStore.NewBatch() defer batch.Close() @@ -443,7 +470,6 @@ func (evpool *Pool) removeExpiredPendingEvidence() (int64, time.Time) { // remove evidence from the clist evpool.removeEvidenceFromList(blockEvidenceMap) - // update the evidence size atomic.AddUint32(&evpool.evidenceSize, ^uint32(len(blockEvidenceMap)-1)) @@ -511,7 +537,7 @@ func (evpool *Pool) updateState(state sm.State) { // into DuplicateVoteEvidence. It sets the evidence timestamp to the block height // from the most recently committed block. // Evidence is then added to the pool so as to be ready to be broadcasted and proposed. -func (evpool *Pool) processConsensusBuffer(state sm.State) { +func (evpool *Pool) processConsensusBuffer(ctx context.Context, state sm.State) { evpool.mtx.Lock() defer evpool.mtx.Unlock() for _, voteSet := range evpool.consensusBuffer { @@ -576,7 +602,7 @@ func (evpool *Pool) processConsensusBuffer(state sm.State) { continue } - if err := evpool.addPendingEvidence(dve); err != nil { + if err := evpool.addPendingEvidence(ctx, dve); err != nil { evpool.logger.Error("failed to flush evidence from consensus buffer to pending list: %w", err) continue } diff --git a/internal/evidence/pool_test.go b/internal/evidence/pool_test.go index 9b7e99d43c..19bf3bec44 100644 --- a/internal/evidence/pool_test.go +++ b/internal/evidence/pool_test.go @@ -5,16 +5,18 @@ import ( "testing" "time" + "github.com/dashevo/dashd-go/btcjson" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" - "github.com/dashevo/dashd-go/btcjson" "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/evidence" "github.com/tendermint/tendermint/internal/evidence/mocks" + tmpubsub "github.com/tendermint/tendermint/internal/pubsub" + tmquery "github.com/tendermint/tendermint/internal/pubsub/query" sm "github.com/tendermint/tendermint/internal/state" smmocks "github.com/tendermint/tendermint/internal/state/mocks" "github.com/tendermint/tendermint/internal/store" @@ -30,6 +32,18 @@ var ( defaultEvidenceMaxBytes int64 = 1000 ) +func startPool(t *testing.T, pool *evidence.Pool, store sm.Store) { + t.Helper() + state, err := store.Load() + if err != nil { + t.Fatalf("cannot load state: %v", err) + } + if err := pool.Start(state); err != nil { + t.Fatalf("cannot start state pool: %v", err) + } + +} + func TestEvidencePoolBasic(t *testing.T) { var ( height = int64(1) @@ -38,26 +52,30 @@ func TestEvidencePoolBasic(t *testing.T) { blockStore = &mocks.BlockStore{} ) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() valSet, privVals := types.RandValidatorSet(1) - blockStore.On("LoadBlockMeta", mock.AnythingOfType("int64")).Return( &types.BlockMeta{Header: types.Header{Time: defaultEvidenceTime}}, ) stateStore.On("LoadValidators", mock.AnythingOfType("int64")).Return(valSet, nil) 
stateStore.On("Load").Return(createState(height+1, valSet), nil) - pool, err := evidence.NewPool(log.TestingLogger(), evidenceDB, stateStore, blockStore) - require.NoError(t, err) + logger := log.NewNopLogger() + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) + + pool := evidence.NewPool(logger, evidenceDB, stateStore, blockStore, evidence.NopMetrics(), eventBus) + startPool(t, pool, stateStore) // evidence not seen yet: evs, size := pool.PendingEvidence(defaultEvidenceMaxBytes) require.Equal(t, 0, len(evs)) require.Zero(t, size) - ev, err := types.NewMockDuplicateVoteEvidenceWithValidator(height, defaultEvidenceTime, privVals[0], evidenceChainID, + ev, err := types.NewMockDuplicateVoteEvidenceWithValidator(ctx, height, defaultEvidenceTime, privVals[0], evidenceChainID, valSet.QuorumType, valSet.QuorumHash) require.NoError(t, err) - // good evidence evAdded := make(chan struct{}) go func() { @@ -66,7 +84,8 @@ func TestEvidencePoolBasic(t *testing.T) { }() // evidence seen but not yet committed: - require.NoError(t, pool.AddEvidence(ev)) + err = pool.AddEvidence(ctx, ev) + require.NoError(t, err) select { case <-evAdded: @@ -83,18 +102,22 @@ func TestEvidencePoolBasic(t *testing.T) { require.Equal(t, evidenceBytes, size) // check that the size of the single evidence in bytes is correct // shouldn't be able to add evidence twice - require.NoError(t, pool.AddEvidence(ev)) + err = pool.AddEvidence(ctx, ev) + require.NoError(t, err) evs, _ = pool.PendingEvidence(defaultEvidenceMaxBytes) require.Equal(t, 1, len(evs)) } // Tests inbound evidence for the right time and height func TestAddExpiredEvidence(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var ( quorumHash = crypto.RandQuorumHash() val = types.NewMockPVForQuorum(quorumHash) height = int64(30) - stateStore = initializeValidatorState(t, val, height, btcjson.LLMQType_5_60, quorumHash) + stateStore = initializeValidatorState(ctx, t, val, height, btcjson.LLMQType_5_60, quorumHash) evidenceDB = dbm.NewMemDB() blockStore = &mocks.BlockStore{} expiredEvidenceTime = time.Date(2018, 1, 1, 0, 0, 0, 0, time.UTC) @@ -108,8 +131,12 @@ func TestAddExpiredEvidence(t *testing.T) { return &types.BlockMeta{Header: types.Header{Time: expiredEvidenceTime}} }) - pool, err := evidence.NewPool(log.TestingLogger(), evidenceDB, stateStore, blockStore) - require.NoError(t, err) + logger := log.NewNopLogger() + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) + + pool := evidence.NewPool(logger, evidenceDB, stateStore, blockStore, evidence.NopMetrics(), eventBus) + startPool(t, pool, stateStore) testCases := []struct { evHeight int64 @@ -129,10 +156,13 @@ func TestAddExpiredEvidence(t *testing.T) { tc := tc t.Run(tc.evDescription, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + vals := pool.State().Validators - ev, err := types.NewMockDuplicateVoteEvidenceWithValidator(tc.evHeight, tc.evTime, val, evidenceChainID, vals.QuorumType, vals.QuorumHash) + ev, err := types.NewMockDuplicateVoteEvidenceWithValidator(ctx, tc.evHeight, tc.evTime, val, evidenceChainID, vals.QuorumType, vals.QuorumHash) require.NoError(t, err) - err = pool.AddEvidence(ev) + err = pool.AddEvidence(ctx, ev) if tc.expErr { require.Error(t, err) } else { @@ -145,12 +175,15 @@ func TestAddExpiredEvidence(t *testing.T) { func TestReportConflictingVotes(t *testing.T) { var height int64 = 10 - pool, pv := defaultTestPool(t, height) + ctx, 
cancel := context.WithCancel(context.Background()) + defer cancel() + + pool, pv, _ := defaultTestPool(ctx, t, height) quorumHash := pool.State().Validators.QuorumHash - val := pv.ExtractIntoValidator(context.Background(), quorumHash) - ev, err := types.NewMockDuplicateVoteEvidenceWithValidator(height+1, defaultEvidenceTime, pv, evidenceChainID, + val := pv.ExtractIntoValidator(ctx, quorumHash) + ev, err := types.NewMockDuplicateVoteEvidenceWithValidator(ctx, height+1, defaultEvidenceTime, pv, evidenceChainID, btcjson.LLMQType_5_60, quorumHash) require.NoError(t, err) @@ -173,7 +206,7 @@ func TestReportConflictingVotes(t *testing.T) { state.LastBlockTime = ev.Time() state.LastValidators = types.NewValidatorSet([]*types.Validator{val}, val.PubKey, btcjson.LLMQType_5_60, quorumHash, true) - pool.Update(state, []types.Evidence{}) + pool.Update(ctx, state, []types.Evidence{}) // should be able to retrieve evidence from pool evList, _ = pool.PendingEvidence(defaultEvidenceMaxBytes) @@ -185,11 +218,15 @@ func TestReportConflictingVotes(t *testing.T) { func TestEvidencePoolUpdate(t *testing.T) { height := int64(21) - pool, val := defaultTestPool(t, height) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + pool, val, _ := defaultTestPool(ctx, t, height) + state := pool.State() // create two lots of old evidence that we expect to be pruned when we update - prunedEv, err := types.NewMockDuplicateVoteEvidenceWithValidator( + prunedEv, err := types.NewMockDuplicateVoteEvidenceWithValidator(ctx, 1, defaultEvidenceTime.Add(1*time.Minute), val, @@ -199,7 +236,7 @@ func TestEvidencePoolUpdate(t *testing.T) { ) require.NoError(t, err) - notPrunedEv, err := types.NewMockDuplicateVoteEvidenceWithValidator( + notPrunedEv, err := types.NewMockDuplicateVoteEvidenceWithValidator(ctx, 2, defaultEvidenceTime.Add(2*time.Minute), val, @@ -209,10 +246,11 @@ func TestEvidencePoolUpdate(t *testing.T) { ) require.NoError(t, err) - require.NoError(t, pool.AddEvidence(prunedEv)) - require.NoError(t, pool.AddEvidence(notPrunedEv)) + require.NoError(t, pool.AddEvidence(ctx, prunedEv)) + require.NoError(t, pool.AddEvidence(ctx, notPrunedEv)) ev, err := types.NewMockDuplicateVoteEvidenceWithValidator( + ctx, height, defaultEvidenceTime.Add(21*time.Minute), val, @@ -235,21 +273,21 @@ func TestEvidencePoolUpdate(t *testing.T) { require.Equal(t, uint32(2), pool.Size()) - require.NoError(t, pool.CheckEvidence(types.EvidenceList{ev})) + require.NoError(t, pool.CheckEvidence(ctx, types.EvidenceList{ev})) evList, _ = pool.PendingEvidence(3 * defaultEvidenceMaxBytes) require.Equal(t, 3, len(evList)) require.Equal(t, uint32(3), pool.Size()) - pool.Update(state, block.Evidence.Evidence) + pool.Update(ctx, state, block.Evidence) // a) Update marks evidence as committed so pending evidence should be empty evList, _ = pool.PendingEvidence(defaultEvidenceMaxBytes) require.Equal(t, []types.Evidence{notPrunedEv}, evList) // b) If we try to check this evidence again it should fail because it has already been committed - err = pool.CheckEvidence(types.EvidenceList{ev}) + err = pool.CheckEvidence(ctx, types.EvidenceList{ev}) if assert.Error(t, err) { assert.Equal(t, "evidence was already committed", err.(*types.ErrInvalidEvidence).Reason.Error()) } @@ -258,9 +296,13 @@ func TestEvidencePoolUpdate(t *testing.T) { func TestVerifyPendingEvidencePasses(t *testing.T) { var height int64 = 1 - pool, val := defaultTestPool(t, height) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + pool, val, _ 
:= defaultTestPool(ctx, t, height) vals := pool.State().Validators ev, err := types.NewMockDuplicateVoteEvidenceWithValidator( + ctx, height, defaultEvidenceTime.Add(1*time.Minute), val, @@ -269,16 +311,20 @@ func TestVerifyPendingEvidencePasses(t *testing.T) { vals.QuorumHash, ) require.NoError(t, err) - - require.NoError(t, pool.AddEvidence(ev)) - require.NoError(t, pool.CheckEvidence(types.EvidenceList{ev})) + require.NoError(t, pool.AddEvidence(ctx, ev)) + require.NoError(t, pool.CheckEvidence(ctx, types.EvidenceList{ev})) } func TestVerifyDuplicatedEvidenceFails(t *testing.T) { var height int64 = 1 - pool, val := defaultTestPool(t, height) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + pool, val, _ := defaultTestPool(ctx, t, height) vals := pool.State().Validators ev, err := types.NewMockDuplicateVoteEvidenceWithValidator( + ctx, height, defaultEvidenceTime.Add(1*time.Minute), val, @@ -288,34 +334,95 @@ func TestVerifyDuplicatedEvidenceFails(t *testing.T) { ) require.NoError(t, err) - err = pool.CheckEvidence(types.EvidenceList{ev, ev}) + err = pool.CheckEvidence(ctx, types.EvidenceList{ev, ev}) if assert.Error(t, err) { assert.Equal(t, "duplicate evidence", err.(*types.ErrInvalidEvidence).Reason.Error()) } } +// Check that we generate events when evidence is added into the evidence pool +func TestEventOnEvidenceValidated(t *testing.T) { + const height = 1 + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + pool, val, eventBus := defaultTestPool(ctx, t, height) + + vals := pool.State().Validators + + ev, err := types.NewMockDuplicateVoteEvidenceWithValidator( + ctx, + height, + defaultEvidenceTime.Add(1*time.Minute), + val, + evidenceChainID, + vals.QuorumType, + vals.QuorumHash, + ) + require.NoError(t, err) + + const query = `tm.event='EvidenceValidated'` + evSub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: "test", + Query: tmquery.MustCompile(query), + }) + require.NoError(t, err) + + done := make(chan struct{}) + go func() { + defer close(done) + msg, err := evSub.Next(ctx) + if ctx.Err() != nil { + return + } + assert.NoError(t, err) + + edt := msg.Data().(types.EventDataEvidenceValidated) + assert.Equal(t, ev, edt.Evidence) + }() + err = pool.AddEvidence(ctx, ev) + require.NoError(t, err) + + select { + case <-done: + case <-time.After(1 * time.Second): + t.Fatal("did not receive an EvidenceValidated event after 1 sec.") + } + +} + // Tests that restarting the evidence pool after a potential failure will recover the // pending evidence and continue to gossip it func TestRecoverPendingEvidence(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + height := int64(10) quorumHash := crypto.RandQuorumHash() val := types.NewMockPVForQuorum(quorumHash) proTxHash := val.ProTxHash evidenceDB := dbm.NewMemDB() - stateStore := initializeValidatorState(t, val, height, btcjson.LLMQType_5_60, quorumHash) + stateStore := initializeValidatorState(ctx, t, val, height, btcjson.LLMQType_5_60, quorumHash) state, err := stateStore.Load() require.NoError(t, err) - blockStore := initializeBlockStore(dbm.NewMemDB(), state, proTxHash) + blockStore, err := initializeBlockStore(dbm.NewMemDB(), state, proTxHash) + require.NoError(t, err) + + logger := log.NewNopLogger() + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) // create previous pool and populate it - pool, err := evidence.NewPool(log.TestingLogger(), evidenceDB, stateStore, blockStore) - 
require.NoError(t, err) + pool := evidence.NewPool(logger, evidenceDB, stateStore, blockStore, evidence.NopMetrics(), eventBus) + startPool(t, pool, stateStore) vals := pool.State().Validators goodEvidence, err := types.NewMockDuplicateVoteEvidenceWithValidator( + ctx, height, defaultEvidenceTime.Add(10*time.Minute), val, @@ -325,6 +432,7 @@ func TestRecoverPendingEvidence(t *testing.T) { ) require.NoError(t, err) expiredEvidence, err := types.NewMockDuplicateVoteEvidenceWithValidator( + ctx, int64(1), defaultEvidenceTime.Add(1*time.Minute), val, @@ -334,8 +442,10 @@ func TestRecoverPendingEvidence(t *testing.T) { ) require.NoError(t, err) - require.NoError(t, pool.AddEvidence(goodEvidence)) - require.NoError(t, pool.AddEvidence(expiredEvidence)) + err = pool.AddEvidence(ctx, goodEvidence) + require.NoError(t, err) + err = pool.AddEvidence(ctx, expiredEvidence) + require.NoError(t, err) // now recover from the previous pool at a different time newStateStore := &smmocks.Store{} @@ -355,9 +465,8 @@ func TestRecoverPendingEvidence(t *testing.T) { }, }, nil) - newPool, err := evidence.NewPool(log.TestingLogger(), evidenceDB, newStateStore, blockStore) - require.NoError(t, err) - + newPool := evidence.NewPool(logger, evidenceDB, newStateStore, blockStore, evidence.NopMetrics(), nil) + startPool(t, newPool, newStateStore) evList, _ := newPool.PendingEvidence(defaultEvidenceMaxBytes) require.Equal(t, 1, len(evList)) @@ -400,15 +509,16 @@ func initializeStateFromValidatorSet(t *testing.T, valSet *types.ValidatorSet, h } func initializeValidatorState( + ctx context.Context, t *testing.T, privVal types.PrivValidator, height int64, quorumType btcjson.LLMQType, quorumHash crypto.QuorumHash, ) sm.Store { - pubKey, err := privVal.GetPubKey(context.Background(), quorumHash) + pubKey, err := privVal.GetPubKey(ctx, quorumHash) require.NoError(t, err) - proTxHash, err := privVal.GetProTxHash(context.Background()) + proTxHash, err := privVal.GetProTxHash(ctx) require.NoError(t, err) validator := &types.Validator{VotingPower: types.DefaultDashVotingPower, PubKey: pubKey, ProTxHash: proTxHash} @@ -426,22 +536,26 @@ func initializeValidatorState( // initializeBlockStore creates a block storage and populates it w/ a dummy // block at +height+. 
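The helper below is reworked because, per this diff, `state.MakeBlock` no longer returns an error while `block.MakePartSet` now does. A sketch of the adjusted call pattern; the package name, argument values, and the use of `types.BlockPartSizeBytes` are illustrative (the test itself passes a part size of 1):

```go
package storehelpers // illustrative

import (
	sm "github.com/tendermint/tendermint/internal/state"
	"github.com/tendermint/tendermint/internal/store"
	"github.com/tendermint/tendermint/types"
)

// saveDummyBlock builds and stores a single block, mirroring the new
// signatures used in this patch.
func saveDummyBlock(
	blockStore *store.BlockStore,
	state sm.State,
	height int64,
	lastCommit, seenCommit *types.Commit,
) error {
	// MakeBlock no longer returns an error.
	block := state.MakeBlock(height, nil, []types.Tx{}, lastCommit, nil,
		state.Validators.GetProposer().ProTxHash, 0)

	// MakePartSet now does.
	partSet, err := block.MakePartSet(types.BlockPartSizeBytes)
	if err != nil {
		return err
	}
	blockStore.SaveBlock(block, partSet, seenCommit)
	return nil
}
```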
-func initializeBlockStore(db dbm.DB, state sm.State, valProTxHash []byte) *store.BlockStore { +func initializeBlockStore(db dbm.DB, state sm.State, valProTxHash []byte) (*store.BlockStore, error) { blockStore := store.NewBlockStore(db) for i := int64(1); i <= state.LastBlockHeight; i++ { lastCommit := makeCommit(i-1, state.Validators.QuorumHash, valProTxHash) - block, _ := state.MakeBlock(i, nil, []types.Tx{}, lastCommit, nil, state.Validators.GetProposer().ProTxHash, 0) + block := state.MakeBlock(i, nil, []types.Tx{}, lastCommit, nil, state.Validators.GetProposer().ProTxHash, 0) + block.Header.Time = defaultEvidenceTime.Add(time.Duration(i) * time.Minute) block.Header.Version = version.Consensus{Block: version.BlockProtocol, App: 1} const parts = 1 - partSet := block.MakePartSet(parts) + partSet, err := block.MakePartSet(parts) + if err != nil { + return nil, err + } seenCommit := makeCommit(i, state.Validators.QuorumHash, valProTxHash) blockStore.SaveBlock(block, partSet, seenCommit) } - return blockStore + return blockStore, nil } func makeCommit(height int64, quorumHash []byte, valProTxHash []byte) *types.Commit { @@ -456,18 +570,26 @@ func makeCommit(height int64, quorumHash []byte, valProTxHash []byte) *types.Com ) } -func defaultTestPool(t *testing.T, height int64) (*evidence.Pool, *types.MockPV) { +func defaultTestPool(ctx context.Context, t *testing.T, height int64) (*evidence.Pool, *types.MockPV, *eventbus.EventBus) { + t.Helper() quorumHash := crypto.RandQuorumHash() val := types.NewMockPVForQuorum(quorumHash) + evidenceDB := dbm.NewMemDB() - stateStore := initializeValidatorState(t, val, height, btcjson.LLMQType_5_60, quorumHash) - state, _ := stateStore.Load() - blockStore := initializeBlockStore(dbm.NewMemDB(), state, val.ProTxHash) + stateStore := initializeValidatorState(ctx, t, val, height, btcjson.LLMQType_5_60, quorumHash) + state, err := stateStore.Load() + require.NoError(t, err) + blockStore, err := initializeBlockStore(dbm.NewMemDB(), state, val.ProTxHash) + require.NoError(t, err) + + logger := log.NewNopLogger() - pool, err := evidence.NewPool(log.TestingLogger(), evidenceDB, stateStore, blockStore) - require.NoError(t, err, "test evidence pool could not be created") + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) - return pool, val + pool := evidence.NewPool(logger, evidenceDB, stateStore, blockStore, evidence.NopMetrics(), eventBus) + startPool(t, pool, stateStore) + return pool, val, eventBus } func createState(height int64, valSet *types.ValidatorSet) sm.State { diff --git a/internal/evidence/reactor.go b/internal/evidence/reactor.go index 120832192f..5bbaff5366 100644 --- a/internal/evidence/reactor.go +++ b/internal/evidence/reactor.go @@ -1,13 +1,13 @@ package evidence import ( + "context" "fmt" "runtime/debug" "sync" "time" clist "github.com/tendermint/tendermint/internal/libs/clist" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" @@ -15,29 +15,7 @@ import ( "github.com/tendermint/tendermint/types" ) -var ( - _ service.Service = (*Reactor)(nil) - - // ChannelShims contains a map of ChannelDescriptorShim objects, where each - // object wraps a reference to a legacy p2p ChannelDescriptor and the corresponding - // p2p proto.Message the new p2p Channel is responsible for handling. - // - // - // TODO: Remove once p2p refactor is complete. 
- // ref: https://github.com/tendermint/tendermint/issues/5670 - ChannelShims = map[p2p.ChannelID]*p2p.ChannelDescriptorShim{ - EvidenceChannel: { - MsgType: new(tmproto.EvidenceList), - Descriptor: &p2p.ChannelDescriptor{ - ID: byte(EvidenceChannel), - Priority: 6, - RecvMessageCapacity: maxMsgSize, - RecvBufferCapacity: 32, - MaxSendBytes: 400, - }, - }, - } -) +var _ service.Service = (*Reactor)(nil) const ( EvidenceChannel = p2p.ChannelID(0x38) @@ -51,19 +29,31 @@ const ( broadcastEvidenceIntervalS = 10 ) +// GetChannelDescriptor produces an instance of a descriptor for this +// package's required channels. +func GetChannelDescriptor() *p2p.ChannelDescriptor { + return &p2p.ChannelDescriptor{ + ID: EvidenceChannel, + MessageType: new(tmproto.Evidence), + Priority: 6, + RecvMessageCapacity: maxMsgSize, + RecvBufferCapacity: 32, + Name: "evidence", + } +} + // Reactor handles evpool evidence broadcasting amongst peers. type Reactor struct { service.BaseService + logger log.Logger - evpool *Pool - evidenceCh *p2p.Channel - peerUpdates *p2p.PeerUpdates - closeCh chan struct{} + evpool *Pool + chCreator p2p.ChannelCreator + peerEvents p2p.PeerEventSubscriber - peerWG sync.WaitGroup + mtx sync.Mutex - mtx tmsync.Mutex - peerRoutines map[types.NodeID]*tmsync.Closer + peerRoutines map[types.NodeID]context.CancelFunc } // NewReactor returns a reference to a new evidence reactor, which implements the @@ -71,19 +61,20 @@ type Reactor struct { // envelopes with EvidenceList messages. func NewReactor( logger log.Logger, - evidenceCh *p2p.Channel, - peerUpdates *p2p.PeerUpdates, + chCreator p2p.ChannelCreator, + peerEvents p2p.PeerEventSubscriber, evpool *Pool, ) *Reactor { r := &Reactor{ + logger: logger, evpool: evpool, - evidenceCh: evidenceCh, - peerUpdates: peerUpdates, - closeCh: make(chan struct{}), - peerRoutines: make(map[types.NodeID]*tmsync.Closer), + chCreator: chCreator, + peerEvents: peerEvents, + peerRoutines: make(map[types.NodeID]context.CancelFunc), } r.BaseService = *service.NewBaseService(logger, "Evidence", r) + return r } @@ -91,68 +82,45 @@ func NewReactor( // envelopes on each. In addition, it also listens for peer updates and handles // messages on that p2p channel accordingly. The caller must be sure to execute // OnStop to ensure the outbound p2p Channels are closed. No error is returned. -func (r *Reactor) OnStart() error { - go r.processEvidenceCh() - go r.processPeerUpdates() +func (r *Reactor) OnStart(ctx context.Context) error { + ch, err := r.chCreator(ctx, GetChannelDescriptor()) + if err != nil { + return err + } + + go r.processEvidenceCh(ctx, ch) + go r.processPeerUpdates(ctx, r.peerEvents(ctx), ch) return nil } // OnStop stops the reactor by signaling to all spawned goroutines to exit and // blocking until they all exit. -func (r *Reactor) OnStop() { - r.mtx.Lock() - for _, c := range r.peerRoutines { - c.Close() - } - r.mtx.Unlock() - - // Wait for all spawned peer evidence broadcasting goroutines to gracefully - // exit. - r.peerWG.Wait() - - // Close closeCh to signal to all spawned goroutines to gracefully exit. All - // p2p Channels should execute Close(). - close(r.closeCh) - - // Wait for all p2p Channels to be closed before returning. This ensures we - // can easily reason about synchronization of all p2p Channels and ensure no - // panics will occur. 
- <-r.evidenceCh.Done() - <-r.peerUpdates.Done() - - // Close the evidence db - r.evpool.Close() -} +func (r *Reactor) OnStop() { r.evpool.Close() } // handleEvidenceMessage handles envelopes sent from peers on the EvidenceChannel. // It returns an error only if the Envelope.Message is unknown for this channel // or if the given evidence is invalid. This should never be called outside of // handleMessage. -func (r *Reactor) handleEvidenceMessage(envelope p2p.Envelope) error { - logger := r.Logger.With("peer", envelope.From) +func (r *Reactor) handleEvidenceMessage(ctx context.Context, envelope *p2p.Envelope) error { + logger := r.logger.With("peer", envelope.From) switch msg := envelope.Message.(type) { - case *tmproto.EvidenceList: - // TODO: Refactor the Evidence type to not contain a list since we only ever - // send and receive one piece of evidence at a time. Or potentially consider - // batching evidence. - // - // see: https://github.com/tendermint/tendermint/issues/4729 - for i := 0; i < len(msg.Evidence); i++ { - ev, err := types.EvidenceFromProto(&msg.Evidence[i]) - if err != nil { - logger.Error("failed to convert evidence", "err", err) - continue + case *tmproto.Evidence: + // Process the evidence received from a peer + // Evidence is sent and received one by one + ev, err := types.EvidenceFromProto(msg) + if err != nil { + logger.Error("failed to convert evidence", "err", err) + return err + } + if err := r.evpool.AddEvidence(ctx, ev); err != nil { + // If we're given invalid evidence by the peer, notify the router that + // we should remove this peer by returning an error. + if _, ok := err.(*types.ErrInvalidEvidence); ok { + return err } - if err := r.evpool.AddEvidence(ev); err != nil { - // If we're given invalid evidence by the peer, notify the router that - // we should remove this peer by returning an error. - if _, ok := err.(*types.ErrInvalidEvidence); ok { - return err - } - } } default: @@ -165,11 +133,11 @@ func (r *Reactor) handleEvidenceMessage(envelope p2p.Envelope) error { // handleMessage handles an Envelope sent from a peer on a specific p2p Channel. // It will handle errors and any possible panics gracefully. A caller can handle // any error returned by sending a PeerError on the respective channel. -func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) { +func (r *Reactor) handleMessage(ctx context.Context, envelope *p2p.Envelope) (err error) { defer func() { if e := recover(); e != nil { err = fmt.Errorf("panic in processing message: %v", e) - r.Logger.Error( + r.logger.Error( "recovering from processing message panic", "err", err, "stack", string(debug.Stack()), @@ -177,38 +145,32 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err } }() - // r.Logger.Debug("received message", "msg", envelope.Message, "peer", envelope.From) + // r.logger.Debug("received message", "message", envelope.Message, "peer", envelope.From) - switch chID { + switch envelope.ChannelID { case EvidenceChannel: - err = r.handleEvidenceMessage(envelope) - + err = r.handleEvidenceMessage(ctx, envelope) default: - err = fmt.Errorf("unknown channel ID (%d) for envelope (%v)", chID, envelope) + err = fmt.Errorf("unknown channel ID (%d) for envelope (%v)", envelope.ChannelID, envelope) } - return err + return } // processEvidenceCh implements a blocking event loop where we listen for p2p // Envelope messages from the evidenceCh. 
-func (r *Reactor) processEvidenceCh() { - defer r.evidenceCh.Close() - - for { - select { - case envelope := <-r.evidenceCh.In: - if err := r.handleMessage(r.evidenceCh.ID, envelope); err != nil { - r.Logger.Error("failed to process message", "ch_id", r.evidenceCh.ID, "envelope", envelope, "err", err) - r.evidenceCh.Error <- p2p.PeerError{ - NodeID: envelope.From, - Err: err, - } +func (r *Reactor) processEvidenceCh(ctx context.Context, evidenceCh *p2p.Channel) { + iter := evidenceCh.Receive(ctx) + for iter.Next(ctx) { + envelope := iter.Envelope() + if err := r.handleMessage(ctx, envelope); err != nil { + r.logger.Error("failed to process message", "ch_id", envelope.ChannelID, "envelope", envelope, "err", err) + if serr := evidenceCh.SendError(ctx, p2p.PeerError{ + NodeID: envelope.From, + Err: err, + }); serr != nil { + return } - - case <-r.closeCh: - r.Logger.Debug("stopped listening on evidence channel; closing...") - return } } } @@ -224,8 +186,8 @@ func (r *Reactor) processEvidenceCh() { // connects/disconnects frequently from the broadcasting peer(s). // // REF: https://github.com/tendermint/tendermint/issues/4727 -func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { - r.Logger.Debug("received peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status) +func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpdate, evidenceCh *p2p.Channel) { + r.logger.Debug("received peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status) r.mtx.Lock() defer r.mtx.Unlock() @@ -234,8 +196,7 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { case p2p.PeerStatusUp: // Do not allow starting new evidence broadcast loops after reactor shutdown // has been initiated. This can happen after we've manually closed all - // peer broadcast loops and closed r.closeCh, but the router still sends - // in-flight peer updates. + // peer broadcast loops, but the router still sends in-flight peer updates. if !r.IsRunning() { return } @@ -246,11 +207,9 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { // safely, and finally start the goroutine to broadcast evidence to that peer. _, ok := r.peerRoutines[peerUpdate.NodeID] if !ok { - closer := tmsync.NewCloser() - - r.peerRoutines[peerUpdate.NodeID] = closer - r.peerWG.Add(1) - go r.broadcastEvidenceLoop(peerUpdate.NodeID, closer) + pctx, pcancel := context.WithCancel(ctx) + r.peerRoutines[peerUpdate.NodeID] = pcancel + go r.broadcastEvidenceLoop(pctx, peerUpdate.NodeID, evidenceCh) } case p2p.PeerStatusDown: @@ -260,7 +219,7 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { // from the map of peer evidence broadcasting goroutines. closer, ok := r.peerRoutines[peerUpdate.NodeID] if ok { - closer.Close() + closer() } } } @@ -268,16 +227,12 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { // processPeerUpdates initiates a blocking process where we listen for and handle // PeerUpdate messages. When the reactor is stopped, we will catch the signal and // close the p2p PeerUpdatesCh gracefully. 
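The iterator-based receive loop above replaces the old select-over-channels loop; before the peer-update loop below, the idiom is easier to see stripped of reactor state. A condensed sketch (not the PR's code verbatim) built from the same p2p.Channel calls that appear in this diff:

// recvLoop drains a p2p channel until ctx is canceled, reporting handler
// failures back to the router so it can evict the misbehaving peer.
func recvLoop(ctx context.Context, logger log.Logger, ch *p2p.Channel, handle func(context.Context, *p2p.Envelope) error) {
	iter := ch.Receive(ctx)
	for iter.Next(ctx) { // Next returns false once ctx is done
		envelope := iter.Envelope()
		if err := handle(ctx, envelope); err != nil {
			logger.Error("failed to process message", "ch_id", envelope.ChannelID, "err", err)
			// SendError itself fails only when the context has been canceled.
			if serr := ch.SendError(ctx, p2p.PeerError{NodeID: envelope.From, Err: err}); serr != nil {
				return
			}
		}
	}
}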
-func (r *Reactor) processPeerUpdates() { - defer r.peerUpdates.Close() - +func (r *Reactor) processPeerUpdates(ctx context.Context, peerUpdates *p2p.PeerUpdates, evidenceCh *p2p.Channel) { for { select { - case peerUpdate := <-r.peerUpdates.Updates(): - r.processPeerUpdate(peerUpdate) - - case <-r.closeCh: - r.Logger.Debug("stopped listening on peer updates channel; closing...") + case peerUpdate := <-peerUpdates.Updates(): + r.processPeerUpdate(ctx, peerUpdate, evidenceCh) + case <-ctx.Done(): return } } @@ -294,7 +249,7 @@ func (r *Reactor) processPeerUpdates() { // that the peer has already received or may not be ready for. // // REF: https://github.com/tendermint/tendermint/issues/4727 -func (r *Reactor) broadcastEvidenceLoop(peerID types.NodeID, closer *tmsync.Closer) { +func (r *Reactor) broadcastEvidenceLoop(ctx context.Context, peerID types.NodeID, evidenceCh *p2p.Channel) { var next *clist.CElement defer func() { @@ -302,10 +257,8 @@ func (r *Reactor) broadcastEvidenceLoop(peerID types.NodeID, closer *tmsync.Clos delete(r.peerRoutines, peerID) r.mtx.Unlock() - r.peerWG.Done() - if e := recover(); e != nil { - r.Logger.Error( + r.logger.Error( "recovering from broadcasting evidence loop", "err", e, "stack", string(debug.Stack()), @@ -313,6 +266,9 @@ func (r *Reactor) broadcastEvidenceLoop(peerID types.NodeID, closer *tmsync.Clos } }() + timer := time.NewTimer(0) + defer timer.Stop() + for { // This happens because the CElement we were looking at got garbage // collected (removed). That is, .NextWaitChan() returned nil. So we can go @@ -324,14 +280,7 @@ func (r *Reactor) broadcastEvidenceLoop(peerID types.NodeID, closer *tmsync.Clos continue } - case <-closer.Done(): - // The peer is marked for removal via a PeerUpdate as the doneCh was - // explicitly closed to signal we should exit. - return - - case <-r.closeCh: - // The reactor has signaled that we are stopped and thus we should - // implicitly exit this peer's goroutine. + case <-ctx.Done(): return } } @@ -346,30 +295,25 @@ func (r *Reactor) broadcastEvidenceLoop(peerID types.NodeID, closer *tmsync.Clos // and thus would not be able to process the evidence correctly. Also, the // peer may receive this piece of evidence multiple times if it added and // removed frequently from the broadcasting peer. - r.evidenceCh.Out <- p2p.Envelope{ - To: peerID, - Message: &tmproto.EvidenceList{ - Evidence: []tmproto.Evidence{*evProto}, - }, + + if err := evidenceCh.Send(ctx, p2p.Envelope{ + To: peerID, + Message: evProto, + }); err != nil { + return } - r.Logger.Debug("gossiped evidence to peer", "evidence", ev, "peer", peerID) + r.logger.Debug("gossiped evidence to peer", "evidence", ev, "peer", peerID) select { - case <-time.After(time.Second * broadcastEvidenceIntervalS): + case <-timer.C: // start from the beginning after broadcastEvidenceIntervalS seconds + timer.Reset(time.Second * broadcastEvidenceIntervalS) next = nil case <-next.NextWaitChan(): next = next.Next() - case <-closer.Done(): - // The peer is marked for removal via a PeerUpdate as the doneCh was - // explicitly closed to signal we should exit. - return - - case <-r.closeCh: - // The reactor has signaled that we are stopped and thus we should - // implicitly exit this peer's goroutine. 
+ case <-ctx.Done(): return } } diff --git a/internal/evidence/reactor_test.go b/internal/evidence/reactor_test.go index a717bc3c6d..0f2ac2b19c 100644 --- a/internal/evidence/reactor_test.go +++ b/internal/evidence/reactor_test.go @@ -1,24 +1,23 @@ package evidence_test import ( + "context" "encoding/hex" "math/rand" "sync" "testing" "time" + "github.com/dashevo/dashd-go/btcjson" "github.com/fortytw2/leaktest" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" - "github.com/dashevo/dashd-go/btcjson" - "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/bls12381" - "github.com/tendermint/tendermint/crypto/tmhash" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/evidence" "github.com/tendermint/tendermint/internal/evidence/mocks" "github.com/tendermint/tendermint/internal/p2p" @@ -47,7 +46,7 @@ type reactorTestSuite struct { numStateStores int } -func setup(t *testing.T, stateStores []sm.Store, chBuf uint) *reactorTestSuite { +func setup(ctx context.Context, t *testing.T, stateStores []sm.Store) *reactorTestSuite { t.Helper() pID := make([]byte, 16) @@ -57,23 +56,21 @@ func setup(t *testing.T, stateStores []sm.Store, chBuf uint) *reactorTestSuite { numStateStores := len(stateStores) rts := &reactorTestSuite{ numStateStores: numStateStores, - logger: log.TestingLogger().With("testCase", t.Name()), - network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numStateStores}), + logger: log.NewNopLogger().With("testCase", t.Name()), + network: p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: numStateStores}), reactors: make(map[types.NodeID]*evidence.Reactor, numStateStores), pools: make(map[types.NodeID]*evidence.Pool, numStateStores), peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numStateStores), peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numStateStores), } - chDesc := p2p.ChannelDescriptor{ID: byte(evidence.EvidenceChannel)} - rts.evidenceChannels = rts.network.MakeChannelsNoCleanup(t, - chDesc, - new(tmproto.EvidenceList), - int(chBuf)) + chDesc := &p2p.ChannelDescriptor{ID: evidence.EvidenceChannel, MessageType: new(tmproto.Evidence)} + rts.evidenceChannels = rts.network.MakeChannelsNoCleanup(ctx, t, chDesc) require.Len(t, rts.network.RandomNode().PeerManager.Peers(), 0) idx := 0 evidenceTime := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC) + for nodeID := range rts.network.Nodes { logger := rts.logger.With("validator", idx) evidenceDB := dbm.NewMemDB() @@ -85,21 +82,32 @@ func setup(t *testing.T, stateStores []sm.Store, chBuf uint) *reactorTestSuite { } return nil }) - rts.pools[nodeID], err = evidence.NewPool(logger, evidenceDB, stateStores[idx], blockStore) + eventBus := eventbus.NewDefault(logger) + err = eventBus.Start(ctx) + require.NoError(t, err) + + rts.pools[nodeID] = evidence.NewPool(logger, evidenceDB, stateStores[idx], blockStore, evidence.NopMetrics(), eventBus) + startPool(t, rts.pools[nodeID], stateStores[idx]) require.NoError(t, err) rts.peerChans[nodeID] = make(chan p2p.PeerUpdate) - rts.peerUpdates[nodeID] = p2p.NewPeerUpdates(rts.peerChans[nodeID], 1) - rts.network.Nodes[nodeID].PeerManager.Register(rts.peerUpdates[nodeID]) + pu := p2p.NewPeerUpdates(rts.peerChans[nodeID], 1) + rts.peerUpdates[nodeID] = pu + rts.network.Nodes[nodeID].PeerManager.Register(ctx, pu) rts.nodes = append(rts.nodes, rts.network.Nodes[nodeID]) - rts.reactors[nodeID] = 
evidence.NewReactor(logger, - rts.evidenceChannels[nodeID], - rts.peerUpdates[nodeID], + chCreator := func(ctx context.Context, chdesc *p2p.ChannelDescriptor) (*p2p.Channel, error) { + return rts.evidenceChannels[nodeID], nil + } + + rts.reactors[nodeID] = evidence.NewReactor( + logger, + chCreator, + func(ctx context.Context) *p2p.PeerUpdates { return pu }, rts.pools[nodeID]) - require.NoError(t, rts.reactors[nodeID].Start()) + require.NoError(t, rts.reactors[nodeID].Start(ctx)) require.True(t, rts.reactors[nodeID].IsRunning()) idx++ @@ -108,19 +116,20 @@ func setup(t *testing.T, stateStores []sm.Store, chBuf uint) *reactorTestSuite { t.Cleanup(func() { for _, r := range rts.reactors { if r.IsRunning() { - require.NoError(t, r.Stop()) + r.Stop() + r.Wait() require.False(t, r.IsRunning()) } } - leaktest.Check(t) }) + t.Cleanup(leaktest.Check(t)) return rts } -func (rts *reactorTestSuite) start(t *testing.T) { - rts.network.Start(t) +func (rts *reactorTestSuite) start(ctx context.Context, t *testing.T) { + rts.network.Start(ctx, t) require.Len(t, rts.network.RandomNode().PeerManager.Peers(), rts.numStateStores-1, @@ -192,22 +201,8 @@ func (rts *reactorTestSuite) waitForEvidence(t *testing.T, evList types.Evidence wg.Wait() } -func (rts *reactorTestSuite) assertEvidenceChannelsEmpty(t *testing.T) { - t.Helper() - - for id, r := range rts.reactors { - require.NoError(t, r.Stop(), "stopping reactor #%s", id) - r.Wait() - require.False(t, r.IsRunning(), "reactor #%d did not stop", id) - - } - - for id, ech := range rts.evidenceChannels { - require.Empty(t, ech.Out, "checking channel #%q", id) - } -} - func createEvidenceList( + ctx context.Context, t *testing.T, pool *evidence.Pool, val types.PrivValidator, @@ -220,6 +215,7 @@ func createEvidenceList( vals := pool.State().Validators for i := 0; i < numEvidence; i++ { ev, err := types.NewMockDuplicateVoteEvidenceWithValidator( + ctx, int64(i+1), time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC), val, @@ -228,8 +224,8 @@ func createEvidenceList( vals.QuorumHash, ) require.NoError(t, err) - - require.NoError(t, pool.AddEvidence(ev), + err = pool.AddEvidence(ctx, ev) + require.NoError(t, err, "adding evidence it#%d of %d to pool with height %d", i, numEvidence, pool.State().LastBlockHeight) evList[i] = ev @@ -239,32 +235,35 @@ func createEvidenceList( } func TestReactorMultiDisconnect(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + quorumHash := crypto.RandQuorumHash() val := types.NewMockPVForQuorum(quorumHash) height := int64(numEvidence) + 10 - stateDB1 := initializeValidatorState(t, val, height, btcjson.LLMQType_5_60, quorumHash) - stateDB2 := initializeValidatorState(t, val, height, btcjson.LLMQType_5_60, quorumHash) + stateDB1 := initializeValidatorState(ctx, t, val, height, btcjson.LLMQType_5_60, quorumHash) + stateDB2 := initializeValidatorState(ctx, t, val, height, btcjson.LLMQType_5_60, quorumHash) - rts := setup(t, []sm.Store{stateDB1, stateDB2}, 20) + rts := setup(ctx, t, []sm.Store{stateDB1, stateDB2}) primary := rts.nodes[0] secondary := rts.nodes[1] - _ = createEvidenceList(t, rts.pools[primary.NodeID], val, numEvidence) + _ = createEvidenceList(ctx, t, rts.pools[primary.NodeID], val, numEvidence) require.Equal(t, primary.PeerManager.Status(secondary.NodeID), p2p.PeerStatusDown) - rts.start(t) + rts.start(ctx, t) require.Equal(t, primary.PeerManager.Status(secondary.NodeID), p2p.PeerStatusUp) // Ensure "disconnecting" the secondary peer from the primary more than once // is handled gracefully. 
- primary.PeerManager.Disconnected(secondary.NodeID) + primary.PeerManager.Disconnected(ctx, secondary.NodeID) require.Equal(t, primary.PeerManager.Status(secondary.NodeID), p2p.PeerStatusDown) _, err := primary.PeerManager.TryEvictNext() require.NoError(t, err) - primary.PeerManager.Disconnected(secondary.NodeID) + primary.PeerManager.Disconnected(ctx, secondary.NodeID) require.Equal(t, primary.PeerManager.Status(secondary.NodeID), p2p.PeerStatusDown) require.Equal(t, secondary.PeerManager.Status(primary.NodeID), p2p.PeerStatusUp) @@ -277,6 +276,9 @@ func TestReactorMultiDisconnect(t *testing.T) { func TestReactorBroadcastEvidence(t *testing.T) { numPeers := 7 + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // create a stateDB for all test suites (nodes) stateDBs := make([]sm.Store, numPeers) quorumHash := crypto.RandQuorumHash() @@ -286,16 +288,18 @@ func TestReactorBroadcastEvidence(t *testing.T) { // evidence for. height := int64(numEvidence) + 10 for i := 0; i < numPeers; i++ { - stateDBs[i] = initializeValidatorState(t, val, height, btcjson.LLMQType_5_60, quorumHash) + stateDBs[i] = initializeValidatorState(ctx, t, val, height, btcjson.LLMQType_5_60, quorumHash) } - rts := setup(t, stateDBs, 0) - rts.start(t) + rts := setup(ctx, t, stateDBs) + + rts.start(ctx, t) // Create a series of fixtures where each suite contains a reactor and // evidence pool. In addition, we mark a primary suite and the rest are // secondaries where each secondary is added as a peer via a PeerUpdate to the // primary. As a result, the primary will gossip all evidence to each secondary. + primary := rts.network.RandomNode() secondaries := make([]*p2ptest.Node, 0, len(rts.network.NodeIDs())-1) secondaryIDs := make([]types.NodeID, 0, cap(secondaries)) @@ -308,7 +312,7 @@ func TestReactorBroadcastEvidence(t *testing.T) { secondaryIDs = append(secondaryIDs, id) } - evList := createEvidenceList(t, rts.pools[primary.NodeID], val, numEvidence) + evList := createEvidenceList(ctx, t, rts.pools[primary.NodeID], val, numEvidence) // Add each secondary suite (node) as a peer to the primary suite (node). This // will cause the primary to gossip all evidence to the secondaries. @@ -327,7 +331,6 @@ func TestReactorBroadcastEvidence(t *testing.T) { require.Equal(t, numEvidence, int(pool.Size())) } - rts.assertEvidenceChannelsEmpty(t) } // TestReactorSelectiveBroadcast tests a context where we have two reactors @@ -339,20 +342,23 @@ func TestReactorBroadcastEvidence_Lagging(t *testing.T) { height1 := int64(numEvidence) + 10 height2 := int64(numEvidence) / 2 + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // stateDB1 is ahead of stateDB2, where stateDB1 has all heights (1-20) and // stateDB2 only has heights 1-5. - stateDB1 := initializeValidatorState(t, val, height1, btcjson.LLMQType_5_60, quorumHash) - stateDB2 := initializeValidatorState(t, val, height2, btcjson.LLMQType_5_60, quorumHash) + stateDB1 := initializeValidatorState(ctx, t, val, height1, btcjson.LLMQType_5_60, quorumHash) + stateDB2 := initializeValidatorState(ctx, t, val, height2, btcjson.LLMQType_5_60, quorumHash) - rts := setup(t, []sm.Store{stateDB1, stateDB2}, 100) - rts.start(t) + rts := setup(ctx, t, []sm.Store{stateDB1, stateDB2}) + rts.start(ctx, t) primary := rts.nodes[0] secondary := rts.nodes[1] // Send a list of valid evidence to the first reactor's, the one that is ahead, // evidence pool. 
- evList := createEvidenceList(t, rts.pools[primary.NodeID], val, numEvidence) + evList := createEvidenceList(ctx, t, rts.pools[primary.NodeID], val, numEvidence) // Add each secondary suite (node) as a peer to the primary suite (node). This // will cause the primary to gossip all evidence to the secondaries. @@ -366,8 +372,6 @@ func TestReactorBroadcastEvidence_Lagging(t *testing.T) { require.Equal(t, numEvidence, int(rts.pools[primary.NodeID].Size())) require.Equal(t, int(height2), int(rts.pools[secondary.NodeID].Size())) - - rts.assertEvidenceChannelsEmpty(t) } func TestReactorBroadcastEvidence_Pending(t *testing.T) { @@ -375,25 +379,29 @@ func TestReactorBroadcastEvidence_Pending(t *testing.T) { val := types.NewMockPVForQuorum(quorumHash) height := int64(10) - stateDB1 := initializeValidatorState(t, val, height, btcjson.LLMQType_5_60, quorumHash) - stateDB2 := initializeValidatorState(t, val, height, btcjson.LLMQType_5_60, quorumHash) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - rts := setup(t, []sm.Store{stateDB1, stateDB2}, 100) + stateDB1 := initializeValidatorState(ctx, t, val, height, btcjson.LLMQType_5_60, quorumHash) + stateDB2 := initializeValidatorState(ctx, t, val, height, btcjson.LLMQType_5_60, quorumHash) + + rts := setup(ctx, t, []sm.Store{stateDB1, stateDB2}) primary := rts.nodes[0] secondary := rts.nodes[1] - evList := createEvidenceList(t, rts.pools[primary.NodeID], val, numEvidence) + evList := createEvidenceList(ctx, t, rts.pools[primary.NodeID], val, numEvidence) // Manually add half the evidence to the secondary which will mark them as // pending. for i := 0; i < numEvidence/2; i++ { - require.NoError(t, rts.pools[secondary.NodeID].AddEvidence(evList[i])) + err := rts.pools[secondary.NodeID].AddEvidence(ctx, evList[i]) + require.NoError(t, err) } // the secondary should have half the evidence as pending require.Equal(t, numEvidence/2, int(rts.pools[secondary.NodeID].Size())) - rts.start(t) + rts.start(ctx, t) // The secondary reactor should have received all the evidence ignoring the // already pending evidence. @@ -413,21 +421,25 @@ func TestReactorBroadcastEvidence_Committed(t *testing.T) { val := types.NewMockPVForQuorum(quorumHash) height := int64(10) - stateDB1 := initializeValidatorState(t, val, height, btcjson.LLMQType_5_60, quorumHash) - stateDB2 := initializeValidatorState(t, val, height, btcjson.LLMQType_5_60, quorumHash) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - rts := setup(t, []sm.Store{stateDB1, stateDB2}, 0) + stateDB1 := initializeValidatorState(ctx, t, val, height, btcjson.LLMQType_5_60, quorumHash) + stateDB2 := initializeValidatorState(ctx, t, val, height, btcjson.LLMQType_5_60, quorumHash) + + rts := setup(ctx, t, []sm.Store{stateDB1, stateDB2}) primary := rts.nodes[0] secondary := rts.nodes[1] // add all evidence to the primary reactor - evList := createEvidenceList(t, rts.pools[primary.NodeID], val, numEvidence) + evList := createEvidenceList(ctx, t, rts.pools[primary.NodeID], val, numEvidence) // Manually add half the evidence to the secondary which will mark them as // pending. 
for i := 0; i < numEvidence/2; i++ { - require.NoError(t, rts.pools[secondary.NodeID].AddEvidence(evList[i])) + err := rts.pools[secondary.NodeID].AddEvidence(ctx, evList[i]) + require.NoError(t, err) } // the secondary should have half the evidence as pending @@ -438,13 +450,13 @@ func TestReactorBroadcastEvidence_Committed(t *testing.T) { // update the secondary's pool such that all pending evidence is committed state.LastBlockHeight++ - rts.pools[secondary.NodeID].Update(state, evList[:numEvidence/2]) + rts.pools[secondary.NodeID].Update(ctx, state, evList[:numEvidence/2]) // the secondary should have half the evidence as committed require.Equal(t, 0, int(rts.pools[secondary.NodeID].Size())) // start the network and ensure it's configured - rts.start(t) + rts.start(ctx, t) // The secondary reactor should have received all the evidence ignoring the // already committed evidence. @@ -465,17 +477,20 @@ func TestReactorBroadcastEvidence_FullyConnected(t *testing.T) { quorumHash := crypto.RandQuorumHash() val := types.NewMockPVForQuorum(quorumHash) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // We need all validators saved for heights at least as high as we have // evidence for. height := int64(numEvidence) + 10 for i := 0; i < numPeers; i++ { - stateDBs[i] = initializeValidatorState(t, val, height, btcjson.LLMQType_5_60, quorumHash) + stateDBs[i] = initializeValidatorState(ctx, t, val, height, btcjson.LLMQType_5_60, quorumHash) } - rts := setup(t, stateDBs, 0) - rts.start(t) + rts := setup(ctx, t, stateDBs) + rts.start(ctx, t) - evList := createEvidenceList(t, rts.pools[rts.network.RandomNode().NodeID], val, numEvidence) + evList := createEvidenceList(ctx, t, rts.pools[rts.network.RandomNode().NodeID], val, numEvidence) // every suite (reactor) connects to every other suite (reactor) for outerID, outerChan := range rts.peerChans { @@ -498,7 +513,7 @@ func TestReactorBroadcastEvidence_FullyConnected(t *testing.T) { // commit state so we do not continue to repeat gossiping the same evidence state := pool.State() state.LastBlockHeight++ - pool.Update(state, evList) + pool.Update(ctx, state, evList) } } @@ -509,10 +524,10 @@ func TestEvidenceListSerialization(t *testing.T) { Height: 3, Round: 2, BlockID: types.BlockID{ - Hash: tmhash.Sum([]byte("blockID_hash")), + Hash: crypto.Checksum([]byte("blockID_hash")), PartSetHeader: types.PartSetHeader{ Total: 1000000, - Hash: tmhash.Sum([]byte("blockID_part_set_header_hash")), + Hash: crypto.Checksum([]byte("blockID_part_set_header_hash")), }, }, ValidatorProTxHash: crypto.ProTxHashFromSeedBytes([]byte("validator_pro_tx_hash")), diff --git a/internal/evidence/verify.go b/internal/evidence/verify.go index 66b8908d19..5e2aac5125 100644 --- a/internal/evidence/verify.go +++ b/internal/evidence/verify.go @@ -2,6 +2,7 @@ package evidence import ( "bytes" + "context" "fmt" "github.com/tendermint/tendermint/types" @@ -18,7 +19,7 @@ import ( // set for. In these cases, we do not return a ErrInvalidEvidence as not to have // the sending peer disconnect. All other errors are treated as invalid evidence // (i.e. ErrInvalidEvidence). 
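The contract in the comment above has a reactor-side mirror in the handleEvidenceMessage hunk earlier in this diff; restated as a fragment, the only AddEvidence failure that propagates to the router (and so costs the peer its connection) is *types.ErrInvalidEvidence:

if err := r.evpool.AddEvidence(ctx, ev); err != nil {
	if _, ok := err.(*types.ErrInvalidEvidence); ok {
		return err // provably bad evidence: the router drops the sender
	}
	// Anything else (e.g. a block or validator set we don't have yet) is
	// swallowed so the sending peer stays connected.
}
return nil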
-func (evpool *Pool) verify(evidence types.Evidence) error { +func (evpool *Pool) verify(ctx context.Context, evidence types.Evidence) error { var ( state = evpool.State() height = state.LastBlockHeight @@ -71,7 +72,7 @@ func (evpool *Pool) verify(evidence types.Evidence) error { if err := ev.ValidateABCI(val, valSet, evTime); err != nil { ev.GenerateABCI(val, valSet, evTime) - if addErr := evpool.addPendingEvidence(ev); addErr != nil { + if addErr := evpool.addPendingEvidence(ctx, ev); addErr != nil { evpool.logger.Error("adding pending duplicate vote evidence failed", "err", addErr) } return err diff --git a/internal/evidence/verify_test.go b/internal/evidence/verify_test.go index 560b6cb047..03160172ed 100644 --- a/internal/evidence/verify_test.go +++ b/internal/evidence/verify_test.go @@ -11,7 +11,7 @@ import ( dbm "github.com/tendermint/tm-db" "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/crypto/tmhash" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/evidence" "github.com/tendermint/tendermint/internal/evidence/mocks" sm "github.com/tendermint/tendermint/internal/state" @@ -28,6 +28,11 @@ type voteData struct { } func TestVerifyDuplicateVoteEvidence(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewTestingLogger(t) + quorumType := crypto.SmallQuorumType() quorumHash := crypto.RandQuorumHash() val := types.NewMockPVForQuorum(quorumHash) @@ -44,13 +49,13 @@ func TestVerifyDuplicateVoteEvidence(t *testing.T) { stateID := types.RandStateID() - vote1 := makeVote(t, val, chainID, 0, 10, 2, 1, blockID, quorumType, quorumHash, stateID) + vote1 := makeVote(ctx, t, val, chainID, 0, 10, 2, 1, blockID, quorumType, quorumHash, stateID) v1 := vote1.ToProto() - err := val.SignVote(context.Background(), chainID, quorumType, quorumHash, v1, stateID, nil) + err := val.SignVote(ctx, chainID, quorumType, quorumHash, v1, stateID, nil) require.NoError(t, err) - badVote := makeVote(t, val, chainID, 0, 10, 2, 1, blockID, quorumType, quorumHash, stateID) + badVote := makeVote(ctx, t, val, chainID, 0, 10, 2, 1, blockID, quorumType, quorumHash, stateID) bv := badVote.ToProto() - err = val2.SignVote(context.Background(), chainID, crypto.SmallQuorumType(), quorumHash, bv, stateID, nil) + err = val2.SignVote(ctx, chainID, crypto.SmallQuorumType(), quorumHash, bv, stateID, nil) require.NoError(t, err) vote1.BlockSignature = v1.BlockSignature @@ -59,15 +64,17 @@ func TestVerifyDuplicateVoteEvidence(t *testing.T) { badVote.StateSignature = bv.StateSignature cases := []voteData{ - {vote1, makeVote(t, val, chainID, 0, 10, 2, 1, blockID2, quorumType, quorumHash, stateID), true}, // different block ids - {vote1, makeVote(t, val, chainID, 0, 10, 2, 1, blockID3, quorumType, quorumHash, stateID), true}, - {vote1, makeVote(t, val, chainID, 0, 10, 2, 1, blockID4, quorumType, quorumHash, stateID), true}, - {vote1, makeVote(t, val, chainID, 0, 10, 2, 1, blockID, quorumType, quorumHash, stateID), false}, // wrong block id - {vote1, makeVote(t, val, "mychain2", 0, 10, 2, 1, blockID2, quorumType, quorumHash, stateID), false}, // wrong chain id - {vote1, makeVote(t, val, chainID, 0, 11, 2, 1, blockID2, quorumType, quorumHash, stateID), false}, // wrong height - {vote1, makeVote(t, val, chainID, 0, 10, 3, 1, blockID2, quorumType, quorumHash, stateID), false}, // wrong round - {vote1, makeVote(t, val, chainID, 0, 10, 2, 2, blockID2, quorumType, quorumHash, stateID), false}, // wrong step - 
{vote1, makeVote(t, val2, chainID, 0, 10, 2, 1, blockID2, quorumType, quorumHash, stateID), false}, // wrong validator + {vote1, makeVote(ctx, t, val, chainID, 0, 10, 2, 1, blockID2, quorumType, quorumHash, stateID), true}, // different block ids + {vote1, makeVote(ctx, t, val, chainID, 0, 10, 2, 1, blockID3, quorumType, quorumHash, stateID), true}, + {vote1, makeVote(ctx, t, val, chainID, 0, 10, 2, 1, blockID4, quorumType, quorumHash, stateID), true}, + {vote1, makeVote(ctx, t, val, chainID, 0, 10, 2, 1, blockID, quorumType, quorumHash, stateID), false}, // wrong block id + {vote1, makeVote(ctx, t, val, "mychain2", 0, 10, 2, 1, blockID2, quorumType, quorumHash, stateID), false}, // wrong chain id + {vote1, makeVote(ctx, t, val, chainID, 0, 11, 2, 1, blockID2, quorumType, quorumHash, stateID), false}, // wrong height + {vote1, makeVote(ctx, t, val, chainID, 0, 10, 3, 1, blockID2, quorumType, quorumHash, stateID), false}, // wrong round + {vote1, makeVote(ctx, t, val, chainID, 0, 10, 2, 2, blockID2, quorumType, quorumHash, stateID), false}, // wrong step + {vote1, makeVote(ctx, t, val2, chainID, 0, 10, 2, 1, blockID2, quorumType, quorumHash, stateID), false}, // wrong validator + // a different vote time doesn't matter + {vote1, makeVote(ctx, t, val, chainID, 0, 10, 2, 1, blockID2, quorumType, quorumHash, stateID), true}, {vote1, badVote, false}, // signed by wrong key } @@ -88,15 +95,15 @@ func TestVerifyDuplicateVoteEvidence(t *testing.T) { } // create good evidence and correct validator power - goodEv, err := types.NewMockDuplicateVoteEvidenceWithValidator(10, defaultEvidenceTime, val, chainID, crypto.SmallQuorumType(), quorumHash) + goodEv, err := types.NewMockDuplicateVoteEvidenceWithValidator(ctx, 10, defaultEvidenceTime, val, chainID, crypto.SmallQuorumType(), quorumHash) require.NoError(t, err) goodEv.ValidatorPower = types.DefaultDashVotingPower goodEv.TotalVotingPower = types.DefaultDashVotingPower - badEv, err := types.NewMockDuplicateVoteEvidenceWithValidator(10, defaultEvidenceTime, val, chainID, crypto.SmallQuorumType(), quorumHash) + badEv, err := types.NewMockDuplicateVoteEvidenceWithValidator(ctx, 10, defaultEvidenceTime, val, chainID, crypto.SmallQuorumType(), quorumHash) require.NoError(t, err) badEv.ValidatorPower = types.DefaultDashVotingPower + 1 badEv.TotalVotingPower = types.DefaultDashVotingPower - badTimeEv, err := types.NewMockDuplicateVoteEvidenceWithValidator(10, defaultEvidenceTime.Add(1*time.Minute), val, chainID, crypto.SmallQuorumType(), quorumHash) + badTimeEv, err := types.NewMockDuplicateVoteEvidenceWithValidator(ctx, 10, defaultEvidenceTime.Add(1*time.Minute), val, chainID, crypto.SmallQuorumType(), quorumHash) require.NoError(t, err) badTimeEv.ValidatorPower = types.DefaultDashVotingPower badTimeEv.TotalVotingPower = types.DefaultDashVotingPower @@ -112,28 +119,32 @@ func TestVerifyDuplicateVoteEvidence(t *testing.T) { blockStore := &mocks.BlockStore{} blockStore.On("LoadBlockMeta", int64(10)).Return(&types.BlockMeta{Header: types.Header{Time: defaultEvidenceTime}}) - pool, err := evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore) - require.NoError(t, err) + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) + + pool := evidence.NewPool(logger, dbm.NewMemDB(), stateStore, blockStore, evidence.NopMetrics(), eventBus) + startPool(t, pool, stateStore) evList := types.EvidenceList{goodEv} - err = pool.CheckEvidence(evList) + err = pool.CheckEvidence(ctx, evList) assert.NoError(t, err) // evidence 
with a different validator power should fail evList = types.EvidenceList{badEv} - err = pool.CheckEvidence(evList) + err = pool.CheckEvidence(ctx, evList) assert.Error(t, err) // evidence with a different timestamp should fail evList = types.EvidenceList{badTimeEv} - err = pool.CheckEvidence(evList) + err = pool.CheckEvidence(ctx, evList) assert.Error(t, err) } func makeVote( + ctx context.Context, t *testing.T, val types.PrivValidator, chainID string, valIndex int32, height int64, round int32, step int, blockID types.BlockID, quorumType btcjson.LLMQType, quorumHash crypto.QuorumHash, stateID types.StateID) *types.Vote { - proTxHash, err := val.GetProTxHash(context.Background()) + proTxHash, err := val.GetProTxHash(ctx) require.NoError(t, err) v := &types.Vote{ ValidatorProTxHash: proTxHash, @@ -145,18 +156,16 @@ func makeVote( } vpb := v.ToProto() - err = val.SignVote(context.Background(), chainID, quorumType, quorumHash, vpb, stateID, nil) - if err != nil { - panic(err) - } + err = val.SignVote(ctx, chainID, quorumType, quorumHash, vpb, stateID, nil) + require.NoError(t, err) v.BlockSignature = vpb.BlockSignature return v } func makeBlockID(hash []byte, partSetSize uint32, partSetHash []byte) types.BlockID { var ( - h = make([]byte, tmhash.Size) - psH = make([]byte, tmhash.Size) + h = make([]byte, crypto.HashSize) + psH = make([]byte, crypto.HashSize) ) copy(h, hash) copy(psH, partSetHash) diff --git a/internal/inspect/inspect.go b/internal/inspect/inspect.go index 3f49866b37..6381ea888a 100644 --- a/internal/inspect/inspect.go +++ b/internal/inspect/inspect.go @@ -5,8 +5,10 @@ import ( "errors" "fmt" "net" + "net/http" "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/inspect/rpc" rpccore "github.com/tendermint/tendermint/internal/rpc/core" "github.com/tendermint/tendermint/internal/state" @@ -32,7 +34,7 @@ type Inspector struct { config *config.RPCConfig indexerService *indexer.Service - eventBus *types.EventBus + eventBus *eventbus.EventBus logger log.Logger } @@ -41,20 +43,18 @@ type Inspector struct { // The sinks are used to enable block and transaction querying via the RPC server. // The caller is responsible for starting and stopping the Inspector service. func New(cfg *config.RPCConfig, bs state.BlockStore, ss state.Store, es []indexer.EventSink, logger log.Logger) *Inspector { - routes := rpc.Routes(*cfg, ss, bs, es, logger) - eb := types.NewEventBus() - eb.SetLogger(logger.With("module", "events")) - is := indexer.NewService(indexer.ServiceArgs{ - Sinks: es, - EventBus: eb, - Logger: logger.With("module", "txindex"), - }) + eb := eventbus.NewDefault(logger.With("module", "events")) + return &Inspector{ - routes: routes, - config: cfg, - logger: logger, - eventBus: eb, - indexerService: is, + routes: rpc.Routes(*cfg, ss, bs, es, logger), + config: cfg, + logger: logger, + eventBus: eb, + indexerService: indexer.NewService(indexer.ServiceArgs{ + Sinks: es, + EventBus: eb, + Logger: logger.With("module", "txindex"), + }), } } @@ -84,26 +84,18 @@ func NewFromConfig(logger log.Logger, cfg *config.Config) (*Inspector, error) { // Run starts the Inspector servers and blocks until the servers shut down. The passed // in context is used to control the lifecycle of the servers. 
func (ins *Inspector) Run(ctx context.Context) error { - err := ins.eventBus.Start() + err := ins.eventBus.Start(ctx) if err != nil { return fmt.Errorf("error starting event bus: %s", err) } - defer func() { - err := ins.eventBus.Stop() - if err != nil { - ins.logger.Error("event bus stopped with error", "err", err) - } - }() - err = ins.indexerService.Start() + defer ins.eventBus.Wait() + + err = ins.indexerService.Start(ctx) if err != nil { return fmt.Errorf("error starting indexer service: %s", err) } - defer func() { - err := ins.indexerService.Stop() - if err != nil { - ins.logger.Error("indexer service stopped with error", "err", err) - } - }() + defer ins.indexerService.Wait() + return startRPCServers(ctx, ins.config, ins.logger, ins.routes) } @@ -126,7 +118,7 @@ func startRPCServers(ctx context.Context, cfg *config.RPCConfig, logger log.Logg logger.Info("RPC HTTPS server starting", "address", listenerAddr, "certfile", certFile, "keyfile", keyFile) err := server.ListenAndServeTLS(tctx, certFile, keyFile) - if !errors.Is(err, net.ErrClosed) { + if !errors.Is(err, net.ErrClosed) && !errors.Is(err, http.ErrServerClosed) { return err } logger.Info("RPC HTTPS server stopped", "address", listenerAddr) @@ -137,7 +129,7 @@ func startRPCServers(ctx context.Context, cfg *config.RPCConfig, logger log.Logg g.Go(func() error { logger.Info("RPC HTTP server starting", "address", listenerAddr) err := server.ListenAndServe(tctx) - if !errors.Is(err, net.ErrClosed) { + if !errors.Is(err, net.ErrClosed) && !errors.Is(err, http.ErrServerClosed) { return err } logger.Info("RPC HTTP server stopped", "address", listenerAddr) diff --git a/internal/inspect/inspect_test.go b/internal/inspect/inspect_test.go index 6405e76fe6..4085dcb805 100644 --- a/internal/inspect/inspect_test.go +++ b/internal/inspect/inspect_test.go @@ -5,6 +5,7 @@ import ( "fmt" "net" "os" + "runtime" "strings" "sync" "testing" @@ -13,23 +14,24 @@ import ( "github.com/fortytw2/leaktest" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + abcitypes "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/inspect" + "github.com/tendermint/tendermint/internal/pubsub/query" "github.com/tendermint/tendermint/internal/state/indexer" indexermocks "github.com/tendermint/tendermint/internal/state/indexer/mocks" statemocks "github.com/tendermint/tendermint/internal/state/mocks" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/proto/tendermint/state" httpclient "github.com/tendermint/tendermint/rpc/client/http" "github.com/tendermint/tendermint/types" ) func TestInspectConstructor(t *testing.T) { - cfg, err := config.ResetTestRoot("test") + cfg, err := config.ResetTestRoot(t.TempDir(), "test") require.NoError(t, err) - testLogger := log.TestingLogger() + testLogger := log.NewNopLogger() t.Cleanup(leaktest.Check(t)) defer func() { _ = os.RemoveAll(cfg.RootDir) }() t.Run("from config", func(t *testing.T) { @@ -42,23 +44,24 @@ func TestInspectConstructor(t *testing.T) { } func TestInspectRun(t *testing.T) { - cfg, err := config.ResetTestRoot("test") + cfg, err := config.ResetTestRoot(t.TempDir(), "test") require.NoError(t, err) - testLogger := log.TestingLogger() + testLogger := log.NewNopLogger() t.Cleanup(leaktest.Check(t)) defer func() { _ = os.RemoveAll(cfg.RootDir) }() t.Run("from config", func(t *testing.T) { logger := testLogger.With(t.Name()) d, err := 
inspect.NewFromConfig(logger, cfg) require.NoError(t, err) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) stoppedWG := &sync.WaitGroup{} stoppedWG.Add(1) go func() { + defer stoppedWG.Done() require.NoError(t, d.Run(ctx)) - stoppedWG.Done() }() + time.Sleep(100 * time.Millisecond) cancel() stoppedWG.Wait() }) @@ -79,28 +82,26 @@ func TestBlock(t *testing.T) { blockStoreMock.On("LoadBlock", testHeight).Return(testBlock) eventSinkMock := &indexermocks.EventSink{} eventSinkMock.On("Stop").Return(nil) + eventSinkMock.On("Type").Return(indexer.EventSinkType("Mock")) rpcConfig := config.TestRPCConfig() - l := log.TestingLogger() + l := log.NewNopLogger() d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) ctx, cancel := context.WithCancel(context.Background()) wg := &sync.WaitGroup{} wg.Add(1) - startedWG := &sync.WaitGroup{} - startedWG.Add(1) go func() { - startedWG.Done() defer wg.Done() require.NoError(t, d.Run(ctx)) }() // FIXME: used to induce context switch. // Determine more deterministic method for prompting a context switch - startedWG.Wait() + runtime.Gosched() requireConnect(t, rpcConfig.ListenAddress, 20) cli, err := httpclient.New(rpcConfig.ListenAddress) require.NoError(t, err) - resultBlock, err := cli.Block(context.Background(), &testHeight) + resultBlock, err := cli.Block(ctx, &testHeight) require.NoError(t, err) require.Equal(t, testBlock.Height, resultBlock.Block.Height) require.Equal(t, testBlock.LastCommitHash, resultBlock.Block.LastCommitHash) @@ -131,7 +132,7 @@ func TestTxSearch(t *testing.T) { Return([]*abcitypes.TxResult{testTxResult}, nil) rpcConfig := config.TestRPCConfig() - l := log.TestingLogger() + l := log.NewNopLogger() d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) ctx, cancel := context.WithCancel(context.Background()) wg := &sync.WaitGroup{} @@ -152,7 +153,7 @@ func TestTxSearch(t *testing.T) { require.NoError(t, err) var page = 1 - resultTxSearch, err := cli.TxSearch(context.Background(), testQuery, false, &page, &page, "") + resultTxSearch, err := cli.TxSearch(ctx, testQuery, false, &page, &page, "") require.NoError(t, err) require.Len(t, resultTxSearch.Txs, 1) require.Equal(t, types.Tx(testTx), resultTxSearch.Txs[0].Tx) @@ -178,7 +179,7 @@ func TestTx(t *testing.T) { }, nil) rpcConfig := config.TestRPCConfig() - l := log.TestingLogger() + l := log.NewNopLogger() d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) ctx, cancel := context.WithCancel(context.Background()) wg := &sync.WaitGroup{} @@ -198,7 +199,7 @@ func TestTx(t *testing.T) { cli, err := httpclient.New(rpcConfig.ListenAddress) require.NoError(t, err) - res, err := cli.Tx(context.Background(), testHash, false) + res, err := cli.Tx(ctx, testHash, false) require.NoError(t, err) require.Equal(t, types.Tx(testTx), res.Tx) @@ -223,8 +224,10 @@ func TestConsensusParams(t *testing.T) { }, nil) eventSinkMock := &indexermocks.EventSink{} eventSinkMock.On("Stop").Return(nil) + eventSinkMock.On("Type").Return(indexer.EventSinkType("Mock")) + rpcConfig := config.TestRPCConfig() - l := log.TestingLogger() + l := log.NewNopLogger() d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) ctx, cancel := context.WithCancel(context.Background()) @@ -244,7 +247,7 @@ func TestConsensusParams(t *testing.T) { requireConnect(t, 
rpcConfig.ListenAddress, 20) cli, err := httpclient.New(rpcConfig.ListenAddress) require.NoError(t, err) - params, err := cli.ConsensusParams(context.Background(), &testHeight) + params, err := cli.ConsensusParams(ctx, &testHeight) require.NoError(t, err) require.Equal(t, params.ConsensusParams.Block.MaxGas, testMaxGas) @@ -261,21 +264,23 @@ func TestBlockResults(t *testing.T) { stateStoreMock := &statemocks.Store{} // tmstate "github.com/tendermint/tendermint/proto/tendermint/state" stateStoreMock.On("LoadABCIResponses", testHeight).Return(&state.ABCIResponses{ - DeliverTxs: []*abcitypes.ResponseDeliverTx{ - { - GasUsed: testGasUsed, + FinalizeBlock: &abcitypes.ResponseFinalizeBlock{ + TxResults: []*abcitypes.ExecTxResult{ + { + GasUsed: testGasUsed, + }, }, }, - EndBlock: &abcitypes.ResponseEndBlock{}, - BeginBlock: &abcitypes.ResponseBeginBlock{}, }, nil) blockStoreMock := &statemocks.BlockStore{} blockStoreMock.On("Base").Return(int64(0)) blockStoreMock.On("Height").Return(testHeight) eventSinkMock := &indexermocks.EventSink{} eventSinkMock.On("Stop").Return(nil) + eventSinkMock.On("Type").Return(indexer.EventSinkType("Mock")) + rpcConfig := config.TestRPCConfig() - l := log.TestingLogger() + l := log.NewNopLogger() d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) ctx, cancel := context.WithCancel(context.Background()) @@ -295,7 +300,7 @@ func TestBlockResults(t *testing.T) { requireConnect(t, rpcConfig.ListenAddress, 20) cli, err := httpclient.New(rpcConfig.ListenAddress) require.NoError(t, err) - res, err := cli.BlockResults(context.Background(), &testHeight) + res, err := cli.BlockResults(ctx, &testHeight) require.NoError(t, err) require.Equal(t, res.TotalGasUsed, testGasUsed) @@ -320,8 +325,10 @@ func TestCommit(t *testing.T) { }, nil) eventSinkMock := &indexermocks.EventSink{} eventSinkMock.On("Stop").Return(nil) + eventSinkMock.On("Type").Return(indexer.EventSinkType("Mock")) + rpcConfig := config.TestRPCConfig() - l := log.TestingLogger() + l := log.NewNopLogger() d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) ctx, cancel := context.WithCancel(context.Background()) @@ -341,7 +348,7 @@ func TestCommit(t *testing.T) { requireConnect(t, rpcConfig.ListenAddress, 20) cli, err := httpclient.New(rpcConfig.ListenAddress) require.NoError(t, err) - res, err := cli.Commit(context.Background(), &testHeight) + res, err := cli.Commit(ctx, &testHeight) require.NoError(t, err) require.NotNil(t, res) require.Equal(t, res.SignedHeader.Commit.Round, testRound) @@ -372,8 +379,10 @@ func TestBlockByHash(t *testing.T) { blockStoreMock.On("LoadBlockByHash", testHash).Return(testBlock, nil) eventSinkMock := &indexermocks.EventSink{} eventSinkMock.On("Stop").Return(nil) + eventSinkMock.On("Type").Return(indexer.EventSinkType("Mock")) + rpcConfig := config.TestRPCConfig() - l := log.TestingLogger() + l := log.NewNopLogger() d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) ctx, cancel := context.WithCancel(context.Background()) @@ -393,7 +402,7 @@ func TestBlockByHash(t *testing.T) { requireConnect(t, rpcConfig.ListenAddress, 20) cli, err := httpclient.New(rpcConfig.ListenAddress) require.NoError(t, err) - res, err := cli.BlockByHash(context.Background(), testHash) + res, err := cli.BlockByHash(ctx, testHash) require.NoError(t, err) require.NotNil(t, res) require.Equal(t, []byte(res.BlockID.Hash), testHash) @@ -423,8 +432,10 @@ func TestBlockchain(t 
*testing.T) { }) eventSinkMock := &indexermocks.EventSink{} eventSinkMock.On("Stop").Return(nil) + eventSinkMock.On("Type").Return(indexer.EventSinkType("Mock")) + rpcConfig := config.TestRPCConfig() - l := log.TestingLogger() + l := log.NewNopLogger() d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) ctx, cancel := context.WithCancel(context.Background()) @@ -444,7 +455,7 @@ func TestBlockchain(t *testing.T) { requireConnect(t, rpcConfig.ListenAddress, 20) cli, err := httpclient.New(rpcConfig.ListenAddress) require.NoError(t, err) - res, err := cli.BlockchainInfo(context.Background(), 0, 100) + res, err := cli.BlockchainInfo(ctx, 0, 100) require.NoError(t, err) require.NotNil(t, res) require.Equal(t, testBlockHash, []byte(res.BlockMetas[0].BlockID.Hash)) @@ -474,8 +485,10 @@ func TestValidators(t *testing.T) { blockStoreMock.On("Base").Return(int64(0)) eventSinkMock := &indexermocks.EventSink{} eventSinkMock.On("Stop").Return(nil) + eventSinkMock.On("Type").Return(indexer.EventSinkType("Mock")) + rpcConfig := config.TestRPCConfig() - l := log.TestingLogger() + l := log.NewNopLogger() d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) ctx, cancel := context.WithCancel(context.Background()) @@ -499,7 +512,7 @@ func TestValidators(t *testing.T) { testPage := 1 testPerPage := 100 requestQuorumInfo := true - res, err := cli.Validators(context.Background(), &testHeight, &testPage, &testPerPage, &requestQuorumInfo) + res, err := cli.Validators(ctx, &testHeight, &testPage, &testPerPage, &requestQuorumInfo) require.NoError(t, err) require.NotNil(t, res) require.Equal(t, testVotingPower, res.Validators[0].VotingPower) @@ -535,7 +548,7 @@ func TestBlockSearch(t *testing.T) { mock.MatchedBy(func(q *query.Query) bool { return testQuery == q.String() })). Return([]int64{testHeight}, nil) rpcConfig := config.TestRPCConfig() - l := log.TestingLogger() + l := log.NewNopLogger() d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) ctx, cancel := context.WithCancel(context.Background()) @@ -559,7 +572,7 @@ func TestBlockSearch(t *testing.T) { testPage := 1 testPerPage := 100 testOrderBy := "desc" - res, err := cli.BlockSearch(context.Background(), testQuery, &testPage, &testPerPage, testOrderBy) + res, err := cli.BlockSearch(ctx, testQuery, &testPage, &testPerPage, testOrderBy) require.NoError(t, err) require.NotNil(t, res) require.Equal(t, testBlockHash, []byte(res.Blocks[0].BlockID.Hash)) diff --git a/internal/inspect/rpc/rpc.go b/internal/inspect/rpc/rpc.go index 43a0fe1242..00c3e52efa 100644 --- a/internal/inspect/rpc/rpc.go +++ b/internal/inspect/rpc/rpc.go @@ -8,14 +8,12 @@ import ( "github.com/rs/cors" "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/consensus" + "github.com/tendermint/tendermint/internal/pubsub" "github.com/tendermint/tendermint/internal/rpc/core" "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/pubsub" "github.com/tendermint/tendermint/rpc/jsonrpc/server" - "github.com/tendermint/tendermint/types" ) // Server defines parameters for running an Inspector rpc server. 
@@ -26,27 +24,30 @@ type Server struct { Config *config.RPCConfig } +type eventBusUnsubscriber interface { + UnsubscribeAll(ctx context.Context, subscriber string) error +} + // Routes returns the set of routes used by the Inspector server. func Routes(cfg config.RPCConfig, s state.Store, bs state.BlockStore, es []indexer.EventSink, logger log.Logger) core.RoutesMap { env := &core.Environment{ - Config: cfg, - EventSinks: es, - StateStore: s, - BlockStore: bs, - ConsensusReactor: waitSyncCheckerImpl{}, - Logger: logger, + Config: cfg, + EventSinks: es, + StateStore: s, + BlockStore: bs, + Logger: logger, } return core.RoutesMap{ - "blockchain": server.NewRPCFunc(env.BlockchainInfo, "minHeight,maxHeight", true), - "consensus_params": server.NewRPCFunc(env.ConsensusParams, "height", true), - "block": server.NewRPCFunc(env.Block, "height", true), - "block_by_hash": server.NewRPCFunc(env.BlockByHash, "hash", true), - "block_results": server.NewRPCFunc(env.BlockResults, "height", true), - "commit": server.NewRPCFunc(env.Commit, "height", true), - "validators": server.NewRPCFunc(env.Validators, "height,page,per_page,request_quorum_info", true), - "tx": server.NewRPCFunc(env.Tx, "hash,prove", true), - "tx_search": server.NewRPCFunc(env.TxSearch, "query,prove,page,per_page,order_by", false), - "block_search": server.NewRPCFunc(env.BlockSearch, "query,page,per_page,order_by", false), + "blockchain": server.NewRPCFunc(env.BlockchainInfo), + "consensus_params": server.NewRPCFunc(env.ConsensusParams), + "block": server.NewRPCFunc(env.Block), + "block_by_hash": server.NewRPCFunc(env.BlockByHash), + "block_results": server.NewRPCFunc(env.BlockResults), + "commit": server.NewRPCFunc(env.Commit), + "validators": server.NewRPCFunc(env.Validators), + "tx": server.NewRPCFunc(env.Tx), + "tx_search": server.NewRPCFunc(env.TxSearch), + "block_search": server.NewRPCFunc(env.BlockSearch), } } @@ -57,7 +58,7 @@ func Handler(rpcConfig *config.RPCConfig, routes core.RoutesMap, logger log.Logg mux := http.NewServeMux() wmLogger := logger.With("protocol", "websocket") - var eventBus types.EventBusSubscriber + var eventBus eventBusUnsubscriber websocketDisconnectFn := func(remoteAddr string) { err := eventBus.UnsubscribeAll(context.Background(), remoteAddr) @@ -65,10 +66,9 @@ func Handler(rpcConfig *config.RPCConfig, routes core.RoutesMap, logger log.Logg wmLogger.Error("Failed to unsubscribe addr from events", "addr", remoteAddr, "err", err) } } - wm := server.NewWebsocketManager(routes, + wm := server.NewWebsocketManager(logger, routes, server.OnDisconnect(websocketDisconnectFn), server.ReadLimit(rpcConfig.MaxBodyBytes)) - wm.SetLogger(wmLogger) mux.HandleFunc("/websocket", wm.WebsocketHandler) server.RegisterRPCFuncs(mux, routes, logger) @@ -89,16 +89,6 @@ func addCORSHandler(rpcConfig *config.RPCConfig, h http.Handler) http.Handler { return h } -type waitSyncCheckerImpl struct{} - -func (waitSyncCheckerImpl) WaitSync() bool { - return false -} - -func (waitSyncCheckerImpl) GetPeerState(peerID types.NodeID) (*consensus.PeerState, bool) { - return nil, false -} - // ListenAndServe listens on the address specified in srv.Addr and handles any // incoming requests over HTTP using the Inspector rpc handler specified on the server. 
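One detail of the hunk above worth isolating: the websocket disconnect path needs only UnsubscribeAll, so the file now declares the one-method eventBusUnsubscriber interface rather than depending on the concrete event-bus type. A sketch of the same narrowing, using a hypothetical helper name:

// makeDisconnectFn builds the callback passed to server.OnDisconnect; any
// value with an UnsubscribeAll method satisfies eventBusUnsubscriber.
func makeDisconnectFn(eb eventBusUnsubscriber, logger log.Logger) func(string) {
	return func(remoteAddr string) {
		// Drop all of this client's event subscriptions when its socket closes.
		if err := eb.UnsubscribeAll(context.Background(), remoteAddr); err != nil {
			logger.Error("Failed to unsubscribe addr from events", "addr", remoteAddr, "err", err)
		}
	}
}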
func (srv *Server) ListenAndServe(ctx context.Context) error { @@ -110,7 +100,8 @@ func (srv *Server) ListenAndServe(ctx context.Context) error { <-ctx.Done() listener.Close() }() - return server.Serve(listener, srv.Handler, srv.Logger, serverRPCConfig(srv.Config)) + + return server.Serve(ctx, listener, srv.Handler, srv.Logger, serverRPCConfig(srv.Config)) } // ListenAndServeTLS listens on the address specified in srv.Addr. ListenAndServeTLS handles @@ -124,7 +115,7 @@ func (srv *Server) ListenAndServeTLS(ctx context.Context, certFile, keyFile stri <-ctx.Done() listener.Close() }() - return server.ServeTLS(listener, srv.Handler, certFile, keyFile, srv.Logger, serverRPCConfig(srv.Config)) + return server.ServeTLS(ctx, listener, srv.Handler, certFile, keyFile, srv.Logger, serverRPCConfig(srv.Config)) } func serverRPCConfig(r *config.RPCConfig) *server.Config { diff --git a/internal/jsontypes/jsontypes.go b/internal/jsontypes/jsontypes.go new file mode 100644 index 0000000000..69405da1b4 --- /dev/null +++ b/internal/jsontypes/jsontypes.go @@ -0,0 +1,121 @@ +// Package jsontypes supports decoding for interface types whose concrete +// implementations need to be stored as JSON. To do this, concrete values are +// packaged in wrapper objects having the form: +// +// { +// "type": "<type-tag>", +// "value": <json-encoding-of-value> +// } +// +// This package provides a registry for type tag strings and functions to +// encode and decode wrapper objects. +package jsontypes + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" +) + +// The Tagged interface must be implemented by a type in order to register it +// with the jsontypes package. The TypeTag method returns a string label that +// is used to distinguish objects of that type. +type Tagged interface { + TypeTag() string +} + +// registry records the mapping from type tags to value types. +var registry = struct { + types map[string]reflect.Type +}{types: make(map[string]reflect.Type)} + +// register adds v to the type registry. It reports an error if the tag +// returned by v is already registered. +func register(v Tagged) error { + tag := v.TypeTag() + if t, ok := registry.types[tag]; ok { + return fmt.Errorf("type tag %q already registered to %v", tag, t) + } + registry.types[tag] = reflect.TypeOf(v) + return nil +} + +// MustRegister adds v to the type registry. It will panic if the tag returned +// by v is already registered. This function is meant for use during program +// initialization. +func MustRegister(v Tagged) { + if err := register(v); err != nil { + panic(err) + } +} + +type wrapper struct { + Type string `json:"type"` + Value json.RawMessage `json:"value"` +} + +// Marshal marshals a JSON wrapper object containing v. If v == nil, Marshal +// returns the JSON "null" value without error. +func Marshal(v Tagged) ([]byte, error) { + if v == nil { + return []byte("null"), nil + } + data, err := json.Marshal(v) + if err != nil { + return nil, err + } + return json.Marshal(wrapper{ + Type: v.TypeTag(), + Value: data, + }) } + +// Unmarshal unmarshals a JSON wrapper object into v. It reports an error if +// the data do not encode a valid wrapper object, if the wrapper's type tag is +// not registered with jsontypes, or if the resulting value is not compatible +// with the type of v. +func Unmarshal(data []byte, v interface{}) error { + // Verify that the target is some kind of pointer.
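+	// Unmarshal stores the decoded value through this pointer (see the
+	// target.Elem().Set calls below), so a non-pointer or nil target would
+	// leave the result with nowhere to go.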
+ target := reflect.ValueOf(v) + if target.Kind() != reflect.Ptr { + return fmt.Errorf("target %T is not a pointer", v) + } else if target.IsZero() { + return fmt.Errorf("target is a nil %T", v) + } + baseType := target.Type().Elem() + if isNull(data) { + target.Elem().Set(reflect.Zero(baseType)) + return nil + } + + var w wrapper + dec := json.NewDecoder(bytes.NewReader(data)) + dec.DisallowUnknownFields() + if err := dec.Decode(&w); err != nil { + return fmt.Errorf("invalid type wrapper: %w", err) + } + typ, ok := registry.types[w.Type] + if !ok { + return fmt.Errorf("unknown type tag for %T: %q", v, w.Type) + } + if typ.AssignableTo(baseType) { + // ok: registered type is directly assignable to the target + } else if typ.Kind() == reflect.Ptr && typ.Elem().AssignableTo(baseType) { + typ = typ.Elem() + // ok: registered type is a pointer to a value assignable to the target + } else { + return fmt.Errorf("type %v is not assignable to %v", typ, baseType) + } + obj := reflect.New(typ) // we need a pointer to unmarshal + if err := json.Unmarshal(w.Value, obj.Interface()); err != nil { + return fmt.Errorf("decoding wrapped value: %w", err) + } + target.Elem().Set(obj.Elem()) + return nil +} + +// isNull reports true if data is empty or is the JSON "null" value. +func isNull(data []byte) bool { + return len(data) == 0 || bytes.Equal(data, []byte("null")) +} diff --git a/internal/jsontypes/jsontypes_test.go b/internal/jsontypes/jsontypes_test.go new file mode 100644 index 0000000000..223e25c343 --- /dev/null +++ b/internal/jsontypes/jsontypes_test.go @@ -0,0 +1,188 @@ +package jsontypes_test + +import ( + "testing" + + "github.com/tendermint/tendermint/internal/jsontypes" +) + +type testPtrType struct { + Field string `json:"field"` +} + +func (*testPtrType) TypeTag() string { return "test/PointerType" } +func (t *testPtrType) Value() string { return t.Field } + +type testBareType struct { + Field string `json:"field"` +} + +func (testBareType) TypeTag() string { return "test/BareType" } +func (t testBareType) Value() string { return t.Field } + +type fielder interface{ Value() string } + +func TestRoundTrip(t *testing.T) { + t.Run("MustRegister_ok", func(t *testing.T) { + defer func() { + if x := recover(); x != nil { + t.Fatalf("Registration panicked: %v", x) + } + }() + jsontypes.MustRegister((*testPtrType)(nil)) + jsontypes.MustRegister(testBareType{}) + }) + + t.Run("MustRegister_fail", func(t *testing.T) { + defer func() { + if x := recover(); x != nil { + t.Logf("Got expected panic: %v", x) + } + }() + jsontypes.MustRegister((*testPtrType)(nil)) + t.Fatal("Registration should not have succeeded") + }) + + t.Run("Marshal_nilTagged", func(t *testing.T) { + bits, err := jsontypes.Marshal(nil) + if err != nil { + t.Fatalf("Marshal failed: %v", err) + } + if got := string(bits); got != "null" { + t.Errorf("Marshal nil: got %#q, want null", got) + } + }) + + t.Run("RoundTrip_pointerType", func(t *testing.T) { + const wantEncoded = `{"type":"test/PointerType","value":{"field":"hello"}}` + + obj := testPtrType{Field: "hello"} + bits, err := jsontypes.Marshal(&obj) + if err != nil { + t.Fatalf("Marshal %T failed: %v", obj, err) + } + if got := string(bits); got != wantEncoded { + t.Errorf("Marshal %T: got %#q, want %#q", obj, got, wantEncoded) + } + + var cmp testPtrType + if err := jsontypes.Unmarshal(bits, &cmp); err != nil { + t.Errorf("Unmarshal %#q failed: %v", string(bits), err) + } + if obj != cmp { + t.Errorf("Unmarshal %#q: got %+v, want %+v", string(bits), cmp, obj) + } + }) + + 
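+	// A sketch of the same round trip driven through the Tagged interface
+	// rather than a concrete type; testBareType was registered above, and
+	// Marshal only needs the value's TypeTag.
+	t.Run("Marshal_viaInterface", func(t *testing.T) {
+		var v jsontypes.Tagged = testBareType{Field: "hi"}
+		bits, err := jsontypes.Marshal(v)
+		if err != nil {
+			t.Fatalf("Marshal failed: %v", err)
+		}
+		const want = `{"type":"test/BareType","value":{"field":"hi"}}`
+		if got := string(bits); got != want {
+			t.Errorf("Marshal: got %#q, want %#q", got, want)
+		}
+	})
+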
t.Run("RoundTrip_bareType", func(t *testing.T) { + const wantEncoded = `{"type":"test/BareType","value":{"field":"hello"}}` + + obj := testBareType{Field: "hello"} + bits, err := jsontypes.Marshal(&obj) + if err != nil { + t.Fatalf("Marshal %T failed: %v", obj, err) + } + if got := string(bits); got != wantEncoded { + t.Errorf("Marshal %T: got %#q, want %#q", obj, got, wantEncoded) + } + + var cmp testBareType + if err := jsontypes.Unmarshal(bits, &cmp); err != nil { + t.Errorf("Unmarshal %#q failed: %v", string(bits), err) + } + if obj != cmp { + t.Errorf("Unmarshal %#q: got %+v, want %+v", string(bits), cmp, obj) + } + }) + + t.Run("Unmarshal_nilPointer", func(t *testing.T) { + var obj *testBareType + + // Unmarshaling to a nil pointer target should report an error. + if err := jsontypes.Unmarshal([]byte(`null`), obj); err == nil { + t.Errorf("Unmarshal nil: got %+v, wanted error", obj) + } else { + t.Logf("Unmarshal correctly failed: %v", err) + } + }) + + t.Run("Unmarshal_bareType", func(t *testing.T) { + const want = "foobar" + const input = `{"type":"test/BareType","value":{"field":"` + want + `"}}` + + var obj testBareType + if err := jsontypes.Unmarshal([]byte(input), &obj); err != nil { + t.Fatalf("Unmarshal failed: %v", err) + } + if obj.Field != want { + t.Errorf("Unmarshal result: got %q, want %q", obj.Field, want) + } + }) + + t.Run("Unmarshal_bareType_interface", func(t *testing.T) { + const want = "foobar" + const input = `{"type":"test/BareType","value":{"field":"` + want + `"}}` + + var obj fielder + if err := jsontypes.Unmarshal([]byte(input), &obj); err != nil { + t.Fatalf("Unmarshal failed: %v", err) + } + if got := obj.Value(); got != want { + t.Errorf("Unmarshal result: got %q, want %q", got, want) + } + }) + + t.Run("Unmarshal_pointerType", func(t *testing.T) { + const want = "bazquux" + const input = `{"type":"test/PointerType","value":{"field":"` + want + `"}}` + + var obj testPtrType + if err := jsontypes.Unmarshal([]byte(input), &obj); err != nil { + t.Fatalf("Unmarshal failed: %v", err) + } + if obj.Field != want { + t.Errorf("Unmarshal result: got %q, want %q", obj.Field, want) + } + }) + + t.Run("Unmarshal_pointerType_interface", func(t *testing.T) { + const want = "foobar" + const input = `{"type":"test/PointerType","value":{"field":"` + want + `"}}` + + var obj fielder + if err := jsontypes.Unmarshal([]byte(input), &obj); err != nil { + t.Fatalf("Unmarshal failed: %v", err) + } + if got := obj.Value(); got != want { + t.Errorf("Unmarshal result: got %q, want %q", got, want) + } + }) + + t.Run("Unmarshal_unknownTypeTag", func(t *testing.T) { + const input = `{"type":"test/Nonesuch","value":null}` + + // An unregistered type tag in a valid envelope should report an error. + var obj interface{} + if err := jsontypes.Unmarshal([]byte(input), &obj); err == nil { + t.Errorf("Unmarshal: got %+v, wanted error", obj) + } else { + t.Logf("Unmarshal correctly failed: %v", err) + } + }) + + t.Run("Unmarshal_similarTarget", func(t *testing.T) { + const want = "zootie-zoot-zoot" + const input = `{"type":"test/PointerType","value":{"field":"` + want + `"}}` + + // The target has a compatible (i.e., assignable) shape to the registered + // type. This should work even though it's not the original named type. 
+	var cmp struct { + Field string `json:"field"` + } + if err := jsontypes.Unmarshal([]byte(input), &cmp); err != nil { + t.Errorf("Unmarshal %#q failed: %v", input, err) + } else if cmp.Field != want { + t.Errorf("Unmarshal result: got %q, want %q", cmp.Field, want) + } + }) +} diff --git a/libs/async/async.go b/internal/libs/async/async.go similarity index 100% rename from libs/async/async.go rename to internal/libs/async/async.go diff --git a/libs/async/async_test.go b/internal/libs/async/async_test.go similarity index 100% rename from libs/async/async_test.go rename to internal/libs/async/async_test.go diff --git a/internal/libs/autofile/autofile.go b/internal/libs/autofile/autofile.go index 10cc04a28f..f554228baf 100644 --- a/internal/libs/autofile/autofile.go +++ b/internal/libs/autofile/autofile.go @@ -1,6 +1,9 @@ package autofile import ( + "context" + "errors" + "fmt" "os" "os/signal" "path/filepath" @@ -16,7 +19,7 @@ import ( // Create/Append to ./autofile_test -af, err := OpenAutoFile("autofile_test") +af, err := OpenAutoFile(ctx, "autofile_test") if err != nil { - panic(err) + log.Fatal(err) } // Stream of writes. @@ -29,7 +32,7 @@ for i := 0; i < 60; i++ { // Close the AutoFile err = af.Close() if err != nil { - panic(err) + log.Fatal(err) } */ @@ -38,6 +41,10 @@ const ( autoFilePerms = os.FileMode(0600) ) +// ErrAutoFileClosed is reported when operations attempt to use an autofile +// after it has been closed. +var ErrAutoFileClosed = errors.New("autofile is closed") + // AutoFile automatically closes and re-opens file for writing. The file is // automatically set up to close itself every 1s and upon receiving SIGHUP. // @@ -46,81 +53,98 @@ type AutoFile struct { ID string Path string - closeTicker *time.Ticker - closeTickerStopc chan struct{} // closed when closeTicker is stopped - hupc chan os.Signal + closeTicker *time.Ticker // signals periodic close + cancel func() // cancels the lifecycle context - mtx sync.Mutex - file *os.File + mtx sync.Mutex // guards the fields below + closed bool // true when the autofile is no longer usable + file *os.File // the underlying file (may be nil) } // OpenAutoFile creates an AutoFile in the path (with random ID). If there is // an error, it will be of type *PathError or *ErrPermissionsChanged (if the file's // permissions were changed from the expected 0600). -func OpenAutoFile(path string) (*AutoFile, error) { +func OpenAutoFile(ctx context.Context, path string) (*AutoFile, error) { var err error path, err = filepath.Abs(path) if err != nil { return nil, err } + + ctx, cancel := context.WithCancel(ctx) af := &AutoFile{ - ID: tmrand.Str(12) + ":" + path, - Path: path, - closeTicker: time.NewTicker(autoFileClosePeriod), - closeTickerStopc: make(chan struct{}), + ID: tmrand.Str(12) + ":" + path, + Path: path, + closeTicker: time.NewTicker(autoFileClosePeriod), + cancel: cancel, } if err := af.openFile(); err != nil { af.Close() return nil, err } - // Close file on SIGHUP. - af.hupc = make(chan os.Signal, 1) - signal.Notify(af.hupc, syscall.SIGHUP) + // Set up a SIGHUP handler to forcibly flush and close the filehandle. + // This forces the next operation to re-open the underlying path. + hupc := make(chan os.Signal, 1) + signal.Notify(hupc, syscall.SIGHUP) go func() { - for range af.hupc { - _ = af.closeFile() + defer close(hupc) + for { + select { + case <-hupc: + _ = af.closeFile() + case <-ctx.Done(): + return + } } }() - go af.closeFileRoutine() + go af.closeFileRoutine(ctx) return af, nil } -// Close shuts down the closing goroutine, SIGHUP handler and closes the -// AutoFile.
+// Close shuts down the service goroutine and marks af as invalid. Operations +// on af after Close will report an error. func (af *AutoFile) Close() error { - af.closeTicker.Stop() - close(af.closeTickerStopc) - if af.hupc != nil { - close(af.hupc) - } - return af.closeFile() + return af.withLock(func() error { + af.cancel() // signal the close service to stop + af.closed = true // mark the file as invalid + return af.unsyncCloseFile() + }) } -func (af *AutoFile) closeFileRoutine() { +func (af *AutoFile) closeFileRoutine(ctx context.Context) { for { select { + case <-ctx.Done(): + _ = af.Close() + return case <-af.closeTicker.C: _ = af.closeFile() - case <-af.closeTickerStopc: - return } } } func (af *AutoFile) closeFile() (err error) { - af.mtx.Lock() - defer af.mtx.Unlock() + return af.withLock(af.unsyncCloseFile) +} - file := af.file - if file == nil { - return nil +// unsyncCloseFile closes the underlying filehandle if one is open, and reports +// any error it returns. The caller must hold af.mtx exclusively. +func (af *AutoFile) unsyncCloseFile() error { + if fp := af.file; fp != nil { + af.file = nil + return fp.Close() } + return nil +} - af.file = nil - return file.Close() +// withLock runs f while holding af.mtx, and reports any error it returns. +func (af *AutoFile) withLock(f func() error) error { + af.mtx.Lock() + defer af.mtx.Unlock() + return f() } // Write writes len(b) bytes to the AutoFile. It returns the number of bytes @@ -130,6 +154,9 @@ func (af *AutoFile) closeFile() (err error) { func (af *AutoFile) Write(b []byte) (n int, err error) { af.mtx.Lock() defer af.mtx.Unlock() + if af.closed { + return 0, fmt.Errorf("write: %w", ErrAutoFileClosed) + } if af.file == nil { if err = af.openFile(); err != nil { @@ -144,31 +171,25 @@ func (af *AutoFile) Write(b []byte) (n int, err error) { // Sync commits the current contents of the file to stable storage. Typically, // this means flushing the file system's in-memory copy of recently written // data to disk. -// Opens AutoFile if needed. func (af *AutoFile) Sync() error { - af.mtx.Lock() - defer af.mtx.Unlock() - - if af.file == nil { - if err := af.openFile(); err != nil { - return err + return af.withLock(func() error { + if af.closed { + return fmt.Errorf("sync: %w", ErrAutoFileClosed) + } else if af.file == nil { + return nil // nothing to sync } - } - return af.file.Sync() + return af.file.Sync() + }) } +// openFile unconditionally replaces af.file with a new filehandle on the path. +// The caller must hold af.mtx exclusively. 
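+// openFile is invoked lazily: Write and Size re-open the path when af.file
+// is nil, i.e. after the close ticker or the SIGHUP handler has closed the
+// previous filehandle.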
func (af *AutoFile) openFile() error { file, err := os.OpenFile(af.Path, os.O_RDWR|os.O_CREATE|os.O_APPEND, autoFilePerms) if err != nil { return err } - // fileInfo, err := file.Stat() - // if err != nil { - // return err - // } - // if fileInfo.Mode() != autoFilePerms { - // return errors.NewErrPermissionsChanged(file.Name(), fileInfo.Mode(), autoFilePerms) - // } + af.file = file return nil } @@ -179,6 +200,9 @@ func (af *AutoFile) openFile() error { func (af *AutoFile) Size() (int64, error) { af.mtx.Lock() defer af.mtx.Unlock() + if af.closed { + return 0, fmt.Errorf("size: %w", ErrAutoFileClosed) + } if af.file == nil { if err := af.openFile(); err != nil { diff --git a/internal/libs/autofile/autofile_test.go b/internal/libs/autofile/autofile_test.go index c2442a56f9..9dbba276ac 100644 --- a/internal/libs/autofile/autofile_test.go +++ b/internal/libs/autofile/autofile_test.go @@ -1,7 +1,7 @@ package autofile import ( - "io/ioutil" + "context" "os" "path/filepath" "syscall" @@ -13,6 +13,9 @@ import ( ) func TestSIGHUP(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + origDir, err := os.Getwd() require.NoError(t, err) t.Cleanup(func() { @@ -22,16 +25,12 @@ func TestSIGHUP(t *testing.T) { }) // First, create a temporary directory and move into it - dir, err := ioutil.TempDir("", "sighup_test") - require.NoError(t, err) - t.Cleanup(func() { - _ = os.RemoveAll(dir) - }) + dir := t.TempDir() require.NoError(t, os.Chdir(dir)) // Create an AutoFile in the temporary directory name := "sighup_test" - af, err := OpenAutoFile(name) + af, err := OpenAutoFile(ctx, name) require.NoError(t, err) require.True(t, filepath.IsAbs(af.Path)) @@ -45,9 +44,7 @@ func TestSIGHUP(t *testing.T) { require.NoError(t, os.Rename(name, name+"_old")) // Move into a different temporary directory - otherDir, err := ioutil.TempDir("", "sighup_test_other") - require.NoError(t, err) - t.Cleanup(func() { os.RemoveAll(otherDir) }) + otherDir := t.TempDir() require.NoError(t, os.Chdir(otherDir)) // Send SIGHUP to self. @@ -72,7 +69,7 @@ func TestSIGHUP(t *testing.T) { } // The current directory should be empty - files, err := ioutil.ReadDir(".") + files, err := os.ReadDir(".") require.NoError(t, err) assert.Empty(t, files) } @@ -80,7 +77,7 @@ func TestSIGHUP(t *testing.T) { // // Manually modify file permissions, close, and reopen using autofile: // // We expect the file permissions to be changed back to the intended perms. // func TestOpenAutoFilePerms(t *testing.T) { -// file, err := ioutil.TempFile("", "permission_test") +// file, err := os.CreateTemp("", "permission_test") // require.NoError(t, err) // err = file.Close() // require.NoError(t, err) @@ -105,13 +102,16 @@ func TestSIGHUP(t *testing.T) { // } func TestAutoFileSize(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // First, create an AutoFile writing to a tempfile dir - f, err := ioutil.TempFile("", "sighup_test") + f, err := os.CreateTemp(t.TempDir(), "sighup_test") require.NoError(t, err) require.NoError(t, f.Close()) // Here is the actual AutoFile. - af, err := OpenAutoFile(f.Name()) + af, err := OpenAutoFile(ctx, f.Name()) require.NoError(t, err) // 1. Empty file @@ -128,7 +128,7 @@ func TestAutoFileSize(t *testing.T) { require.NoError(t, err) // 3. 
Not existing file - require.NoError(t, af.Close()) + require.NoError(t, af.closeFile()) require.NoError(t, os.Remove(f.Name())) size, err = af.Size() require.EqualValues(t, 0, size, "Expected a new file to be empty") @@ -139,7 +139,7 @@ func TestAutoFileSize(t *testing.T) { } func mustReadFile(t *testing.T, filePath string) []byte { - fileBytes, err := ioutil.ReadFile(filePath) + fileBytes, err := os.ReadFile(filePath) require.NoError(t, err) return fileBytes diff --git a/internal/libs/autofile/cmd/logjack.go b/internal/libs/autofile/cmd/logjack.go index 1aa8b6a113..c3c4665038 100644 --- a/internal/libs/autofile/cmd/logjack.go +++ b/internal/libs/autofile/cmd/logjack.go @@ -1,70 +1,71 @@ package main import ( + "context" "flag" "fmt" "io" + stdlog "log" "os" + "os/signal" "strconv" "strings" + "syscall" auto "github.com/tendermint/tendermint/internal/libs/autofile" - tmos "github.com/tendermint/tendermint/libs/os" + "github.com/tendermint/tendermint/libs/log" ) const Version = "0.0.1" const readBufferSize = 1024 // 1KB at a time // Parse command-line options -func parseFlags() (headPath string, chopSize int64, limitSize int64, version bool) { +func parseFlags() (headPath string, chopSize int64, limitSize int64, version bool, err error) { var flagSet = flag.NewFlagSet(os.Args[0], flag.ExitOnError) var chopSizeStr, limitSizeStr string flagSet.StringVar(&headPath, "head", "logjack.out", "Destination (head) file.") flagSet.StringVar(&chopSizeStr, "chop", "100M", "Move file if greater than this") flagSet.StringVar(&limitSizeStr, "limit", "10G", "Only keep this much (for each specified file). Remove old files.") flagSet.BoolVar(&version, "version", false, "Version") - if err := flagSet.Parse(os.Args[1:]); err != nil { - fmt.Printf("err parsing flag: %v\n", err) - os.Exit(1) - } - chopSize = parseBytesize(chopSizeStr) - limitSize = parseBytesize(limitSizeStr) - return -} -type fmtLogger struct{} + if err = flagSet.Parse(os.Args[1:]); err != nil { + return + } -func (fmtLogger) Info(msg string, keyvals ...interface{}) { - strs := make([]string, len(keyvals)) - for i, kv := range keyvals { - strs[i] = fmt.Sprintf("%v", kv) + chopSize, err = parseByteSize(chopSizeStr) + if err != nil { + return + } + limitSize, err = parseByteSize(limitSizeStr) + if err != nil { + return } - fmt.Printf("%s %s\n", msg, strings.Join(strs, ",")) + return } func main() { - // Stop upon receiving SIGTERM or CTRL-C. 
-	tmos.TrapSignal(fmtLogger{}, func() { - fmt.Println("logjack shutting down") - }) + ctx, cancel := signal.NotifyContext(context.Background(), syscall.SIGTERM) + defer cancel() + defer func() { fmt.Println("logjack shutting down") }() // Read options - headPath, chopSize, limitSize, version := parseFlags() + headPath, chopSize, limitSize, version, err := parseFlags() + if err != nil { + stdlog.Fatalf("problem parsing arguments: %q", err.Error()) + } + if version { - fmt.Printf("logjack version %v\n", Version) - return + stdlog.Printf("logjack version %s", Version) + return } // Open Group - group, err := auto.OpenGroup(headPath, auto.GroupHeadSizeLimit(chopSize), auto.GroupTotalSizeLimit(limitSize)) + group, err := auto.OpenGroup(ctx, log.NewNopLogger(), headPath, auto.GroupHeadSizeLimit(chopSize), auto.GroupTotalSizeLimit(limitSize)) if err != nil { - fmt.Printf("logjack couldn't create output file %v\n", headPath) - os.Exit(1) + stdlog.Fatalf("logjack couldn't create output file %q", headPath) } - if err = group.Start(); err != nil { - fmt.Printf("logjack couldn't start with file %v\n", headPath) - os.Exit(1) + if err = group.Start(ctx); err != nil { + stdlog.Fatalf("logjack couldn't start with file %q", headPath) } // Forever read from stdin and write to AutoFile. @@ -72,30 +73,22 @@ func main() { buf := make([]byte, readBufferSize) for { n, err := os.Stdin.Read(buf) if err != nil { - if err := group.Stop(); err != nil { - fmt.Fprintf(os.Stderr, "logjack stopped with error %v\n", headPath) - os.Exit(1) - } if err == io.EOF { - os.Exit(0) - } else { - fmt.Println("logjack errored") - os.Exit(1) + return } + stdlog.Fatalln("logjack errored:", err.Error()) } _, err = group.Write(buf[:n]) if err != nil { - fmt.Fprintf(os.Stderr, "logjack failed write with error %v\n", headPath) - os.Exit(1) + stdlog.Fatalf("logjack failed to write %q with error: %q", headPath, err.Error()) } if err := group.FlushAndSync(); err != nil { - fmt.Fprintf(os.Stderr, "logjack flushsync fail with error %v\n", headPath) - os.Exit(1) + stdlog.Fatalf("logjack flushsync of %q failed with error: %q", headPath, err.Error()) } } } -func parseBytesize(chopSize string) int64 { +func parseByteSize(chopSize string) (int64, error) { // Handle suffix multiplier var multiplier int64 = 1 if strings.HasSuffix(chopSize, "T") { @@ -118,8 +111,8 @@ // Parse the numeric part chopSizeInt, err := strconv.Atoi(chopSize) if err != nil { - panic(err) + return 0, err } - return int64(chopSizeInt) * multiplier + return int64(chopSizeInt) * multiplier, nil } diff --git a/internal/libs/autofile/group.go b/internal/libs/autofile/group.go index 23f27c59bd..81e16feeaf 100644 --- a/internal/libs/autofile/group.go +++ b/internal/libs/autofile/group.go @@ -2,6 +2,7 @@ package autofile import ( "bufio" + "context" "errors" "fmt" "io" @@ -13,6 +14,7 @@ import ( "sync" "time" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" ) @@ -53,6 +55,7 @@ assuming that marker lines are written occasionally. */ type Group struct { service.BaseService + logger log.Logger ID string Head *AutoFile // The head AutoFile to write to @@ -66,28 +69,24 @@ type Group struct { minIndex int // Includes head maxIndex int // Includes head, where Head will move to - // close this when the processTicks routine is done.
-	// this ensures we can cleanup the dir after calling Stop - // and the routine won't be trying to access it anymore - doneProcessTicks chan struct{} - // TODO: When we start deleting files, we need to start tracking GroupReaders // and their dependencies. } // OpenGroup creates a new Group with head at headPath. It returns an error if // it fails to open the head file. -func OpenGroup(headPath string, groupOptions ...func(*Group)) (*Group, error) { +func OpenGroup(ctx context.Context, logger log.Logger, headPath string, groupOptions ...func(*Group)) (*Group, error) { dir, err := filepath.Abs(filepath.Dir(headPath)) if err != nil { return nil, err } - head, err := OpenAutoFile(headPath) + head, err := OpenAutoFile(ctx, headPath) if err != nil { return nil, err } g := &Group{ + logger: logger, ID: "group:" + head.ID, Head: head, headBuf: bufio.NewWriterSize(head, 4096*10), @@ -97,14 +96,13 @@ func OpenGroup(headPath string, groupOptions ...func(*Group)) (*Group, error) { groupCheckDuration: defaultGroupCheckDuration, minIndex: 0, maxIndex: 0, - doneProcessTicks: make(chan struct{}), } for _, option := range groupOptions { option(g) } - g.BaseService = *service.NewBaseService(nil, "Group", g) + g.BaseService = *service.NewBaseService(logger, "Group", g) gInfo := g.readGroupInfo() g.minIndex = gInfo.MinIndex @@ -135,9 +133,9 @@ func GroupTotalSizeLimit(limit int64) func(*Group) { // OnStart implements service.Service by starting the goroutine that checks file // and group limits. -func (g *Group) OnStart() error { +func (g *Group) OnStart(ctx context.Context) error { g.ticker = time.NewTicker(g.groupCheckDuration) - go g.processTicks() + go g.processTicks(ctx) return nil } @@ -146,25 +144,18 @@ func (g *Group) OnStart() error { func (g *Group) OnStop() { g.ticker.Stop() if err := g.FlushAndSync(); err != nil { - g.Logger.Error("Error flushing to disk", "err", err) + g.logger.Error("error flushing to disk", "err", err) } } -// Wait blocks until all internal goroutines are finished. Supposed to be -// called after Stop. -func (g *Group) Wait() { - // wait for processTicks routine to finish - <-g.doneProcessTicks -} - // Close closes the head file. The group must already be stopped when Close is called. func (g *Group) Close() { if err := g.FlushAndSync(); err != nil { - g.Logger.Error("Error flushing to disk", "err", err) + g.logger.Error("error flushing to disk", "err", err) } g.mtx.Lock() - _ = g.Head.closeFile() + _ = g.Head.Close() g.mtx.Unlock() } @@ -236,38 +227,43 @@ func (g *Group) FlushAndSync() error { return err } -func (g *Group) processTicks() { - defer close(g.doneProcessTicks) +func (g *Group) processTicks(ctx context.Context) { for { select { - case <-g.ticker.C: - g.checkHeadSizeLimit() - g.checkTotalSizeLimit() - case <-g.Quit(): + case <-ctx.Done(): return + case <-g.ticker.C: + g.checkHeadSizeLimit(ctx) + g.checkTotalSizeLimit(ctx) } } } // NOTE: this function is called manually in tests.
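// A head size limit of zero disables automatic rotation entirely; the group
// tests below pass 0 so they can drive rotation by hand via rotateFile.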
-func (g *Group) checkHeadSizeLimit() { +func (g *Group) checkHeadSizeLimit(ctx context.Context) { limit := g.HeadSizeLimit() if limit == 0 { return } size, err := g.Head.Size() if err != nil { - g.Logger.Error("Group's head may grow without bound", "head", g.Head.Path, "err", err) + g.logger.Error("Group's head may grow without bound", "head", g.Head.Path, "err", err) return } if size >= limit { - g.RotateFile() + g.rotateFile(ctx) } } -func (g *Group) checkTotalSizeLimit() { - limit := g.TotalSizeLimit() - if limit == 0 { +func (g *Group) checkTotalSizeLimit(ctx context.Context) { + g.mtx.Lock() + defer g.mtx.Unlock() + + if err := ctx.Err(); err != nil { + return + } + + if g.totalSizeLimit == 0 { return } @@ -275,51 +271,72 @@ totalSize := gInfo.TotalSize for i := 0; i < maxFilesToRemove; i++ { index := gInfo.MinIndex + i - if totalSize < limit { + if totalSize < g.totalSizeLimit { return } if index == gInfo.MaxIndex { // Special degenerate case, just do nothing. - g.Logger.Error("Group's head may grow without bound", "head", g.Head.Path) + g.logger.Error("Group's head may grow without bound", "head", g.Head.Path) return } + + if ctx.Err() != nil { + return + } + pathToRemove := filePathForIndex(g.Head.Path, index, gInfo.MaxIndex) fInfo, err := os.Stat(pathToRemove) if err != nil { - g.Logger.Error("Failed to fetch info for file", "file", pathToRemove) + g.logger.Error("Failed to fetch info for file", "file", pathToRemove) continue } - err = os.Remove(pathToRemove) - if err != nil { - g.Logger.Error("Failed to remove path", "path", pathToRemove) + + if ctx.Err() != nil { + return + } + + if err = os.Remove(pathToRemove); err != nil { + g.logger.Error("Failed to remove path", "path", pathToRemove) return } totalSize -= fInfo.Size() } } -// RotateFile causes group to close the current head and assign it some index. -// Note it does not create a new head. -func (g *Group) RotateFile() { +// rotateFile causes the group to close the current head and assign it the +// next index. It panics on any error other than context cancellation.
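+// The closed head is renamed to an indexed sibling via filePathForIndex
+// (e.g. a head at "myfile" becomes "myfile.000" on the first rotation);
+// the next write re-opens a fresh head at the original path.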
+func (g *Group) rotateFile(ctx context.Context) { g.mtx.Lock() defer g.mtx.Unlock() + if err := ctx.Err(); err != nil { + return + } + headPath := g.Head.Path if err := g.headBuf.Flush(); err != nil { panic(err) } - if err := g.Head.Sync(); err != nil { panic(err) } + err := g.Head.withLock(func() error { + if err := ctx.Err(); err != nil { + return err + } - if err := g.Head.closeFile(); err != nil { - panic(err) - } + if err := g.Head.unsyncCloseFile(); err != nil { + return err + } - indexPath := filePathForIndex(headPath, g.maxIndex, g.maxIndex+1) - if err := os.Rename(headPath, indexPath); err != nil { + indexPath := filePathForIndex(headPath, g.maxIndex, g.maxIndex+1) + return os.Rename(headPath, indexPath) + }) + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return + } + if err != nil { panic(err) } diff --git a/internal/libs/autofile/group_test.go b/internal/libs/autofile/group_test.go index 0981923eb4..4f5e346c2a 100644 --- a/internal/libs/autofile/group_test.go +++ b/internal/libs/autofile/group_test.go @@ -1,8 +1,8 @@ package autofile import ( + "context" "io" - "io/ioutil" "os" "path/filepath" "testing" @@ -10,18 +10,19 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/libs/log" tmos "github.com/tendermint/tendermint/libs/os" tmrand "github.com/tendermint/tendermint/libs/rand" ) -func createTestGroupWithHeadSizeLimit(t *testing.T, headSizeLimit int64) *Group { +func createTestGroupWithHeadSizeLimit(ctx context.Context, t *testing.T, logger log.Logger, headSizeLimit int64) *Group { testID := tmrand.Str(12) testDir := "_test_" + testID err := tmos.EnsureDir(testDir, 0700) require.NoError(t, err, "Error creating dir") headPath := testDir + "/myfile" - g, err := OpenGroup(headPath, GroupHeadSizeLimit(headSizeLimit)) + g, err := OpenGroup(ctx, logger, headPath, GroupHeadSizeLimit(headSizeLimit)) require.NoError(t, err, "Error opening Group") require.NotEqual(t, nil, g, "Failed to create Group") @@ -43,7 +44,12 @@ func assertGroupInfo(t *testing.T, gInfo GroupInfo, minIndex, maxIndex int, tota } func TestCheckHeadSizeLimit(t *testing.T) { - g := createTestGroupWithHeadSizeLimit(t, 1000*1000) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + + g := createTestGroupWithHeadSizeLimit(ctx, t, logger, 1000*1000) // At first, there are no files. assertGroupInfo(t, g.ReadGroupInfo(), 0, 0, 0, 0) @@ -58,7 +64,7 @@ func TestCheckHeadSizeLimit(t *testing.T) { assertGroupInfo(t, g.ReadGroupInfo(), 0, 0, 999000, 999000) // Even calling checkHeadSizeLimit manually won't rotate it. - g.checkHeadSizeLimit() + g.checkHeadSizeLimit(ctx) assertGroupInfo(t, g.ReadGroupInfo(), 0, 0, 999000, 999000) // Write 1000 more bytes. @@ -68,7 +74,7 @@ func TestCheckHeadSizeLimit(t *testing.T) { require.NoError(t, err) // Calling checkHeadSizeLimit this time rolls it. - g.checkHeadSizeLimit() + g.checkHeadSizeLimit(ctx) assertGroupInfo(t, g.ReadGroupInfo(), 0, 1, 1000000, 0) // Write 1000 more bytes. @@ -78,7 +84,7 @@ func TestCheckHeadSizeLimit(t *testing.T) { require.NoError(t, err) // Calling checkHeadSizeLimit does nothing. - g.checkHeadSizeLimit() + g.checkHeadSizeLimit(ctx) assertGroupInfo(t, g.ReadGroupInfo(), 0, 1, 1001000, 1000) // Write 1000 bytes 999 times. @@ -91,7 +97,7 @@ func TestCheckHeadSizeLimit(t *testing.T) { assertGroupInfo(t, g.ReadGroupInfo(), 0, 1, 2000000, 1000000) // Calling checkHeadSizeLimit rolls it again. 
- g.checkHeadSizeLimit() + g.checkHeadSizeLimit(ctx) assertGroupInfo(t, g.ReadGroupInfo(), 0, 2, 2000000, 0) // Write 1000 more bytes. @@ -102,7 +108,7 @@ func TestCheckHeadSizeLimit(t *testing.T) { assertGroupInfo(t, g.ReadGroupInfo(), 0, 2, 2001000, 1000) // Calling checkHeadSizeLimit does nothing. - g.checkHeadSizeLimit() + g.checkHeadSizeLimit(ctx) assertGroupInfo(t, g.ReadGroupInfo(), 0, 2, 2001000, 1000) // Cleanup @@ -110,7 +116,11 @@ func TestCheckHeadSizeLimit(t *testing.T) { } func TestRotateFile(t *testing.T) { - g := createTestGroupWithHeadSizeLimit(t, 0) + logger := log.NewNopLogger() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + g := createTestGroupWithHeadSizeLimit(ctx, t, logger, 0) // Create a different temporary directory and move into it, to make sure // relative paths are resolved at Group creation @@ -122,11 +132,8 @@ func TestRotateFile(t *testing.T) { } }() - dir, err := ioutil.TempDir("", "rotate_test") - require.NoError(t, err) - defer os.RemoveAll(dir) - err = os.Chdir(dir) - require.NoError(t, err) + dir := t.TempDir() + require.NoError(t, os.Chdir(dir)) require.True(t, filepath.IsAbs(g.Head.Path)) require.True(t, filepath.IsAbs(g.Dir)) @@ -140,7 +147,7 @@ func TestRotateFile(t *testing.T) { require.NoError(t, err) err = g.FlushAndSync() require.NoError(t, err) - g.RotateFile() + g.rotateFile(ctx) err = g.WriteLine("Line 4") require.NoError(t, err) err = g.WriteLine("Line 5") @@ -151,21 +158,21 @@ func TestRotateFile(t *testing.T) { require.NoError(t, err) // Read g.Head.Path+"000" - body1, err := ioutil.ReadFile(g.Head.Path + ".000") + body1, err := os.ReadFile(g.Head.Path + ".000") assert.NoError(t, err, "Failed to read first rolled file") if string(body1) != "Line 1\nLine 2\nLine 3\n" { t.Errorf("got unexpected contents: [%v]", string(body1)) } // Read g.Head.Path - body2, err := ioutil.ReadFile(g.Head.Path) + body2, err := os.ReadFile(g.Head.Path) assert.NoError(t, err, "Failed to read first rolled file") if string(body2) != "Line 4\nLine 5\nLine 6\n" { t.Errorf("got unexpected contents: [%v]", string(body2)) } // Make sure there are no files in the current, temporary directory - files, err := ioutil.ReadDir(".") + files, err := os.ReadDir(".") require.NoError(t, err) assert.Empty(t, files) @@ -174,7 +181,12 @@ func TestRotateFile(t *testing.T) { } func TestWrite(t *testing.T) { - g := createTestGroupWithHeadSizeLimit(t, 0) + logger := log.NewNopLogger() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + g := createTestGroupWithHeadSizeLimit(ctx, t, logger, 0) written := []byte("Medusa") _, err := g.Write(written) @@ -197,14 +209,19 @@ func TestWrite(t *testing.T) { // test that Read reads the required amount of bytes from all the files in the // group and returns no error if n == size of the given slice. func TestGroupReaderRead(t *testing.T) { - g := createTestGroupWithHeadSizeLimit(t, 0) + logger := log.NewNopLogger() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + g := createTestGroupWithHeadSizeLimit(ctx, t, logger, 0) professor := []byte("Professor Monster") _, err := g.Write(professor) require.NoError(t, err) err = g.FlushAndSync() require.NoError(t, err) - g.RotateFile() + g.rotateFile(ctx) frankenstein := []byte("Frankenstein's Monster") _, err = g.Write(frankenstein) require.NoError(t, err) @@ -230,14 +247,19 @@ func TestGroupReaderRead(t *testing.T) { // test that Read returns an error if number of bytes read < size of // the given slice. 
Subsequent call should return 0, io.EOF. func TestGroupReaderRead2(t *testing.T) { - g := createTestGroupWithHeadSizeLimit(t, 0) + logger := log.NewNopLogger() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + g := createTestGroupWithHeadSizeLimit(ctx, t, logger, 0) professor := []byte("Professor Monster") _, err := g.Write(professor) require.NoError(t, err) err = g.FlushAndSync() require.NoError(t, err) - g.RotateFile() + g.rotateFile(ctx) frankenstein := []byte("Frankenstein's Monster") frankensteinPart := []byte("Frankenstein") _, err = g.Write(frankensteinPart) // note writing only a part @@ -265,7 +287,11 @@ func TestGroupReaderRead2(t *testing.T) { } func TestMinIndex(t *testing.T) { - g := createTestGroupWithHeadSizeLimit(t, 0) + logger := log.NewNopLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + g := createTestGroupWithHeadSizeLimit(ctx, t, logger, 0) assert.Zero(t, g.MinIndex(), "MinIndex should be zero at the beginning") @@ -274,7 +300,11 @@ func TestMinIndex(t *testing.T) { } func TestMaxIndex(t *testing.T) { - g := createTestGroupWithHeadSizeLimit(t, 0) + logger := log.NewNopLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + g := createTestGroupWithHeadSizeLimit(ctx, t, logger, 0) assert.Zero(t, g.MaxIndex(), "MaxIndex should be zero at the beginning") @@ -282,7 +312,7 @@ func TestMaxIndex(t *testing.T) { require.NoError(t, err) err = g.FlushAndSync() require.NoError(t, err) - g.RotateFile() + g.rotateFile(ctx) assert.Equal(t, 1, g.MaxIndex(), "MaxIndex should point to the last file") diff --git a/internal/libs/clist/bench_test.go b/internal/libs/clist/bench_test.go index 95973cc767..ee5d836a7a 100644 --- a/internal/libs/clist/bench_test.go +++ b/internal/libs/clist/bench_test.go @@ -12,7 +12,7 @@ func BenchmarkDetaching(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { start.removed = true - start.DetachNext() + start.detachNext() start.DetachPrev() tmp := nxt nxt = nxt.Next() diff --git a/internal/libs/clist/clist.go b/internal/libs/clist/clist.go index 6cf5157060..3969c94cce 100644 --- a/internal/libs/clist/clist.go +++ b/internal/libs/clist/clist.go @@ -14,8 +14,6 @@ to ensure garbage collection of removed elements. import ( "fmt" "sync" - - tmsync "github.com/tendermint/tendermint/internal/libs/sync" ) // MaxLength is the max allowed number of elements a linked list is @@ -44,12 +42,9 @@ waiting on NextWait() (since it's just a read operation). */ type CElement struct { - mtx tmsync.RWMutex + mtx sync.RWMutex prev *CElement - prevWg *sync.WaitGroup - prevWaitCh chan struct{} next *CElement - nextWg *sync.WaitGroup nextWaitCh chan struct{} removed bool @@ -62,47 +57,20 @@ func (e *CElement) NextWait() *CElement { for { e.mtx.RLock() next := e.next - nextWg := e.nextWg removed := e.removed + signal := e.nextWaitCh e.mtx.RUnlock() if next != nil || removed { return next } - nextWg.Wait() + <-signal // e.next doesn't necessarily exist here. // That's why we need to continue a for-loop. } } -// Blocking implementation of Prev(). -// May return nil iff CElement was head and got removed. -func (e *CElement) PrevWait() *CElement { - for { - e.mtx.RLock() - prev := e.prev - prevWg := e.prevWg - removed := e.removed - e.mtx.RUnlock() - - if prev != nil || removed { - return prev - } - - prevWg.Wait() - } -} - -// PrevWaitChan can be used to wait until Prev becomes not nil. Once it does, -// channel will be closed. 
-func (e *CElement) PrevWaitChan() <-chan struct{} { - e.mtx.RLock() - defer e.mtx.RUnlock() - - return e.prevWaitCh -} - // NextWaitChan can be used to wait until Next becomes not nil. Once it does, // the channel will be closed. func (e *CElement) NextWaitChan() <-chan struct{} { @@ -135,7 +103,7 @@ func (e *CElement) Removed() bool { return isRemoved } -func (e *CElement) DetachNext() { +func (e *CElement) detachNext() { e.mtx.Lock() if !e.removed { e.mtx.Unlock() @@ -157,7 +125,7 @@ func (e *CElement) DetachPrev() { // NOTE: This function needs to be safe for // concurrent goroutines waiting on nextWaitCh. -func (e *CElement) SetNext(newNext *CElement) { +func (e *CElement) setNext(newNext *CElement) { e.mtx.Lock() oldNext := e.next @@ -168,11 +136,9 @@ func (e *CElement) SetNext(newNext *CElement) { // A closed channel cannot be reused, so install a fresh wait channel // whenever the next pointer transitions back to nil. - e.nextWg = waitGroup1() // WaitGroups are difficult to re-use. e.nextWaitCh = make(chan struct{}) } if oldNext == nil && newNext != nil { - e.nextWg.Done() close(e.nextWaitCh) } e.mtx.Unlock() @@ -180,37 +146,23 @@ // NOTE: This function needs to be safe for // concurrent goroutines reading e.prev. -func (e *CElement) SetPrev(newPrev *CElement) { +func (e *CElement) setPrev(newPrev *CElement) { e.mtx.Lock() + defer e.mtx.Unlock() - oldPrev := e.prev e.prev = newPrev - if oldPrev != nil && newPrev == nil { - e.prevWg = waitGroup1() // WaitGroups are difficult to re-use. - e.prevWaitCh = make(chan struct{}) - } - if oldPrev == nil && newPrev != nil { - e.prevWg.Done() - close(e.prevWaitCh) - } - e.mtx.Unlock() } -func (e *CElement) SetRemoved() { +func (e *CElement) setRemoved() { e.mtx.Lock() + defer e.mtx.Unlock() e.removed = true - // This wakes up anyone waiting in either direction. - if e.prev == nil { - e.prevWg.Done() - close(e.prevWaitCh) - } + // This wakes up anyone waiting. if e.next == nil { - e.nextWg.Done() close(e.nextWaitCh) } - e.mtx.Unlock() } //-------------------------------------------------------------------------------- @@ -220,8 +172,7 @@ func (e *CElement) SetRemoved() { // Operations are goroutine-safe. // Panics if length grows beyond the max. type CList struct { - mtx tmsync.RWMutex - wg *sync.WaitGroup + mtx sync.RWMutex waitCh chan struct{} head *CElement // first element tail *CElement // last element @@ -238,7 +189,6 @@ func newWithMax(maxLength int) *CList { l := new(CList) l.maxLen = maxLength - l.wg = waitGroup1() l.waitCh = make(chan struct{}) l.head = nil l.tail = nil @@ -261,18 +211,18 @@ func (l *CList) Front() *CElement { return head } -func (l *CList) FrontWait() *CElement { +func (l *CList) frontWait() *CElement { // Loop until the head is non-nil else wait and try again for { l.mtx.RLock() head := l.head - wg := l.wg + signal := l.waitCh l.mtx.RUnlock() if head != nil { return head } - wg.Wait() + <-signal // NOTE: If you think l.head exists here, think harder. } } @@ -284,22 +234,6 @@ func (l *CList) Back() *CElement { return back } -func (l *CList) BackWait() *CElement { - for { - l.mtx.RLock() - tail := l.tail - wg := l.wg - l.mtx.RUnlock() - - if tail != nil { - return tail - } - wg.Wait() - // l.tail doesn't necessarily exist here. - // That's why we need to continue a for-loop. - } -} - // WaitChan can be used to wait until Front or Back becomes not nil. Once it // does, the channel will be closed.
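// A typical consumer loop, sketched after the pattern in clist_test.go
// (el starts nil; quit is whatever shutdown signal the caller uses):
//
//	for {
//		if el == nil {
//			select {
//			case <-l.WaitChan():
//				el = l.Front()
//			case <-quit:
//				return
//			}
//			continue
//		}
//		// ... process el.Value ...
//		el = el.Next()
//	}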
func (l *CList) WaitChan() <-chan struct{} { @@ -316,10 +250,7 @@ func (l *CList) PushBack(v interface{}) *CElement { // Construct a new element e := &CElement{ prev: nil, - prevWg: waitGroup1(), - prevWaitCh: make(chan struct{}), next: nil, - nextWg: waitGroup1(), nextWaitCh: make(chan struct{}), removed: false, Value: v, @@ -327,7 +258,6 @@ // Release any waiters on frontWait (the list was empty until now) if l.len == 0 { - l.wg.Done() close(l.waitCh) } if l.len >= l.maxLen { @@ -340,8 +270,8 @@ l.head = e l.tail = e } else { - e.SetPrev(l.tail) // We must init e first. - l.tail.SetNext(e) // This will make e accessible. + e.setPrev(l.tail) // We must init e first. + l.tail.setNext(e) // This will make e accessible. l.tail = e // Update the list. } l.mtx.Unlock() @@ -352,26 +282,23 @@ // NOTE: As per the contract of CList, removed elements cannot be added back. func (l *CList) Remove(e *CElement) interface{} { l.mtx.Lock() + defer l.mtx.Unlock() prev := e.Prev() next := e.Next() if l.head == nil || l.tail == nil { - l.mtx.Unlock() panic("Remove(e) on empty CList") } if prev == nil && l.head != e { - l.mtx.Unlock() panic("Remove(e) with false head") } if next == nil && l.tail != e { - l.mtx.Unlock() panic("Remove(e) with false tail") } // If we're removing the only item, make future frontWait calls block. if l.len == 1 { - l.wg = waitGroup1() // WaitGroups are difficult to re-use. l.waitCh = make(chan struct{}) } @@ -382,23 +309,16 @@ if prev == nil { l.head = next } else { - prev.SetNext(next) + prev.setNext(next) } if next == nil { l.tail = prev } else { - next.SetPrev(prev) + next.setPrev(prev) } // Mark e as removed; otherwise waiters will wait forever.
- e.SetRemoved() + e.setRemoved() - l.mtx.Unlock() return e.Value } - -func waitGroup1() (wg *sync.WaitGroup) { - wg = &sync.WaitGroup{} - wg.Add(1) - return -} diff --git a/internal/libs/clist/clist_test.go b/internal/libs/clist/clist_test.go index e5d02c342e..a3fc4bfb96 100644 --- a/internal/libs/clist/clist_test.go +++ b/internal/libs/clist/clist_test.go @@ -101,7 +101,7 @@ func TestGCFifo(t *testing.T) { tickerDoneCh := make(chan struct{}) go func() { defer close(tickerDoneCh) - ticker := time.NewTicker(time.Second) + ticker := time.NewTicker(250 * time.Millisecond) for { select { case <-ticker.C: @@ -166,7 +166,7 @@ func TestGCRandom(t *testing.T) { tickerDoneCh := make(chan struct{}) go func() { defer close(tickerDoneCh) - ticker := time.NewTicker(time.Second) + ticker := time.NewTicker(250 * time.Millisecond) for { select { case <-ticker.C: @@ -221,7 +221,7 @@ func TestScanRightDeleteRandom(t *testing.T) { default: } if el == nil { - el = l.FrontWait() + el = l.frontWait() restartCounter++ } el = el.Next() @@ -285,14 +285,14 @@ func TestWaitChan(t *testing.T) { done := make(chan struct{}) pushed := 0 go func() { + defer close(done) for i := 1; i < 100; i++ { l.PushBack(i) pushed++ - time.Sleep(time.Duration(mrand.Intn(25)) * time.Millisecond) + time.Sleep(time.Duration(mrand.Intn(20)) * time.Millisecond) } // apply a deterministic pause so the counter has time to catch up - time.Sleep(25 * time.Millisecond) - close(done) + time.Sleep(20 * time.Millisecond) }() next := el @@ -308,7 +308,7 @@ FOR_LOOP: } case <-done: break FOR_LOOP - case <-time.After(10 * time.Second): + case <-time.After(2 * time.Second): t.Fatal("max execution time") } } @@ -317,26 +317,6 @@ FOR_LOOP: t.Fatalf("number of pushed items (%d) not equal to number of seen items (%d)", pushed, seen) } - // 4) test iterating backwards (PrevWaitChan and Prev) - prev := next - seen = 0 -FOR_LOOP2: - for { - select { - case <-prev.PrevWaitChan(): - prev = prev.Prev() - seen++ - if prev == nil { - t.Fatal("expected PrevWaitChan to block forever on nil when reached first elem") - } - case <-time.After(3 * time.Second): - break FOR_LOOP2 - } - } - - if pushed != seen { - t.Fatalf("number of pushed items (%d) not equal to number of seen items (%d)", pushed, seen) - } } func TestRemoved(t *testing.T) { diff --git a/internal/libs/fail/fail.go b/internal/libs/fail/fail.go deleted file mode 100644 index 03a2ca6682..0000000000 --- a/internal/libs/fail/fail.go +++ /dev/null @@ -1,40 +0,0 @@ -package fail - -import ( - "fmt" - "os" - "strconv" -) - -func envSet() int { - callIndexToFailS := os.Getenv("FAIL_TEST_INDEX") - - if callIndexToFailS == "" { - return -1 - } - - var err error - callIndexToFail, err := strconv.Atoi(callIndexToFailS) - if err != nil { - return -1 - } - - return callIndexToFail -} - -// Fail when FAIL_TEST_INDEX == callIndex -var callIndex int // indexes Fail calls - -func Fail() { - callIndexToFail := envSet() - if callIndexToFail < 0 { - return - } - - if callIndex == callIndexToFail { - fmt.Printf("*** fail-test %d ***\n", callIndex) - os.Exit(1) - } - - callIndex++ -} diff --git a/internal/libs/flowrate/README.md b/internal/libs/flowrate/README.md deleted file mode 100644 index caed79aa33..0000000000 --- a/internal/libs/flowrate/README.md +++ /dev/null @@ -1,10 +0,0 @@ -Data Flow Rate Control -====================== - -To download and install this package run: - -go get github.com/mxk/go-flowrate/flowrate - -The documentation is available at: - - diff --git a/internal/libs/flowrate/flowrate.go 
b/internal/libs/flowrate/flowrate.go index 522c46cc73..aaa54a22cc 100644 --- a/internal/libs/flowrate/flowrate.go +++ b/internal/libs/flowrate/flowrate.go @@ -8,18 +8,18 @@ package flowrate import ( "math" + "sync" "time" - - tmsync "github.com/tendermint/tendermint/internal/libs/sync" ) // Monitor monitors and limits the transfer rate of a data stream. type Monitor struct { - mu tmsync.Mutex // Mutex guarding access to all internal fields - active bool // Flag indicating an active transfer - start time.Duration // Transfer start time (clock() value) - bytes int64 // Total number of bytes transferred - samples int64 // Total number of samples taken + mu sync.Mutex // Mutex guarding access to all internal fields + active bool // Flag indicating an active transfer + start time.Duration // Transfer start time (clock() value) + pStartAt time.Time // time of process start + bytes int64 // Total number of bytes transferred + samples int64 // Total number of samples taken rSample float64 // Most recent transfer rate sample (bytes per second) rEMA float64 // Exponential moving average of rSample @@ -46,21 +46,22 @@ type Monitor struct { // // The default values for sampleRate and windowSize (if <= 0) are 100ms and 1s, // respectively. -func New(sampleRate, windowSize time.Duration) *Monitor { +func New(startAt time.Time, sampleRate, windowSize time.Duration) *Monitor { if sampleRate = clockRound(sampleRate); sampleRate <= 0 { sampleRate = 5 * clockRate } if windowSize <= 0 { windowSize = 1 * time.Second } - now := clock() + now := clock(startAt) return &Monitor{ - active: true, - start: now, - rWindow: windowSize.Seconds(), - sLast: now, - sRate: sampleRate, - tLast: now, + active: true, + start: now, + rWindow: windowSize.Seconds(), + sLast: now, + sRate: sampleRate, + tLast: now, + pStartAt: startAt, } } @@ -130,7 +131,7 @@ func (m *Monitor) Status() Status { now := m.update(0) s := Status{ Active: m.active, - Start: clockToTime(m.start), + Start: m.pStartAt.Add(m.start), Duration: m.sLast - m.start, Idle: now - m.tLast, Bytes: m.bytes, @@ -223,7 +224,7 @@ func (m *Monitor) update(n int) (now time.Duration) { if !m.active { return } - if now = clock(); n > 0 { + if now = clock(m.pStartAt); n > 0 { m.tLast = now } m.sBytes += int64(n) @@ -274,3 +275,15 @@ func (m *Monitor) waitNextSample(now time.Duration) time.Duration { } return now } + +// CurrentTransferRate returns the current transfer rate +func (m *Monitor) CurrentTransferRate() int64 { + m.mu.Lock() + defer m.mu.Unlock() + + if m.sLast > m.start && m.active { + return round(m.rEMA) + } + + return 0 +} diff --git a/internal/libs/flowrate/io.go b/internal/libs/flowrate/io.go deleted file mode 100644 index fbe0909725..0000000000 --- a/internal/libs/flowrate/io.go +++ /dev/null @@ -1,133 +0,0 @@ -// -// Written by Maxim Khitrov (November 2012) -// - -package flowrate - -import ( - "errors" - "io" -) - -// ErrLimit is returned by the Writer when a non-blocking write is short due to -// the transfer rate limit. -var ErrLimit = errors.New("flowrate: flow rate limit exceeded") - -// Limiter is implemented by the Reader and Writer to provide a consistent -// interface for monitoring and controlling data transfer. -type Limiter interface { - Done() int64 - Status() Status - SetTransferSize(bytes int64) - SetLimit(new int64) (old int64) - SetBlocking(new bool) (old bool) -} - -// Reader implements io.ReadCloser with a restriction on the rate of data -// transfer. 
-type Reader struct { - io.Reader // Data source - *Monitor // Flow control monitor - - limit int64 // Rate limit in bytes per second (unlimited when <= 0) - block bool // What to do when no new bytes can be read due to the limit -} - -// NewReader restricts all Read operations on r to limit bytes per second. -func NewReader(r io.Reader, limit int64) *Reader { - return &Reader{r, New(0, 0), limit, true} -} - -// Read reads up to len(p) bytes into p without exceeding the current transfer -// rate limit. It returns (0, nil) immediately if r is non-blocking and no new -// bytes can be read at this time. -func (r *Reader) Read(p []byte) (n int, err error) { - p = p[:r.Limit(len(p), r.limit, r.block)] - if len(p) > 0 { - n, err = r.IO(r.Reader.Read(p)) - } - return -} - -// SetLimit changes the transfer rate limit to new bytes per second and returns -// the previous setting. -func (r *Reader) SetLimit(new int64) (old int64) { - old, r.limit = r.limit, new - return -} - -// SetBlocking changes the blocking behavior and returns the previous setting. A -// Read call on a non-blocking reader returns immediately if no additional bytes -// may be read at this time due to the rate limit. -func (r *Reader) SetBlocking(new bool) (old bool) { - old, r.block = r.block, new - return -} - -// Close closes the underlying reader if it implements the io.Closer interface. -func (r *Reader) Close() error { - defer r.Done() - if c, ok := r.Reader.(io.Closer); ok { - return c.Close() - } - return nil -} - -// Writer implements io.WriteCloser with a restriction on the rate of data -// transfer. -type Writer struct { - io.Writer // Data destination - *Monitor // Flow control monitor - - limit int64 // Rate limit in bytes per second (unlimited when <= 0) - block bool // What to do when no new bytes can be written due to the limit -} - -// NewWriter restricts all Write operations on w to limit bytes per second. The -// transfer rate and the default blocking behavior (true) can be changed -// directly on the returned *Writer. -func NewWriter(w io.Writer, limit int64) *Writer { - return &Writer{w, New(0, 0), limit, true} -} - -// Write writes len(p) bytes from p to the underlying data stream without -// exceeding the current transfer rate limit. It returns (n, ErrLimit) if w is -// non-blocking and no additional bytes can be written at this time. -func (w *Writer) Write(p []byte) (n int, err error) { - var c int - for len(p) > 0 && err == nil { - s := p[:w.Limit(len(p), w.limit, w.block)] - if len(s) > 0 { - c, err = w.IO(w.Writer.Write(s)) - } else { - return n, ErrLimit - } - p = p[c:] - n += c - } - return -} - -// SetLimit changes the transfer rate limit to new bytes per second and returns -// the previous setting. -func (w *Writer) SetLimit(new int64) (old int64) { - old, w.limit = w.limit, new - return -} - -// SetBlocking changes the blocking behavior and returns the previous setting. A -// Write call on a non-blocking writer returns as soon as no additional bytes -// may be written at this time due to the rate limit. -func (w *Writer) SetBlocking(new bool) (old bool) { - old, w.block = w.block, new - return -} - -// Close closes the underlying writer if it implements the io.Closer interface. 
-func (w *Writer) Close() error { - defer w.Done() - if c, ok := w.Writer.(io.Closer); ok { - return c.Close() - } - return nil -} diff --git a/internal/libs/flowrate/io_test.go b/internal/libs/flowrate/io_test.go deleted file mode 100644 index 4d7de417e4..0000000000 --- a/internal/libs/flowrate/io_test.go +++ /dev/null @@ -1,197 +0,0 @@ -// -// Written by Maxim Khitrov (November 2012) -// - -package flowrate - -import ( - "bytes" - "testing" - "time" -) - -const ( - _50ms = 50 * time.Millisecond - _100ms = 100 * time.Millisecond - _200ms = 200 * time.Millisecond - _300ms = 300 * time.Millisecond - _400ms = 400 * time.Millisecond - _500ms = 500 * time.Millisecond -) - -func nextStatus(m *Monitor) Status { - samples := m.samples - for i := 0; i < 30; i++ { - if s := m.Status(); s.Samples != samples { - return s - } - time.Sleep(5 * time.Millisecond) - } - return m.Status() -} - -func TestReader(t *testing.T) { - in := make([]byte, 100) - for i := range in { - in[i] = byte(i) - } - b := make([]byte, 100) - r := NewReader(bytes.NewReader(in), 100) - start := time.Now() - - // Make sure r implements Limiter - _ = Limiter(r) - - // 1st read of 10 bytes is performed immediately - if n, err := r.Read(b); n != 10 || err != nil { - t.Fatalf("r.Read(b) expected 10 (); got %v (%v)", n, err) - } else if rt := time.Since(start); rt > _50ms { - t.Fatalf("r.Read(b) took too long (%v)", rt) - } - - // No new Reads allowed in the current sample - r.SetBlocking(false) - if n, err := r.Read(b); n != 0 || err != nil { - t.Fatalf("r.Read(b) expected 0 (); got %v (%v)", n, err) - } else if rt := time.Since(start); rt > _50ms { - t.Fatalf("r.Read(b) took too long (%v)", rt) - } - - status := [6]Status{0: r.Status()} // No samples in the first status - - // 2nd read of 10 bytes blocks until the next sample - r.SetBlocking(true) - if n, err := r.Read(b[10:]); n != 10 || err != nil { - t.Fatalf("r.Read(b[10:]) expected 10 (); got %v (%v)", n, err) - } else if rt := time.Since(start); rt < _100ms { - t.Fatalf("r.Read(b[10:]) returned ahead of time (%v)", rt) - } - - status[1] = r.Status() // 1st sample - status[2] = nextStatus(r.Monitor) // 2nd sample - status[3] = nextStatus(r.Monitor) // No activity for the 3rd sample - - if n := r.Done(); n != 20 { - t.Fatalf("r.Done() expected 20; got %v", n) - } - - status[4] = r.Status() - status[5] = nextStatus(r.Monitor) // Timeout - start = status[0].Start - - // Active, Bytes, Samples, InstRate, CurRate, AvgRate, PeakRate, BytesRem, Start, Duration, Idle, TimeRem, Progress - want := []Status{ - {start, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, true}, - {start, 10, 1, 100, 100, 100, 100, 0, _100ms, 0, 0, 0, true}, - {start, 20, 2, 100, 100, 100, 100, 0, _200ms, _100ms, 0, 0, true}, - {start, 20, 3, 0, 90, 67, 100, 0, _300ms, _200ms, 0, 0, true}, - {start, 20, 3, 0, 0, 67, 100, 0, _300ms, 0, 0, 0, false}, - {start, 20, 3, 0, 0, 67, 100, 0, _300ms, 0, 0, 0, false}, - } - for i, s := range status { - s := s - if !statusesAreEqual(&s, &want[i]) { - t.Errorf("r.Status(%v)\nexpected: %v\ngot : %v", i, want[i], s) - } - } - if !bytes.Equal(b[:20], in[:20]) { - t.Errorf("r.Read() input doesn't match output") - } -} - -func TestWriter(t *testing.T) { - b := make([]byte, 100) - for i := range b { - b[i] = byte(i) - } - w := NewWriter(&bytes.Buffer{}, 200) - start := time.Now() - - // Make sure w implements Limiter - _ = Limiter(w) - - // Non-blocking 20-byte write for the first sample returns ErrLimit - w.SetBlocking(false) - if n, err := w.Write(b); n != 20 || err != ErrLimit { - 
t.Fatalf("w.Write(b) expected 20 (ErrLimit); got %v (%v)", n, err) - } else if rt := time.Since(start); rt > _50ms { - t.Fatalf("w.Write(b) took too long (%v)", rt) - } - - // Blocking 80-byte write - w.SetBlocking(true) - if n, err := w.Write(b[20:]); n != 80 || err != nil { - t.Fatalf("w.Write(b[20:]) expected 80 (); got %v (%v)", n, err) - } else if rt := time.Since(start); rt < _300ms { - // Explanation for `rt < _300ms` (as opposed to `< _400ms`) - // - // |<-- start | | - // epochs: -----0ms|---100ms|---200ms|---300ms|---400ms - // sends: 20|20 |20 |20 |20# - // - // NOTE: The '#' symbol can thus happen before 400ms is up. - // Thus, we can only panic if rt < _300ms. - t.Fatalf("w.Write(b[20:]) returned ahead of time (%v)", rt) - } - - w.SetTransferSize(100) - status := []Status{w.Status(), nextStatus(w.Monitor)} - start = status[0].Start - - // Active, Bytes, Samples, InstRate, CurRate, AvgRate, PeakRate, BytesRem, Start, Duration, Idle, TimeRem, Progress - want := []Status{ - {start, 80, 4, 200, 200, 200, 200, 20, _400ms, 0, _100ms, 80000, true}, - {start, 100, 5, 200, 200, 200, 200, 0, _500ms, _100ms, 0, 100000, true}, - } - - for i, s := range status { - s := s - if !statusesAreEqual(&s, &want[i]) { - t.Errorf("w.Status(%v)\nexpected: %v\ngot : %v\n", i, want[i], s) - } - } - if !bytes.Equal(b, w.Writer.(*bytes.Buffer).Bytes()) { - t.Errorf("w.Write() input doesn't match output") - } -} - -const maxDeviationForDuration = 50 * time.Millisecond -const maxDeviationForRate int64 = 50 - -// statusesAreEqual returns true if s1 is equal to s2. Equality here means -// general equality of fields except for the duration and rates, which can -// drift due to unpredictable delays (e.g. thread wakes up 25ms after -// `time.Sleep` has ended). -func statusesAreEqual(s1 *Status, s2 *Status) bool { - if s1.Active == s2.Active && - s1.Start == s2.Start && - durationsAreEqual(s1.Duration, s2.Duration, maxDeviationForDuration) && - s1.Idle == s2.Idle && - s1.Bytes == s2.Bytes && - s1.Samples == s2.Samples && - ratesAreEqual(s1.InstRate, s2.InstRate, maxDeviationForRate) && - ratesAreEqual(s1.CurRate, s2.CurRate, maxDeviationForRate) && - ratesAreEqual(s1.AvgRate, s2.AvgRate, maxDeviationForRate) && - ratesAreEqual(s1.PeakRate, s2.PeakRate, maxDeviationForRate) && - s1.BytesRem == s2.BytesRem && - durationsAreEqual(s1.TimeRem, s2.TimeRem, maxDeviationForDuration) && - s1.Progress == s2.Progress { - return true - } - return false -} - -func durationsAreEqual(d1 time.Duration, d2 time.Duration, maxDeviation time.Duration) bool { - return d2-d1 <= maxDeviation -} - -func ratesAreEqual(r1 int64, r2 int64, maxDeviation int64) bool { - sub := r1 - r2 - if sub < 0 { - sub = -sub - } - if sub <= maxDeviation { - return true - } - return false -} diff --git a/internal/libs/flowrate/util.go b/internal/libs/flowrate/util.go index b33ddc7013..ef66f77e53 100644 --- a/internal/libs/flowrate/util.go +++ b/internal/libs/flowrate/util.go @@ -13,18 +13,9 @@ import ( // clockRate is the resolution and precision of clock(). const clockRate = 20 * time.Millisecond -// czero is the process start time rounded down to the nearest clockRate -// increment. -var czero = time.Now().Round(clockRate) - // clock returns a low resolution timestamp relative to the process start time. -func clock() time.Duration { - return time.Now().Round(clockRate).Sub(czero) -} - -// clockToTime converts a clock() timestamp to an absolute time.Time value. 
-func clockToTime(c time.Duration) time.Time { - return czero.Add(c) +func clock(startAt time.Time) time.Duration { + return time.Now().Round(clockRate).Sub(startAt) } // clockRound returns d rounded to the nearest clockRate increment. diff --git a/internal/libs/protoio/io_test.go b/internal/libs/protoio/io_test.go index a84b34c002..4420ad7863 100644 --- a/internal/libs/protoio/io_test.go +++ b/internal/libs/protoio/io_test.go @@ -44,7 +44,8 @@ import ( "github.com/tendermint/tendermint/internal/libs/protoio" ) -func iotest(writer protoio.WriteCloser, reader protoio.ReadCloser) error { +func iotest(t *testing.T, writer protoio.WriteCloser, reader protoio.ReadCloser) error { + t.Helper() varint := make([]byte, binary.MaxVarintLen64) size := 1000 msgs := make([]*test.NinOptNative, size) @@ -94,9 +95,7 @@ func iotest(writer protoio.WriteCloser, reader protoio.ReadCloser) error { } i++ } - if i != size { - panic("not enough messages read") - } + require.Equal(t, size, i, "messages read ≠ messages written") if err := reader.Close(); err != nil { return err } @@ -121,7 +120,7 @@ func TestVarintNormal(t *testing.T) { buf := newBuffer() writer := protoio.NewDelimitedWriter(buf) reader := protoio.NewDelimitedReader(buf, 1024*1024) - err := iotest(writer, reader) + err := iotest(t, writer, reader) require.NoError(t, err) require.True(t, buf.closed, "did not close buffer") } @@ -130,7 +129,7 @@ func TestVarintNoClose(t *testing.T) { buf := bytes.NewBuffer(nil) writer := protoio.NewDelimitedWriter(buf) reader := protoio.NewDelimitedReader(buf, 1024*1024) - err := iotest(writer, reader) + err := iotest(t, writer, reader) require.NoError(t, err) } @@ -139,7 +138,7 @@ func TestVarintMaxSize(t *testing.T) { buf := newBuffer() writer := protoio.NewDelimitedWriter(buf) reader := protoio.NewDelimitedReader(buf, 20) - err := iotest(writer, reader) + err := iotest(t, writer, reader) require.Error(t, err) } diff --git a/internal/libs/protoio/writer_test.go b/internal/libs/protoio/writer_test.go index c3093d7bd7..520bac1f6b 100644 --- a/internal/libs/protoio/writer_test.go +++ b/internal/libs/protoio/writer_test.go @@ -6,25 +6,28 @@ import ( "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/crypto/tmhash" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/internal/libs/protoio" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" ) -func aVote() *types.Vote { +func aVote(t testing.TB) *types.Vote { + t.Helper() + return &types.Vote{ Type: tmproto.SignedMsgType(byte(tmproto.PrevoteType)), Height: 12345, Round: 2, BlockID: types.BlockID{ - Hash: tmhash.Sum([]byte("blockID_hash")), + Hash: crypto.Checksum([]byte("blockID_hash")), PartSetHeader: types.PartSetHeader{ Total: 1000000, - Hash: tmhash.Sum([]byte("blockID_part_set_header_hash")), + Hash: crypto.Checksum([]byte("blockID_part_set_header_hash")), }, }, - ValidatorIndex: 56789, + ValidatorProTxHash: crypto.RandProTxHash(), + ValidatorIndex: 56789, } } @@ -49,14 +52,14 @@ var sink interface{} func BenchmarkMarshalDelimitedWithMarshalTo(b *testing.B) { msgs := []proto.Message{ - aVote().ToProto(), + aVote(b).ToProto(), } benchmarkMarshalDelimited(b, msgs) } func BenchmarkMarshalDelimitedNoMarshalTo(b *testing.B) { msgs := []proto.Message{ - &excludedMarshalTo{aVote().ToProto()}, + &excludedMarshalTo{aVote(b).ToProto()}, } benchmarkMarshalDelimited(b, msgs) } diff --git a/internal/libs/queue/queue.go 
b/internal/libs/queue/queue.go new file mode 100644 index 0000000000..7b4199504b --- /dev/null +++ b/internal/libs/queue/queue.go @@ -0,0 +1,232 @@ +// Package queue implements a dynamic FIFO queue with a fixed upper bound +// and a flexible quota mechanism to handle bursty load. +package queue + +import ( + "context" + "errors" + "sync" +) + +var ( + // ErrQueueFull is returned by the Add method of a queue when the queue has + // reached its hard capacity limit. + ErrQueueFull = errors.New("queue is full") + + // ErrNoCredit is returned by the Add method of a queue when the queue has + // exceeded its soft quota and there is insufficient burst credit. + ErrNoCredit = errors.New("insufficient burst credit") + + // ErrQueueClosed is returned by the Add method of a closed queue, and by + // the Wait method of a closed empty queue. + ErrQueueClosed = errors.New("queue is closed") + + // Sentinel errors reported by the New constructor. + errHardLimit = errors.New("hard limit must be > 0 and ≥ soft quota") + errBurstCredit = errors.New("burst credit must be non-negative") +) + +// A Queue is a limited-capacity FIFO queue of arbitrary data items. +// +// A queue has a soft quota and a hard limit on the number of items that may be +// contained in the queue. Adding items in excess of the hard limit will fail +// unconditionally. +// +// For items in excess of the soft quota, a credit system applies: Each queue +// maintains a burst credit score. Adding an item in excess of the soft quota +// costs 1 unit of burst credit. If there is not enough burst credit, the add +// will fail. +// +// The initial burst credit is assigned when the queue is constructed. Removing +// items from the queue adds additional credit if the resulting queue length is +// less than the current soft quota. Burst credit is capped at the difference +// between the hard limit and the current soft quota. +// +// A Queue is safe for concurrent use by multiple goroutines. +type Queue struct { + mu sync.Mutex // protects the fields below + + softQuota int // adjusted dynamically (see Add, Remove) + hardLimit int // fixed for the lifespan of the queue + queueLen int // number of entries in the queue list + credit float64 // current burst credit + + closed bool + nempty *sync.Cond + back *entry + front *entry + + // The queue is singly-linked. Front points to the sentinel and back points + // to the newest entry. The oldest entry is front.link if it exists. +} + +// New constructs a new empty queue with the specified options. It reports an +// error if any of the option values are invalid. +func New(opts Options) (*Queue, error) { + if opts.HardLimit <= 0 || opts.HardLimit < opts.SoftQuota { + return nil, errHardLimit + } + if opts.BurstCredit < 0 { + return nil, errBurstCredit + } + if opts.SoftQuota <= 0 { + opts.SoftQuota = opts.HardLimit + } + if opts.BurstCredit == 0 { + opts.BurstCredit = float64(opts.SoftQuota) + } + sentinel := new(entry) + q := &Queue{ + softQuota: opts.SoftQuota, + hardLimit: opts.HardLimit, + credit: opts.BurstCredit, + back: sentinel, + front: sentinel, + } + q.nempty = sync.NewCond(&q.mu) + return q, nil +} + +// Add adds item to the back of the queue. It reports an error and does not +// enqueue the item if the queue is full or closed, or if it exceeds its soft +// quota and there is not enough burst credit.
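To make the quota rules concrete, here is a minimal usage sketch; the option values are arbitrary, and because the package lives under internal/ the import only resolves from within the tendermint module:

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/internal/libs/queue"
)

func main() {
	// Soft quota 2, hard limit 4, one unit of initial burst credit.
	q, err := queue.New(queue.Options{SoftQuota: 2, HardLimit: 4, BurstCredit: 1})
	if err != nil {
		panic(err)
	}
	defer q.Close()

	_ = q.Add("a")          // below the soft quota: accepted
	_ = q.Add("b")          // reaches the soft quota: accepted
	_ = q.Add("c")          // over quota: spends the credit, quota rises to 3
	fmt.Println(q.Add("d")) // fails with ErrNoCredit; at the hard limit it would be ErrQueueFull

	// Draining the queue below the (raised) soft quota earns fractional credit back.
	item, ok := q.Remove()
	fmt.Println(item, ok) // a true
}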
+func (q *Queue) Add(item interface{}) error { + q.mu.Lock() + defer q.mu.Unlock() + + if q.closed { + return ErrQueueClosed + } + + if q.queueLen >= q.softQuota { + if q.queueLen == q.hardLimit { + return ErrQueueFull + } else if q.credit < 1 { + return ErrNoCredit + } + + // Successfully exceeding the soft quota deducts burst credit and raises + // the soft quota. This has the effect of reducing the credit cap and the + // amount of credit given for removing items to better approximate the + // rate at which the consumer is servicing the queue. + q.credit-- + q.softQuota = q.queueLen + 1 + } + e := &entry{item: item} + q.back.link = e + q.back = e + q.queueLen++ + if q.queueLen == 1 { // was empty + q.nempty.Signal() + } + return nil +} + +// Remove removes and returns the frontmost (oldest) item in the queue and +// reports whether an item was available. If the queue is empty, Remove +// returns nil, false. +func (q *Queue) Remove() (interface{}, bool) { + q.mu.Lock() + defer q.mu.Unlock() + + if q.queueLen == 0 { + return nil, false + } + return q.popFront(), true +} + +// Wait blocks until q is non-empty or closed, and then returns the frontmost +// (oldest) item from the queue. If ctx ends before an item is available, Wait +// returns a nil value and a context error. If the queue is closed while it is +// still empty, Wait returns nil, ErrQueueClosed. +func (q *Queue) Wait(ctx context.Context) (interface{}, error) { + // If the context terminates, wake the waiter. + ctx, cancel := context.WithCancel(ctx) + defer cancel() + go func() { <-ctx.Done(); q.nempty.Broadcast() }() + + q.mu.Lock() + defer q.mu.Unlock() + + for q.queueLen == 0 { + if q.closed { + return nil, ErrQueueClosed + } + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + q.nempty.Wait() + } + } + return q.popFront(), nil +} + +// Close closes the queue. After closing, any further Add calls will report an +// error, but items that were added to the queue prior to closing will still be +// available for Remove and Wait. Wait will report an error without blocking if +// it is called on a closed, empty queue. +func (q *Queue) Close() error { + q.mu.Lock() + defer q.mu.Unlock() + q.closed = true + q.nempty.Broadcast() + return nil +} + +// popFront removes the frontmost item of q and returns its value after +// updating quota and credit settings. +// +// Preconditions: The caller holds q.mu and q is not empty. +func (q *Queue) popFront() interface{} { + e := q.front.link + q.front.link = e.link + if e == q.back { + q.back = q.front + } + q.queueLen-- + + if q.queueLen < q.softQuota { + // Successfully removing items from the queue below half the soft quota + // lowers the soft quota. This has the effect of increasing the credit cap + // and the amount of credit given for removing items to better approximate + // the rate at which the consumer is servicing the queue. + if q.softQuota > 1 && q.queueLen < q.softQuota/2 { + q.softQuota-- + } + + // Give credit for being below the soft quota. Note we do this after + // adjusting the quota so the credit reflects the item we just removed. + q.credit += float64(q.softQuota-q.queueLen) / float64(q.softQuota) + if cap := float64(q.hardLimit - q.softQuota); q.credit > cap { + q.credit = cap + } + } + + return e.item +} + +// Options are the initial settings for a Queue. +type Options struct { + // The maximum number of items the queue will ever be permitted to hold. + // This value must be positive, and greater than or equal to SoftQuota. 
The + // hard limit is fixed and does not change as the queue is used. + // + // The hard limit should be chosen to exceed the largest burst size expected + // under normal operating conditions. + HardLimit int + + // The initial expected maximum number of items the queue should contain on + // an average workload. If this value is zero, it is initialized to the hard + // limit. The soft quota is adjusted from the initial value dynamically as + // the queue is used. + SoftQuota int + + // The initial burst credit score. This value must be greater than or equal + // to zero. If it is zero, the soft quota is used. + BurstCredit float64 +} + +type entry struct { + item interface{} + link *entry +} diff --git a/internal/libs/queue/queue_test.go b/internal/libs/queue/queue_test.go new file mode 100644 index 0000000000..08ecc3955b --- /dev/null +++ b/internal/libs/queue/queue_test.go @@ -0,0 +1,194 @@ +package queue + +import ( + "context" + "testing" + "time" +) + +func TestNew(t *testing.T) { + tests := []struct { + desc string + opts Options + want error + }{ + {"empty options", Options{}, errHardLimit}, + {"zero limit negative quota", Options{SoftQuota: -1}, errHardLimit}, + {"zero limit and quota", Options{SoftQuota: 0}, errHardLimit}, + {"zero limit", Options{SoftQuota: 1, HardLimit: 0}, errHardLimit}, + {"limit less than quota", Options{SoftQuota: 5, HardLimit: 3}, errHardLimit}, + {"negative credit", Options{SoftQuota: 1, HardLimit: 1, BurstCredit: -6}, errBurstCredit}, + {"valid default credit", Options{SoftQuota: 1, HardLimit: 2, BurstCredit: 0}, nil}, + {"valid explicit credit", Options{SoftQuota: 1, HardLimit: 5, BurstCredit: 10}, nil}, + } + + for _, test := range tests { + t.Run(test.desc, func(t *testing.T) { + got, err := New(test.opts) + if err != test.want { + t.Errorf("New(%+v): got (%+v, %v), want err=%v", test.opts, got, err, test.want) + } + }) + } +} + +type testQueue struct { + t *testing.T + *Queue +} + +func (q testQueue) mustAdd(item string) { + q.t.Helper() + if err := q.Add(item); err != nil { + q.t.Errorf("Add(%q): unexpected error: %v", item, err) + } +} + +func (q testQueue) mustRemove(want string) { + q.t.Helper() + got, ok := q.Remove() + if !ok { + q.t.Error("Remove: queue is empty") + } else if got.(string) != want { + q.t.Errorf("Remove: got %q, want %q", got, want) + } +} + +func mustQueue(t *testing.T, opts Options) testQueue { + t.Helper() + + q, err := New(opts) + if err != nil { + t.Fatalf("New(%+v): unexpected error: %v", opts, err) + } + return testQueue{t: t, Queue: q} +} + +func TestHardLimit(t *testing.T) { + q := mustQueue(t, Options{SoftQuota: 1, HardLimit: 1}) + q.mustAdd("foo") + if err := q.Add("bar"); err != ErrQueueFull { + t.Errorf("Add: got err=%v, want %v", err, ErrQueueFull) + } +} + +func TestSoftQuota(t *testing.T) { + q := mustQueue(t, Options{SoftQuota: 1, HardLimit: 4}) + q.mustAdd("foo") + q.mustAdd("bar") + if err := q.Add("baz"); err != ErrNoCredit { + t.Errorf("Add: got err=%v, want %v", err, ErrNoCredit) + } +} + +func TestBurstCredit(t *testing.T) { + q := mustQueue(t, Options{SoftQuota: 2, HardLimit: 5}) + q.mustAdd("foo") + q.mustAdd("bar") + + // We should still have all our initial credit. + if q.credit < 2 { + t.Errorf("Wrong credit: got %f, want ≥ 2", q.credit) + } + + // Removing an item below soft quota should increase our credit. + q.mustRemove("foo") + if q.credit <= 2 { + t.Errorf("wrong credit: got %f, want > 2", q.credit) + } + + // Credit should be capped by the hard limit. 
+ q.mustRemove("bar") + q.mustAdd("baz") + q.mustRemove("baz") + if cap := float64(q.hardLimit - q.softQuota); q.credit > cap { + t.Errorf("Wrong credit: got %f, want ≤ %f", q.credit, cap) + } +} + +func TestClose(t *testing.T) { + q := mustQueue(t, Options{SoftQuota: 2, HardLimit: 10}) + q.mustAdd("alpha") + q.mustAdd("bravo") + q.mustAdd("charlie") + q.Close() + + // After closing the queue, subsequent writes should fail. + if err := q.Add("foxtrot"); err == nil { + t.Error("Add should have failed after Close") + } + + // However, the remaining contents of the queue should still work. + q.mustRemove("alpha") + q.mustRemove("bravo") + q.mustRemove("charlie") +} + +func TestWait(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + q := mustQueue(t, Options{SoftQuota: 2, HardLimit: 2}) + + // A wait on an empty queue should time out. + t.Run("WaitTimeout", func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) + defer cancel() + got, err := q.Wait(ctx) + if err == nil { + t.Errorf("Wait: got %v, want error", got) + } else { + t.Logf("Wait correctly failed: %v", err) + } + }) + + // A wait on a non-empty queue should report an item. + t.Run("WaitNonEmpty", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + const input = "figgy pudding" + q.mustAdd(input) + + got, err := q.Wait(ctx) + if err != nil { + t.Errorf("Wait: unexpected error: %v", err) + } else if got != input { + t.Errorf("Wait: got %q, want %q", got, input) + } + }) + + // Wait should block until an item arrives. + t.Run("WaitOnEmpty", func(t *testing.T) { + const input = "fleet footed kittens" + + done := make(chan struct{}) + go func() { + defer close(done) + got, err := q.Wait(ctx) + if err != nil { + t.Errorf("Wait: unexpected error: %v", err) + } else if got != input { + t.Errorf("Wait: got %q, want %q", got, input) + } + }() + + q.mustAdd(input) + <-done + }) + + // Closing the queue unblocks a wait. + t.Run("UnblockOnClose", func(t *testing.T) { + done := make(chan struct{}) + go func() { + defer close(done) + got, err := q.Wait(ctx) + if err != ErrQueueClosed { + t.Errorf("Wait: got (%v, %v), want %v", got, err, ErrQueueClosed) + } + }() + + q.Close() + <-done + }) +} diff --git a/internal/libs/sync/closer.go b/internal/libs/sync/closer.go deleted file mode 100644 index 815ee1e803..0000000000 --- a/internal/libs/sync/closer.go +++ /dev/null @@ -1,31 +0,0 @@ -package sync - -import "sync" - -// Closer implements a primitive to close a channel that signals process -// termination while allowing a caller to call Close multiple times safely. It -// should be used in cases where guarantees cannot be made about when and how -// many times closure is executed. -type Closer struct { - closeOnce sync.Once - doneCh chan struct{} -} - -// NewCloser returns a reference to a new Closer. -func NewCloser() *Closer { - return &Closer{doneCh: make(chan struct{})} -} - -// Done returns the internal done channel allowing the caller either block or wait -// for the Closer to be terminated/closed. -func (c *Closer) Done() <-chan struct{} { - return c.doneCh -} - -// Close gracefully closes the Closer. A caller should only call Close once, but -// it is safe to call it successive times. 
-func (c *Closer) Close() { - c.closeOnce.Do(func() { - close(c.doneCh) - }) -} diff --git a/internal/libs/sync/closer_test.go b/internal/libs/sync/closer_test.go deleted file mode 100644 index aea9152159..0000000000 --- a/internal/libs/sync/closer_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package sync_test - -import ( - "testing" - "time" - - "github.com/stretchr/testify/require" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" -) - -func TestCloser(t *testing.T) { - closer := tmsync.NewCloser() - - var timeout bool - - select { - case <-closer.Done(): - case <-time.After(time.Second): - timeout = true - } - - for i := 0; i < 10; i++ { - closer.Close() - } - - require.True(t, timeout) - <-closer.Done() -} diff --git a/internal/libs/sync/deadlock.go b/internal/libs/sync/deadlock.go deleted file mode 100644 index 21b5130ba4..0000000000 --- a/internal/libs/sync/deadlock.go +++ /dev/null @@ -1,18 +0,0 @@ -//go:build deadlock -// +build deadlock - -package sync - -import ( - deadlock "github.com/sasha-s/go-deadlock" -) - -// A Mutex is a mutual exclusion lock. -type Mutex struct { - deadlock.Mutex -} - -// An RWMutex is a reader/writer mutual exclusion lock. -type RWMutex struct { - deadlock.RWMutex -} diff --git a/internal/libs/sync/sync.go b/internal/libs/sync/sync.go deleted file mode 100644 index c6e7101c60..0000000000 --- a/internal/libs/sync/sync.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build !deadlock -// +build !deadlock - -package sync - -import "sync" - -// A Mutex is a mutual exclusion lock. -type Mutex struct { - sync.Mutex -} - -// An RWMutex is a reader/writer mutual exclusion lock. -type RWMutex struct { - sync.RWMutex -} diff --git a/internal/libs/tempfile/tempfile.go b/internal/libs/tempfile/tempfile.go index 0c594bb20f..e30d5a8c60 100644 --- a/internal/libs/tempfile/tempfile.go +++ b/internal/libs/tempfile/tempfile.go @@ -7,9 +7,8 @@ import ( "path/filepath" "strconv" "strings" + "sync" "time" - - tmsync "github.com/tendermint/tendermint/internal/libs/sync" ) const ( @@ -32,7 +31,7 @@ const ( var ( atomicWriteFileRand uint64 - atomicWriteFileRandMu tmsync.Mutex + atomicWriteFileRandMu sync.Mutex ) func writeFileRandReseed() uint64 { diff --git a/internal/libs/tempfile/tempfile_test.go b/internal/libs/tempfile/tempfile_test.go index 5650fe7208..aee540c591 100644 --- a/internal/libs/tempfile/tempfile_test.go +++ b/internal/libs/tempfile/tempfile_test.go @@ -5,10 +5,9 @@ package tempfile import ( "bytes" "fmt" - "io/ioutil" mrand "math/rand" "os" - testing "testing" + "testing" "github.com/stretchr/testify/require" @@ -22,13 +21,13 @@ func TestWriteFileAtomic(t *testing.T) { perm os.FileMode = 0600 ) - f, err := ioutil.TempFile("/tmp", "write-atomic-test-") + f, err := os.CreateTemp(t.TempDir(), "write-atomic-test-") if err != nil { t.Fatal(err) } defer os.Remove(f.Name()) - if err = ioutil.WriteFile(f.Name(), old, 0600); err != nil { + if err = os.WriteFile(f.Name(), old, 0600); err != nil { t.Fatal(err) } @@ -36,7 +35,7 @@ func TestWriteFileAtomic(t *testing.T) { t.Fatal(err) } - rData, err := ioutil.ReadFile(f.Name()) + rData, err := os.ReadFile(f.Name()) if err != nil { t.Fatal(err) } @@ -81,11 +80,11 @@ func TestWriteFileAtomicDuplicateFile(t *testing.T) { err = WriteFileAtomic(fileToWrite, []byte(expectedString), 0777) require.NoError(t, err) // Check that the first atomic file was untouched - firstAtomicFileBytes, err := ioutil.ReadFile(fname) + firstAtomicFileBytes, err := os.ReadFile(fname) require.NoError(t, err, "Error reading first atomic file") 
require.Equal(t, []byte(testString), firstAtomicFileBytes, "First atomic file was overwritten") // Check that the resultant file is correct - resultantFileBytes, err := ioutil.ReadFile(fileToWrite) + resultantFileBytes, err := os.ReadFile(fileToWrite) require.NoError(t, err, "Error reading resultant file") require.Equal(t, []byte(expectedString), resultantFileBytes, "Written file had incorrect bytes") @@ -115,7 +114,7 @@ func TestWriteFileAtomicManyDuplicates(t *testing.T) { fileRand := randWriteFileSuffix() fname := "/tmp/" + atomicWriteFilePrefix + fileRand f, err := os.OpenFile(fname, atomicWriteFileFlag, 0777) - require.Nil(t, err) + require.NoError(t, err) _, err = f.WriteString(fmt.Sprintf(testString, i)) require.NoError(t, err) defer os.Remove(fname) @@ -132,14 +131,14 @@ func TestWriteFileAtomicManyDuplicates(t *testing.T) { for i := 0; i < atomicWriteFileMaxNumConflicts+2; i++ { fileRand := randWriteFileSuffix() fname := "/tmp/" + atomicWriteFilePrefix + fileRand - firstAtomicFileBytes, err := ioutil.ReadFile(fname) - require.Nil(t, err, "Error reading first atomic file") + firstAtomicFileBytes, err := os.ReadFile(fname) + require.NoError(t, err, "Error reading first atomic file") require.Equal(t, []byte(fmt.Sprintf(testString, i)), firstAtomicFileBytes, "atomic write file %d was overwritten", i) } // Check that the resultant file is correct - resultantFileBytes, err := ioutil.ReadFile(fileToWrite) - require.Nil(t, err, "Error reading resultant file") + resultantFileBytes, err := os.ReadFile(fileToWrite) + require.NoError(t, err, "Error reading resultant file") require.Equal(t, []byte(expectedString), resultantFileBytes, "Written file had incorrect bytes") } diff --git a/internal/libs/timer/throttle_timer.go b/internal/libs/timer/throttle_timer.go index 3f21e3cc04..7bf86e80c1 100644 --- a/internal/libs/timer/throttle_timer.go +++ b/internal/libs/timer/throttle_timer.go @@ -1,9 +1,8 @@ package timer import ( + "sync" "time" - - tmsync "github.com/tendermint/tendermint/internal/libs/sync" ) /* @@ -18,7 +17,7 @@ type ThrottleTimer struct { quit chan struct{} dur time.Duration - mtx tmsync.Mutex + mtx sync.Mutex timer *time.Timer isSet bool } @@ -56,13 +55,6 @@ func (t *ThrottleTimer) Set() { } } -func (t *ThrottleTimer) Unset() { - t.mtx.Lock() - defer t.mtx.Unlock() - t.isSet = false - t.timer.Stop() -} - // For ease of .Stop()'ing services before .Start()'ing them, // we ignore .Stop()'s on nil ThrottleTimers func (t *ThrottleTimer) Stop() bool { diff --git a/internal/libs/timer/throttle_timer_test.go b/internal/libs/timer/throttle_timer_test.go index a56dcadfd0..7ea392c3a4 100644 --- a/internal/libs/timer/throttle_timer_test.go +++ b/internal/libs/timer/throttle_timer_test.go @@ -1,19 +1,18 @@ package timer import ( + "sync" "testing" "time" // make govet noshadow happy... asrt "github.com/stretchr/testify/assert" - - tmsync "github.com/tendermint/tendermint/internal/libs/sync" ) type thCounter struct { input chan struct{} - mtx tmsync.Mutex + mtx sync.Mutex count int } diff --git a/internal/mempool/cache.go b/internal/mempool/cache.go index 3cd45d2bc5..c69fc80dd4 100644 --- a/internal/mempool/cache.go +++ b/internal/mempool/cache.go @@ -2,8 +2,8 @@ package mempool import ( "container/list" + "sync" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/types" ) @@ -29,7 +29,7 @@ var _ TxCache = (*LRUTxCache)(nil) // LRUTxCache maintains a thread-safe LRU cache of raw transactions. The cache // only stores the hash of the raw transaction. 
type LRUTxCache struct { - mtx tmsync.Mutex + mtx sync.Mutex size int cacheMap map[types.TxKey]*list.Element list *list.List diff --git a/internal/mempool/ids.go b/internal/mempool/ids.go index 1bb03ceaf8..8b171e48a5 100644 --- a/internal/mempool/ids.go +++ b/internal/mempool/ids.go @@ -2,22 +2,20 @@ package mempool import ( "fmt" + "sync" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/types" ) -// nolint: revive -// TODO: Rename type. -type MempoolIDs struct { - mtx tmsync.RWMutex +type IDs struct { + mtx sync.RWMutex peerMap map[types.NodeID]uint16 nextID uint16 // assumes that a node will never have over 65536 active peers activeIDs map[uint16]struct{} // used to check if a given peerID key is used } -func NewMempoolIDs() *MempoolIDs { - return &MempoolIDs{ +func NewMempoolIDs() *IDs { + return &IDs{ peerMap: make(map[types.NodeID]uint16), // reserve UnknownPeerID for mempoolReactor.BroadcastTx @@ -28,17 +26,22 @@ func NewMempoolIDs() *MempoolIDs { // ReserveForPeer searches for the next unused ID and assigns it to the provided // peer. -func (ids *MempoolIDs) ReserveForPeer(peerID types.NodeID) { +func (ids *IDs) ReserveForPeer(peerID types.NodeID) { ids.mtx.Lock() defer ids.mtx.Unlock() + if _, ok := ids.peerMap[peerID]; ok { + // the peer has been reserved + return + } + curID := ids.nextPeerID() ids.peerMap[peerID] = curID ids.activeIDs[curID] = struct{}{} } // Reclaim returns the ID reserved for the peer back to unused pool. -func (ids *MempoolIDs) Reclaim(peerID types.NodeID) { +func (ids *IDs) Reclaim(peerID types.NodeID) { ids.mtx.Lock() defer ids.mtx.Unlock() @@ -46,11 +49,14 @@ func (ids *MempoolIDs) Reclaim(peerID types.NodeID) { if ok { delete(ids.activeIDs, removedID) delete(ids.peerMap, peerID) + if removedID < ids.nextID { + ids.nextID = removedID + } } } // GetForPeer returns an ID reserved for the peer. -func (ids *MempoolIDs) GetForPeer(peerID types.NodeID) uint16 { +func (ids *IDs) GetForPeer(peerID types.NodeID) uint16 { ids.mtx.RLock() defer ids.mtx.RUnlock() @@ -59,7 +65,7 @@ func (ids *MempoolIDs) GetForPeer(peerID types.NodeID) uint16 { // nextPeerID returns the next unused peer ID to use. We assume that the mutex // is already held. 
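To illustrate the reservation scheme, including the lowest-ID reuse that the new Reclaim logic enables, a short sketch (the node IDs are placeholders):

// Sketch: peer-ID bookkeeping with reuse of reclaimed IDs.
ids := NewMempoolIDs()

peerA, _ := types.NewNodeID("00112233445566778899001122334455667788aa") // placeholder
peerB, _ := types.NewNodeID("00112233445566778899001122334455667788bb") // placeholder

ids.ReserveForPeer(peerA) // assigned ID 1 (ID 0 is reserved for the unknown peer)
ids.ReserveForPeer(peerB) // assigned ID 2
ids.ReserveForPeer(peerA) // no-op: the reservation already exists

ids.Reclaim(peerA)        // ID 1 returns to the pool and becomes the next candidate
ids.ReserveForPeer(peerA) // reuses ID 1 rather than allocating ID 3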
-func (ids *MempoolIDs) nextPeerID() uint16 { +func (ids *IDs) nextPeerID() uint16 { if len(ids.activeIDs) == MaxActiveIDs { panic(fmt.Sprintf("node has maximum %d active IDs and wanted to get one more", MaxActiveIDs)) } diff --git a/internal/mempool/ids_test.go b/internal/mempool/ids_test.go index a398386275..6601706bcd 100644 --- a/internal/mempool/ids_test.go +++ b/internal/mempool/ids_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/types" ) @@ -12,12 +13,77 @@ func TestMempoolIDsBasic(t *testing.T) { peerID, err := types.NewNodeID("0011223344556677889900112233445566778899") require.NoError(t, err) + require.EqualValues(t, 0, ids.GetForPeer(peerID)) ids.ReserveForPeer(peerID) require.EqualValues(t, 1, ids.GetForPeer(peerID)) + ids.Reclaim(peerID) + require.EqualValues(t, 0, ids.GetForPeer(peerID)) ids.ReserveForPeer(peerID) - require.EqualValues(t, 2, ids.GetForPeer(peerID)) - ids.Reclaim(peerID) + require.EqualValues(t, 1, ids.GetForPeer(peerID)) +} + +func TestMempoolIDsPeerDupReserve(t *testing.T) { + ids := NewMempoolIDs() + + peerID, err := types.NewNodeID("0011223344556677889900112233445566778899") + require.NoError(t, err) + require.EqualValues(t, 0, ids.GetForPeer(peerID)) + + ids.ReserveForPeer(peerID) + require.EqualValues(t, 1, ids.GetForPeer(peerID)) + + ids.ReserveForPeer(peerID) + require.EqualValues(t, 1, ids.GetForPeer(peerID)) +} + +func TestMempoolIDs2Peers(t *testing.T) { + ids := NewMempoolIDs() + + peer1ID, _ := types.NewNodeID("0011223344556677889900112233445566778899") + require.EqualValues(t, 0, ids.GetForPeer(peer1ID)) + + ids.ReserveForPeer(peer1ID) + require.EqualValues(t, 1, ids.GetForPeer(peer1ID)) + + ids.Reclaim(peer1ID) + require.EqualValues(t, 0, ids.GetForPeer(peer1ID)) + + peer2ID, _ := types.NewNodeID("1011223344556677889900112233445566778899") + + ids.ReserveForPeer(peer2ID) + require.EqualValues(t, 1, ids.GetForPeer(peer2ID)) + + ids.ReserveForPeer(peer1ID) + require.EqualValues(t, 2, ids.GetForPeer(peer1ID)) +} + +func TestMempoolIDsNextExistID(t *testing.T) { + ids := NewMempoolIDs() + + peer1ID, _ := types.NewNodeID("0011223344556677889900112233445566778899") + ids.ReserveForPeer(peer1ID) + require.EqualValues(t, 1, ids.GetForPeer(peer1ID)) + + peer2ID, _ := types.NewNodeID("1011223344556677889900112233445566778899") + ids.ReserveForPeer(peer2ID) + require.EqualValues(t, 2, ids.GetForPeer(peer2ID)) + + peer3ID, _ := types.NewNodeID("2011223344556677889900112233445566778899") + ids.ReserveForPeer(peer3ID) + require.EqualValues(t, 3, ids.GetForPeer(peer3ID)) + + ids.Reclaim(peer1ID) + require.EqualValues(t, 0, ids.GetForPeer(peer1ID)) + + ids.Reclaim(peer3ID) + require.EqualValues(t, 0, ids.GetForPeer(peer3ID)) + + ids.ReserveForPeer(peer1ID) + require.EqualValues(t, 1, ids.GetForPeer(peer1ID)) + + ids.ReserveForPeer(peer3ID) + require.EqualValues(t, 3, ids.GetForPeer(peer3ID)) } diff --git a/internal/mempool/mempool.go b/internal/mempool/mempool.go index 6e3955dc3b..629fa0bdae 100644 --- a/internal/mempool/mempool.go +++ b/internal/mempool/mempool.go @@ -1,143 +1,843 @@ package mempool import ( + "bytes" "context" + "errors" "fmt" - "math" + "sync" + "sync/atomic" + "time" + abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/internal/p2p" + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/internal/libs/clist" + "github.com/tendermint/tendermint/libs/log" + 
tmmath "github.com/tendermint/tendermint/libs/math" "github.com/tendermint/tendermint/types" ) -const ( - MempoolChannel = p2p.ChannelID(0x30) +var _ Mempool = (*TxMempool)(nil) - // PeerCatchupSleepIntervalMS defines how much time to sleep if a peer is behind - PeerCatchupSleepIntervalMS = 100 +// TxMempoolOption sets an optional parameter on the TxMempool. +type TxMempoolOption func(*TxMempool) - // UnknownPeerID is the peer ID to use when running CheckTx when there is - // no peer (e.g. RPC) - UnknownPeerID uint16 = 0 +// TxMempool defines a prioritized mempool data structure used by the v1 mempool +// reactor. It keeps a thread-safe priority queue of transactions that is used +// when a block proposer constructs a block and a thread-safe linked-list that +// is used to gossip transactions to peers in a FIFO manner. +type TxMempool struct { + logger log.Logger + metrics *Metrics + config *config.MempoolConfig + proxyAppConn abciclient.Client - MaxActiveIDs = math.MaxUint16 -) + // txsAvailable fires once for each height when the mempool is not empty + txsAvailable chan struct{} + notifiedTxsAvailable bool -// Mempool defines the mempool interface. -// -// Updates to the mempool need to be synchronized with committing a block so -// applications can reset their transient state on Commit. -type Mempool interface { - // CheckTx executes a new transaction against the application to determine - // its validity and whether it should be added to the mempool. - CheckTx(ctx context.Context, tx types.Tx, callback func(*abci.Response), txInfo TxInfo) error - - // RemoveTxByKey removes a transaction, identified by its key, - // from the mempool. - RemoveTxByKey(txKey types.TxKey) error - - // ReapMaxBytesMaxGas reaps transactions from the mempool up to maxBytes - // bytes total with the condition that the total gasWanted must be less than - // maxGas. - // - // If both maxes are negative, there is no cap on the size of all returned - // transactions (~ all available transactions). - ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs + // height defines the last block height process during Update() + height int64 - // ReapMaxTxs reaps up to max transactions from the mempool. If max is - // negative, there is no cap on the size of all returned transactions - // (~ all available transactions). - ReapMaxTxs(max int) types.Txs + // sizeBytes defines the total size of the mempool (sum of all tx bytes) + sizeBytes int64 - // Lock locks the mempool. The consensus must be able to hold lock to safely - // update. - Lock() + // cache defines a fixed-size cache of already seen transactions as this + // reduces pressure on the proxyApp. + cache TxCache - // Unlock unlocks the mempool. - Unlock() + // txStore defines the main storage of valid transactions. Indexes are built + // on top of this store. + txStore *TxStore - // Update informs the mempool that the given txs were committed and can be - // discarded. - // - // NOTE: - // 1. This should be called *after* block is committed by consensus. - // 2. Lock/Unlock must be managed by the caller. - Update( - blockHeight int64, - blockTxs types.Txs, - deliverTxResponses []*abci.ResponseDeliverTx, - newPreFn PreCheckFunc, - newPostFn PostCheckFunc, - ) error - - // FlushAppConn flushes the mempool connection to ensure async callback calls - // are done, e.g. from CheckTx. + // gossipIndex defines the gossiping index of valid transactions via a + // thread-safe linked-list. 
We also use the gossip index as a cursor for + rechecking transactions already in the mempool. + gossipIndex *clist.CList + + // recheckCursor and recheckEnd are used as cursors based on the gossip index + // to recheck transactions that are already in the mempool. Iteration is not + // thread-safe and transactions may be mutated in serial order. // + // XXX/TODO: It might be somewhat of a codesmell to use the gossip index for + // iterator and cursor management when rechecking transactions. If the gossip + // index changes or is removed in a future refactor, this will have to be + // refactored. Instead, we should consider just keeping a slice of a snapshot + // of the mempool's current transactions during Update and an integer cursor + // into that slice. This, however, requires additional O(n) space complexity. + recheckCursor *clist.CElement // next expected response + recheckEnd *clist.CElement // re-checking stops here - // priorityIndex defines the priority index of valid transactions via a + // thread-safe priority queue. + priorityIndex *TxPriorityQueue - // heightIndex defines a height-based, in ascending order, transaction index. + // i.e. older transactions are first. + heightIndex *WrappedTxList + + // timestampIndex defines a timestamp-based, in ascending order, transaction + // index. i.e. older transactions are first. + timestampIndex *WrappedTxList + + // A read/write lock is used to safeguard updates, insertions and deletions + // from the mempool. A read-lock is implicitly acquired when executing CheckTx; + // however, a caller must explicitly grab a write-lock via Lock when updating + // the mempool via Update(). + mtx sync.RWMutex + preCheck PreCheckFunc + postCheck PostCheckFunc +} + +func NewTxMempool( + logger log.Logger, + cfg *config.MempoolConfig, + proxyAppConn abciclient.Client, + options ...TxMempoolOption, +) *TxMempool { + + txmp := &TxMempool{ + logger: logger, + config: cfg, + proxyAppConn: proxyAppConn, + height: -1, + cache: NopTxCache{}, + metrics: NopMetrics(), + txStore: NewTxStore(), + gossipIndex: clist.New(), + priorityIndex: NewTxPriorityQueue(), + heightIndex: NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool { + return wtx1.height >= wtx2.height + }), + timestampIndex: NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool { + return wtx1.timestamp.After(wtx2.timestamp) || wtx1.timestamp.Equal(wtx2.timestamp) + }), + } + + if cfg.CacheSize > 0 { + txmp.cache = NewLRUTxCache(cfg.CacheSize) + } + + for _, opt := range options { + opt(txmp) + } + + return txmp +} + +// WithPreCheck sets a filter for the mempool to reject a transaction if f(tx) +// returns an error. This is executed before CheckTx. It only applies to the +// first created block. After that, Update() overwrites the existing value. +func WithPreCheck(f PreCheckFunc) TxMempoolOption { + return func(txmp *TxMempool) { txmp.preCheck = f } +} + +// WithPostCheck sets a filter for the mempool to reject a transaction if +// f(tx, resp) returns an error. This is executed after CheckTx. It only applies +// to the first created block. After that, Update overwrites the existing value.
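As an illustration of wiring these options together, a hypothetical construction in which logger, cfg, and appConn stand in for a log.Logger, a *config.MempoolConfig, and an abciclient.Client built elsewhere:

mp := NewTxMempool(
	logger,
	cfg,
	appConn,
	WithMetrics(NopMetrics()),
	WithPreCheck(func(tx types.Tx) error {
		// Hypothetical pre-check: bound the raw transaction size.
		if len(tx) > 1<<20 {
			return fmt.Errorf("tx of %d bytes exceeds 1 MiB", len(tx))
		}
		return nil
	}),
)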
+func WithPostCheck(f PostCheckFunc) TxMempoolOption { + return func(txmp *TxMempool) { txmp.postCheck = f } +} + +// WithMetrics sets the mempool's metrics collector. +func WithMetrics(metrics *Metrics) TxMempoolOption { + return func(txmp *TxMempool) { txmp.metrics = metrics } +} + +// Lock obtains a write-lock on the mempool. A caller must be sure to explicitly +// release the lock when finished. +func (txmp *TxMempool) Lock() { + txmp.mtx.Lock() +} - +// Unlock releases a write-lock on the mempool. +func (txmp *TxMempool) Unlock() { + txmp.mtx.Unlock() +} + +// Size returns the number of valid transactions in the mempool. It is +// thread-safe. +func (txmp *TxMempool) Size() int { + return txmp.txStore.Size() +} - // Size returns the number of transactions in the mempool. - Size() int +// SizeBytes returns the total sum in bytes of all the valid transactions in the +// mempool. It is thread-safe. +func (txmp *TxMempool) SizeBytes() int64 { + return atomic.LoadInt64(&txmp.sizeBytes) +} + +// FlushAppConn executes Flush on the mempool's proxyAppConn. +// +// NOTE: The caller must obtain a write-lock prior to execution. +func (txmp *TxMempool) FlushAppConn(ctx context.Context) error { + return txmp.proxyAppConn.Flush(ctx) +} - // SizeBytes returns the total size of all txs in the mempool. - SizeBytes() int64 +// WaitForNextTx returns a blocking channel that will be closed when the next +// valid transaction is available to gossip. It is thread-safe. +func (txmp *TxMempool) WaitForNextTx() <-chan struct{} { + return txmp.gossipIndex.WaitChan() } -// PreCheckFunc is an optional filter executed before CheckTx and rejects -// transaction if false is returned. An example would be to ensure that a -// transaction doesn't exceeded the block size. -type PreCheckFunc func(types.Tx) error +// NextGossipTx returns the next valid transaction to gossip. A caller must wait +// for WaitForNextTx to signal a transaction is available to gossip first. It is +// thread-safe. +func (txmp *TxMempool) NextGossipTx() *clist.CElement { + return txmp.gossipIndex.Front() +} + +// EnableTxsAvailable enables the mempool to trigger events when transactions +// are available on a block-by-block basis. +func (txmp *TxMempool) EnableTxsAvailable() { + txmp.mtx.Lock() + defer txmp.mtx.Unlock() + + txmp.txsAvailable = make(chan struct{}, 1) +} + +// TxsAvailable returns a channel which fires once for every height, and only +// when transactions are available in the mempool. It is thread-safe. +func (txmp *TxMempool) TxsAvailable() <-chan struct{} { + return txmp.txsAvailable } -// PostCheckFunc is an optional filter executed after CheckTx and rejects -// transaction if false is returned. An example would be to ensure a -// transaction doesn't require more gas than available for the block. -type PostCheckFunc func(types.Tx, *abci.ResponseCheckTx) error +// CheckTx executes the ABCI CheckTx method for a given transaction. +// It acquires a read-lock and attempts to execute the application's +// CheckTx ABCI method synchronously. We return an error if any of +// the following happen: +// +// - The CheckTx execution fails. +// - The transaction already exists in the cache and we've already received the +// transaction from the peer. Otherwise, if it solely exists in the cache, we +// return nil.
+// - The transaction size exceeds the maximum transaction size as defined by the +// configuration provided to the mempool. +// - The transaction fails Pre-Check (if it is defined). +// - The proxyAppConn fails, e.g. the buffer is full. +// +// If the mempool is full, we still execute CheckTx and attempt to find a lower +// priority transaction to evict. If such a transaction exists, we remove the +// lower priority transaction and add the new one with higher priority. +// +// NOTE: +// - The application's CheckTx implementation may panic. +// - The caller does not need to hold any locks to execute CheckTx. +func (txmp *TxMempool) CheckTx( + ctx context.Context, + tx types.Tx, + cb func(*abci.ResponseCheckTx), + txInfo TxInfo, +) error { + txmp.mtx.RLock() + defer txmp.mtx.RUnlock() -// PreCheckMaxBytes checks that the size of the transaction is smaller or equal -// to the expected maxBytes. -func PreCheckMaxBytes(maxBytes int64) PreCheckFunc { - return func(tx types.Tx) error { - txSize := types.ComputeProtoSizeForTxs([]types.Tx{tx}) + if txSize := len(tx); txSize > txmp.config.MaxTxBytes { + return types.ErrTxTooLarge{ + Max: txmp.config.MaxTxBytes, + Actual: txSize, + } + } - if txSize > maxBytes { - return fmt.Errorf("tx size is too big: %d, max: %d", txSize, maxBytes) + if txmp.preCheck != nil { + if err := txmp.preCheck(tx); err != nil { + return types.ErrPreCheck{Reason: err} } + } + + if err := txmp.proxyAppConn.Error(); err != nil { + return err + } + + txHash := tx.Key() + + // We add the transaction to the mempool's cache. If the transaction is + // already present in the cache, i.e. Push returns false, we record the + // sender and return ErrTxInCache. + if !txmp.cache.Push(tx) { + txmp.txStore.GetOrSetPeerByTxHash(txHash, txInfo.SenderID) + return types.ErrTxInCache + } + + res, err := txmp.proxyAppConn.CheckTx(ctx, &abci.RequestCheckTx{Tx: tx}) + if err != nil { + txmp.cache.Remove(tx) + return err + } + if txmp.recheckCursor != nil { + return errors.New("recheck cursor is non-nil") + } + + wtx := &WrappedTx{ + tx: tx, + hash: txHash, + timestamp: time.Now().UTC(), + height: txmp.height, + } + + txmp.defaultTxCallback(tx, res) + txmp.initTxCallback(wtx, res, txInfo) + + if cb != nil { + cb(res) + } + + return nil +} + +func (txmp *TxMempool) RemoveTxByKey(txKey types.TxKey) error { + txmp.Lock() + defer txmp.Unlock() + + // remove the committed transaction from the transaction store and indexes + if wtx := txmp.txStore.GetTxByHash(txKey); wtx != nil { + txmp.removeTx(wtx, false) return nil } + + return errors.New("transaction not found") +} + +// Flush empties the mempool. It acquires a read-lock, removes every +// transaction currently in the transaction store from the store and all +// indexes, and finally resets the cache. +// +// NOTE: +// - Flushing the mempool may leave the mempool in an inconsistent state. +func (txmp *TxMempool) Flush() { + txmp.mtx.RLock() + defer txmp.mtx.RUnlock() + + txmp.heightIndex.Reset() + txmp.timestampIndex.Reset() + + for _, wtx := range txmp.txStore.GetAllTxs() { + txmp.removeTx(wtx, false) + } + + atomic.SwapInt64(&txmp.sizeBytes, 0) + txmp.cache.Reset() } -// PostCheckMaxGas checks that the wanted gas is smaller or equal to the passed -// maxGas. Returns nil if maxGas is -1.
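A sketch of submitting a transaction along this path, e.g. from an RPC handler; the zero-valued TxInfo is assumed here to stand for "no sending peer":

err := mp.CheckTx(ctx, tx, func(res *abci.ResponseCheckTx) {
	// Optional callback: observe the application's verdict.
	if res.Code != abci.CodeTypeOK {
		logger.Info("tx rejected by app", "code", res.Code)
	}
}, TxInfo{})
switch {
case errors.Is(err, types.ErrTxInCache):
	// Duplicate submission; nothing to do.
case err != nil:
	return err // e.g. ErrTxTooLarge, ErrPreCheck, or a proxy app failure
}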
-func PostCheckMaxGas(maxGas int64) PostCheckFunc { - return func(tx types.Tx, res *abci.ResponseCheckTx) error { - if maxGas == -1 { - return nil +// ReapMaxBytesMaxGas returns a list of transactions within the provided size +// and gas constraints. Transactions are retrieved in priority order. +// +// NOTE: +// - Transactions returned are not removed from the mempool transaction +// store or indexes. +func (txmp *TxMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs { + txmp.mtx.RLock() + defer txmp.mtx.RUnlock() + + var ( + totalGas int64 + totalSize int64 + ) + + // wTxs contains a list of *WrappedTx retrieved from the priority queue that + // need to be re-enqueued prior to returning. + wTxs := make([]*WrappedTx, 0, txmp.priorityIndex.NumTxs()) + defer func() { + for _, wtx := range wTxs { + txmp.priorityIndex.PushTx(wtx) } - if res.GasWanted < 0 { - return fmt.Errorf("gas wanted %d is negative", - res.GasWanted) + }() + + txs := make([]types.Tx, 0, txmp.priorityIndex.NumTxs()) + for txmp.priorityIndex.NumTxs() > 0 { + wtx := txmp.priorityIndex.PopTx() + txs = append(txs, wtx.tx) + wTxs = append(wTxs, wtx) + size := types.ComputeProtoSizeForTxs([]types.Tx{wtx.tx}) + + // Ensure we have capacity for the transaction with respect to the + // transaction size. + if maxBytes > -1 && totalSize+size > maxBytes { + return txs[:len(txs)-1] } - if res.GasWanted > maxGas { - return fmt.Errorf("gas wanted %d is greater than max gas %d", - res.GasWanted, maxGas) + + totalSize += size + + // ensure we have capacity for the transaction with respect to total gas + gas := totalGas + wtx.gasWanted + if maxGas > -1 && gas > maxGas { + return txs[:len(txs)-1] } - return nil + totalGas = gas + } + + return txs +} + +// ReapMaxTxs returns a list of transactions, bounded by the provided maximum +// number of transactions. Transactions are retrieved in priority order. +// +// NOTE: +// - Transactions returned are not removed from the mempool transaction +// store or indexes. +func (txmp *TxMempool) ReapMaxTxs(max int) types.Txs { + txmp.mtx.RLock() + defer txmp.mtx.RUnlock() + + numTxs := txmp.priorityIndex.NumTxs() + if max < 0 { + max = numTxs + } + + cap := tmmath.MinInt(numTxs, max) + + // wTxs contains a list of *WrappedTx retrieved from the priority queue that + // need to be re-enqueued prior to returning. + wTxs := make([]*WrappedTx, 0, cap) + txs := make([]types.Tx, 0, cap) + for txmp.priorityIndex.NumTxs() > 0 && len(txs) < max { + wtx := txmp.priorityIndex.PopTx() + txs = append(txs, wtx.tx) + wTxs = append(wTxs, wtx) + } + for _, wtx := range wTxs { + txmp.priorityIndex.PushTx(wtx) + } + return txs +} + +// Update iterates over all the transactions provided by the block producer, +// removes them from the cache (if applicable), and removes +// the transactions from the main transaction store and associated indexes. +// If there are transactions remaining in the mempool, we initiate a +// re-CheckTx for them (if applicable); otherwise, we notify the caller that +// more transactions are available. +// +// NOTE: +// - The caller must explicitly acquire a write-lock.
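ReapMaxBytesMaxGas pairs with Update across a block's lifecycle; a condensed sketch of the caller's side, where the bounds, height, and result slices are placeholders:

// Proposal time: select transactions by priority under size/gas bounds.
// The reaped transactions are NOT removed from the mempool.
txs := mp.ReapMaxBytesMaxGas(maxBlockBytes, maxBlockGas)

// ... the block is executed and committed ...

// Post-commit: prune committed transactions and trigger rechecking,
// under the caller-managed write-lock described above.
mp.Lock()
defer mp.Unlock()
if err := mp.FlushAppConn(ctx); err != nil { // settle in-flight CheckTx calls first
	return err
}
if err := mp.Update(ctx, height, committedTxs, execTxResults, nil, nil); err != nil {
	return err
}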
+func (txmp *TxMempool) Update( + ctx context.Context, + blockHeight int64, + blockTxs types.Txs, + execTxResult []*abci.ExecTxResult, + newPreFn PreCheckFunc, + newPostFn PostCheckFunc, +) error { + txmp.height = blockHeight + txmp.notifiedTxsAvailable = false + + if newPreFn != nil { + txmp.preCheck = newPreFn + } + if newPostFn != nil { + txmp.postCheck = newPostFn + } + + for i, tx := range blockTxs { + if execTxResult[i].Code == abci.CodeTypeOK { + // add the valid committed transaction to the cache (if missing) + _ = txmp.cache.Push(tx) + } else if !txmp.config.KeepInvalidTxsInCache { + // allow invalid transactions to be re-submitted + txmp.cache.Remove(tx) + } + + // remove the committed transaction from the transaction store and indexes + if wtx := txmp.txStore.GetTxByHash(tx.Key()); wtx != nil { + txmp.removeTx(wtx, false) + } + } + + txmp.purgeExpiredTxs(blockHeight) + + // If there are any uncommitted transactions left in the mempool, we either + // initiate re-CheckTx per remaining transaction or notify that remaining + // transactions are available. + if txmp.Size() > 0 { + if txmp.config.Recheck { + txmp.logger.Debug( + "executing re-CheckTx for all remaining transactions", + "num_txs", txmp.Size(), + "height", blockHeight, + ) + txmp.updateReCheckTxs(ctx) + } else { + txmp.notifyTxsAvailable() + } + } + + txmp.metrics.Size.Set(float64(txmp.Size())) + return nil +} + +// initTxCallback is the callback invoked for a new unique transaction after CheckTx +// has been executed by the ABCI application for the first time on that transaction. +// CheckTx can be called again for the same transaction later when re-checking; +// however, this callback will not be called again. +// +// initTxCallback runs after the ABCI application executes CheckTx. +// It runs the postCheck hook if one is defined on the mempool. +// If the CheckTx response code is not OK, or if the postCheck hook +// reports an error, the transaction is rejected. Otherwise, we attempt to insert +// the transaction into the mempool. +// +// When inserting a transaction, we first check if there is sufficient capacity. +// If there is, the transaction is added to the txStore and all indexes. +// Otherwise, if the mempool is full, we attempt to find a lower priority transaction +// to evict in place of the new incoming transaction. If no such transaction exists, +// the new incoming transaction is rejected. +// +// NOTE: +// - An explicit lock is NOT required.
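The postCheck hook consulted below can encode application-side limits; a hypothetical example in the spirit of the removed PostCheckMaxGas, where maxBlockGas is an assumed bound:

// Hypothetical post-check: reject transactions that want more gas than
// the block allows. Installed via WithPostCheck or Update's newPostFn.
postCheck := func(tx types.Tx, res *abci.ResponseCheckTx) error {
	if res.GasWanted < 0 {
		return fmt.Errorf("gas wanted %d is negative", res.GasWanted)
	}
	if res.GasWanted > maxBlockGas {
		return fmt.Errorf("gas wanted %d exceeds block gas %d", res.GasWanted, maxBlockGas)
	}
	return nil
}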
+func (txmp *TxMempool) initTxCallback(wtx *WrappedTx, res *abci.ResponseCheckTx, txInfo TxInfo) { + var err error + if txmp.postCheck != nil { + err = txmp.postCheck(wtx.tx, res) + } + + if err != nil || res.Code != abci.CodeTypeOK { + // ignore bad transactions + txmp.logger.Info( + "rejected bad transaction", + "priority", wtx.priority, + "tx", fmt.Sprintf("%X", wtx.tx.Hash()), + "peer_id", txInfo.SenderNodeID, + "code", res.Code, + "post_check_err", err, + ) + + txmp.metrics.FailedTxs.Add(1) + + if !txmp.config.KeepInvalidTxsInCache { + txmp.cache.Remove(wtx.tx) + } + if err != nil { + res.MempoolError = err.Error() + } + return + } + + sender := res.Sender + priority := res.Priority + + if len(sender) > 0 { + if wtx := txmp.txStore.GetTxBySender(sender); wtx != nil { + txmp.logger.Error( + "rejected incoming good transaction; tx already exists for sender", + "tx", fmt.Sprintf("%X", wtx.tx.Hash()), + "sender", sender, + ) + txmp.metrics.RejectedTxs.Add(1) + return + } + } + + if err := txmp.canAddTx(wtx); err != nil { + evictTxs := txmp.priorityIndex.GetEvictableTxs( + priority, + int64(wtx.Size()), + txmp.SizeBytes(), + txmp.config.MaxTxsBytes, + ) + if len(evictTxs) == 0 { + // No room for the new incoming transaction so we just remove it from + // the cache. + txmp.cache.Remove(wtx.tx) + txmp.logger.Error( + "rejected incoming good transaction; mempool full", + "tx", fmt.Sprintf("%X", wtx.tx.Hash()), + "err", err.Error(), + ) + txmp.metrics.RejectedTxs.Add(1) + return + } + + // evict one or more existing transactions + // + // NOTE: + // - The transaction, toEvict, can be removed while a concurrent + // reCheckTx callback is being executed for the same transaction. + for _, toEvict := range evictTxs { + txmp.removeTx(toEvict, true) + txmp.logger.Debug( + "evicted existing good transaction; mempool full", + "old_tx", fmt.Sprintf("%X", toEvict.tx.Hash()), + "old_priority", toEvict.priority, + "new_tx", fmt.Sprintf("%X", wtx.tx.Hash()), + "new_priority", wtx.priority, + ) + txmp.metrics.EvictedTxs.Add(1) + } + } + + wtx.gasWanted = res.GasWanted + wtx.priority = priority + wtx.sender = sender + wtx.peers = map[uint16]struct{}{ + txInfo.SenderID: {}, + } + + txmp.metrics.TxSizeBytes.Observe(float64(wtx.Size())) + txmp.metrics.Size.Set(float64(txmp.Size())) + + txmp.insertTx(wtx) + txmp.logger.Debug( + "inserted good transaction", + "priority", wtx.priority, + "tx", fmt.Sprintf("%X", wtx.tx.Hash()), + "height", txmp.height, + "num_txs", txmp.Size(), + ) + txmp.notifyTxsAvailable() +} + +// defaultTxCallback is the CheckTx application callback used when a +// transaction is being re-checked (if re-checking is enabled). The +// caller must hold a mempool write-lock (via Lock()). When Update() +// executes on a non-empty mempool with Recheck enabled, all remaining +// transactions are rechecked via CheckTx, and they must be rechecked +// in the same order in which this callback is invoked. +func (txmp *TxMempool) defaultTxCallback(tx types.Tx, res *abci.ResponseCheckTx) { + if txmp.recheckCursor == nil { + return + } + + txmp.metrics.RecheckTimes.Add(1) + + wtx := txmp.recheckCursor.Value.(*WrappedTx) + + // Search through the remaining list of transactions to recheck for one + // that matches the transaction we received from the ABCI application. + for { + if bytes.Equal(tx, wtx.tx) { + // We've found a tx in the recheck list that matches the tx that we + // received from the ABCI application. + // Break, and use this transaction for further checks.
+ break + } + + txmp.logger.Error( + "re-CheckTx transaction mismatch", + "got", wtx.tx.Hash(), + "expected", tx.Key(), + ) + + if txmp.recheckCursor == txmp.recheckEnd { + // we reached the end of the recheckTx list without finding a tx + // matching the one we received from the ABCI application. + // Return without processing any tx. + txmp.recheckCursor = nil + return + } + + txmp.recheckCursor = txmp.recheckCursor.Next() + wtx = txmp.recheckCursor.Value.(*WrappedTx) + } + + // Only evaluate transactions that have not been removed. This can happen + // if an existing transaction is evicted during CheckTx and while this + // callback is being executed for the same evicted transaction. + if !txmp.txStore.IsTxRemoved(wtx.hash) { + var err error + if txmp.postCheck != nil { + err = txmp.postCheck(tx, res) + } + + if res.Code == abci.CodeTypeOK && err == nil { + wtx.priority = res.Priority + } else { + txmp.logger.Debug( + "existing transaction no longer valid; failed re-CheckTx callback", + "priority", wtx.priority, + "tx", fmt.Sprintf("%X", wtx.tx.Hash()), + "err", err, + "code", res.Code, + ) + + if wtx.gossipEl != txmp.recheckCursor { + panic("corrupted reCheckTx cursor") + } + + txmp.removeTx(wtx, !txmp.config.KeepInvalidTxsInCache) + } + } + + // move reCheckTx cursor to next element + if txmp.recheckCursor == txmp.recheckEnd { + txmp.recheckCursor = nil + } else { + txmp.recheckCursor = txmp.recheckCursor.Next() + } + + if txmp.recheckCursor == nil { + txmp.logger.Debug("finished rechecking transactions") + + if txmp.Size() > 0 { + txmp.notifyTxsAvailable() + } + } + + txmp.metrics.Size.Set(float64(txmp.Size())) +} + +// updateReCheckTxs updates the recheck cursors using the gossipIndex. For +// each transaction, it executes CheckTx and passes the result to +// defaultTxCallback once CheckTx completes. +// +// NOTE: +// - The caller must have a write-lock when executing updateReCheckTxs. +func (txmp *TxMempool) updateReCheckTxs(ctx context.Context) { + if txmp.Size() == 0 { + panic("attempted to update re-CheckTx txs when mempool is empty") + } + + txmp.recheckCursor = txmp.gossipIndex.Front() + txmp.recheckEnd = txmp.gossipIndex.Back() + + for e := txmp.gossipIndex.Front(); e != nil; e = e.Next() { + wtx := e.Value.(*WrappedTx) + + // Only execute CheckTx if the transaction is not marked as removed which + // could happen if the transaction was evicted. + if !txmp.txStore.IsTxRemoved(wtx.hash) { + res, err := txmp.proxyAppConn.CheckTx(ctx, &abci.RequestCheckTx{ + Tx: wtx.tx, + Type: abci.CheckTxType_Recheck, + }) + if err != nil { + // no need to retry since the tx will be rechecked after the next block + txmp.logger.Error("failed to execute CheckTx during rechecking", "err", err) + continue + } + txmp.defaultTxCallback(wtx.tx, res) + } + } + + if err := txmp.proxyAppConn.Flush(ctx); err != nil { + txmp.logger.Error("failed to flush transactions during rechecking", "err", err) + } +} + +// canAddTx returns an error if we cannot insert the provided *WrappedTx into +// the mempool due to the mempool's configured constraints. If it returns nil, +// the transaction can be inserted into the mempool.
+func (txmp *TxMempool) canAddTx(wtx *WrappedTx) error { + var ( + numTxs = txmp.Size() + sizeBytes = txmp.SizeBytes() + ) + + if numTxs >= txmp.config.Size || int64(wtx.Size())+sizeBytes > txmp.config.MaxTxsBytes { + return types.ErrMempoolIsFull{ + NumTxs: numTxs, + MaxTxs: txmp.config.Size, + TxsBytes: sizeBytes, + MaxTxsBytes: txmp.config.MaxTxsBytes, + } + } + + return nil +} + +func (txmp *TxMempool) insertTx(wtx *WrappedTx) { + txmp.txStore.SetTx(wtx) + txmp.priorityIndex.PushTx(wtx) + txmp.heightIndex.Insert(wtx) + txmp.timestampIndex.Insert(wtx) + + // Insert the transaction into the gossip index and mark the reference to the + // linked-list element, which will be needed at a later point when the + // transaction is removed. + gossipEl := txmp.gossipIndex.PushBack(wtx) + wtx.gossipEl = gossipEl + + atomic.AddInt64(&txmp.sizeBytes, int64(wtx.Size())) +} + +func (txmp *TxMempool) removeTx(wtx *WrappedTx, removeFromCache bool) { + if txmp.txStore.IsTxRemoved(wtx.hash) { + return + } + + txmp.txStore.RemoveTx(wtx) + txmp.priorityIndex.RemoveTx(wtx) + txmp.heightIndex.Remove(wtx) + txmp.timestampIndex.Remove(wtx) + + // Remove the transaction from the gossip index and cleanup the linked-list + // element so it can be garbage collected. + txmp.gossipIndex.Remove(wtx.gossipEl) + wtx.gossipEl.DetachPrev() + + atomic.AddInt64(&txmp.sizeBytes, int64(-wtx.Size())) + + if removeFromCache { + txmp.cache.Remove(wtx.tx) + } +} + +// purgeExpiredTxs removes all transactions that have exceeded their respective +// height- and/or time-based TTLs from their respective indexes. Every expired +// transaction will be removed from the mempool, but preserved in the cache. +// +// NOTE: purgeExpiredTxs must only be called during TxMempool#Update in which +// the caller has a write-lock on the mempool and so we can safely iterate over +// the height and time based indexes. 
+func (txmp *TxMempool) purgeExpiredTxs(blockHeight int64) { + now := time.Now() + expiredTxs := make(map[types.TxKey]*WrappedTx) + + if txmp.config.TTLNumBlocks > 0 { + purgeIdx := -1 + for i, wtx := range txmp.heightIndex.txs { + if (blockHeight - wtx.height) > txmp.config.TTLNumBlocks { + expiredTxs[wtx.tx.Key()] = wtx + purgeIdx = i + } else { + // since the index is sorted, we know no other txs can be be purged + break + } + } + + if purgeIdx >= 0 { + txmp.heightIndex.txs = txmp.heightIndex.txs[purgeIdx+1:] + } + } + + if txmp.config.TTLDuration > 0 { + purgeIdx := -1 + for i, wtx := range txmp.timestampIndex.txs { + if now.Sub(wtx.timestamp) > txmp.config.TTLDuration { + expiredTxs[wtx.tx.Key()] = wtx + purgeIdx = i + } else { + // since the index is sorted, we know no other txs can be be purged + break + } + } + + if purgeIdx >= 0 { + txmp.timestampIndex.txs = txmp.timestampIndex.txs[purgeIdx+1:] + } + } + + for _, wtx := range expiredTxs { + txmp.removeTx(wtx, false) + } +} + +func (txmp *TxMempool) notifyTxsAvailable() { + if txmp.Size() == 0 { + panic("attempt to notify txs available but mempool is empty!") + } + + if txmp.txsAvailable != nil && !txmp.notifiedTxsAvailable { + // channel cap is 1, so this will send once + txmp.notifiedTxsAvailable = true + + select { + case txmp.txsAvailable <- struct{}{}: + default: + } } } diff --git a/internal/mempool/mempool_bench_test.go b/internal/mempool/mempool_bench_test.go new file mode 100644 index 0000000000..14fb22197e --- /dev/null +++ b/internal/mempool/mempool_bench_test.go @@ -0,0 +1,50 @@ +package mempool + +import ( + "context" + "fmt" + "math/rand" + "testing" + "time" + + "github.com/stretchr/testify/require" + + abciclient "github.com/tendermint/tendermint/abci/client" + "github.com/tendermint/tendermint/abci/example/kvstore" + "github.com/tendermint/tendermint/libs/log" +) + +func BenchmarkTxMempool_CheckTx(b *testing.B) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + client := abciclient.NewLocalClient(log.NewNopLogger(), kvstore.NewApplication()) + if err := client.Start(ctx); err != nil { + b.Fatal(err) + } + + // setup the cache and the mempool number for hitting GetEvictableTxs during the + // benchmark. 5000 is the current default mempool size in the TM config. 
+ txmp := setup(b, client, 10000) + txmp.config.Size = 5000 + + rng := rand.New(rand.NewSource(time.Now().UnixNano())) + const peerID = 1 + + b.ResetTimer() + + for n := 0; n < b.N; n++ { + b.StopTimer() + prefix := make([]byte, 20) + _, err := rng.Read(prefix) + require.NoError(b, err) + + priority := int64(rng.Intn(9999-1000) + 1000) + tx := []byte(fmt.Sprintf("sender-%d-%d=%X=%d", n, peerID, prefix, priority)) + txInfo := TxInfo{SenderID: uint16(peerID)} + + b.StartTimer() + + require.NoError(b, txmp.CheckTx(ctx, tx, nil, txInfo)) + } +} diff --git a/internal/mempool/v1/mempool_test.go b/internal/mempool/mempool_test.go similarity index 61% rename from internal/mempool/v1/mempool_test.go rename to internal/mempool/mempool_test.go index 72a72861c9..946377b1cd 100644 --- a/internal/mempool/v1/mempool_test.go +++ b/internal/mempool/mempool_test.go @@ -1,4 +1,4 @@ -package v1 +package mempool import ( "bytes" @@ -21,7 +21,6 @@ import ( "github.com/tendermint/tendermint/abci/example/kvstore" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/types" ) @@ -37,7 +36,7 @@ type testTx struct { priority int64 } -func (app *application) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx { +func (app *application) CheckTx(_ context.Context, req *abci.RequestCheckTx) (*abci.ResponseCheckTx, error) { var ( priority int64 sender string @@ -48,56 +47,50 @@ func (app *application) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx { if len(parts) == 3 { v, err := strconv.ParseInt(string(parts[2]), 10, 64) if err != nil { - return abci.ResponseCheckTx{ + return &abci.ResponseCheckTx{ Priority: priority, Code: 100, GasWanted: 1, - } + }, nil } priority = v sender = string(parts[0]) } else { - return abci.ResponseCheckTx{ + return &abci.ResponseCheckTx{ Priority: priority, Code: 101, GasWanted: 1, - } + }, nil } - return abci.ResponseCheckTx{ + return &abci.ResponseCheckTx{ Priority: priority, Sender: sender, Code: code.CodeTypeOK, GasWanted: 1, - } + }, nil } -func setup(t testing.TB, cacheSize int, options ...TxMempoolOption) *TxMempool { +func setup(t testing.TB, app abciclient.Client, cacheSize int, options ...TxMempoolOption) *TxMempool { t.Helper() - app := &application{kvstore.NewApplication()} - cc := abciclient.NewLocalCreator(app) + logger := log.NewNopLogger() - cfg, err := config.ResetTestRoot(strings.ReplaceAll(t.Name(), "/", "|")) + cfg, err := config.ResetTestRoot(t.TempDir(), strings.ReplaceAll(t.Name(), "/", "|")) require.NoError(t, err) cfg.Mempool.CacheSize = cacheSize - appConnMem, err := cc() - require.NoError(t, err) - require.NoError(t, appConnMem.Start()) - - t.Cleanup(func() { - os.RemoveAll(cfg.RootDir) - require.NoError(t, appConnMem.Stop()) - }) + t.Cleanup(func() { os.RemoveAll(cfg.RootDir) }) - return NewTxMempool(log.TestingLogger().With("test", t.Name()), cfg.Mempool, appConnMem, 0, options...) + return NewTxMempool(logger.With("test", t.Name()), cfg.Mempool, app, options...) 
} -func checkTxs(t *testing.T, txmp *TxMempool, numTxs int, peerID uint16) []testTx { +func checkTxs(ctx context.Context, t *testing.T, txmp *TxMempool, numTxs int, peerID uint16) []testTx { + t.Helper() + txs := make([]testTx, numTxs) - txInfo := mempool.TxInfo{SenderID: peerID} + txInfo := TxInfo{SenderID: peerID} rng := rand.New(rand.NewSource(time.Now().UnixNano())) @@ -112,14 +105,33 @@ func checkTxs(t *testing.T, txmp *TxMempool, numTxs int, peerID uint16) []testTx tx: []byte(fmt.Sprintf("sender-%d-%d=%X=%d", i, peerID, prefix, priority)), priority: priority, } - require.NoError(t, txmp.CheckTx(context.Background(), txs[i].tx, nil, txInfo)) + require.NoError(t, txmp.CheckTx(ctx, txs[i].tx, nil, txInfo)) } return txs } +func convertTex(in []testTx) types.Txs { + out := make([]types.Tx, len(in)) + + for idx := range in { + out[idx] = in[idx].tx + } + + return out +} + func TestTxMempool_TxsAvailable(t *testing.T) { - txmp := setup(t, 0) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + client := abciclient.NewLocalClient(log.NewNopLogger(), &application{Application: kvstore.NewApplication()}) + if err := client.Start(ctx); err != nil { + t.Fatal(err) + } + t.Cleanup(client.Wait) + + txmp := setup(t, client, 0) txmp.EnableTxsAvailable() ensureNoTxFire := func() { @@ -145,7 +157,7 @@ func TestTxMempool_TxsAvailable(t *testing.T) { // Execute CheckTx for some transactions and ensure TxsAvailable only fires // once. - txs := checkTxs(t, txmp, 100, 0) + txs := checkTxs(ctx, t, txmp, 100, 0) ensureTxFire() ensureNoTxFire() @@ -154,27 +166,36 @@ func TestTxMempool_TxsAvailable(t *testing.T) { rawTxs[i] = tx.tx } - responses := make([]*abci.ResponseDeliverTx, len(rawTxs[:50])) + responses := make([]*abci.ExecTxResult, len(rawTxs[:50])) for i := 0; i < len(responses); i++ { - responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK} + responses[i] = &abci.ExecTxResult{Code: abci.CodeTypeOK} } // commit half the transactions and ensure we fire an event txmp.Lock() - require.NoError(t, txmp.Update(1, rawTxs[:50], responses, nil, nil)) + require.NoError(t, txmp.Update(ctx, 1, rawTxs[:50], responses, nil, nil)) txmp.Unlock() ensureTxFire() ensureNoTxFire() // Execute CheckTx for more transactions and ensure we do not fire another // event as we're still on the same height (1). 
- _ = checkTxs(t, txmp, 100, 0) + _ = checkTxs(ctx, t, txmp, 100, 0) ensureNoTxFire() } func TestTxMempool_Size(t *testing.T) { - txmp := setup(t, 0) - txs := checkTxs(t, txmp, 100, 0) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + client := abciclient.NewLocalClient(log.NewNopLogger(), &application{Application: kvstore.NewApplication()}) + if err := client.Start(ctx); err != nil { + t.Fatal(err) + } + t.Cleanup(client.Wait) + + txmp := setup(t, client, 0) + txs := checkTxs(ctx, t, txmp, 100, 0) require.Equal(t, len(txs), txmp.Size()) require.Equal(t, int64(5690), txmp.SizeBytes()) @@ -183,13 +204,13 @@ func TestTxMempool_Size(t *testing.T) { rawTxs[i] = tx.tx } - responses := make([]*abci.ResponseDeliverTx, len(rawTxs[:50])) + responses := make([]*abci.ExecTxResult, len(rawTxs[:50])) for i := 0; i < len(responses); i++ { - responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK} + responses[i] = &abci.ExecTxResult{Code: abci.CodeTypeOK} } txmp.Lock() - require.NoError(t, txmp.Update(1, rawTxs[:50], responses, nil, nil)) + require.NoError(t, txmp.Update(ctx, 1, rawTxs[:50], responses, nil, nil)) txmp.Unlock() require.Equal(t, len(rawTxs)/2, txmp.Size()) @@ -197,8 +218,17 @@ func TestTxMempool_Size(t *testing.T) { } func TestTxMempool_Flush(t *testing.T) { - txmp := setup(t, 0) - txs := checkTxs(t, txmp, 100, 0) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + client := abciclient.NewLocalClient(log.NewNopLogger(), &application{Application: kvstore.NewApplication()}) + if err := client.Start(ctx); err != nil { + t.Fatal(err) + } + t.Cleanup(client.Wait) + + txmp := setup(t, client, 0) + txs := checkTxs(ctx, t, txmp, 100, 0) require.Equal(t, len(txs), txmp.Size()) require.Equal(t, int64(5690), txmp.SizeBytes()) @@ -207,13 +237,13 @@ func TestTxMempool_Flush(t *testing.T) { rawTxs[i] = tx.tx } - responses := make([]*abci.ResponseDeliverTx, len(rawTxs[:50])) + responses := make([]*abci.ExecTxResult, len(rawTxs[:50])) for i := 0; i < len(responses); i++ { - responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK} + responses[i] = &abci.ExecTxResult{Code: abci.CodeTypeOK} } txmp.Lock() - require.NoError(t, txmp.Update(1, rawTxs[:50], responses, nil, nil)) + require.NoError(t, txmp.Update(ctx, 1, rawTxs[:50], responses, nil, nil)) txmp.Unlock() txmp.Flush() @@ -222,8 +252,17 @@ func TestTxMempool_Flush(t *testing.T) { } func TestTxMempool_ReapMaxBytesMaxGas(t *testing.T) { - txmp := setup(t, 0) - tTxs := checkTxs(t, txmp, 100, 0) // all txs request 1 gas unit + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + client := abciclient.NewLocalClient(log.NewNopLogger(), &application{Application: kvstore.NewApplication()}) + if err := client.Start(ctx); err != nil { + t.Fatal(err) + } + t.Cleanup(client.Wait) + + txmp := setup(t, client, 0) + tTxs := checkTxs(ctx, t, txmp, 100, 0) // all txs request 1 gas unit require.Equal(t, len(tTxs), txmp.Size()) require.Equal(t, int64(5690), txmp.SizeBytes()) @@ -272,8 +311,17 @@ func TestTxMempool_ReapMaxBytesMaxGas(t *testing.T) { } func TestTxMempool_ReapMaxTxs(t *testing.T) { - txmp := setup(t, 0) - tTxs := checkTxs(t, txmp, 100, 0) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + client := abciclient.NewLocalClient(log.NewNopLogger(), &application{Application: kvstore.NewApplication()}) + if err := client.Start(ctx); err != nil { + t.Fatal(err) + } + t.Cleanup(client.Wait) + + txmp := setup(t, client, 0) + tTxs := checkTxs(ctx, t, 
txmp, 100, 0) require.Equal(t, len(tTxs), txmp.Size()) require.Equal(t, int64(5690), txmp.SizeBytes()) @@ -321,24 +369,41 @@ func TestTxMempool_ReapMaxTxs(t *testing.T) { } func TestTxMempool_CheckTxExceedsMaxSize(t *testing.T) { - txmp := setup(t, 0) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + client := abciclient.NewLocalClient(log.NewNopLogger(), &application{Application: kvstore.NewApplication()}) + if err := client.Start(ctx); err != nil { + t.Fatal(err) + } + t.Cleanup(client.Wait) + txmp := setup(t, client, 0) rng := rand.New(rand.NewSource(time.Now().UnixNano())) tx := make([]byte, txmp.config.MaxTxBytes+1) _, err := rng.Read(tx) require.NoError(t, err) - require.Error(t, txmp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{SenderID: 0})) + require.Error(t, txmp.CheckTx(ctx, tx, nil, TxInfo{SenderID: 0})) tx = make([]byte, txmp.config.MaxTxBytes-1) _, err = rng.Read(tx) require.NoError(t, err) - require.NoError(t, txmp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{SenderID: 0})) + require.NoError(t, txmp.CheckTx(ctx, tx, nil, TxInfo{SenderID: 0})) } func TestTxMempool_CheckTxSamePeer(t *testing.T) { - txmp := setup(t, 100) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + client := abciclient.NewLocalClient(log.NewNopLogger(), &application{Application: kvstore.NewApplication()}) + if err := client.Start(ctx); err != nil { + t.Fatal(err) + } + t.Cleanup(client.Wait) + + txmp := setup(t, client, 100) peerID := uint16(1) rng := rand.New(rand.NewSource(time.Now().UnixNano())) @@ -348,12 +413,21 @@ func TestTxMempool_CheckTxSamePeer(t *testing.T) { tx := []byte(fmt.Sprintf("sender-0=%X=%d", prefix, 50)) - require.NoError(t, txmp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{SenderID: peerID})) - require.Error(t, txmp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{SenderID: peerID})) + require.NoError(t, txmp.CheckTx(ctx, tx, nil, TxInfo{SenderID: peerID})) + require.Error(t, txmp.CheckTx(ctx, tx, nil, TxInfo{SenderID: peerID})) } func TestTxMempool_CheckTxSameSender(t *testing.T) { - txmp := setup(t, 100) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + client := abciclient.NewLocalClient(log.NewNopLogger(), &application{Application: kvstore.NewApplication()}) + if err := client.Start(ctx); err != nil { + t.Fatal(err) + } + t.Cleanup(client.Wait) + + txmp := setup(t, client, 100) peerID := uint16(1) rng := rand.New(rand.NewSource(time.Now().UnixNano())) @@ -368,14 +442,23 @@ func TestTxMempool_CheckTxSameSender(t *testing.T) { tx1 := []byte(fmt.Sprintf("sender-0=%X=%d", prefix1, 50)) tx2 := []byte(fmt.Sprintf("sender-0=%X=%d", prefix2, 50)) - require.NoError(t, txmp.CheckTx(context.Background(), tx1, nil, mempool.TxInfo{SenderID: peerID})) + require.NoError(t, txmp.CheckTx(ctx, tx1, nil, TxInfo{SenderID: peerID})) require.Equal(t, 1, txmp.Size()) - require.NoError(t, txmp.CheckTx(context.Background(), tx2, nil, mempool.TxInfo{SenderID: peerID})) + require.NoError(t, txmp.CheckTx(ctx, tx2, nil, TxInfo{SenderID: peerID})) require.Equal(t, 1, txmp.Size()) } func TestTxMempool_ConcurrentTxs(t *testing.T) { - txmp := setup(t, 100) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + client := abciclient.NewLocalClient(log.NewNopLogger(), &application{Application: kvstore.NewApplication()}) + if err := client.Start(ctx); err != nil { + t.Fatal(err) + } + t.Cleanup(client.Wait) + + txmp := setup(t, client, 100) rng := 
rand.New(rand.NewSource(time.Now().UnixNano())) checkTxDone := make(chan struct{}) @@ -384,7 +467,7 @@ func TestTxMempool_ConcurrentTxs(t *testing.T) { wg.Add(1) go func() { for i := 0; i < 20; i++ { - _ = checkTxs(t, txmp, 100, 0) + _ = checkTxs(ctx, t, txmp, 100, 0) dur := rng.Intn(1000-500) + 500 time.Sleep(time.Duration(dur) * time.Millisecond) } @@ -404,7 +487,7 @@ func TestTxMempool_ConcurrentTxs(t *testing.T) { for range ticker.C { reapedTxs := txmp.ReapMaxTxs(200) if len(reapedTxs) > 0 { - responses := make([]*abci.ResponseDeliverTx, len(reapedTxs)) + responses := make([]*abci.ExecTxResult, len(reapedTxs)) for i := 0; i < len(responses); i++ { var code uint32 @@ -414,11 +497,11 @@ func TestTxMempool_ConcurrentTxs(t *testing.T) { code = abci.CodeTypeOK } - responses[i] = &abci.ResponseDeliverTx{Code: code} + responses[i] = &abci.ExecTxResult{Code: code} } txmp.Lock() - require.NoError(t, txmp.Update(height, reapedTxs, responses, nil, nil)) + require.NoError(t, txmp.Update(ctx, height, reapedTxs, responses, nil, nil)) txmp.Unlock() height++ @@ -439,30 +522,39 @@ func TestTxMempool_ConcurrentTxs(t *testing.T) { } func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) { - txmp := setup(t, 500) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + client := abciclient.NewLocalClient(log.NewNopLogger(), &application{Application: kvstore.NewApplication()}) + if err := client.Start(ctx); err != nil { + t.Fatal(err) + } + t.Cleanup(client.Wait) + + txmp := setup(t, client, 500) txmp.height = 100 txmp.config.TTLNumBlocks = 10 - tTxs := checkTxs(t, txmp, 100, 0) + tTxs := checkTxs(ctx, t, txmp, 100, 0) require.Equal(t, len(tTxs), txmp.Size()) require.Equal(t, 100, txmp.heightIndex.Size()) // reap 5 txs at the next height -- no txs should expire reapedTxs := txmp.ReapMaxTxs(5) - responses := make([]*abci.ResponseDeliverTx, len(reapedTxs)) + responses := make([]*abci.ExecTxResult, len(reapedTxs)) for i := 0; i < len(responses); i++ { - responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK} + responses[i] = &abci.ExecTxResult{Code: abci.CodeTypeOK} } txmp.Lock() - require.NoError(t, txmp.Update(txmp.height+1, reapedTxs, responses, nil, nil)) + require.NoError(t, txmp.Update(ctx, txmp.height+1, reapedTxs, responses, nil, nil)) txmp.Unlock() require.Equal(t, 95, txmp.Size()) require.Equal(t, 95, txmp.heightIndex.Size()) // check more txs at height 101 - _ = checkTxs(t, txmp, 50, 1) + _ = checkTxs(ctx, t, txmp, 50, 1) require.Equal(t, 145, txmp.Size()) require.Equal(t, 145, txmp.heightIndex.Size()) @@ -475,13 +567,13 @@ func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) { // removed. However, we do know that that at most 95 txs can be expired and // removed. 
reapedTxs = txmp.ReapMaxTxs(5) - responses = make([]*abci.ResponseDeliverTx, len(reapedTxs)) + responses = make([]*abci.ExecTxResult, len(reapedTxs)) for i := 0; i < len(responses); i++ { - responses[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK} + responses[i] = &abci.ExecTxResult{Code: abci.CodeTypeOK} } txmp.Lock() - require.NoError(t, txmp.Update(txmp.height+10, reapedTxs, responses, nil, nil)) + require.NoError(t, txmp.Update(ctx, txmp.height+10, reapedTxs, responses, nil, nil)) txmp.Unlock() require.GreaterOrEqual(t, txmp.Size(), 45) @@ -489,6 +581,9 @@ func TestTxMempool_ExpiredTxs_NumBlocks(t *testing.T) { } func TestTxMempool_CheckTxPostCheckError(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cases := []struct { name string err error @@ -505,25 +600,32 @@ func TestTxMempool_CheckTxPostCheckError(t *testing.T) { for _, tc := range cases { testCase := tc t.Run(testCase.name, func(t *testing.T) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + client := abciclient.NewLocalClient(log.NewNopLogger(), &application{Application: kvstore.NewApplication()}) + if err := client.Start(ctx); err != nil { + t.Fatal(err) + } + t.Cleanup(client.Wait) + postCheckFn := func(_ types.Tx, _ *abci.ResponseCheckTx) error { return testCase.err } - txmp := setup(t, 0, WithPostCheck(postCheckFn)) + txmp := setup(t, client, 0, WithPostCheck(postCheckFn)) rng := rand.New(rand.NewSource(time.Now().UnixNano())) tx := make([]byte, txmp.config.MaxTxBytes-1) _, err := rng.Read(tx) require.NoError(t, err) - callback := func(res *abci.Response) { - checkTxRes, ok := res.Value.(*abci.Response_CheckTx) - require.True(t, ok) + callback := func(res *abci.ResponseCheckTx) { expectedErrString := "" if testCase.err != nil { expectedErrString = testCase.err.Error() } - require.Equal(t, expectedErrString, checkTxRes.CheckTx.MempoolError) + require.Equal(t, expectedErrString, res.MempoolError) } - require.NoError(t, txmp.CheckTx(context.Background(), tx, callback, mempool.TxInfo{SenderID: 0})) + require.NoError(t, txmp.CheckTx(ctx, tx, callback, TxInfo{SenderID: 0})) }) } } diff --git a/internal/mempool/mock/mempool.go b/internal/mempool/mock/mempool.go deleted file mode 100644 index 8e6f0c7bfb..0000000000 --- a/internal/mempool/mock/mempool.go +++ /dev/null @@ -1,45 +0,0 @@ -package mock - -import ( - "context" - - abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/internal/libs/clist" - "github.com/tendermint/tendermint/internal/mempool" - "github.com/tendermint/tendermint/types" -) - -// Mempool is an empty implementation of a Mempool, useful for testing. 
-type Mempool struct{} - -var _ mempool.Mempool = Mempool{} - -func (Mempool) Lock() {} -func (Mempool) Unlock() {} -func (Mempool) Size() int { return 0 } -func (Mempool) CheckTx(_ context.Context, _ types.Tx, _ func(*abci.Response), _ mempool.TxInfo) error { - return nil -} -func (Mempool) RemoveTxByKey(txKey types.TxKey) error { return nil } -func (Mempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} } -func (Mempool) ReapMaxTxs(n int) types.Txs { return types.Txs{} } -func (Mempool) Update( - _ int64, - _ types.Txs, - _ []*abci.ResponseDeliverTx, - _ mempool.PreCheckFunc, - _ mempool.PostCheckFunc, -) error { - return nil -} -func (Mempool) Flush() {} -func (Mempool) FlushAppConn() error { return nil } -func (Mempool) TxsAvailable() <-chan struct{} { return make(chan struct{}) } -func (Mempool) EnableTxsAvailable() {} -func (Mempool) SizeBytes() int64 { return 0 } - -func (Mempool) TxsFront() *clist.CElement { return nil } -func (Mempool) TxsWaitChan() <-chan struct{} { return nil } - -func (Mempool) InitWAL() error { return nil } -func (Mempool) CloseWAL() {} diff --git a/internal/mempool/mocks/mempool.go b/internal/mempool/mocks/mempool.go new file mode 100644 index 0000000000..b82d7d63e8 --- /dev/null +++ b/internal/mempool/mocks/mempool.go @@ -0,0 +1,184 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + context "context" + + abcitypes "github.com/tendermint/tendermint/abci/types" + + mempool "github.com/tendermint/tendermint/internal/mempool" + + mock "github.com/stretchr/testify/mock" + + testing "testing" + + types "github.com/tendermint/tendermint/types" +) + +// Mempool is an autogenerated mock type for the Mempool type +type Mempool struct { + mock.Mock +} + +// CheckTx provides a mock function with given fields: ctx, tx, callback, txInfo +func (_m *Mempool) CheckTx(ctx context.Context, tx types.Tx, callback func(*abcitypes.ResponseCheckTx), txInfo mempool.TxInfo) error { + ret := _m.Called(ctx, tx, callback, txInfo) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, types.Tx, func(*abcitypes.ResponseCheckTx), mempool.TxInfo) error); ok { + r0 = rf(ctx, tx, callback, txInfo) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// EnableTxsAvailable provides a mock function with given fields: +func (_m *Mempool) EnableTxsAvailable() { + _m.Called() +} + +// Flush provides a mock function with given fields: +func (_m *Mempool) Flush() { + _m.Called() +} + +// FlushAppConn provides a mock function with given fields: _a0 +func (_m *Mempool) FlushAppConn(_a0 context.Context) error { + ret := _m.Called(_a0) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Lock provides a mock function with given fields: +func (_m *Mempool) Lock() { + _m.Called() +} + +// ReapMaxBytesMaxGas provides a mock function with given fields: maxBytes, maxGas +func (_m *Mempool) ReapMaxBytesMaxGas(maxBytes int64, maxGas int64) types.Txs { + ret := _m.Called(maxBytes, maxGas) + + var r0 types.Txs + if rf, ok := ret.Get(0).(func(int64, int64) types.Txs); ok { + r0 = rf(maxBytes, maxGas) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(types.Txs) + } + } + + return r0 +} + +// ReapMaxTxs provides a mock function with given fields: max +func (_m *Mempool) ReapMaxTxs(max int) types.Txs { + ret := _m.Called(max) + + var r0 types.Txs + if rf, ok := ret.Get(0).(func(int) types.Txs); ok { + r0 = rf(max) + } else { + if ret.Get(0) != nil { + r0 = 
ret.Get(0).(types.Txs) + } + } + + return r0 +} + +// RemoveTxByKey provides a mock function with given fields: txKey +func (_m *Mempool) RemoveTxByKey(txKey types.TxKey) error { + ret := _m.Called(txKey) + + var r0 error + if rf, ok := ret.Get(0).(func(types.TxKey) error); ok { + r0 = rf(txKey) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Size provides a mock function with given fields: +func (_m *Mempool) Size() int { + ret := _m.Called() + + var r0 int + if rf, ok := ret.Get(0).(func() int); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int) + } + + return r0 +} + +// SizeBytes provides a mock function with given fields: +func (_m *Mempool) SizeBytes() int64 { + ret := _m.Called() + + var r0 int64 + if rf, ok := ret.Get(0).(func() int64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int64) + } + + return r0 +} + +// TxsAvailable provides a mock function with given fields: +func (_m *Mempool) TxsAvailable() <-chan struct{} { + ret := _m.Called() + + var r0 <-chan struct{} + if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan struct{}) + } + } + + return r0 +} + +// Unlock provides a mock function with given fields: +func (_m *Mempool) Unlock() { + _m.Called() +} + +// Update provides a mock function with given fields: ctx, blockHeight, blockTxs, txResults, newPreFn, newPostFn +func (_m *Mempool) Update(ctx context.Context, blockHeight int64, blockTxs types.Txs, txResults []*abcitypes.ExecTxResult, newPreFn mempool.PreCheckFunc, newPostFn mempool.PostCheckFunc) error { + ret := _m.Called(ctx, blockHeight, blockTxs, txResults, newPreFn, newPostFn) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, int64, types.Txs, []*abcitypes.ExecTxResult, mempool.PreCheckFunc, mempool.PostCheckFunc) error); ok { + r0 = rf(ctx, blockHeight, blockTxs, txResults, newPreFn, newPostFn) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewMempool creates a new instance of Mempool. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. +func NewMempool(t testing.TB) *Mempool { + mock := &Mempool{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/internal/mempool/v1/priority_queue.go b/internal/mempool/priority_queue.go similarity index 97% rename from internal/mempool/v1/priority_queue.go rename to internal/mempool/priority_queue.go index df74a92d3c..e31997397e 100644 --- a/internal/mempool/v1/priority_queue.go +++ b/internal/mempool/priority_queue.go @@ -1,17 +1,16 @@ -package v1 +package mempool import ( "container/heap" "sort" - - tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "sync" ) var _ heap.Interface = (*TxPriorityQueue)(nil) // TxPriorityQueue defines a thread-safe priority queue for valid transactions. 
type TxPriorityQueue struct { - mtx tmsync.RWMutex + mtx sync.RWMutex txs []*WrappedTx } diff --git a/internal/mempool/v1/priority_queue_test.go b/internal/mempool/priority_queue_test.go similarity index 99% rename from internal/mempool/v1/priority_queue_test.go rename to internal/mempool/priority_queue_test.go index c0048f388f..ddc84806da 100644 --- a/internal/mempool/v1/priority_queue_test.go +++ b/internal/mempool/priority_queue_test.go @@ -1,4 +1,4 @@ -package v1 +package mempool import ( "math/rand" diff --git a/internal/mempool/v1/reactor.go b/internal/mempool/reactor.go similarity index 56% rename from internal/mempool/v1/reactor.go rename to internal/mempool/reactor.go index b40bc295c4..ea199b28b0 100644 --- a/internal/mempool/v1/reactor.go +++ b/internal/mempool/reactor.go @@ -1,4 +1,4 @@ -package v1 +package mempool import ( "context" @@ -10,8 +10,6 @@ import ( "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/libs/clist" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" @@ -36,52 +34,43 @@ type PeerManager interface { // txs to the peers you received it from. type Reactor struct { service.BaseService + logger log.Logger cfg *config.MempoolConfig mempool *TxMempool - ids *mempool.MempoolIDs + ids *IDs - // XXX: Currently, this is the only way to get information about a peer. Ideally, - // we rely on message-oriented communication to get necessary peer data. - // ref: https://github.com/tendermint/tendermint/issues/5670 - peerMgr PeerManager - - mempoolCh *p2p.Channel - peerUpdates *p2p.PeerUpdates - closeCh chan struct{} - - // peerWG is used to coordinate graceful termination of all peer broadcasting - // goroutines. - peerWG sync.WaitGroup + getPeerHeight func(types.NodeID) int64 + peerEvents p2p.PeerEventSubscriber + chCreator p2p.ChannelCreator // observePanic is a function for observing panics that were recovered in methods on // Reactor. observePanic is called with the recovered value. observePanic func(interface{}) - mtx tmsync.Mutex - peerRoutines map[types.NodeID]*tmsync.Closer + mtx sync.Mutex + peerRoutines map[types.NodeID]context.CancelFunc } // NewReactor returns a reference to a new reactor. 
func NewReactor( logger log.Logger, cfg *config.MempoolConfig, - peerMgr PeerManager, txmp *TxMempool, - mempoolCh *p2p.Channel, - peerUpdates *p2p.PeerUpdates, + chCreator p2p.ChannelCreator, + peerEvents p2p.PeerEventSubscriber, + getPeerHeight func(types.NodeID) int64, ) *Reactor { - r := &Reactor{ - cfg: cfg, - peerMgr: peerMgr, - mempool: txmp, - ids: mempool.NewMempoolIDs(), - mempoolCh: mempoolCh, - peerUpdates: peerUpdates, - closeCh: make(chan struct{}), - peerRoutines: make(map[types.NodeID]*tmsync.Closer), - observePanic: defaultObservePanic, + logger: logger, + cfg: cfg, + mempool: txmp, + ids: NewMempoolIDs(), + chCreator: chCreator, + peerEvents: peerEvents, + getPeerHeight: getPeerHeight, + peerRoutines: make(map[types.NodeID]context.CancelFunc), + observePanic: defaultObservePanic, } r.BaseService = *service.NewBaseService(logger, "Mempool", r) @@ -90,14 +79,9 @@ func NewReactor( func defaultObservePanic(r interface{}) {} -// GetChannelShims returns a map of ChannelDescriptorShim objects, where each -// object wraps a reference to a legacy p2p ChannelDescriptor and the corresponding -// p2p proto.Message the new p2p Channel is responsible for handling. -// -// -// TODO: Remove once p2p refactor is complete. -// ref: https://github.com/tendermint/tendermint/issues/5670 -func GetChannelShims(cfg *config.MempoolConfig) map[p2p.ChannelID]*p2p.ChannelDescriptorShim { +// getChannelDescriptor produces an instance of a descriptor for this +// package's required channels. +func getChannelDescriptor(cfg *config.MempoolConfig) *p2p.ChannelDescriptor { largestTx := make([]byte, cfg.MaxTxBytes) batchMsg := protomem.Message{ Sum: &protomem.Message_Txs{ @@ -105,17 +89,13 @@ func GetChannelShims(cfg *config.MempoolConfig) map[p2p.ChannelID]*p2p.ChannelDe }, } - return map[p2p.ChannelID]*p2p.ChannelDescriptorShim{ - mempool.MempoolChannel: { - MsgType: new(protomem.Message), - Descriptor: &p2p.ChannelDescriptor{ - ID: byte(mempool.MempoolChannel), - Priority: 5, - RecvMessageCapacity: batchMsg.Size(), - RecvBufferCapacity: 128, - MaxSendBytes: 5000, - }, - }, + return &p2p.ChannelDescriptor{ + ID: MempoolChannel, + MessageType: new(protomem.Message), + Priority: 5, + RecvMessageCapacity: batchMsg.Size(), + RecvBufferCapacity: 128, + Name: "mempool", } } @@ -123,46 +103,32 @@ func GetChannelShims(cfg *config.MempoolConfig) map[p2p.ChannelID]*p2p.ChannelDe // envelopes on each. In addition, it also listens for peer updates and handles // messages on that p2p channel accordingly. The caller must be sure to execute // OnStop to ensure the outbound p2p Channels are closed. -func (r *Reactor) OnStart() error { +func (r *Reactor) OnStart(ctx context.Context) error { if !r.cfg.Broadcast { - r.Logger.Info("tx broadcasting is disabled") + r.logger.Info("tx broadcasting is disabled") + } + + ch, err := r.chCreator(ctx, getChannelDescriptor(r.cfg)) + if err != nil { + return err } - go r.processMempoolCh() - go r.processPeerUpdates() + go r.processMempoolCh(ctx, ch) + go r.processPeerUpdates(ctx, r.peerEvents(ctx), ch) return nil } // OnStop stops the reactor by signaling to all spawned goroutines to exit and // blocking until they all exit. -func (r *Reactor) OnStop() { - r.mtx.Lock() - for _, c := range r.peerRoutines { - c.Close() - } - r.mtx.Unlock() - - // wait for all spawned peer tx broadcasting goroutines to gracefully exit - r.peerWG.Wait() - - // Close closeCh to signal to all spawned goroutines to gracefully exit. All - // p2p Channels should execute Close(). 
- close(r.closeCh) - - // Wait for all p2p Channels to be closed before returning. This ensures we - // can easily reason about synchronization of all p2p Channels and ensure no - // panics will occur. - <-r.mempoolCh.Done() - <-r.peerUpdates.Done() -} +func (r *Reactor) OnStop() {} // handleMempoolMessage handles envelopes sent from peers on the MempoolChannel. // For every tx in the message, we execute CheckTx. It returns an error if an // empty set of txs are sent in an envelope or if we receive an unexpected // message type. -func (r *Reactor) handleMempoolMessage(envelope p2p.Envelope) error { - logger := r.Logger.With("peer", envelope.From) +func (r *Reactor) handleMempoolMessage(ctx context.Context, envelope *p2p.Envelope) error { + logger := r.logger.With("peer", envelope.From) switch msg := envelope.Message.(type) { case *protomem.Txs: @@ -171,14 +137,25 @@ func (r *Reactor) handleMempoolMessage(envelope p2p.Envelope) error { return errors.New("empty txs received from peer") } - txInfo := mempool.TxInfo{SenderID: r.ids.GetForPeer(envelope.From)} + txInfo := TxInfo{SenderID: r.ids.GetForPeer(envelope.From)} if len(envelope.From) != 0 { txInfo.SenderNodeID = envelope.From } for _, tx := range protoTxs { - if err := r.mempool.CheckTx(context.Background(), types.Tx(tx), nil, txInfo); err != nil { - logger.Error("checktx failed for tx", "tx", fmt.Sprintf("%X", types.Tx(tx).Hash()), "err", err) + if err := r.mempool.CheckTx(ctx, types.Tx(tx), nil, txInfo); err != nil { + if errors.Is(err, types.ErrTxInCache) { + // if the tx is in the cache, + // then we've been gossiped a + // Tx that we've already + // got. Gossip should be + // smarter, but it's not a + // problem. + continue + } + logger.Error("checktx failed for tx", + "tx", fmt.Sprintf("%X", types.Tx(tx).Hash()), + "err", err) } } @@ -192,12 +169,12 @@ func (r *Reactor) handleMempoolMessage(envelope p2p.Envelope) error { // handleMessage handles an Envelope sent from a peer on a specific p2p Channel. // It will handle errors and any possible panics gracefully. A caller can handle // any error returned by sending a PeerError on the respective channel. -func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) { +func (r *Reactor) handleMessage(ctx context.Context, envelope *p2p.Envelope) (err error) { defer func() { if e := recover(); e != nil { r.observePanic(e) err = fmt.Errorf("panic in processing message: %v", e) - r.Logger.Error( + r.logger.Error( "recovering from processing message panic", "err", err, "stack", string(debug.Stack()), @@ -205,38 +182,32 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err } }() - // r.Logger.Debug("received message", "peer", envelope.From) - - switch chID { - case mempool.MempoolChannel: - err = r.handleMempoolMessage(envelope) + // r.logger.Debug("received message", "peer", envelope.From) + switch envelope.ChannelID { + case MempoolChannel: + err = r.handleMempoolMessage(ctx, envelope) default: - err = fmt.Errorf("unknown channel ID (%d) for envelope (%T)", chID, envelope.Message) + err = fmt.Errorf("unknown channel ID (%d) for envelope (%T)", envelope.ChannelID, envelope.Message) } - return err + return } // processMempoolCh implements a blocking event loop where we listen for p2p // Envelope messages from the mempoolCh. 
-func (r *Reactor) processMempoolCh() { - defer r.mempoolCh.Close() - - for { - select { - case envelope := <-r.mempoolCh.In: - if err := r.handleMessage(r.mempoolCh.ID, envelope); err != nil { - r.Logger.Error("failed to process message", "ch_id", r.mempoolCh.ID, "envelope", envelope, "err", err) - r.mempoolCh.Error <- p2p.PeerError{ - NodeID: envelope.From, - Err: err, - } +func (r *Reactor) processMempoolCh(ctx context.Context, mempoolCh *p2p.Channel) { + iter := mempoolCh.Receive(ctx) + for iter.Next(ctx) { + envelope := iter.Envelope() + if err := r.handleMessage(ctx, envelope); err != nil { + r.logger.Error("failed to process message", "ch_id", envelope.ChannelID, "envelope", envelope, "err", err) + if serr := mempoolCh.SendError(ctx, p2p.PeerError{ + NodeID: envelope.From, + Err: err, + }); serr != nil { + return } - - case <-r.closeCh: - r.Logger.Debug("stopped listening on mempool channel; closing...") - return } } } @@ -246,8 +217,8 @@ func (r *Reactor) processMempoolCh() { // goroutine or not. If not, we start one for the newly added peer. For down or // removed peers, we remove the peer from the mempool peer ID set and signal to // stop the tx broadcasting goroutine. -func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { - r.Logger.Debug("received peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status) +func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpdate, mempoolCh *p2p.Channel) { + r.logger.Debug("received peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status) r.mtx.Lock() defer r.mtx.Unlock() @@ -256,8 +227,7 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { case p2p.PeerStatusUp: // Do not allow starting new tx broadcast loops after reactor shutdown // has been initiated. This can happen after we've manually closed all - // peer broadcast loops and closed r.closeCh, but the router still sends - // in-flight peer updates. + // peer broadcast, but the router still sends in-flight peer updates. if !r.IsRunning() { return } @@ -269,15 +239,13 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { // safely, and finally start the goroutine to broadcast txs to that peer. _, ok := r.peerRoutines[peerUpdate.NodeID] if !ok { - closer := tmsync.NewCloser() - - r.peerRoutines[peerUpdate.NodeID] = closer - r.peerWG.Add(1) + pctx, pcancel := context.WithCancel(ctx) + r.peerRoutines[peerUpdate.NodeID] = pcancel r.ids.ReserveForPeer(peerUpdate.NodeID) // start a broadcast routine ensuring all txs are forwarded to the peer - go r.broadcastTxRoutine(peerUpdate.NodeID, closer) + go r.broadcastTxRoutine(pctx, peerUpdate.NodeID, mempoolCh) } } @@ -290,7 +258,7 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { // from the map of peer tx broadcasting goroutines. closer, ok := r.peerRoutines[peerUpdate.NodeID] if ok { - closer.Close() + closer() } } } @@ -298,22 +266,18 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { // processPeerUpdates initiates a blocking process where we listen for and handle // PeerUpdate messages. When the reactor is stopped, we will catch the signal and // close the p2p PeerUpdatesCh gracefully. 
-func (r *Reactor) processPeerUpdates() { - defer r.peerUpdates.Close() - +func (r *Reactor) processPeerUpdates(ctx context.Context, peerUpdates *p2p.PeerUpdates, mempoolCh *p2p.Channel) { for { select { - case peerUpdate := <-r.peerUpdates.Updates(): - r.processPeerUpdate(peerUpdate) - - case <-r.closeCh: - r.Logger.Debug("stopped listening on peer updates channel; closing...") + case <-ctx.Done(): return + case peerUpdate := <-peerUpdates.Updates(): + r.processPeerUpdate(ctx, peerUpdate, mempoolCh) } } } -func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer) { +func (r *Reactor) broadcastTxRoutine(ctx context.Context, peerID types.NodeID, mempoolCh *p2p.Channel) { peerMempoolID := r.ids.GetForPeer(peerID) var nextGossipTx *clist.CElement @@ -323,11 +287,9 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer) delete(r.peerRoutines, peerID) r.mtx.Unlock() - r.peerWG.Done() - if e := recover(); e != nil { r.observePanic(e) - r.Logger.Error( + r.logger.Error( "recovering from broadcasting mempool loop", "err", e, "stack", string(debug.Stack()), @@ -336,7 +298,7 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer) }() for { - if !r.IsRunning() { + if !r.IsRunning() || ctx.Err() != nil { return } @@ -345,30 +307,22 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer) // start from the beginning. if nextGossipTx == nil { select { + case <-ctx.Done(): + return case <-r.mempool.WaitForNextTx(): // wait until a tx is available if nextGossipTx = r.mempool.NextGossipTx(); nextGossipTx == nil { continue } - - case <-closer.Done(): - // The peer is marked for removal via a PeerUpdate as the doneCh was - // explicitly closed to signal we should exit. - return - - case <-r.closeCh: - // The reactor has signaled that we are stopped and thus we should - // implicitly exit this peer's goroutine. - return } } memTx := nextGossipTx.Value.(*WrappedTx) - if r.peerMgr != nil { - height := r.peerMgr.GetHeight(peerID) + if r.getPeerHeight != nil { + height := r.getPeerHeight(peerID) if height > 0 && height < memTx.height-1 { // allow for a lag of one block - time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond) + time.Sleep(PeerCatchupSleepIntervalMS * time.Millisecond) continue } } @@ -378,13 +332,16 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer) if ok := r.mempool.txStore.TxHasPeer(memTx.hash, peerMempoolID); !ok { // Send the mempool tx to the corresponding peer. Note, the peer may be // behind and thus would not be able to process the mempool tx correctly. - r.mempoolCh.Out <- p2p.Envelope{ + if err := mempoolCh.Send(ctx, p2p.Envelope{ To: peerID, Message: &protomem.Txs{ Txs: [][]byte{memTx.tx}, }, + }); err != nil { + return } - r.Logger.Debug( + + r.logger.Debug( "gossiped tx to peer", "tx", fmt.Sprintf("%X", memTx.tx.Hash()), "peer", peerID, @@ -394,15 +351,7 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer) select { case <-nextGossipTx.NextWaitChan(): nextGossipTx = nextGossipTx.Next() - - case <-closer.Done(): - // The peer is marked for removal via a PeerUpdate as the doneCh was - // explicitly closed to signal we should exit. - return - - case <-r.closeCh: - // The reactor has signaled that we are stopped and thus we should - // implicitly exit this peer's goroutine. 
+ case <-ctx.Done(): return } } diff --git a/internal/mempool/reactor_test.go b/internal/mempool/reactor_test.go new file mode 100644 index 0000000000..8ceae20135 --- /dev/null +++ b/internal/mempool/reactor_test.go @@ -0,0 +1,422 @@ +package mempool + +import ( + "context" + "fmt" + "os" + "runtime" + "strings" + "sync" + "testing" + "time" + + "github.com/fortytw2/leaktest" + "github.com/stretchr/testify/require" + + abciclient "github.com/tendermint/tendermint/abci/client" + "github.com/tendermint/tendermint/abci/example/kvstore" + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/internal/p2p" + "github.com/tendermint/tendermint/internal/p2p/p2ptest" + "github.com/tendermint/tendermint/libs/log" + tmrand "github.com/tendermint/tendermint/libs/rand" + protomem "github.com/tendermint/tendermint/proto/tendermint/mempool" + "github.com/tendermint/tendermint/types" +) + +type reactorTestSuite struct { + network *p2ptest.Network + logger log.Logger + + reactors map[types.NodeID]*Reactor + mempoolChannels map[types.NodeID]*p2p.Channel + mempools map[types.NodeID]*TxMempool + kvstores map[types.NodeID]*kvstore.Application + + peerChans map[types.NodeID]chan p2p.PeerUpdate + peerUpdates map[types.NodeID]*p2p.PeerUpdates + + nodes []types.NodeID +} + +func setupReactors(ctx context.Context, t *testing.T, logger log.Logger, numNodes int, chBuf uint) *reactorTestSuite { + t.Helper() + + cfg, err := config.ResetTestRoot(t.TempDir(), strings.ReplaceAll(t.Name(), "/", "|")) + require.NoError(t, err) + t.Cleanup(func() { os.RemoveAll(cfg.RootDir) }) + + rts := &reactorTestSuite{ + logger: log.NewNopLogger().With("testCase", t.Name()), + network: p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: numNodes}), + reactors: make(map[types.NodeID]*Reactor, numNodes), + mempoolChannels: make(map[types.NodeID]*p2p.Channel, numNodes), + mempools: make(map[types.NodeID]*TxMempool, numNodes), + kvstores: make(map[types.NodeID]*kvstore.Application, numNodes), + peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numNodes), + peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numNodes), + } + + chDesc := getChannelDescriptor(cfg.Mempool) + rts.mempoolChannels = rts.network.MakeChannelsNoCleanup(ctx, t, chDesc) + + for nodeID := range rts.network.Nodes { + rts.kvstores[nodeID] = kvstore.NewApplication() + + client := abciclient.NewLocalClient(logger, rts.kvstores[nodeID]) + require.NoError(t, client.Start(ctx)) + t.Cleanup(client.Wait) + + mempool := setup(t, client, 0) + rts.mempools[nodeID] = mempool + + rts.peerChans[nodeID] = make(chan p2p.PeerUpdate, chBuf) + rts.peerUpdates[nodeID] = p2p.NewPeerUpdates(rts.peerChans[nodeID], 1) + rts.network.Nodes[nodeID].PeerManager.Register(ctx, rts.peerUpdates[nodeID]) + + chCreator := func(ctx context.Context, chDesc *p2p.ChannelDescriptor) (*p2p.Channel, error) { + return rts.mempoolChannels[nodeID], nil + } + + rts.reactors[nodeID] = NewReactor( + rts.logger.With("nodeID", nodeID), + cfg.Mempool, + mempool, + chCreator, + func(ctx context.Context) *p2p.PeerUpdates { return rts.peerUpdates[nodeID] }, + rts.network.Nodes[nodeID].PeerManager.GetHeight, + ) + rts.nodes = append(rts.nodes, nodeID) + + require.NoError(t, rts.reactors[nodeID].Start(ctx)) + require.True(t, rts.reactors[nodeID].IsRunning()) + } + + require.Len(t, rts.reactors, numNodes) + + t.Cleanup(func() { + for nodeID := range rts.reactors { + if rts.reactors[nodeID].IsRunning() { + rts.reactors[nodeID].Stop() + 
rts.reactors[nodeID].Wait() + require.False(t, rts.reactors[nodeID].IsRunning()) + } + + } + }) + + t.Cleanup(leaktest.Check(t)) + + return rts +} + +func (rts *reactorTestSuite) start(ctx context.Context, t *testing.T) { + t.Helper() + rts.network.Start(ctx, t) + + require.Len(t, + rts.network.RandomNode().PeerManager.Peers(), + len(rts.nodes)-1, + "network does not have expected number of nodes") +} + +func (rts *reactorTestSuite) waitForTxns(t *testing.T, txs []types.Tx, ids ...types.NodeID) { + t.Helper() + + // ensure that the transactions get fully broadcast to the + // rest of the network + wg := &sync.WaitGroup{} + for name, pool := range rts.mempools { + if !p2ptest.NodeInSlice(name, ids) { + continue + } + if len(txs) == pool.Size() { + continue + } + + wg.Add(1) + go func(name types.NodeID, pool *TxMempool) { + defer wg.Done() + require.Eventually(t, func() bool { return len(txs) == pool.Size() }, + time.Minute, + 250*time.Millisecond, + "node=%q, ntx=%d, size=%d", name, len(txs), pool.Size(), + ) + }(name, pool) + } + wg.Wait() +} + +func TestReactorBroadcastDoesNotPanic(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + const numNodes = 2 + + logger := log.NewNopLogger() + rts := setupReactors(ctx, t, logger, numNodes, 0) + + observePanic := func(r interface{}) { + t.Fatal("panic detected in reactor") + } + + primary := rts.nodes[0] + secondary := rts.nodes[1] + primaryReactor := rts.reactors[primary] + primaryMempool := primaryReactor.mempool + secondaryReactor := rts.reactors[secondary] + + primaryReactor.observePanic = observePanic + secondaryReactor.observePanic = observePanic + + firstTx := &WrappedTx{} + primaryMempool.insertTx(firstTx) + + // run the router + rts.start(ctx, t) + + go primaryReactor.broadcastTxRoutine(ctx, secondary, rts.mempoolChannels[primary]) + + wg := &sync.WaitGroup{} + for i := 0; i < 50; i++ { + next := &WrappedTx{} + wg.Add(1) + go func() { + defer wg.Done() + primaryMempool.insertTx(next) + }() + } + + primaryReactor.Stop() + wg.Wait() +} + +func TestReactorBroadcastTxs(t *testing.T) { + numTxs := 512 + numNodes := 4 + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + + rts := setupReactors(ctx, t, logger, numNodes, uint(numTxs)) + + primary := rts.nodes[0] + secondaries := rts.nodes[1:] + + txs := checkTxs(ctx, t, rts.reactors[primary].mempool, numTxs, UnknownPeerID) + + require.Equal(t, numTxs, rts.reactors[primary].mempool.Size()) + + rts.start(ctx, t) + + // Wait till all secondary suites (reactor) received all mempool txs from the + // primary suite (node). + rts.waitForTxns(t, convertTex(txs), secondaries...) +} + +// regression test for https://github.com/tendermint/tendermint/issues/5408 +func TestReactorConcurrency(t *testing.T) { + numTxs := 10 + numNodes := 2 + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + rts := setupReactors(ctx, t, logger, numNodes, 0) + + primary := rts.nodes[0] + secondary := rts.nodes[1] + + rts.start(ctx, t) + + var wg sync.WaitGroup + + for i := 0; i < runtime.NumCPU()*2; i++ { + wg.Add(2) + + // 1. submit a bunch of txs + // 2. 
update the whole mempool + + txs := checkTxs(ctx, t, rts.reactors[primary].mempool, numTxs, UnknownPeerID) + go func() { + defer wg.Done() + + mempool := rts.mempools[primary] + + mempool.Lock() + defer mempool.Unlock() + + deliverTxResponses := make([]*abci.ExecTxResult, len(txs)) + for i := range txs { + deliverTxResponses[i] = &abci.ExecTxResult{Code: 0} + } + + require.NoError(t, mempool.Update(ctx, 1, convertTex(txs), deliverTxResponses, nil, nil)) + }() + + // 1. submit a bunch of txs + // 2. update none + _ = checkTxs(ctx, t, rts.reactors[secondary].mempool, numTxs, UnknownPeerID) + go func() { + defer wg.Done() + + mempool := rts.mempools[secondary] + + mempool.Lock() + defer mempool.Unlock() + + err := mempool.Update(ctx, 1, []types.Tx{}, make([]*abci.ExecTxResult, 0), nil, nil) + require.NoError(t, err) + }() + } + + wg.Wait() +} + +func TestReactorNoBroadcastToSender(t *testing.T) { + numTxs := 1000 + numNodes := 2 + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + rts := setupReactors(ctx, t, logger, numNodes, uint(numTxs)) + + primary := rts.nodes[0] + secondary := rts.nodes[1] + + peerID := uint16(1) + _ = checkTxs(ctx, t, rts.mempools[primary], numTxs, peerID) + + rts.start(ctx, t) + + time.Sleep(100 * time.Millisecond) + + require.Eventually(t, func() bool { + return rts.mempools[secondary].Size() == 0 + }, time.Minute, 100*time.Millisecond) +} + +func TestReactor_MaxTxBytes(t *testing.T) { + numNodes := 2 + cfg := config.TestConfig() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + + rts := setupReactors(ctx, t, logger, numNodes, 0) + + primary := rts.nodes[0] + secondary := rts.nodes[1] + + // Broadcast a tx, which has the max size and ensure it's received by the + // second reactor. + tx1 := tmrand.Bytes(cfg.Mempool.MaxTxBytes) + err := rts.reactors[primary].mempool.CheckTx( + ctx, + tx1, + nil, + TxInfo{ + SenderID: UnknownPeerID, + }, + ) + require.NoError(t, err) + + rts.start(ctx, t) + + rts.reactors[primary].mempool.Flush() + rts.reactors[secondary].mempool.Flush() + + // broadcast a tx, which is beyond the max size and ensure it's not sent + tx2 := tmrand.Bytes(cfg.Mempool.MaxTxBytes + 1) + err = rts.mempools[primary].CheckTx(ctx, tx2, nil, TxInfo{SenderID: UnknownPeerID}) + require.Error(t, err) +} + +func TestDontExhaustMaxActiveIDs(t *testing.T) { + // we're creating a single node network, but not starting the + // network. + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + rts := setupReactors(ctx, t, logger, 1, MaxActiveIDs+1) + + nodeID := rts.nodes[0] + + peerID, err := types.NewNodeID("0011223344556677889900112233445566778899") + require.NoError(t, err) + + // ensure the reactor does not panic (i.e. 
exhaust active IDs) + for i := 0; i < MaxActiveIDs+1; i++ { + rts.peerChans[nodeID] <- p2p.PeerUpdate{ + Status: p2p.PeerStatusUp, + NodeID: peerID, + } + + require.NoError(t, rts.mempoolChannels[nodeID].Send(ctx, p2p.Envelope{ + To: peerID, + Message: &protomem.Txs{ + Txs: [][]byte{}, + }, + })) + } +} + +func TestMempoolIDsPanicsIfNodeRequestsOvermaxActiveIDs(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + + // 0 is already reserved for UnknownPeerID + ids := NewMempoolIDs() + + for i := 0; i < MaxActiveIDs-1; i++ { + peerID, err := types.NewNodeID(fmt.Sprintf("%040d", i)) + require.NoError(t, err) + ids.ReserveForPeer(peerID) + } + + peerID, err := types.NewNodeID(fmt.Sprintf("%040d", MaxActiveIDs-1)) + require.NoError(t, err) + require.Panics(t, func() { + ids.ReserveForPeer(peerID) + }) +} + +func TestBroadcastTxForPeerStopsWhenPeerStops(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + + rts := setupReactors(ctx, t, logger, 2, 2) + + primary := rts.nodes[0] + secondary := rts.nodes[1] + + rts.start(ctx, t) + + // disconnect peer + rts.peerChans[primary] <- p2p.PeerUpdate{ + Status: p2p.PeerStatusDown, + NodeID: secondary, + } + time.Sleep(500 * time.Millisecond) + + txs := checkTxs(ctx, t, rts.reactors[primary].mempool, 4, UnknownPeerID) + require.Equal(t, 4, len(txs)) + require.Equal(t, 4, rts.mempools[primary].Size()) + require.Equal(t, 0, rts.mempools[secondary].Size()) +} diff --git a/internal/mempool/tx.go b/internal/mempool/tx.go index adafdf85e3..c7113c9513 100644 --- a/internal/mempool/tx.go +++ b/internal/mempool/tx.go @@ -1,6 +1,11 @@ package mempool import ( + "sort" + "sync" + "time" + + "github.com/tendermint/tendermint/internal/libs/clist" "github.com/tendermint/tendermint/types" ) @@ -15,3 +20,274 @@ type TxInfo struct { // SenderNodeID is the actual types.NodeID of the sender. SenderNodeID types.NodeID } + +// WrappedTx defines a wrapper around a raw transaction with additional metadata +// that is used for indexing. +type WrappedTx struct { + // tx represents the raw binary transaction data + tx types.Tx + + // hash defines the transaction hash and the primary key used in the mempool + hash types.TxKey + + // height defines the height at which the transaction was validated at + height int64 + + // gasWanted defines the amount of gas the transaction sender requires + gasWanted int64 + + // priority defines the transaction's priority as specified by the application + // in the ResponseCheckTx response. + priority int64 + + // sender defines the transaction's sender as specified by the application in + // the ResponseCheckTx response. + sender string + + // timestamp is the time at which the node first received the transaction from + // a peer. It is used as a second dimension is prioritizing transactions when + // two transactions have the same priority. + timestamp time.Time + + // peers records a mapping of all peers that sent a given transaction + peers map[uint16]struct{} + + // heapIndex defines the index of the item in the heap + heapIndex int + + // gossipEl references the linked-list element in the gossip index + gossipEl *clist.CElement + + // removed marks the transaction as removed from the mempool. 
This is set + // during RemoveTx and is needed due to the fact that a given existing + // transaction in the mempool can be evicted when it is simultaneously having + // a reCheckTx callback executed. + removed bool +} + +func (wtx *WrappedTx) Size() int { + return len(wtx.tx) +} + +// TxStore implements a thread-safe mapping of valid transaction(s). +// +// NOTE: +// - Concurrent read-only access to a *WrappedTx object is OK. However, mutative +// access is not allowed. Regardless, it is not expected for the mempool to +// need mutative access. +type TxStore struct { + mtx sync.RWMutex + hashTxs map[types.TxKey]*WrappedTx // primary index + senderTxs map[string]*WrappedTx // sender is defined by the ABCI application +} + +func NewTxStore() *TxStore { + return &TxStore{ + senderTxs: make(map[string]*WrappedTx), + hashTxs: make(map[types.TxKey]*WrappedTx), + } +} + +// Size returns the total number of transactions in the store. +func (txs *TxStore) Size() int { + txs.mtx.RLock() + defer txs.mtx.RUnlock() + + return len(txs.hashTxs) +} + +// GetAllTxs returns all the transactions currently in the store. +func (txs *TxStore) GetAllTxs() []*WrappedTx { + txs.mtx.RLock() + defer txs.mtx.RUnlock() + + wTxs := make([]*WrappedTx, len(txs.hashTxs)) + i := 0 + for _, wtx := range txs.hashTxs { + wTxs[i] = wtx + i++ + } + + return wTxs +} + +// GetTxBySender returns a *WrappedTx by the transaction's sender property +// defined by the ABCI application. +func (txs *TxStore) GetTxBySender(sender string) *WrappedTx { + txs.mtx.RLock() + defer txs.mtx.RUnlock() + + return txs.senderTxs[sender] +} + +// GetTxByHash returns a *WrappedTx by the transaction's hash. +func (txs *TxStore) GetTxByHash(hash types.TxKey) *WrappedTx { + txs.mtx.RLock() + defer txs.mtx.RUnlock() + + return txs.hashTxs[hash] +} + +// IsTxRemoved returns true if a transaction by hash is marked as removed and +// false otherwise. +func (txs *TxStore) IsTxRemoved(hash types.TxKey) bool { + txs.mtx.RLock() + defer txs.mtx.RUnlock() + + wtx, ok := txs.hashTxs[hash] + if ok { + return wtx.removed + } + + return false +} + +// SetTx stores a *WrappedTx by it's hash. If the transaction also contains a +// non-empty sender, we additionally store the transaction by the sender as +// defined by the ABCI application. +func (txs *TxStore) SetTx(wtx *WrappedTx) { + txs.mtx.Lock() + defer txs.mtx.Unlock() + + if len(wtx.sender) > 0 { + txs.senderTxs[wtx.sender] = wtx + } + + txs.hashTxs[wtx.tx.Key()] = wtx +} + +// RemoveTx removes a *WrappedTx from the transaction store. It deletes all +// indexes of the transaction. +func (txs *TxStore) RemoveTx(wtx *WrappedTx) { + txs.mtx.Lock() + defer txs.mtx.Unlock() + + if len(wtx.sender) > 0 { + delete(txs.senderTxs, wtx.sender) + } + + delete(txs.hashTxs, wtx.tx.Key()) + wtx.removed = true +} + +// TxHasPeer returns true if a transaction by hash has a given peer ID and false +// otherwise. If the transaction does not exist, false is returned. +func (txs *TxStore) TxHasPeer(hash types.TxKey, peerID uint16) bool { + txs.mtx.RLock() + defer txs.mtx.RUnlock() + + wtx := txs.hashTxs[hash] + if wtx == nil { + return false + } + + _, ok := wtx.peers[peerID] + return ok +} + +// GetOrSetPeerByTxHash looks up a WrappedTx by transaction hash and adds the +// given peerID to the WrappedTx's set of peers that sent us this transaction. +// We return true if we've already recorded the given peer for this transaction +// and false otherwise. If the transaction does not exist by hash, we return +// (nil, false). 
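
As a quick illustration of the TxStore API introduced above, here is a minimal sketch exercising both indexes. It is written as if inside package mempool (WrappedTx fields are unexported), imports follow the surrounding file, and the sender "alice" and peer ID 7 are made-up illustration values, not anything from this diff.

```go
// Sketch only (not part of this diff): exercising the two TxStore indexes.
func exampleTxStore() {
	store := NewTxStore()

	tx := types.Tx("raw-tx-bytes")
	wtx := &WrappedTx{
		tx:        tx,
		hash:      tx.Key(),
		sender:    "alice", // as reported by the app in ResponseCheckTx
		timestamp: time.Now().UTC(),
	}
	store.SetTx(wtx)

	// Both indexes resolve to the same *WrappedTx.
	_ = store.GetTxByHash(tx.Key()) == store.GetTxBySender("alice") // true

	// The first sighting from peer 7 records the peer and reports false;
	// a repeat sighting from the same peer reports true.
	_, seen := store.GetOrSetPeerByTxHash(tx.Key(), 7) // seen == false
	_, seen = store.GetOrSetPeerByTxHash(tx.Key(), 7)  // seen == true
	_ = seen

	// RemoveTx deletes both indexes and marks the wrapped tx as removed.
	store.RemoveTx(wtx)
	_ = store.Size() // 0
}
```
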
+func (txs *TxStore) GetOrSetPeerByTxHash(hash types.TxKey, peerID uint16) (*WrappedTx, bool) { + txs.mtx.Lock() + defer txs.mtx.Unlock() + + wtx := txs.hashTxs[hash] + if wtx == nil { + return nil, false + } + + if wtx.peers == nil { + wtx.peers = make(map[uint16]struct{}) + } + + if _, ok := wtx.peers[peerID]; ok { + return wtx, true + } + + wtx.peers[peerID] = struct{}{} + return wtx, false +} + +// WrappedTxList implements a thread-safe list of *WrappedTx objects that can be +// used to build generic transaction indexes in the mempool. It accepts a +// comparator function, less(a, b *WrappedTx) bool, that compares two WrappedTx +// references which is used during Insert in order to determine sorted order. If +// less returns true, a <= b. +type WrappedTxList struct { + mtx sync.RWMutex + txs []*WrappedTx + less func(*WrappedTx, *WrappedTx) bool +} + +func NewWrappedTxList(less func(*WrappedTx, *WrappedTx) bool) *WrappedTxList { + return &WrappedTxList{ + txs: make([]*WrappedTx, 0), + less: less, + } +} + +// Size returns the number of WrappedTx objects in the list. +func (wtl *WrappedTxList) Size() int { + wtl.mtx.RLock() + defer wtl.mtx.RUnlock() + + return len(wtl.txs) +} + +// Reset resets the list of transactions to an empty list. +func (wtl *WrappedTxList) Reset() { + wtl.mtx.Lock() + defer wtl.mtx.Unlock() + + wtl.txs = make([]*WrappedTx, 0) +} + +// Insert inserts a WrappedTx reference into the sorted list based on the list's +// comparator function. +func (wtl *WrappedTxList) Insert(wtx *WrappedTx) { + wtl.mtx.Lock() + defer wtl.mtx.Unlock() + + i := sort.Search(len(wtl.txs), func(i int) bool { + return wtl.less(wtl.txs[i], wtx) + }) + + if i == len(wtl.txs) { + // insert at the end + wtl.txs = append(wtl.txs, wtx) + return + } + + // Make space for the inserted element by shifting values at the insertion + // index up one index. + // + // NOTE: The call to append does not allocate memory when cap(wtl.txs) > len(wtl.txs). + wtl.txs = append(wtl.txs[:i+1], wtl.txs[i:]...) + wtl.txs[i] = wtx +} + +// Remove attempts to remove a WrappedTx from the sorted list. +func (wtl *WrappedTxList) Remove(wtx *WrappedTx) { + wtl.mtx.Lock() + defer wtl.mtx.Unlock() + + i := sort.Search(len(wtl.txs), func(i int) bool { + return wtl.less(wtl.txs[i], wtx) + }) + + // Since the list is sorted, we evaluate all elements starting at i. Note, if + // the element does not exist, we may potentially evaluate the entire remainder + // of the list. However, a caller should not be expected to call Remove with a + // non-existing element. + for i < len(wtl.txs) { + if wtl.txs[i] == wtx { + wtl.txs = append(wtl.txs[:i], wtl.txs[i+1:]...) 
+ return + } + + i++ + } +} diff --git a/internal/mempool/v1/tx_test.go b/internal/mempool/tx_test.go similarity index 99% rename from internal/mempool/v1/tx_test.go rename to internal/mempool/tx_test.go index fb4beafab8..c6d494b047 100644 --- a/internal/mempool/v1/tx_test.go +++ b/internal/mempool/tx_test.go @@ -1,4 +1,4 @@ -package v1 +package mempool import ( "fmt" @@ -8,6 +8,7 @@ import ( "time" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/types" ) diff --git a/internal/mempool/types.go b/internal/mempool/types.go new file mode 100644 index 0000000000..a51d286e28 --- /dev/null +++ b/internal/mempool/types.go @@ -0,0 +1,146 @@ +package mempool + +import ( + "context" + "fmt" + "math" + + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/p2p" + "github.com/tendermint/tendermint/types" +) + +const ( + MempoolChannel = p2p.ChannelID(0x30) + + // PeerCatchupSleepIntervalMS defines how much time to sleep if a peer is behind + PeerCatchupSleepIntervalMS = 100 + + // UnknownPeerID is the peer ID to use when running CheckTx when there is + // no peer (e.g. RPC) + UnknownPeerID uint16 = 0 + + MaxActiveIDs = math.MaxUint16 +) + +//go:generate ../../scripts/mockery_generate.sh Mempool + +// Mempool defines the mempool interface. +// +// Updates to the mempool need to be synchronized with committing a block so +// applications can reset their transient state on Commit. +type Mempool interface { + // CheckTx executes a new transaction against the application to determine + // its validity and whether it should be added to the mempool. + CheckTx(ctx context.Context, tx types.Tx, callback func(*abci.ResponseCheckTx), txInfo TxInfo) error + + // RemoveTxByKey removes a transaction, identified by its key, + // from the mempool. + RemoveTxByKey(txKey types.TxKey) error + + // ReapMaxBytesMaxGas reaps transactions from the mempool up to maxBytes + // bytes total with the condition that the total gasWanted must be less than + // maxGas. + // + // If both maxes are negative, there is no cap on the size of all returned + // transactions (~ all available transactions). + ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs + + // ReapMaxTxs reaps up to max transactions from the mempool. If max is + // negative, there is no cap on the size of all returned transactions + // (~ all available transactions). + ReapMaxTxs(max int) types.Txs + + // Lock locks the mempool. The consensus must be able to hold lock to safely + // update. + Lock() + + // Unlock unlocks the mempool. + Unlock() + + // Update informs the mempool that the given txs were committed and can be + // discarded. + // + // NOTE: + // 1. This should be called *after* block is committed by consensus. + // 2. Lock/Unlock must be managed by the caller. + Update( + ctx context.Context, + blockHeight int64, + blockTxs types.Txs, + txResults []*abci.ExecTxResult, + newPreFn PreCheckFunc, + newPostFn PostCheckFunc, + ) error + + // FlushAppConn flushes the mempool connection to ensure async callback calls + // are done, e.g. from CheckTx. + // + // NOTE: + // 1. Lock/Unlock must be managed by caller. + FlushAppConn(context.Context) error + + // Flush removes all transactions from the mempool and caches. + Flush() + + // TxsAvailable returns a channel which fires once for every height, and only + // when transactions are available in the mempool. + // + // NOTE: + // 1. The returned channel may be nil if EnableTxsAvailable was not called. 
+ TxsAvailable() <-chan struct{} + + // EnableTxsAvailable initializes the TxsAvailable channel, ensuring it will + // trigger once every height when transactions are available. + EnableTxsAvailable() + + // Size returns the number of transactions in the mempool. + Size() int + + // SizeBytes returns the total size of all txs in the mempool. + SizeBytes() int64 +} + +// PreCheckFunc is an optional filter executed before CheckTx and rejects +// transaction if false is returned. An example would be to ensure that a +// transaction doesn't exceeded the block size. +type PreCheckFunc func(types.Tx) error + +// PostCheckFunc is an optional filter executed after CheckTx and rejects +// transaction if false is returned. An example would be to ensure a +// transaction doesn't require more gas than available for the block. +type PostCheckFunc func(types.Tx, *abci.ResponseCheckTx) error + +// PreCheckMaxBytes checks that the size of the transaction is smaller or equal +// to the expected maxBytes. +func PreCheckMaxBytes(maxBytes int64) PreCheckFunc { + return func(tx types.Tx) error { + txSize := types.ComputeProtoSizeForTxs([]types.Tx{tx}) + + if txSize > maxBytes { + return fmt.Errorf("tx size is too big: %d, max: %d", txSize, maxBytes) + } + + return nil + } +} + +// PostCheckMaxGas checks that the wanted gas is smaller or equal to the passed +// maxGas. Returns nil if maxGas is -1. +func PostCheckMaxGas(maxGas int64) PostCheckFunc { + return func(tx types.Tx, res *abci.ResponseCheckTx) error { + if maxGas == -1 { + return nil + } + if res.GasWanted < 0 { + return fmt.Errorf("gas wanted %d is negative", + res.GasWanted) + } + if res.GasWanted > maxGas { + return fmt.Errorf("gas wanted %d is greater than max gas %d", + res.GasWanted, maxGas) + } + + return nil + } +} diff --git a/internal/mempool/v0/bench_test.go b/internal/mempool/v0/bench_test.go deleted file mode 100644 index 35558f857f..0000000000 --- a/internal/mempool/v0/bench_test.go +++ /dev/null @@ -1,107 +0,0 @@ -package v0 - -import ( - "context" - "encoding/binary" - "sync/atomic" - "testing" - - "github.com/stretchr/testify/require" - abciclient "github.com/tendermint/tendermint/abci/client" - "github.com/tendermint/tendermint/abci/example/kvstore" - "github.com/tendermint/tendermint/internal/mempool" -) - -func BenchmarkReap(b *testing.B) { - app := kvstore.NewApplication() - cc := abciclient.NewLocalCreator(app) - mp, cleanup, err := newMempoolWithApp(cc) - require.NoError(b, err) - defer cleanup() - - mp.config.Size = 100000 - - size := 10000 - for i := 0; i < size; i++ { - tx := make([]byte, 8) - binary.BigEndian.PutUint64(tx, uint64(i)) - if err := mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}); err != nil { - b.Fatal(err) - } - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - mp.ReapMaxBytesMaxGas(100000000, 10000000) - } -} - -func BenchmarkCheckTx(b *testing.B) { - app := kvstore.NewApplication() - cc := abciclient.NewLocalCreator(app) - mp, cleanup, err := newMempoolWithApp(cc) - require.NoError(b, err) - defer cleanup() - - mp.config.Size = 1000000 - - b.ResetTimer() - - for i := 0; i < b.N; i++ { - b.StopTimer() - tx := make([]byte, 8) - binary.BigEndian.PutUint64(tx, uint64(i)) - b.StartTimer() - - if err := mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}); err != nil { - b.Fatal(err) - } - } -} - -func BenchmarkParallelCheckTx(b *testing.B) { - app := kvstore.NewApplication() - cc := abciclient.NewLocalCreator(app) - mp, cleanup, err := newMempoolWithApp(cc) - require.NoError(b, err) - defer 
cleanup() - - mp.config.Size = 100000000 - - var txcnt uint64 - next := func() uint64 { - return atomic.AddUint64(&txcnt, 1) - 1 - } - - b.ResetTimer() - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - tx := make([]byte, 8) - binary.BigEndian.PutUint64(tx, next()) - if err := mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}); err != nil { - b.Fatal(err) - } - } - }) -} - -func BenchmarkCheckDuplicateTx(b *testing.B) { - app := kvstore.NewApplication() - cc := abciclient.NewLocalCreator(app) - mp, cleanup, err := newMempoolWithApp(cc) - require.NoError(b, err) - defer cleanup() - - mp.config.Size = 1000000 - - for i := 0; i < b.N; i++ { - tx := make([]byte, 8) - binary.BigEndian.PutUint64(tx, uint64(i)) - if err := mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}); err != nil { - b.Fatal(err) - } - - if err := mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}); err == nil { - b.Fatal("tx should be duplicate") - } - } -} diff --git a/internal/mempool/v0/cache_test.go b/internal/mempool/v0/cache_test.go deleted file mode 100644 index 953e82b8dc..0000000000 --- a/internal/mempool/v0/cache_test.go +++ /dev/null @@ -1,83 +0,0 @@ -package v0 - -import ( - "context" - "crypto/sha256" - "testing" - - "github.com/stretchr/testify/require" - - abciclient "github.com/tendermint/tendermint/abci/client" - "github.com/tendermint/tendermint/abci/example/kvstore" - abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/internal/mempool" - "github.com/tendermint/tendermint/types" -) - -func TestCacheAfterUpdate(t *testing.T) { - app := kvstore.NewApplication() - cc := abciclient.NewLocalCreator(app) - mp, cleanup, err := newMempoolWithApp(cc) - require.NoError(t, err) - defer cleanup() - - // reAddIndices & txsInCache can have elements > numTxsToCreate - // also assumes max index is 255 for convenience - // txs in cache also checks order of elements - tests := []struct { - numTxsToCreate int - updateIndices []int - reAddIndices []int - txsInCache []int - }{ - {1, []int{}, []int{1}, []int{1, 0}}, // adding new txs works - {2, []int{1}, []int{}, []int{1, 0}}, // update doesn't remove tx from cache - {2, []int{2}, []int{}, []int{2, 1, 0}}, // update adds new tx to cache - {2, []int{1}, []int{1}, []int{1, 0}}, // re-adding after update doesn't make dupe - } - for tcIndex, tc := range tests { - for i := 0; i < tc.numTxsToCreate; i++ { - tx := types.Tx{byte(i)} - err := mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}) - require.NoError(t, err) - } - - updateTxs := []types.Tx{} - for _, v := range tc.updateIndices { - tx := types.Tx{byte(v)} - updateTxs = append(updateTxs, tx) - } - err := mp.Update(int64(tcIndex), updateTxs, abciResponses(len(updateTxs), abci.CodeTypeOK), nil, nil) - require.NoError(t, err) - - for _, v := range tc.reAddIndices { - tx := types.Tx{byte(v)} - _ = mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}) - } - - cache := mp.cache.(*mempool.LRUTxCache) - node := cache.GetList().Front() - counter := 0 - for node != nil { - require.NotEqual(t, len(tc.txsInCache), counter, - "cache larger than expected on testcase %d", tcIndex) - - nodeVal := node.Value.(types.TxKey) - expectedBz := sha256.Sum256([]byte{byte(tc.txsInCache[len(tc.txsInCache)-counter-1])}) - // Reference for reading the errors: - // >>> sha256('\x00').hexdigest() - // '6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d' - // >>> sha256('\x01').hexdigest() - // '4bf5122f344554c53bde2ebb8cd2b7e3d1600ad631c385a5d7cce23c7785459a' 
- // >>> sha256('\x02').hexdigest() - // 'dbc1b4c900ffe48d575b5da5c638040125f65db0fe3e24494b76ea986457d986' - - require.EqualValues(t, expectedBz, nodeVal, "Equality failed on index %d, tc %d", counter, tcIndex) - counter++ - node = node.Next() - } - require.Equal(t, len(tc.txsInCache), counter, - "cache smaller than expected on testcase %d", tcIndex) - mp.Flush() - } -} diff --git a/internal/mempool/v0/clist_mempool.go b/internal/mempool/v0/clist_mempool.go deleted file mode 100644 index 66f3949462..0000000000 --- a/internal/mempool/v0/clist_mempool.go +++ /dev/null @@ -1,698 +0,0 @@ -package v0 - -import ( - "bytes" - "context" - "errors" - "sync" - "sync/atomic" - - abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/libs/clist" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/mempool" - "github.com/tendermint/tendermint/internal/proxy" - "github.com/tendermint/tendermint/libs/log" - tmmath "github.com/tendermint/tendermint/libs/math" - "github.com/tendermint/tendermint/types" -) - -// CListMempool is an ordered in-memory pool for transactions before they are -// proposed in a consensus round. Transaction validity is checked using the -// CheckTx abci message before the transaction is added to the pool. The -// mempool uses a concurrent list structure for storing transactions that can -// be efficiently accessed by multiple concurrent readers. -type CListMempool struct { - // Atomic integers - height int64 // the last block Update()'d to - txsBytes int64 // total size of mempool, in bytes - - // notify listeners (ie. consensus) when txs are available - notifiedTxsAvailable bool - txsAvailable chan struct{} // fires once for each height, when the mempool is not empty - - config *config.MempoolConfig - - // Exclusive mutex for Update method to prevent concurrent execution of - // CheckTx or ReapMaxBytesMaxGas(ReapMaxTxs) methods. - updateMtx tmsync.RWMutex - preCheck mempool.PreCheckFunc - postCheck mempool.PostCheckFunc - - txs *clist.CList // concurrent linked-list of good txs - proxyAppConn proxy.AppConnMempool - - // Track whether we're rechecking txs. - // These are not protected by a mutex and are expected to be mutated in - // serial (ie. by abci responses which are called in serial). - recheckCursor *clist.CElement // next expected response - recheckEnd *clist.CElement // re-checking stops here - - // Map for quick access to txs to record sender in CheckTx. - // txsMap: txKey -> CElement - txsMap sync.Map - - // Keep a cache of already-seen txs. - // This reduces the pressure on the proxyApp. - cache mempool.TxCache - - logger log.Logger - metrics *mempool.Metrics -} - -var _ mempool.Mempool = &CListMempool{} - -// CListMempoolOption sets an optional parameter on the mempool. -type CListMempoolOption func(*CListMempool) - -// NewCListMempool returns a new mempool with the given configuration and -// connection to an application. 
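
For reference, a sketch of how the constructor defined just below was typically wired up, mirroring the deleted newMempoolWithAppAndConfig helper further down. The kvstore app and test config are assumptions taken from those deleted tests; imports are elided.

```go
// Sketch: wiring up the v0 mempool that this diff removes.
func exampleNewCListMempool() (*CListMempool, error) {
	app := kvstore.NewApplication()
	cc := abciclient.NewLocalCreator(app)

	appConnMem, err := cc()
	if err != nil {
		return nil, err
	}
	if err := appConnMem.Start(); err != nil {
		return nil, err
	}

	cfg := config.TestConfig()
	mp := NewCListMempool(
		cfg.Mempool,
		appConnMem,
		0, // initial block height
		WithMetrics(mempool.NopMetrics()),
		WithPreCheck(mempool.PreCheckMaxBytes(int64(cfg.Mempool.MaxTxBytes))),
	)
	mp.SetLogger(log.NewNopLogger())
	return mp, nil
}
```
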
-func NewCListMempool( - cfg *config.MempoolConfig, - proxyAppConn proxy.AppConnMempool, - height int64, - options ...CListMempoolOption, -) *CListMempool { - - mp := &CListMempool{ - config: cfg, - proxyAppConn: proxyAppConn, - txs: clist.New(), - height: height, - recheckCursor: nil, - recheckEnd: nil, - logger: log.NewNopLogger(), - metrics: mempool.NopMetrics(), - } - - if cfg.CacheSize > 0 { - mp.cache = mempool.NewLRUTxCache(cfg.CacheSize) - } else { - mp.cache = mempool.NopTxCache{} - } - - proxyAppConn.SetResponseCallback(mp.globalCb) - - for _, option := range options { - option(mp) - } - - return mp -} - -// NOTE: not thread safe - should only be called once, on startup -func (mem *CListMempool) EnableTxsAvailable() { - mem.txsAvailable = make(chan struct{}, 1) -} - -// SetLogger sets the Logger. -func (mem *CListMempool) SetLogger(l log.Logger) { - mem.logger = l -} - -// WithPreCheck sets a filter for the mempool to reject a tx if f(tx) returns -// false. This is ran before CheckTx. Only applies to the first created block. -// After that, Update overwrites the existing value. -func WithPreCheck(f mempool.PreCheckFunc) CListMempoolOption { - return func(mem *CListMempool) { mem.preCheck = f } -} - -// WithPostCheck sets a filter for the mempool to reject a tx if f(tx) returns -// false. This is ran after CheckTx. Only applies to the first created block. -// After that, Update overwrites the existing value. -func WithPostCheck(f mempool.PostCheckFunc) CListMempoolOption { - return func(mem *CListMempool) { mem.postCheck = f } -} - -// WithMetrics sets the metrics. -func WithMetrics(metrics *mempool.Metrics) CListMempoolOption { - return func(mem *CListMempool) { mem.metrics = metrics } -} - -// Safe for concurrent use by multiple goroutines. -func (mem *CListMempool) Lock() { - mem.updateMtx.Lock() -} - -// Safe for concurrent use by multiple goroutines. -func (mem *CListMempool) Unlock() { - mem.updateMtx.Unlock() -} - -// Safe for concurrent use by multiple goroutines. -func (mem *CListMempool) Size() int { - return mem.txs.Len() -} - -// Safe for concurrent use by multiple goroutines. -func (mem *CListMempool) SizeBytes() int64 { - return atomic.LoadInt64(&mem.txsBytes) -} - -// Lock() must be help by the caller during execution. -func (mem *CListMempool) FlushAppConn() error { - return mem.proxyAppConn.FlushSync(context.Background()) -} - -// XXX: Unsafe! Calling Flush may leave mempool in inconsistent state. -func (mem *CListMempool) Flush() { - mem.updateMtx.RLock() - defer mem.updateMtx.RUnlock() - - _ = atomic.SwapInt64(&mem.txsBytes, 0) - mem.cache.Reset() - - for e := mem.txs.Front(); e != nil; e = e.Next() { - mem.txs.Remove(e) - e.DetachPrev() - } - - mem.txsMap.Range(func(key, _ interface{}) bool { - mem.txsMap.Delete(key) - return true - }) -} - -// TxsFront returns the first transaction in the ordered list for peer -// goroutines to call .NextWait() on. -// FIXME: leaking implementation details! -// -// Safe for concurrent use by multiple goroutines. -func (mem *CListMempool) TxsFront() *clist.CElement { - return mem.txs.Front() -} - -// TxsWaitChan returns a channel to wait on transactions. It will be closed -// once the mempool is not empty (ie. the internal `mem.txs` has at least one -// element) -// -// Safe for concurrent use by multiple goroutines. -func (mem *CListMempool) TxsWaitChan() <-chan struct{} { - return mem.txs.WaitChan() -} - -// It blocks if we're waiting on Update() or Reap(). -// cb: A callback from the CheckTx command. 
-// It gets called from another goroutine. -// CONTRACT: Either cb will get called, or err returned. -// -// Safe for concurrent use by multiple goroutines. -func (mem *CListMempool) CheckTx( - ctx context.Context, - tx types.Tx, - cb func(*abci.Response), - txInfo mempool.TxInfo, -) error { - - mem.updateMtx.RLock() - // use defer to unlock mutex because application (*local client*) might panic - defer mem.updateMtx.RUnlock() - - txSize := len(tx) - - if err := mem.isFull(txSize); err != nil { - return err - } - - if txSize > mem.config.MaxTxBytes { - return types.ErrTxTooLarge{ - Max: mem.config.MaxTxBytes, - Actual: txSize, - } - } - - if mem.preCheck != nil { - if err := mem.preCheck(tx); err != nil { - return types.ErrPreCheck{ - Reason: err, - } - } - } - - // NOTE: proxyAppConn may error if tx buffer is full - if err := mem.proxyAppConn.Error(); err != nil { - return err - } - - if !mem.cache.Push(tx) { // if the transaction already exists in the cache - // Record a new sender for a tx we've already seen. - // Note it's possible a tx is still in the cache but no longer in the mempool - // (eg. after committing a block, txs are removed from mempool but not cache), - // so we only record the sender for txs still in the mempool. - if e, ok := mem.txsMap.Load(tx.Key()); ok { - memTx := e.(*clist.CElement).Value.(*mempoolTx) - _, loaded := memTx.senders.LoadOrStore(txInfo.SenderID, true) - // TODO: consider punishing peer for dups, - // its non-trivial since invalid txs can become valid, - // but they can spam the same tx with little cost to them atm. - if loaded { - return types.ErrTxInCache - } - } - - mem.logger.Debug("tx exists already in cache", "tx", tx.Hash()) - return nil - } - - if ctx == nil { - ctx = context.Background() - } - - reqRes, err := mem.proxyAppConn.CheckTxAsync(ctx, abci.RequestCheckTx{Tx: tx}) - if err != nil { - mem.cache.Remove(tx) - return err - } - reqRes.SetCallback(mem.reqResCb(tx, txInfo.SenderID, txInfo.SenderNodeID, cb)) - - return nil -} - -// Global callback that will be called after every ABCI response. -// Having a single global callback avoids needing to set a callback for each request. -// However, processing the checkTx response requires the peerID (so we can track which txs we heard from who), -// and peerID is not included in the ABCI request, so we have to set request-specific callbacks that -// include this information. If we're not in the midst of a recheck, this function will just return, -// so the request specific callback can do the work. -// -// When rechecking, we don't need the peerID, so the recheck callback happens -// here. -func (mem *CListMempool) globalCb(req *abci.Request, res *abci.Response) { - if mem.recheckCursor == nil { - return - } - - mem.metrics.RecheckTimes.Add(1) - mem.resCbRecheck(req, res) - - // update metrics - mem.metrics.Size.Set(float64(mem.Size())) -} - -// Request specific callback that should be set on individual reqRes objects -// to incorporate local information when processing the response. -// This allows us to track the peer that sent us this tx, so we can avoid sending it back to them. -// NOTE: alternatively, we could include this information in the ABCI request itself. -// -// External callers of CheckTx, like the RPC, can also pass an externalCb through here that is called -// when all other response processing is complete. -// -// Used in CheckTx to record PeerID who sent us the tx. 
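
The callback chain described above is easiest to see from the caller's side. A sketch follows of how an external caller such as the RPC layer collects the result through the externalCb path; it relies on the documented contract that either the callback fires or CheckTx returns an error, and assumes the generated abci.Response.GetCheckTx accessor.

```go
// Sketch: collecting the CheckTx result via the external callback.
func exampleCheckTxCallback(ctx context.Context, mp *CListMempool, tx types.Tx) (*abci.ResponseCheckTx, error) {
	resCh := make(chan *abci.ResponseCheckTx, 1)

	err := mp.CheckTx(ctx, tx, func(res *abci.Response) {
		resCh <- res.GetCheckTx()
	}, mempool.TxInfo{SenderID: mempool.UnknownPeerID})
	if err != nil {
		return nil, err // per the CONTRACT, the callback will not fire
	}

	return <-resCh, nil
}
```
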
-func (mem *CListMempool) reqResCb( - tx []byte, - peerID uint16, - peerP2PID types.NodeID, - externalCb func(*abci.Response), -) func(res *abci.Response) { - return func(res *abci.Response) { - if mem.recheckCursor != nil { - // this should never happen - panic("recheck cursor is not nil in reqResCb") - } - - mem.resCbFirstTime(tx, peerID, peerP2PID, res) - - // update metrics - mem.metrics.Size.Set(float64(mem.Size())) - - // passed in by the caller of CheckTx, eg. the RPC - if externalCb != nil { - externalCb(res) - } - } -} - -// Called from: -// - resCbFirstTime (lock not held) if tx is valid -func (mem *CListMempool) addTx(memTx *mempoolTx) { - e := mem.txs.PushBack(memTx) - mem.txsMap.Store(memTx.tx.Key(), e) - atomic.AddInt64(&mem.txsBytes, int64(len(memTx.tx))) - mem.metrics.TxSizeBytes.Observe(float64(len(memTx.tx))) -} - -// Called from: -// - Update (lock held) if tx was committed -// - resCbRecheck (lock not held) if tx was invalidated -func (mem *CListMempool) removeTx(tx types.Tx, elem *clist.CElement, removeFromCache bool) { - mem.txs.Remove(elem) - elem.DetachPrev() - mem.txsMap.Delete(tx.Key()) - atomic.AddInt64(&mem.txsBytes, int64(-len(tx))) - - if removeFromCache { - mem.cache.Remove(tx) - } -} - -// RemoveTxByKey removes a transaction from the mempool by its TxKey index. -func (mem *CListMempool) RemoveTxByKey(txKey types.TxKey) error { - if e, ok := mem.txsMap.Load(txKey); ok { - memTx := e.(*clist.CElement).Value.(*mempoolTx) - if memTx != nil { - mem.removeTx(memTx.tx, e.(*clist.CElement), false) - return nil - } - return errors.New("transaction not found") - } - return errors.New("invalid transaction found") -} - -func (mem *CListMempool) isFull(txSize int) error { - var ( - memSize = mem.Size() - txsBytes = mem.SizeBytes() - ) - - if memSize >= mem.config.Size || int64(txSize)+txsBytes > mem.config.MaxTxsBytes { - return types.ErrMempoolIsFull{ - NumTxs: memSize, - MaxTxs: mem.config.Size, - TxsBytes: txsBytes, - MaxTxsBytes: mem.config.MaxTxsBytes, - } - } - - return nil -} - -// callback, which is called after the app checked the tx for the first time. -// -// The case where the app checks the tx for the second and subsequent times is -// handled by the resCbRecheck callback. -func (mem *CListMempool) resCbFirstTime( - tx []byte, - peerID uint16, - peerP2PID types.NodeID, - res *abci.Response, -) { - switch r := res.Value.(type) { - case *abci.Response_CheckTx: - var postCheckErr error - if mem.postCheck != nil { - postCheckErr = mem.postCheck(tx, r.CheckTx) - } - if (r.CheckTx.Code == abci.CodeTypeOK) && postCheckErr == nil { - // Check mempool isn't full again to reduce the chance of exceeding the - // limits. 
- if err := mem.isFull(len(tx)); err != nil { - // remove from cache (mempool might have a space later) - mem.cache.Remove(tx) - mem.logger.Error(err.Error()) - return - } - - memTx := &mempoolTx{ - height: mem.height, - gasWanted: r.CheckTx.GasWanted, - tx: tx, - } - memTx.senders.Store(peerID, true) - mem.addTx(memTx) - mem.logger.Debug( - "added good transaction", - "tx", types.Tx(tx).Hash(), - "res", r, - "height", memTx.height, - "total", mem.Size(), - ) - mem.notifyTxsAvailable() - } else { - // ignore bad transaction - mem.logger.Debug( - "rejected bad transaction", - "tx", types.Tx(tx).Hash(), - "peerID", peerP2PID, - "res", r, - "err", postCheckErr, - ) - mem.metrics.FailedTxs.Add(1) - - if !mem.config.KeepInvalidTxsInCache { - // remove from cache (it might be good later) - mem.cache.Remove(tx) - } - } - - default: - // ignore other messages - } -} - -// callback, which is called after the app rechecked the tx. -// -// The case where the app checks the tx for the first time is handled by the -// resCbFirstTime callback. -func (mem *CListMempool) resCbRecheck(req *abci.Request, res *abci.Response) { - switch r := res.Value.(type) { - case *abci.Response_CheckTx: - tx := req.GetCheckTx().Tx - memTx := mem.recheckCursor.Value.(*mempoolTx) - - // Search through the remaining list of tx to recheck for a transaction that matches - // the one we received from the ABCI application. - for { - if bytes.Equal(tx, memTx.tx) { - // We've found a tx in the recheck list that matches the tx that we - // received from the ABCI application. - // Break, and use this transaction for further checks. - break - } - - mem.logger.Error( - "re-CheckTx transaction mismatch", - "got", types.Tx(tx), - "expected", memTx.tx, - ) - - if mem.recheckCursor == mem.recheckEnd { - // we reached the end of the recheckTx list without finding a tx - // matching the one we received from the ABCI application. - // Return without processing any tx. - mem.recheckCursor = nil - return - } - - mem.recheckCursor = mem.recheckCursor.Next() - memTx = mem.recheckCursor.Value.(*mempoolTx) - } - - var postCheckErr error - if mem.postCheck != nil { - postCheckErr = mem.postCheck(tx, r.CheckTx) - } - if (r.CheckTx.Code == abci.CodeTypeOK) && postCheckErr == nil { - // Good, nothing to do. - } else { - // Tx became invalidated due to newly committed block. - mem.logger.Debug("tx is no longer valid", "tx", types.Tx(tx).Hash(), "res", r, "err", postCheckErr) - // NOTE: we remove tx from the cache because it might be good later - mem.removeTx(tx, mem.recheckCursor, !mem.config.KeepInvalidTxsInCache) - } - if mem.recheckCursor == mem.recheckEnd { - mem.recheckCursor = nil - } else { - mem.recheckCursor = mem.recheckCursor.Next() - } - if mem.recheckCursor == nil { - // Done! - mem.logger.Debug("done rechecking txs") - - // incase the recheck removed all txs - if mem.Size() > 0 { - mem.notifyTxsAvailable() - } - } - default: - // ignore other messages - } -} - -// Safe for concurrent use by multiple goroutines. -func (mem *CListMempool) TxsAvailable() <-chan struct{} { - return mem.txsAvailable -} - -func (mem *CListMempool) notifyTxsAvailable() { - if mem.Size() == 0 { - panic("notified txs available but mempool is empty!") - } - if mem.txsAvailable != nil && !mem.notifiedTxsAvailable { - // channel cap is 1, so this will send once - mem.notifiedTxsAvailable = true - select { - case mem.txsAvailable <- struct{}{}: - default: - } - } -} - -// Safe for concurrent use by multiple goroutines. 
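
Before the reap implementation, a sketch of the byte-cap semantics, using numbers from the deleted TestReapMaxBytesMaxGas table further below: 20-byte txs occupy roughly 24 bytes each once proto framing is counted (the {20, 24, 10, 1} row), so a 240-byte cap reaps at most 10 of them (the {20, 240, -1, 10} row).

```go
// Sketch: reaping for a block proposal under a byte cap.
func exampleReap(mp *CListMempool) types.Txs {
	txs := mp.ReapMaxBytesMaxGas(240, -1) // maxGas == -1 disables the gas check

	// The byte accounting uses the proto-framed size, not len(tx).
	if types.ComputeProtoSizeForTxs(txs) > 240 {
		panic("unreachable: reap respects the byte cap")
	}
	return txs
}
```
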
-func (mem *CListMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs { - mem.updateMtx.RLock() - defer mem.updateMtx.RUnlock() - - var ( - totalGas int64 - runningSize int64 - ) - - // TODO: we will get a performance boost if we have a good estimate of avg - // size per tx, and set the initial capacity based off of that. - // txs := make([]types.Tx, 0, tmmath.MinInt(mem.txs.Len(), max/mem.avgTxSize)) - txs := make([]types.Tx, 0, mem.txs.Len()) - for e := mem.txs.Front(); e != nil; e = e.Next() { - memTx := e.Value.(*mempoolTx) - - txs = append(txs, memTx.tx) - - dataSize := types.ComputeProtoSizeForTxs([]types.Tx{memTx.tx}) - - // Check total size requirement - if maxBytes > -1 && runningSize+dataSize > maxBytes { - return txs[:len(txs)-1] - } - - runningSize += dataSize - - // Check total gas requirement. - // If maxGas is negative, skip this check. - // Since newTotalGas < masGas, which - // must be non-negative, it follows that this won't overflow. - newTotalGas := totalGas + memTx.gasWanted - if maxGas > -1 && newTotalGas > maxGas { - return txs[:len(txs)-1] - } - totalGas = newTotalGas - } - return txs -} - -// Safe for concurrent use by multiple goroutines. -func (mem *CListMempool) ReapMaxTxs(max int) types.Txs { - mem.updateMtx.RLock() - defer mem.updateMtx.RUnlock() - - if max < 0 { - max = mem.txs.Len() - } - - txs := make([]types.Tx, 0, tmmath.MinInt(mem.txs.Len(), max)) - for e := mem.txs.Front(); e != nil && len(txs) <= max; e = e.Next() { - memTx := e.Value.(*mempoolTx) - txs = append(txs, memTx.tx) - } - return txs -} - -// Lock() must be help by the caller during execution. -func (mem *CListMempool) Update( - height int64, - txs types.Txs, - deliverTxResponses []*abci.ResponseDeliverTx, - preCheck mempool.PreCheckFunc, - postCheck mempool.PostCheckFunc, -) error { - // Set height - mem.height = height - mem.notifiedTxsAvailable = false - - if preCheck != nil { - mem.preCheck = preCheck - } - if postCheck != nil { - mem.postCheck = postCheck - } - - for i, tx := range txs { - if deliverTxResponses[i].Code == abci.CodeTypeOK { - // Add valid committed tx to the cache (if missing). - _ = mem.cache.Push(tx) - } else if !mem.config.KeepInvalidTxsInCache { - // Allow invalid transactions to be resubmitted. - mem.cache.Remove(tx) - } - - // Remove committed tx from the mempool. - // - // Note an evil proposer can drop valid txs! - // Mempool before: - // 100 -> 101 -> 102 - // Block, proposed by an evil proposer: - // 101 -> 102 - // Mempool after: - // 100 - // https://github.com/tendermint/tendermint/issues/3322. - if e, ok := mem.txsMap.Load(tx.Key()); ok { - mem.removeTx(tx, e.(*clist.CElement), false) - } - } - - // Either recheck non-committed txs to see if they became invalid - // or just notify there're some txs left. - if mem.Size() > 0 { - if mem.config.Recheck { - mem.logger.Debug("recheck txs", "numtxs", mem.Size(), "height", height) - mem.recheckTxs() - // At this point, mem.txs are being rechecked. - // mem.recheckCursor re-scans mem.txs and possibly removes some txs. - // Before mem.Reap(), we should wait for mem.recheckCursor to be nil. 
- } else { - mem.notifyTxsAvailable() - } - } - - // Update metrics - mem.metrics.Size.Set(float64(mem.Size())) - - return nil -} - -func (mem *CListMempool) recheckTxs() { - if mem.Size() == 0 { - panic("recheckTxs is called, but the mempool is empty") - } - - mem.recheckCursor = mem.txs.Front() - mem.recheckEnd = mem.txs.Back() - - ctx := context.Background() - - // Push txs to proxyAppConn - // NOTE: globalCb may be called concurrently. - for e := mem.txs.Front(); e != nil; e = e.Next() { - memTx := e.Value.(*mempoolTx) - _, err := mem.proxyAppConn.CheckTxAsync(ctx, abci.RequestCheckTx{ - Tx: memTx.tx, - Type: abci.CheckTxType_Recheck, - }) - if err != nil { - // No need in retrying since memTx will be rechecked after next block. - mem.logger.Error("Can't check tx", "err", err) - } - } - - _, err := mem.proxyAppConn.FlushAsync(ctx) - if err != nil { - mem.logger.Error("Can't flush txs", "err", err) - } -} - -//-------------------------------------------------------------------------------- - -// mempoolTx is a transaction that successfully ran -type mempoolTx struct { - height int64 // height that this tx had been validated in - gasWanted int64 // amount of gas this tx states it will require - tx types.Tx // - - // ids of peers who've sent us this tx (as a map for quick lookups). - // senders: PeerID -> bool - senders sync.Map -} - -// Height returns the height for this transaction -func (memTx *mempoolTx) Height() int64 { - return atomic.LoadInt64(&memTx.height) -} diff --git a/internal/mempool/v0/clist_mempool_test.go b/internal/mempool/v0/clist_mempool_test.go deleted file mode 100644 index f31d225656..0000000000 --- a/internal/mempool/v0/clist_mempool_test.go +++ /dev/null @@ -1,687 +0,0 @@ -package v0 - -import ( - "context" - "crypto/rand" - "encoding/binary" - "fmt" - mrand "math/rand" - "os" - "strconv" - "testing" - "time" - - "github.com/gogo/protobuf/proto" - gogotypes "github.com/gogo/protobuf/types" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - - abciclient "github.com/tendermint/tendermint/abci/client" - abciclimocks "github.com/tendermint/tendermint/abci/client/mocks" - "github.com/tendermint/tendermint/abci/example/kvstore" - abciserver "github.com/tendermint/tendermint/abci/server" - abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/internal/mempool" - "github.com/tendermint/tendermint/libs/log" - tmrand "github.com/tendermint/tendermint/libs/rand" - "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/types" -) - -// A cleanupFunc cleans up any config / test files created for a particular -// test. 
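
Stepping back from the deleted implementation above, a sketch of the caller-side contract for Update: consensus holds the lock across FlushAppConn and Update so no CheckTx interleaves with the post-commit state change. abciResponses is the deleted tests' helper defined further below, and passing nil filters keeps the existing pre-/post-checks.

```go
// Sketch: how a caller (consensus) applies a committed block to the mempool.
func exampleCommit(mp *CListMempool, height int64, committedTxs types.Txs) error {
	mp.Lock()
	defer mp.Unlock()

	if err := mp.FlushAppConn(); err != nil {
		return err
	}

	return mp.Update(height, committedTxs,
		abciResponses(len(committedTxs), abci.CodeTypeOK), nil, nil)
}
```
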
-type cleanupFunc func() - -func newMempoolWithApp(cc abciclient.Creator) (*CListMempool, cleanupFunc, error) { - conf, err := config.ResetTestRoot("mempool_test") - if err != nil { - return nil, func() {}, err - } - - mp, cu := newMempoolWithAppAndConfig(cc, conf) - return mp, cu, nil -} - -func newMempoolWithAppAndConfig(cc abciclient.Creator, cfg *config.Config) (*CListMempool, cleanupFunc) { - appConnMem, _ := cc() - appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool")) - err := appConnMem.Start() - if err != nil { - panic(err) - } - - mp := NewCListMempool(cfg.Mempool, appConnMem, 0) - mp.SetLogger(log.TestingLogger()) - - return mp, func() { os.RemoveAll(cfg.RootDir) } -} - -func ensureNoFire(t *testing.T, ch <-chan struct{}, timeoutMS int) { - timer := time.NewTimer(time.Duration(timeoutMS) * time.Millisecond) - select { - case <-ch: - t.Fatal("Expected not to fire") - case <-timer.C: - } -} - -func ensureFire(t *testing.T, ch <-chan struct{}, timeoutMS int) { - timer := time.NewTimer(time.Duration(timeoutMS) * time.Millisecond) - select { - case <-ch: - case <-timer.C: - t.Fatal("Expected to fire") - } -} - -func checkTxs(t *testing.T, mp mempool.Mempool, count int, peerID uint16) types.Txs { - txs := make(types.Txs, count) - txInfo := mempool.TxInfo{SenderID: peerID} - for i := 0; i < count; i++ { - txBytes := make([]byte, 20) - txs[i] = txBytes - _, err := rand.Read(txBytes) - if err != nil { - t.Error(err) - } - if err := mp.CheckTx(context.Background(), txBytes, nil, txInfo); err != nil { - // Skip invalid txs. - // TestMempoolFilters will fail otherwise. It asserts a number of txs - // returned. - if types.IsPreCheckError(err) { - continue - } - t.Fatalf("CheckTx failed: %v while checking #%d tx", err, i) - } - } - return txs -} - -func TestReapMaxBytesMaxGas(t *testing.T) { - app := kvstore.NewApplication() - cc := abciclient.NewLocalCreator(app) - mp, cleanup, err := newMempoolWithApp(cc) - require.NoError(t, err) - defer cleanup() - - // Ensure gas calculation behaves as expected - checkTxs(t, mp, 1, mempool.UnknownPeerID) - tx0 := mp.TxsFront().Value.(*mempoolTx) - // assert that kv store has gas wanted = 1. - require.Equal(t, app.CheckTx(abci.RequestCheckTx{Tx: tx0.tx}).GasWanted, int64(1), "KVStore had a gas value neq to 1") - require.Equal(t, tx0.gasWanted, int64(1), "transactions gas was set incorrectly") - // ensure each tx is 20 bytes long - require.Equal(t, len(tx0.tx), 20, "Tx is longer than 20 bytes") - mp.Flush() - - // each table driven test creates numTxsToCreate txs with checkTx, and at the end clears all remaining txs. 
- // each tx has 20 bytes - tests := []struct { - numTxsToCreate int - maxBytes int64 - maxGas int64 - expectedNumTxs int - }{ - {20, -1, -1, 20}, - {20, -1, 0, 0}, - {20, -1, 10, 10}, - {20, -1, 30, 20}, - {20, 0, -1, 0}, - {20, 0, 10, 0}, - {20, 10, 10, 0}, - {20, 24, 10, 1}, - {20, 240, 5, 5}, - {20, 240, -1, 10}, - {20, 240, 10, 10}, - {20, 240, 15, 10}, - {20, 20000, -1, 20}, - {20, 20000, 5, 5}, - {20, 20000, 30, 20}, - } - for tcIndex, tt := range tests { - checkTxs(t, mp, tt.numTxsToCreate, mempool.UnknownPeerID) - got := mp.ReapMaxBytesMaxGas(tt.maxBytes, tt.maxGas) - assert.Equal(t, tt.expectedNumTxs, len(got), "Got %d txs, expected %d, tc #%d", - len(got), tt.expectedNumTxs, tcIndex) - mp.Flush() - } -} - -func TestMempoolFilters(t *testing.T) { - app := kvstore.NewApplication() - cc := abciclient.NewLocalCreator(app) - mp, cleanup, err := newMempoolWithApp(cc) - require.NoError(t, err) - defer cleanup() - emptyTxArr := []types.Tx{[]byte{}} - - nopPreFilter := func(tx types.Tx) error { return nil } - nopPostFilter := func(tx types.Tx, res *abci.ResponseCheckTx) error { return nil } - - // each table driven test creates numTxsToCreate txs with checkTx, and at the end clears all remaining txs. - // each tx has 20 bytes - tests := []struct { - numTxsToCreate int - preFilter mempool.PreCheckFunc - postFilter mempool.PostCheckFunc - expectedNumTxs int - }{ - {10, nopPreFilter, nopPostFilter, 10}, - {10, mempool.PreCheckMaxBytes(10), nopPostFilter, 0}, - {10, mempool.PreCheckMaxBytes(22), nopPostFilter, 10}, - {10, nopPreFilter, mempool.PostCheckMaxGas(-1), 10}, - {10, nopPreFilter, mempool.PostCheckMaxGas(0), 0}, - {10, nopPreFilter, mempool.PostCheckMaxGas(1), 10}, - {10, nopPreFilter, mempool.PostCheckMaxGas(3000), 10}, - {10, mempool.PreCheckMaxBytes(10), mempool.PostCheckMaxGas(20), 0}, - {10, mempool.PreCheckMaxBytes(30), mempool.PostCheckMaxGas(20), 10}, - {10, mempool.PreCheckMaxBytes(22), mempool.PostCheckMaxGas(1), 10}, - {10, mempool.PreCheckMaxBytes(22), mempool.PostCheckMaxGas(0), 0}, - } - for tcIndex, tt := range tests { - err := mp.Update(1, emptyTxArr, abciResponses(len(emptyTxArr), abci.CodeTypeOK), tt.preFilter, tt.postFilter) - require.NoError(t, err) - checkTxs(t, mp, tt.numTxsToCreate, mempool.UnknownPeerID) - require.Equal(t, tt.expectedNumTxs, mp.Size(), "mempool had the incorrect size, on test case %d", tcIndex) - mp.Flush() - } -} - -func TestMempoolUpdate(t *testing.T) { - app := kvstore.NewApplication() - cc := abciclient.NewLocalCreator(app) - mp, cleanup, err := newMempoolWithApp(cc) - require.NoError(t, err) - defer cleanup() - - // 1. Adds valid txs to the cache - { - err := mp.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil) - require.NoError(t, err) - err = mp.CheckTx(context.Background(), []byte{0x01}, nil, mempool.TxInfo{}) - require.NoError(t, err) - } - - // 2. Removes valid txs from the mempool - { - err := mp.CheckTx(context.Background(), []byte{0x02}, nil, mempool.TxInfo{}) - require.NoError(t, err) - err = mp.Update(1, []types.Tx{[]byte{0x02}}, abciResponses(1, abci.CodeTypeOK), nil, nil) - require.NoError(t, err) - assert.Zero(t, mp.Size()) - } - - // 3. 
Removes invalid transactions from the cache and the mempool (if present) - { - err := mp.CheckTx(context.Background(), []byte{0x03}, nil, mempool.TxInfo{}) - require.NoError(t, err) - err = mp.Update(1, []types.Tx{[]byte{0x03}}, abciResponses(1, 1), nil, nil) - require.NoError(t, err) - assert.Zero(t, mp.Size()) - - err = mp.CheckTx(context.Background(), []byte{0x03}, nil, mempool.TxInfo{}) - require.NoError(t, err) - } -} - -func TestMempoolUpdateDoesNotPanicWhenApplicationMissedTx(t *testing.T) { - var callback abciclient.Callback - mockClient := new(abciclimocks.Client) - mockClient.On("Start").Return(nil) - mockClient.On("SetLogger", mock.Anything) - - mockClient.On("Error").Return(nil).Times(4) - mockClient.On("FlushAsync", mock.Anything).Return(abciclient.NewReqRes(abci.ToRequestFlush()), nil) - mockClient.On("SetResponseCallback", mock.MatchedBy(func(cb abciclient.Callback) bool { callback = cb; return true })) - - cc := func() (abciclient.Client, error) { - return mockClient, nil - } - - mp, cleanup, err := newMempoolWithApp(cc) - require.NoError(t, err) - defer cleanup() - - // Add 4 transactions to the mempool by calling the mempool's `CheckTx` on each of them. - txs := []types.Tx{[]byte{0x01}, []byte{0x02}, []byte{0x03}, []byte{0x04}} - for _, tx := range txs { - reqRes := abciclient.NewReqRes(abci.ToRequestCheckTx(abci.RequestCheckTx{Tx: tx})) - reqRes.Response = abci.ToResponseCheckTx(abci.ResponseCheckTx{Code: abci.CodeTypeOK}) - // SetDone allows the ReqRes to process its callback synchronously. - // This simulates the Response being ready for the client immediately. - reqRes.SetDone() - - mockClient.On("CheckTxAsync", mock.Anything, mock.Anything).Return(reqRes, nil) - err := mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}) - require.NoError(t, err) - } - - // Calling update to remove the first transaction from the mempool. - // This call also triggers the mempool to recheck its remaining transactions. - err = mp.Update(0, []types.Tx{txs[0]}, abciResponses(1, abci.CodeTypeOK), nil, nil) - require.Nil(t, err) - - // The mempool has now sent its requests off to the client to be rechecked - // and is waiting for the corresponding callbacks to be called. - // We now call the mempool-supplied callback on the first and third transaction. - // This simulates the client dropping the second request. - // Previous versions of this code panicked when the ABCI application missed - // a recheck-tx request. 
- resp := abci.ResponseCheckTx{Code: abci.CodeTypeOK} - req := abci.RequestCheckTx{Tx: txs[1]} - callback(abci.ToRequestCheckTx(req), abci.ToResponseCheckTx(resp)) - - req = abci.RequestCheckTx{Tx: txs[3]} - callback(abci.ToRequestCheckTx(req), abci.ToResponseCheckTx(resp)) - mockClient.AssertExpectations(t) -} - -func TestMempool_KeepInvalidTxsInCache(t *testing.T) { - testCases := []bool{true, false} - - for _, keepInvalidTxs := range testCases { - t.Run(strconv.FormatBool(keepInvalidTxs), func(t *testing.T) { - app := kvstore.NewApplication() - cc := abciclient.NewLocalCreator(app) - wcfg := config.DefaultConfig() - wcfg.Mempool.KeepInvalidTxsInCache = keepInvalidTxs - mp, cleanup := newMempoolWithAppAndConfig(cc, wcfg) - defer cleanup() - - valid := make([]byte, 8) - binary.BigEndian.PutUint64(valid, 0) - - invalid := make([]byte, 8) - binary.BigEndian.PutUint64(invalid, 1) - - // CheckTx will add the transaction to cache as soon as app.CheckTX returns status OK (which it does) - err := mp.CheckTx(context.Background(), invalid, nil, mempool.TxInfo{}) - require.NoError(t, err) - added := mp.cache.Push(invalid) - require.False(t, added, "tx should be added to cache in mp.CheckTx") - - // simulate new block - _ = app.DeliverTx(abci.RequestDeliverTx{Tx: valid}) - _ = app.DeliverTx(abci.RequestDeliverTx{Tx: invalid}) - err = mp.Update(1, []types.Tx{valid, invalid}, []*abci.ResponseDeliverTx{{Code: abci.CodeTypeOK}, {Code: 2}}, nil, nil) - require.NoError(t, err) - - // Valid transaction should be in the cache and not removed by mp.Update() - added = mp.cache.Push(valid) - assert.False(t, added) - - // With when KeepInvalidTxsInCache = false, invalid transaction is removed - // on mp.Update() and needs to be re-added - added = mp.cache.Push(invalid) - assert.Equal(t, !keepInvalidTxs, added) - }) - } -} - -func TestTxsAvailable(t *testing.T) { - app := kvstore.NewApplication() - cc := abciclient.NewLocalCreator(app) - mp, cleanup, err := newMempoolWithApp(cc) - require.NoError(t, err) - defer cleanup() - mp.EnableTxsAvailable() - - timeoutMS := 500 - - // with no txs, it shouldnt fire - ensureNoFire(t, mp.TxsAvailable(), timeoutMS) - - // send a bunch of txs, it should only fire once - txs := checkTxs(t, mp, 100, mempool.UnknownPeerID) - ensureFire(t, mp.TxsAvailable(), timeoutMS) - ensureNoFire(t, mp.TxsAvailable(), timeoutMS) - - // call update with half the txs. - // it should fire once now for the new height - // since there are still txs left - committedTxs, txs := txs[:50], txs[50:] - if err := mp.Update(1, committedTxs, abciResponses(len(committedTxs), abci.CodeTypeOK), nil, nil); err != nil { - t.Error(err) - } - ensureFire(t, mp.TxsAvailable(), timeoutMS) - ensureNoFire(t, mp.TxsAvailable(), timeoutMS) - - // send a bunch more txs. we already fired for this height so it shouldnt fire again - moreTxs := checkTxs(t, mp, 50, mempool.UnknownPeerID) - ensureNoFire(t, mp.TxsAvailable(), timeoutMS) - - // now call update with all the txs. it should not fire as there are no txs left - committedTxs = append(txs, moreTxs...) 
- if err := mp.Update(2, committedTxs, abciResponses(len(committedTxs), abci.CodeTypeOK), nil, nil); err != nil { - t.Error(err) - } - ensureNoFire(t, mp.TxsAvailable(), timeoutMS) - - // send a bunch more txs, it should only fire once - checkTxs(t, mp, 100, mempool.UnknownPeerID) - ensureFire(t, mp.TxsAvailable(), timeoutMS) - ensureNoFire(t, mp.TxsAvailable(), timeoutMS) -} - -func TestSerialReap(t *testing.T) { - app := kvstore.NewApplication() - cc := abciclient.NewLocalCreator(app) - - mp, cleanup, err := newMempoolWithApp(cc) - require.NoError(t, err) - defer cleanup() - - appConnCon, _ := cc() - appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus")) - err = appConnCon.Start() - require.Nil(t, err) - - cacheMap := make(map[string]struct{}) - deliverTxsRange := func(start, end int) { - // Deliver some txs. - for i := start; i < end; i++ { - - // This will succeed - txBytes := make([]byte, 8) - binary.BigEndian.PutUint64(txBytes, uint64(i)) - err := mp.CheckTx(context.Background(), txBytes, nil, mempool.TxInfo{}) - _, cached := cacheMap[string(txBytes)] - if cached { - require.NotNil(t, err, "expected error for cached tx") - } else { - require.Nil(t, err, "expected no err for uncached tx") - } - cacheMap[string(txBytes)] = struct{}{} - - // Duplicates are cached and should return error - err = mp.CheckTx(context.Background(), txBytes, nil, mempool.TxInfo{}) - require.NotNil(t, err, "Expected error after CheckTx on duplicated tx") - } - } - - reapCheck := func(exp int) { - txs := mp.ReapMaxBytesMaxGas(-1, -1) - require.Equal(t, len(txs), exp, fmt.Sprintf("Expected to reap %v txs but got %v", exp, len(txs))) - } - - updateRange := func(start, end int) { - txs := make([]types.Tx, 0) - for i := start; i < end; i++ { - txBytes := make([]byte, 8) - binary.BigEndian.PutUint64(txBytes, uint64(i)) - txs = append(txs, txBytes) - } - if err := mp.Update(0, txs, abciResponses(len(txs), abci.CodeTypeOK), nil, nil); err != nil { - t.Error(err) - } - } - - commitRange := func(start, end int) { - ctx := context.Background() - // Deliver some txs. - for i := start; i < end; i++ { - txBytes := make([]byte, 8) - binary.BigEndian.PutUint64(txBytes, uint64(i)) - res, err := appConnCon.DeliverTxSync(ctx, abci.RequestDeliverTx{Tx: txBytes}) - if err != nil { - t.Errorf("client error committing tx: %v", err) - } - if res.IsErr() { - t.Errorf("error committing tx. Code:%v result:%X log:%v", - res.Code, res.Data, res.Log) - } - } - res, err := appConnCon.CommitSync(ctx) - if err != nil { - t.Errorf("client error committing: %v", err) - } - if len(res.Data) != crypto.DefaultAppHashSize { - t.Errorf("error committing. Hash:%X", res.Data) - } - } - - //---------------------------------------- - - // Deliver some txs. - deliverTxsRange(0, 100) - - // Reap the txs. - reapCheck(100) - - // Reap again. We should get the same amount - reapCheck(100) - - // Deliver 0 to 999, we should reap 900 new txs - // because 100 were already counted. - deliverTxsRange(0, 1000) - - // Reap the txs. - reapCheck(1000) - - // Reap again. We should get the same amount - reapCheck(1000) - - // Commit from the conensus AppConn - commitRange(0, 500) - updateRange(0, 500) - - // We should have 500 left. - reapCheck(500) - - // Deliver 100 invalid txs and 100 valid txs - deliverTxsRange(900, 1100) - - // We should have 600 now. 
- reapCheck(600) -} - -func TestMempool_CheckTxChecksTxSize(t *testing.T) { - app := kvstore.NewApplication() - cc := abciclient.NewLocalCreator(app) - mempl, cleanup, err := newMempoolWithApp(cc) - require.NoError(t, err) - defer cleanup() - - maxTxSize := mempl.config.MaxTxBytes - - testCases := []struct { - len int - err bool - }{ - // check small txs. no error - 0: {10, false}, - 1: {1000, false}, - 2: {1000000, false}, - - // check around maxTxSize - 3: {maxTxSize - 1, false}, - 4: {maxTxSize, false}, - 5: {maxTxSize + 1, true}, - } - - for i, testCase := range testCases { - caseString := fmt.Sprintf("case %d, len %d", i, testCase.len) - - tx := tmrand.Bytes(testCase.len) - - err := mempl.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}) - bv := gogotypes.BytesValue{Value: tx} - bz, err2 := bv.Marshal() - require.NoError(t, err2) - require.Equal(t, len(bz), proto.Size(&bv), caseString) - - if !testCase.err { - require.NoError(t, err, caseString) - } else { - require.Equal(t, err, types.ErrTxTooLarge{ - Max: maxTxSize, - Actual: testCase.len, - }, caseString) - } - } -} - -func TestMempoolTxsBytes(t *testing.T) { - app := kvstore.NewApplication() - cc := abciclient.NewLocalCreator(app) - cfg, err := config.ResetTestRoot("mempool_test") - require.NoError(t, err) - - cfg.Mempool.MaxTxsBytes = 10 - mp, cleanup := newMempoolWithAppAndConfig(cc, cfg) - defer cleanup() - - // 1. zero by default - assert.EqualValues(t, 0, mp.SizeBytes()) - - // 2. len(tx) after CheckTx - err = mp.CheckTx(context.Background(), []byte{0x01}, nil, mempool.TxInfo{}) - require.NoError(t, err) - assert.EqualValues(t, 1, mp.SizeBytes()) - - // 3. zero again after tx is removed by Update - err = mp.Update(1, []types.Tx{[]byte{0x01}}, abciResponses(1, abci.CodeTypeOK), nil, nil) - require.NoError(t, err) - assert.EqualValues(t, 0, mp.SizeBytes()) - - // 4. zero after Flush - err = mp.CheckTx(context.Background(), []byte{0x02, 0x03}, nil, mempool.TxInfo{}) - require.NoError(t, err) - assert.EqualValues(t, 2, mp.SizeBytes()) - - mp.Flush() - assert.EqualValues(t, 0, mp.SizeBytes()) - - // 5. ErrMempoolIsFull is returned when/if MaxTxsBytes limit is reached. - err = mp.CheckTx( - context.Background(), - []byte{0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04}, - nil, - mempool.TxInfo{}, - ) - require.NoError(t, err) - - err = mp.CheckTx(context.Background(), []byte{0x05}, nil, mempool.TxInfo{}) - if assert.Error(t, err) { - assert.IsType(t, types.ErrMempoolIsFull{}, err) - } - - // 6. 
zero after tx is rechecked and removed due to not being valid anymore - app2 := kvstore.NewApplication() - cc = abciclient.NewLocalCreator(app2) - mp, cleanup, err = newMempoolWithApp(cc) - require.NoError(t, err) - defer cleanup() - - txBytes := make([]byte, 8) - binary.BigEndian.PutUint64(txBytes, uint64(0)) - - err = mp.CheckTx(context.Background(), txBytes, nil, mempool.TxInfo{}) - require.NoError(t, err) - assert.EqualValues(t, 8, mp.SizeBytes()) - - appConnCon, _ := cc() - appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus")) - err = appConnCon.Start() - require.Nil(t, err) - t.Cleanup(func() { - if err := appConnCon.Stop(); err != nil { - t.Error(err) - } - }) - ctx := context.Background() - res, err := appConnCon.DeliverTxSync(ctx, abci.RequestDeliverTx{Tx: txBytes}) - require.NoError(t, err) - require.EqualValues(t, 0, res.Code) - res2, err := appConnCon.CommitSync(ctx) - require.NoError(t, err) - require.NotEmpty(t, res2.Data) - - // Pretend like we committed nothing so txBytes gets rechecked and removed. - err = mp.Update(1, []types.Tx{}, abciResponses(0, abci.CodeTypeOK), nil, nil) - require.NoError(t, err) - assert.EqualValues(t, 8, mp.SizeBytes()) - - // 7. Test RemoveTxByKey function - err = mp.CheckTx(context.Background(), []byte{0x06}, nil, mempool.TxInfo{}) - require.NoError(t, err) - assert.EqualValues(t, 9, mp.SizeBytes()) - assert.Error(t, mp.RemoveTxByKey(types.Tx([]byte{0x07}).Key())) - assert.EqualValues(t, 9, mp.SizeBytes()) - assert.NoError(t, mp.RemoveTxByKey(types.Tx([]byte{0x06}).Key())) - assert.EqualValues(t, 8, mp.SizeBytes()) - -} - -// This will non-deterministically catch some concurrency failures like -// https://github.com/tendermint/tendermint/issues/3509 -// TODO: all of the tests should probably also run using the remote proxy app -// since otherwise we're not actually testing the concurrency of the mempool here! -func TestMempoolRemoteAppConcurrency(t *testing.T) { - sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6)) - app := kvstore.NewApplication() - cc, server := newRemoteApp(t, sockPath, app) - t.Cleanup(func() { - if err := server.Stop(); err != nil { - t.Error(err) - } - }) - cfg, err := config.ResetTestRoot("mempool_test") - require.NoError(t, err) - - mp, cleanup := newMempoolWithAppAndConfig(cc, cfg) - defer cleanup() - - // generate small number of txs - nTxs := 10 - txLen := 200 - txs := make([]types.Tx, nTxs) - for i := 0; i < nTxs; i++ { - txs[i] = tmrand.Bytes(txLen) - } - - // simulate a group of peers sending them over and over - N := cfg.Mempool.Size - maxPeers := 5 - for i := 0; i < N; i++ { - peerID := mrand.Intn(maxPeers) - txNum := mrand.Intn(nTxs) - tx := txs[txNum] - - // this will err with ErrTxInCache many times ... 
- mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{SenderID: uint16(peerID)}) //nolint: errcheck // will error - } - err = mp.FlushAppConn() - require.NoError(t, err) -} - -// caller must close server -func newRemoteApp( - t *testing.T, - addr string, - app abci.Application, -) ( - clientCreator abciclient.Creator, - server service.Service, -) { - clientCreator = abciclient.NewRemoteCreator(addr, "socket", true) - - // Start server - server = abciserver.NewSocketServer(addr, app) - server.SetLogger(log.TestingLogger().With("module", "abci-server")) - if err := server.Start(); err != nil { - t.Fatalf("Error starting socket server: %v", err.Error()) - } - return clientCreator, server -} - -func abciResponses(n int, code uint32) []*abci.ResponseDeliverTx { - responses := make([]*abci.ResponseDeliverTx, 0, n) - for i := 0; i < n; i++ { - responses = append(responses, &abci.ResponseDeliverTx{Code: code}) - } - return responses -} diff --git a/internal/mempool/v0/doc.go b/internal/mempool/v0/doc.go deleted file mode 100644 index 3b5d0d20d4..0000000000 --- a/internal/mempool/v0/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -// The mempool pushes new txs onto the proxyAppConn. -// It gets a stream of (req, res) tuples from the proxy. -// The mempool stores good txs in a concurrent linked-list. - -// Multiple concurrent go-routines can traverse this linked-list -// safely by calling .NextWait() on each element. - -// So we have several go-routines: -// 1. Consensus calling Update() and ReapMaxBytesMaxGas() synchronously -// 2. Many mempool reactor's peer routines calling CheckTx() -// 3. Many mempool reactor's peer routines traversing the txs linked list - -// To manage these goroutines, there are three methods of locking. -// 1. Mutations to the linked-list are protected by an internal mtx (CList is goroutine-safe) -// 2. Mutations to the linked-list elements are atomic -// 3. CheckTx() and/or ReapMaxBytesMaxGas() calls can be paused upon Update(), protected by .updateMtx - -// Garbage collection of old elements from mempool.txs is handled via the -// DetachPrev() call, which makes old elements not reachable by peer -// broadcastTxRoutine(). - -// TODO: Better handle abci client errors. (make it automatically handle connection errors) -package v0 diff --git a/internal/mempool/v0/reactor.go b/internal/mempool/v0/reactor.go deleted file mode 100644 index c71eb12587..0000000000 --- a/internal/mempool/v0/reactor.go +++ /dev/null @@ -1,402 +0,0 @@ -package v0 - -import ( - "context" - "errors" - "fmt" - "runtime/debug" - "sync" - "time" - - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/libs/clist" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/mempool" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/service" - protomem "github.com/tendermint/tendermint/proto/tendermint/mempool" - "github.com/tendermint/tendermint/types" -) - -var ( - _ service.Service = (*Reactor)(nil) - _ p2p.Wrapper = (*protomem.Message)(nil) -) - -// PeerManager defines the interface contract required for getting necessary -// peer information. This should eventually be replaced with a message-oriented -// approach utilizing the p2p stack. -type PeerManager interface { - GetHeight(types.NodeID) int64 -} - -// Reactor implements a service that contains a mempool of txs that are broadcasted -// amongst peers.
It maintains a map from peer ID to counter, to prevent gossiping -// txs to the peers you received it from. -type Reactor struct { - service.BaseService - - cfg *config.MempoolConfig - mempool *CListMempool - ids *mempool.MempoolIDs - - // XXX: Currently, this is the only way to get information about a peer. Ideally, - // we rely on message-oriented communication to get necessary peer data. - // ref: https://github.com/tendermint/tendermint/issues/5670 - peerMgr PeerManager - - mempoolCh *p2p.Channel - peerUpdates *p2p.PeerUpdates - closeCh chan struct{} - - // peerWG is used to coordinate graceful termination of all peer broadcasting - // goroutines. - peerWG sync.WaitGroup - - mtx tmsync.Mutex - peerRoutines map[types.NodeID]*tmsync.Closer -} - -// NewReactor returns a reference to a new reactor. -func NewReactor( - logger log.Logger, - cfg *config.MempoolConfig, - peerMgr PeerManager, - mp *CListMempool, - mempoolCh *p2p.Channel, - peerUpdates *p2p.PeerUpdates, -) *Reactor { - - r := &Reactor{ - cfg: cfg, - peerMgr: peerMgr, - mempool: mp, - ids: mempool.NewMempoolIDs(), - mempoolCh: mempoolCh, - peerUpdates: peerUpdates, - closeCh: make(chan struct{}), - peerRoutines: make(map[types.NodeID]*tmsync.Closer), - } - - r.BaseService = *service.NewBaseService(logger, "Mempool", r) - return r -} - -// GetChannelShims returns a map of ChannelDescriptorShim objects, where each -// object wraps a reference to a legacy p2p ChannelDescriptor and the corresponding -// p2p proto.Message the new p2p Channel is responsible for handling. -// -// -// TODO: Remove once p2p refactor is complete. -// ref: https://github.com/tendermint/tendermint/issues/5670 -func GetChannelShims(cfg *config.MempoolConfig) map[p2p.ChannelID]*p2p.ChannelDescriptorShim { - largestTx := make([]byte, cfg.MaxTxBytes) - batchMsg := protomem.Message{ - Sum: &protomem.Message_Txs{ - Txs: &protomem.Txs{Txs: [][]byte{largestTx}}, - }, - } - - return map[p2p.ChannelID]*p2p.ChannelDescriptorShim{ - mempool.MempoolChannel: { - MsgType: new(protomem.Message), - Descriptor: &p2p.ChannelDescriptor{ - ID: byte(mempool.MempoolChannel), - Priority: 5, - RecvMessageCapacity: batchMsg.Size(), - RecvBufferCapacity: 128, - MaxSendBytes: 5000, - }, - }, - } -} - -// OnStart starts separate go routines for each p2p Channel and listens for -// envelopes on each. In addition, it also listens for peer updates and handles -// messages on that p2p channel accordingly. The caller must be sure to execute -// OnStop to ensure the outbound p2p Channels are closed. -func (r *Reactor) OnStart() error { - if !r.cfg.Broadcast { - r.Logger.Info("tx broadcasting is disabled") - } - - go r.processMempoolCh() - go r.processPeerUpdates() - - return nil -} - -// OnStop stops the reactor by signaling to all spawned goroutines to exit and -// blocking until they all exit. -func (r *Reactor) OnStop() { - r.mtx.Lock() - for _, c := range r.peerRoutines { - c.Close() - } - r.mtx.Unlock() - - // wait for all spawned peer tx broadcasting goroutines to gracefully exit - r.peerWG.Wait() - - // Close closeCh to signal to all spawned goroutines to gracefully exit. All - // p2p Channels should execute Close(). - close(r.closeCh) - - // Wait for all p2p Channels to be closed before returning. This ensures we - // can easily reason about synchronization of all p2p Channels and ensure no - // panics will occur. - <-r.mempoolCh.Done() - <-r.peerUpdates.Done() -} - -// handleMempoolMessage handles envelopes sent from peers on the MempoolChannel. 
-// For every tx in the message, we execute CheckTx. It returns an error if an -// empty set of txs are sent in an envelope or if we receive an unexpected -// message type. -func (r *Reactor) handleMempoolMessage(envelope p2p.Envelope) error { - logger := r.Logger.With("peer", envelope.From) - - switch msg := envelope.Message.(type) { - case *protomem.Txs: - protoTxs := msg.GetTxs() - if len(protoTxs) == 0 { - return errors.New("empty txs received from peer") - } - - txInfo := mempool.TxInfo{SenderID: r.ids.GetForPeer(envelope.From)} - if len(envelope.From) != 0 { - txInfo.SenderNodeID = envelope.From - } - - for _, tx := range protoTxs { - if err := r.mempool.CheckTx(context.Background(), types.Tx(tx), nil, txInfo); err != nil { - logger.Error("checktx failed for tx", "tx", fmt.Sprintf("%X", types.Tx(tx).Hash()), "err", err) - } - } - - default: - return fmt.Errorf("received unknown message: %T", msg) - } - - return nil -} - -// handleMessage handles an Envelope sent from a peer on a specific p2p Channel. -// It will handle errors and any possible panics gracefully. A caller can handle -// any error returned by sending a PeerError on the respective channel. -func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) { - defer func() { - if e := recover(); e != nil { - err = fmt.Errorf("panic in processing message: %v", e) - r.Logger.Error( - "recovering from processing message panic", - "err", err, - "stack", string(debug.Stack()), - ) - } - }() - - // r.Logger.Debug("received message", "peer", envelope.From) - - switch chID { - case mempool.MempoolChannel: - err = r.handleMempoolMessage(envelope) - - default: - err = fmt.Errorf("unknown channel ID (%d) for envelope (%v)", chID, envelope) - } - - return err -} - -// processMempoolCh implements a blocking event loop where we listen for p2p -// Envelope messages from the mempoolCh. -func (r *Reactor) processMempoolCh() { - defer r.mempoolCh.Close() - - for { - select { - case envelope := <-r.mempoolCh.In: - if err := r.handleMessage(r.mempoolCh.ID, envelope); err != nil { - r.Logger.Error("failed to process message", "ch_id", r.mempoolCh.ID, "envelope", envelope, "err", err) - r.mempoolCh.Error <- p2p.PeerError{ - NodeID: envelope.From, - Err: err, - } - } - - case <-r.closeCh: - r.Logger.Debug("stopped listening on mempool channel; closing...") - return - } - } -} - -// processPeerUpdate processes a PeerUpdate. For added peers, PeerStatusUp, we -// check if the reactor is running and if we've already started a tx broadcasting -// goroutine or not. If not, we start one for the newly added peer. For down or -// removed peers, we remove the peer from the mempool peer ID set and signal to -// stop the tx broadcasting goroutine. -func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { - r.Logger.Debug("received peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status) - - r.mtx.Lock() - defer r.mtx.Unlock() - - switch peerUpdate.Status { - case p2p.PeerStatusUp: - // Do not allow starting new tx broadcast loops after reactor shutdown - // has been initiated. This can happen after we've manually closed all - // peer broadcast loops and closed r.closeCh, but the router still sends - // in-flight peer updates. 
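- // (Editorial sketch of the shutdown race guarded against below; this is
- // illustrative commentary, not code from the original file:
- //
- //	OnStop goroutine                 router goroutine
- //	----------------                 ----------------
- //	close all peer broadcast loops
- //	close(r.closeCh)
- //	                                 delivers in-flight PeerUpdate{PeerStatusUp}
- //	                                 processPeerUpdate would spawn a fresh
- //	                                 broadcast loop without the guard below.)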
- if !r.IsRunning() { - return - } - - if r.cfg.Broadcast { - // Check if we've already started a goroutine for this peer, if not we create - // a new done channel so we can explicitly close the goroutine if the peer - // is later removed, we increment the waitgroup so the reactor can stop - // safely, and finally start the goroutine to broadcast txs to that peer. - _, ok := r.peerRoutines[peerUpdate.NodeID] - if !ok { - closer := tmsync.NewCloser() - - r.peerRoutines[peerUpdate.NodeID] = closer - r.peerWG.Add(1) - - r.ids.ReserveForPeer(peerUpdate.NodeID) - - // start a broadcast routine ensuring all txs are forwarded to the peer - go r.broadcastTxRoutine(peerUpdate.NodeID, closer) - } - } - - case p2p.PeerStatusDown: - r.ids.Reclaim(peerUpdate.NodeID) - - // Check if we've started a tx broadcasting goroutine for this peer. - // If we have, we signal to terminate the goroutine via the channel's closure. - // This will internally decrement the peer waitgroup and remove the peer - // from the map of peer tx broadcasting goroutines. - closer, ok := r.peerRoutines[peerUpdate.NodeID] - if ok { - closer.Close() - } - } -} - -// processPeerUpdates initiates a blocking process where we listen for and handle -// PeerUpdate messages. When the reactor is stopped, we will catch the signal and -// close the p2p PeerUpdatesCh gracefully. -func (r *Reactor) processPeerUpdates() { - defer r.peerUpdates.Close() - - for { - select { - case peerUpdate := <-r.peerUpdates.Updates(): - r.processPeerUpdate(peerUpdate) - - case <-r.closeCh: - r.Logger.Debug("stopped listening on peer updates channel; closing...") - return - } - } -} - -func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer) { - peerMempoolID := r.ids.GetForPeer(peerID) - var next *clist.CElement - - // remove the peer ID from the map of routines and mark the waitgroup as done - defer func() { - r.mtx.Lock() - delete(r.peerRoutines, peerID) - r.mtx.Unlock() - - r.peerWG.Done() - - if e := recover(); e != nil { - r.Logger.Error( - "recovering from broadcasting mempool loop", - "err", e, - "stack", string(debug.Stack()), - ) - } - }() - - for { - if !r.IsRunning() { - return - } - - // This happens because the CElement we were looking at got garbage - // collected (removed). That is, .NextWait() returned nil. Go ahead and - // start from the beginning. - if next == nil { - select { - case <-r.mempool.TxsWaitChan(): // wait until a tx is available - if next = r.mempool.TxsFront(); next == nil { - continue - } - - case <-closer.Done(): - // The peer is marked for removal via a PeerUpdate as the doneCh was - // explicitly closed to signal we should exit. - return - - case <-r.closeCh: - // The reactor has signaled that we are stopped and thus we should - // implicitly exit this peer's goroutine. - return - } - } - - memTx := next.Value.(*mempoolTx) - - if r.peerMgr != nil { - height := r.peerMgr.GetHeight(peerID) - if height > 0 && height < memTx.Height()-1 { - // allow for a lag of one block - time.Sleep(mempool.PeerCatchupSleepIntervalMS * time.Millisecond) - continue - } - } - - // NOTE: Transaction batching was disabled due to: - // https://github.com/tendermint/tendermint/issues/5796 - - if _, ok := memTx.senders.Load(peerMempoolID); !ok { - // Send the mempool tx to the corresponding peer. Note, the peer may be - // behind and thus would not be able to process the mempool tx correctly. 
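- // (Editorial sketch, not original code: if batching were ever re-enabled,
- // the envelope below could carry several txs at once, e.g.
- //
- //	Message: &protomem.Txs{Txs: [][]byte{tx1, tx2, tx3}}
- //
- // where tx1..tx3 are illustrative placeholders; per the NOTE above, exactly
- // one tx is sent per envelope today.)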
- r.mempoolCh.Out <- p2p.Envelope{ - To: peerID, - Message: &protomem.Txs{ - Txs: [][]byte{memTx.tx}, - }, - } - r.Logger.Debug( - "gossiped tx to peer", - "tx", fmt.Sprintf("%X", memTx.tx.Hash()), - "peer", peerID, - ) - } - - select { - case <-next.NextWaitChan(): - // see the start of the for loop for nil check - next = next.Next() - - case <-closer.Done(): - // The peer is marked for removal via a PeerUpdate as the doneCh was - // explicitly closed to signal we should exit. - return - - case <-r.closeCh: - // The reactor has signaled that we are stopped and thus we should - // implicitly exit this peer's goroutine. - return - } - } -} diff --git a/internal/mempool/v0/reactor_test.go b/internal/mempool/v0/reactor_test.go deleted file mode 100644 index f5f64ca0e8..0000000000 --- a/internal/mempool/v0/reactor_test.go +++ /dev/null @@ -1,392 +0,0 @@ -package v0 - -import ( - "context" - "sync" - "testing" - "time" - - "github.com/stretchr/testify/require" - - abciclient "github.com/tendermint/tendermint/abci/client" - "github.com/tendermint/tendermint/abci/example/kvstore" - abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/mempool" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/p2p/p2ptest" - "github.com/tendermint/tendermint/libs/log" - tmrand "github.com/tendermint/tendermint/libs/rand" - protomem "github.com/tendermint/tendermint/proto/tendermint/mempool" - "github.com/tendermint/tendermint/types" -) - -type reactorTestSuite struct { - network *p2ptest.Network - logger log.Logger - - reactors map[types.NodeID]*Reactor - mempoolChnnels map[types.NodeID]*p2p.Channel - mempools map[types.NodeID]*CListMempool - kvstores map[types.NodeID]*kvstore.Application - - peerChans map[types.NodeID]chan p2p.PeerUpdate - peerUpdates map[types.NodeID]*p2p.PeerUpdates - - nodes []types.NodeID -} - -func setup(t *testing.T, config *config.MempoolConfig, numNodes int, chBuf uint) *reactorTestSuite { - t.Helper() - - rts := &reactorTestSuite{ - logger: log.TestingLogger().With("testCase", t.Name()), - network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}), - reactors: make(map[types.NodeID]*Reactor, numNodes), - mempoolChnnels: make(map[types.NodeID]*p2p.Channel, numNodes), - mempools: make(map[types.NodeID]*CListMempool, numNodes), - kvstores: make(map[types.NodeID]*kvstore.Application, numNodes), - peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numNodes), - peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numNodes), - } - - chDesc := p2p.ChannelDescriptor{ID: byte(mempool.MempoolChannel)} - rts.mempoolChnnels = rts.network.MakeChannelsNoCleanup(t, chDesc, new(protomem.Message), int(chBuf)) - - for nodeID := range rts.network.Nodes { - rts.kvstores[nodeID] = kvstore.NewApplication() - cc := abciclient.NewLocalCreator(rts.kvstores[nodeID]) - - mempool, memCleanup, err := newMempoolWithApp(cc) - require.NoError(t, err) - t.Cleanup(memCleanup) - mempool.SetLogger(rts.logger) - rts.mempools[nodeID] = mempool - - rts.peerChans[nodeID] = make(chan p2p.PeerUpdate) - rts.peerUpdates[nodeID] = p2p.NewPeerUpdates(rts.peerChans[nodeID], 1) - rts.network.Nodes[nodeID].PeerManager.Register(rts.peerUpdates[nodeID]) - - rts.reactors[nodeID] = NewReactor( - rts.logger.With("nodeID", nodeID), - config, - rts.network.Nodes[nodeID].PeerManager, - mempool, - rts.mempoolChnnels[nodeID], - rts.peerUpdates[nodeID], - ) - - rts.nodes = append(rts.nodes, nodeID) 
- - require.NoError(t, rts.reactors[nodeID].Start()) - require.True(t, rts.reactors[nodeID].IsRunning()) - } - - require.Len(t, rts.reactors, numNodes) - - t.Cleanup(func() { - for nodeID := range rts.reactors { - if rts.reactors[nodeID].IsRunning() { - require.NoError(t, rts.reactors[nodeID].Stop()) - require.False(t, rts.reactors[nodeID].IsRunning()) - } - } - }) - - return rts -} - -func (rts *reactorTestSuite) start(t *testing.T) { - t.Helper() - rts.network.Start(t) - require.Len(t, - rts.network.RandomNode().PeerManager.Peers(), - len(rts.nodes)-1, - "network does not have expected number of nodes") -} - -func (rts *reactorTestSuite) assertMempoolChannelsDrained(t *testing.T) { - t.Helper() - - for id, r := range rts.reactors { - require.NoError(t, r.Stop(), "stopping reactor %s", id) - r.Wait() - require.False(t, r.IsRunning(), "reactor %s did not stop", id) - } - - for _, mch := range rts.mempoolChnnels { - require.Empty(t, mch.Out, "checking channel %q (len=%d)", mch.ID, len(mch.Out)) - } -} - -func (rts *reactorTestSuite) waitForTxns(t *testing.T, txs types.Txs, ids ...types.NodeID) { - t.Helper() - - fn := func(pool *CListMempool) { - for pool.Size() < len(txs) { - time.Sleep(50 * time.Millisecond) - } - - reapedTxs := pool.ReapMaxTxs(len(txs)) - require.Equal(t, len(txs), len(reapedTxs)) - for i, tx := range txs { - require.Equalf(t, - tx, - reapedTxs[i], - "txs at index %d in reactor mempool mismatch; got: %v, expected: %v", i, tx, reapedTxs[i], - ) - } - } - - if len(ids) == 1 { - fn(rts.reactors[ids[0]].mempool) - return - } - - wg := &sync.WaitGroup{} - for id := range rts.mempools { - if len(ids) > 0 && !p2ptest.NodeInSlice(id, ids) { - continue - } - - wg.Add(1) - func(nid types.NodeID) { defer wg.Done(); fn(rts.reactors[nid].mempool) }(id) - } - - wg.Wait() -} - -func TestReactorBroadcastTxs(t *testing.T) { - numTxs := 1000 - numNodes := 10 - cfg := config.TestConfig() - - rts := setup(t, cfg.Mempool, numNodes, 0) - - primary := rts.nodes[0] - secondaries := rts.nodes[1:] - - txs := checkTxs(t, rts.reactors[primary].mempool, numTxs, mempool.UnknownPeerID) - - // run the router - rts.start(t) - - // Wait till all secondary suites (reactor) received all mempool txs from the - // primary suite (node). - rts.waitForTxns(t, txs, secondaries...) - - for _, pool := range rts.mempools { - require.Equal(t, len(txs), pool.Size()) - } - - rts.assertMempoolChannelsDrained(t) -} - -// regression test for https://github.com/tendermint/tendermint/issues/5408 -func TestReactorConcurrency(t *testing.T) { - numTxs := 5 - numNodes := 2 - cfg := config.TestConfig() - - rts := setup(t, cfg.Mempool, numNodes, 0) - - primary := rts.nodes[0] - secondary := rts.nodes[1] - - rts.start(t) - - var wg sync.WaitGroup - - for i := 0; i < 1000; i++ { - wg.Add(2) - - // 1. submit a bunch of txs - // 2. update the whole mempool - - txs := checkTxs(t, rts.reactors[primary].mempool, numTxs, mempool.UnknownPeerID) - go func() { - defer wg.Done() - - mempool := rts.mempools[primary] - - mempool.Lock() - defer mempool.Unlock() - - deliverTxResponses := make([]*abci.ResponseDeliverTx, len(txs)) - for i := range txs { - deliverTxResponses[i] = &abci.ResponseDeliverTx{Code: 0} - } - - require.NoError(t, mempool.Update(1, txs, deliverTxResponses, nil, nil)) - }() - - // 1. submit a bunch of txs - // 2. 
update none - _ = checkTxs(t, rts.reactors[secondary].mempool, numTxs, mempool.UnknownPeerID) - go func() { - defer wg.Done() - - mempool := rts.mempools[secondary] - - mempool.Lock() - defer mempool.Unlock() - - err := mempool.Update(1, []types.Tx{}, make([]*abci.ResponseDeliverTx, 0), nil, nil) - require.NoError(t, err) - }() - - // flush the mempool - rts.mempools[secondary].Flush() - } - - wg.Wait() -} - -func TestReactorNoBroadcastToSender(t *testing.T) { - numTxs := 1000 - numNodes := 2 - cfg := config.TestConfig() - - rts := setup(t, cfg.Mempool, numNodes, uint(numTxs)) - - primary := rts.nodes[0] - secondary := rts.nodes[1] - - peerID := uint16(1) - _ = checkTxs(t, rts.mempools[primary], numTxs, peerID) - - rts.start(t) - - time.Sleep(100 * time.Millisecond) - - require.Eventually(t, func() bool { - return rts.mempools[secondary].Size() == 0 - }, time.Minute, 100*time.Millisecond) - - rts.assertMempoolChannelsDrained(t) -} - -func TestReactor_MaxTxBytes(t *testing.T) { - numNodes := 2 - cfg := config.TestConfig() - - rts := setup(t, cfg.Mempool, numNodes, 0) - - primary := rts.nodes[0] - secondary := rts.nodes[1] - - // Broadcast a tx, which has the max size and ensure it's received by the - // second reactor. - tx1 := tmrand.Bytes(cfg.Mempool.MaxTxBytes) - err := rts.reactors[primary].mempool.CheckTx( - context.Background(), - tx1, - nil, - mempool.TxInfo{ - SenderID: mempool.UnknownPeerID, - }, - ) - require.NoError(t, err) - - rts.start(t) - - // Wait till all secondary suites (reactor) received all mempool txs from the - // primary suite (node). - rts.waitForTxns(t, []types.Tx{tx1}, secondary) - - rts.reactors[primary].mempool.Flush() - rts.reactors[secondary].mempool.Flush() - - // broadcast a tx, which is beyond the max size and ensure it's not sent - tx2 := tmrand.Bytes(cfg.Mempool.MaxTxBytes + 1) - err = rts.mempools[primary].CheckTx(context.Background(), tx2, nil, mempool.TxInfo{SenderID: mempool.UnknownPeerID}) - require.Error(t, err) - - rts.assertMempoolChannelsDrained(t) -} - -func TestDontExhaustMaxActiveIDs(t *testing.T) { - cfg := config.TestConfig() - - // we're creating a single node network, but not starting the - // network. - rts := setup(t, cfg.Mempool, 1, mempool.MaxActiveIDs+1) - - nodeID := rts.nodes[0] - - peerID, err := types.NewNodeID("0011223344556677889900112233445566778899") - require.NoError(t, err) - - // ensure the reactor does not panic (i.e. 
exhaust active IDs) - for i := 0; i < mempool.MaxActiveIDs+1; i++ { - rts.peerChans[nodeID] <- p2p.PeerUpdate{ - Status: p2p.PeerStatusUp, - NodeID: peerID, - } - - rts.mempoolChnnels[nodeID].Out <- p2p.Envelope{ - To: peerID, - Message: &protomem.Txs{ - Txs: [][]byte{}, - }, - } - } - - require.Eventually( - t, - func() bool { - for _, mch := range rts.mempoolChnnels { - if len(mch.Out) > 0 { - return false - } - } - - return true - }, - time.Minute, - 10*time.Millisecond, - ) - - rts.assertMempoolChannelsDrained(t) -} - -func TestMempoolIDsPanicsIfNodeRequestsOvermaxActiveIDs(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode") - } - - // 0 is already reserved for UnknownPeerID - ids := mempool.NewMempoolIDs() - - peerID, err := types.NewNodeID("0011223344556677889900112233445566778899") - require.NoError(t, err) - - for i := 0; i < mempool.MaxActiveIDs-1; i++ { - ids.ReserveForPeer(peerID) - } - - require.Panics(t, func() { - ids.ReserveForPeer(peerID) - }) -} - -func TestBroadcastTxForPeerStopsWhenPeerStops(t *testing.T) { - if testing.Short() { - t.Skip("skipping test in short mode") - } - - cfg := config.TestConfig() - - rts := setup(t, cfg.Mempool, 2, 0) - - primary := rts.nodes[0] - secondary := rts.nodes[1] - - rts.start(t) - - // disconnect peer - rts.peerChans[primary] <- p2p.PeerUpdate{ - Status: p2p.PeerStatusDown, - NodeID: secondary, - } -} diff --git a/internal/mempool/v1/mempool.go b/internal/mempool/v1/mempool.go deleted file mode 100644 index f0fed29af1..0000000000 --- a/internal/mempool/v1/mempool.go +++ /dev/null @@ -1,887 +0,0 @@ -package v1 - -import ( - "bytes" - "context" - "errors" - "fmt" - "reflect" - "sync/atomic" - "time" - - abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/libs/clist" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/mempool" - "github.com/tendermint/tendermint/internal/proxy" - "github.com/tendermint/tendermint/libs/log" - tmmath "github.com/tendermint/tendermint/libs/math" - "github.com/tendermint/tendermint/types" -) - -var _ mempool.Mempool = (*TxMempool)(nil) - -// TxMempoolOption sets an optional parameter on the TxMempool. -type TxMempoolOption func(*TxMempool) - -// TxMempool defines a prioritized mempool data structure used by the v1 mempool -// reactor. It keeps a thread-safe priority queue of transactions that is used -// when a block proposer constructs a block and a thread-safe linked-list that -// is used to gossip transactions to peers in a FIFO manner. -type TxMempool struct { - logger log.Logger - metrics *mempool.Metrics - config *config.MempoolConfig - proxyAppConn proxy.AppConnMempool - - // txsAvailable fires once for each height when the mempool is not empty - txsAvailable chan struct{} - notifiedTxsAvailable bool - - // height defines the last block height process during Update() - height int64 - - // sizeBytes defines the total size of the mempool (sum of all tx bytes) - sizeBytes int64 - - // cache defines a fixed-size cache of already seen transactions as this - // reduces pressure on the proxyApp. - cache mempool.TxCache - - // txStore defines the main storage of valid transactions. Indexes are built - // on top of this store. - txStore *TxStore - - // gossipIndex defines the gossiping index of valid transactions via a - // thread-safe linked-list. We also use the gossip index as a cursor for - // rechecking transactions already in the mempool. 
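- // (Editorial overview, inferred from this file and illustrative only: every
- // valid tx lives once in txStore and is referenced by the other indexes:
- //
- //	txStore         primary map: tx key -> *WrappedTx
- //	priorityIndex   priority queue used when reaping txs for a block
- //	gossipIndex     FIFO linked list used for gossip and recheck cursors
- //	heightIndex     ascending by height, used for TTL purging
- //	timestampIndex  ascending by timestamp, used for TTL purging)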
- gossipIndex *clist.CList - - // recheckCursor and recheckEnd are used as cursors based on the gossip index -// to recheck transactions that are already in the mempool. Iteration is not -// thread-safe and transactions may be mutated in serial order. - // - // XXX/TODO: It might be somewhat of a code smell to use the gossip index for - // iterator and cursor management when rechecking transactions. If the gossip - // index changes or is removed in a future refactor, this will have to be - // refactored. Instead, we should consider just keeping a slice of a snapshot - // of the mempool's current transactions during Update and an integer cursor - // into that slice. This, however, requires additional O(n) space complexity. - recheckCursor *clist.CElement // next expected response - recheckEnd *clist.CElement // re-checking stops here - - // priorityIndex defines the priority index of valid transactions via a - // thread-safe priority queue. - priorityIndex *TxPriorityQueue - - // heightIndex defines a height-based, in ascending order, transaction index. - // i.e. older transactions are first. - heightIndex *WrappedTxList - - // timestampIndex defines a timestamp-based, in ascending order, transaction - // index. i.e. older transactions are first. - timestampIndex *WrappedTxList - - // A read/write lock is used to safeguard updates, insertions and deletions - // from the mempool. A read-lock is implicitly acquired when executing CheckTx, - // however, a caller must explicitly grab a write-lock via Lock when updating - // the mempool via Update(). - mtx tmsync.RWMutex - preCheck mempool.PreCheckFunc - postCheck mempool.PostCheckFunc -} - -func NewTxMempool( - logger log.Logger, - cfg *config.MempoolConfig, - proxyAppConn proxy.AppConnMempool, - height int64, - options ...TxMempoolOption, -) *TxMempool { - - txmp := &TxMempool{ - logger: logger, - config: cfg, - proxyAppConn: proxyAppConn, - height: height, - cache: mempool.NopTxCache{}, - metrics: mempool.NopMetrics(), - txStore: NewTxStore(), - gossipIndex: clist.New(), - priorityIndex: NewTxPriorityQueue(), - heightIndex: NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool { - return wtx1.height >= wtx2.height - }), - timestampIndex: NewWrappedTxList(func(wtx1, wtx2 *WrappedTx) bool { - return wtx1.timestamp.After(wtx2.timestamp) || wtx1.timestamp.Equal(wtx2.timestamp) - }), - } - - if cfg.CacheSize > 0 { - txmp.cache = mempool.NewLRUTxCache(cfg.CacheSize) - } - - proxyAppConn.SetResponseCallback(txmp.defaultTxCallback) - - for _, opt := range options { - opt(txmp) - } - - return txmp -} - -// WithPreCheck sets a filter for the mempool to reject a transaction if f(tx) -// returns an error. This is executed before CheckTx. It only applies to the -// first created block. After that, Update() overwrites the existing value. -func WithPreCheck(f mempool.PreCheckFunc) TxMempoolOption { - return func(txmp *TxMempool) { txmp.preCheck = f } -} - -// WithPostCheck sets a filter for the mempool to reject a transaction if -// f(tx, resp) returns an error. This is executed after CheckTx. It only applies -// to the first created block. After that, Update overwrites the existing value. -func WithPostCheck(f mempool.PostCheckFunc) TxMempoolOption { - return func(txmp *TxMempool) { txmp.postCheck = f } -} - -// WithMetrics sets the mempool's metrics collector. -func WithMetrics(metrics *mempool.Metrics) TxMempoolOption { - return func(txmp *TxMempool) { txmp.metrics = metrics } -} - -// Lock obtains a write-lock on the mempool.
A caller must be sure to explicitly -// release the lock when finished. -func (txmp *TxMempool) Lock() { - txmp.mtx.Lock() -} - -// Unlock releases a write-lock on the mempool. -func (txmp *TxMempool) Unlock() { - txmp.mtx.Unlock() -} - -// Size returns the number of valid transactions in the mempool. It is -// thread-safe. -func (txmp *TxMempool) Size() int { - return txmp.txStore.Size() -} - -// SizeBytes returns the total sum in bytes of all the valid transactions in the -// mempool. It is thread-safe. -func (txmp *TxMempool) SizeBytes() int64 { - return atomic.LoadInt64(&txmp.sizeBytes) -} - -// FlushAppConn executes FlushSync on the mempool's proxyAppConn. -// -// NOTE: The caller must obtain a write-lock via Lock() prior to execution. -func (txmp *TxMempool) FlushAppConn() error { - return txmp.proxyAppConn.FlushSync(context.Background()) -} - -// WaitForNextTx returns a blocking channel that will be closed when the next -// valid transaction is available to gossip. It is thread-safe. -func (txmp *TxMempool) WaitForNextTx() <-chan struct{} { - return txmp.gossipIndex.WaitChan() -} - -// NextGossipTx returns the next valid transaction to gossip. A caller must wait -// for WaitForNextTx to signal a transaction is available to gossip first. It is -// thread-safe. -func (txmp *TxMempool) NextGossipTx() *clist.CElement { - return txmp.gossipIndex.Front() -} - -// EnableTxsAvailable enables the mempool to trigger events when transactions -// are available on a block by block basis. -func (txmp *TxMempool) EnableTxsAvailable() { - txmp.mtx.Lock() - defer txmp.mtx.Unlock() - - txmp.txsAvailable = make(chan struct{}, 1) -} - -// TxsAvailable returns a channel which fires once for every height, and only -// when transactions are available in the mempool. It is thread-safe. -func (txmp *TxMempool) TxsAvailable() <-chan struct{} { - return txmp.txsAvailable -} - -// CheckTx executes the ABCI CheckTx method for a given transaction. It acquires -// a read-lock and attempts to execute the application's CheckTx ABCI method via -// CheckTxAsync. We return an error if any of the following happen: -// -// - The CheckTxAsync execution fails. -// - The transaction already exists in the cache and we've already received the -// transaction from the peer. Otherwise, if it solely exists in the cache, we -// return nil. -// - The transaction size exceeds the maximum transaction size as defined by the -// configuration provided to the mempool. -// - The transaction fails Pre-Check (if it is defined). -// - The proxyAppConn fails, e.g. the buffer is full. -// -// If the mempool is full, we still execute CheckTx and attempt to find a lower -// priority transaction to evict. If such a transaction exists, we remove the -// lower priority transaction and add the new one with higher priority. -// -// NOTE: -// - The application's CheckTx implementation may panic. -// - The caller is not required to hold any locks when executing CheckTx.
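- // Example (editorial sketch, not part of the original file; assumes `mp` is
- // a *TxMempool and `tx` a types.Tx):
- //
- //	err := mp.CheckTx(context.Background(), tx, func(res *abci.Response) {
- //		// invoked once the application's CheckTx response arrives
- //	}, mempool.TxInfo{SenderID: mempool.UnknownPeerID})
- //	if err != nil {
- //		// e.g. types.ErrTxInCache, types.ErrTxTooLarge{...} or types.ErrPreCheck{...}
- //	}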
-func (txmp *TxMempool) CheckTx( - ctx context.Context, - tx types.Tx, - cb func(*abci.Response), - txInfo mempool.TxInfo, -) error { - - txmp.mtx.RLock() - defer txmp.mtx.RUnlock() - - txSize := len(tx) - if txSize > txmp.config.MaxTxBytes { - return types.ErrTxTooLarge{ - Max: txmp.config.MaxTxBytes, - Actual: txSize, - } - } - - if txmp.preCheck != nil { - if err := txmp.preCheck(tx); err != nil { - return types.ErrPreCheck{ - Reason: err, - } - } - } - - if err := txmp.proxyAppConn.Error(); err != nil { - return err - } - - txHash := tx.Key() - - // We add the transaction to the mempool's cache and, if the - // transaction is already present in the cache, i.e. false is returned, we - // check if we've seen this transaction and error if we have. - if !txmp.cache.Push(tx) { - wtx, ok := txmp.txStore.GetOrSetPeerByTxHash(txHash, txInfo.SenderID) - if wtx != nil && ok { - // We already have the transaction stored and we've already seen this - // transaction from txInfo.SenderID. - return types.ErrTxInCache - } - - txmp.logger.Debug("tx exists already in cache", "tx_hash", tx.Hash()) - return nil - } - - if ctx == nil { - ctx = context.Background() - } - - reqRes, err := txmp.proxyAppConn.CheckTxAsync(ctx, abci.RequestCheckTx{Tx: tx}) - if err != nil { - txmp.cache.Remove(tx) - return err - } - - reqRes.SetCallback(func(res *abci.Response) { - if txmp.recheckCursor != nil { - panic("recheck cursor is non-nil in CheckTx callback") - } - - wtx := &WrappedTx{ - tx: tx, - hash: txHash, - timestamp: time.Now().UTC(), - height: txmp.height, - } - txmp.initTxCallback(wtx, res, txInfo) - - if cb != nil { - cb(res) - } - }) - - return nil -} - -func (txmp *TxMempool) RemoveTxByKey(txKey types.TxKey) error { - txmp.Lock() - defer txmp.Unlock() - - // remove the committed transaction from the transaction store and indexes - if wtx := txmp.txStore.GetTxByHash(txKey); wtx != nil { - txmp.removeTx(wtx, false) - return nil - } - - return errors.New("transaction not found") -} - -// Flush flushes out the mempool. It acquires a read-lock, fetches all the -// transactions currently in the transaction store and removes each transaction -// from the store and all indexes and finally resets the cache. -// -// NOTE: -// - Flushing the mempool may leave the mempool in an inconsistent state. -func (txmp *TxMempool) Flush() { - txmp.mtx.RLock() - defer txmp.mtx.RUnlock() - - txmp.heightIndex.Reset() - txmp.timestampIndex.Reset() - - for _, wtx := range txmp.txStore.GetAllTxs() { - txmp.removeTx(wtx, false) - } - - atomic.SwapInt64(&txmp.sizeBytes, 0) - txmp.cache.Reset() -} - -// ReapMaxBytesMaxGas returns a list of transactions within the provided size -// and gas constraints. Transactions are retrieved in priority order. -// -// NOTE: -// - A read-lock is acquired. -// - Transactions returned are not actually removed from the mempool transaction -// store or indexes. -func (txmp *TxMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs { - txmp.mtx.RLock() - defer txmp.mtx.RUnlock() - - var ( - totalGas int64 - totalSize int64 - ) - - // wTxs contains a list of *WrappedTx retrieved from the priority queue that - // need to be re-enqueued prior to returning.
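- // (Editorial note, illustrative numbers only: reaping pops from the
- // priority queue and must leave the mempool unchanged, hence the deferred
- // re-push below. E.g. with heap {A:9, B:5, C:1} and limits admitting two
- // txs, the reap returns [A, B] and the heap still holds A, B, C afterwards.)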
- wTxs := make([]*WrappedTx, 0, txmp.priorityIndex.NumTxs()) - defer func() { - for _, wtx := range wTxs { - txmp.priorityIndex.PushTx(wtx) - } - }() - - txs := make([]types.Tx, 0, txmp.priorityIndex.NumTxs()) - for txmp.priorityIndex.NumTxs() > 0 { - wtx := txmp.priorityIndex.PopTx() - txs = append(txs, wtx.tx) - wTxs = append(wTxs, wtx) - size := types.ComputeProtoSizeForTxs([]types.Tx{wtx.tx}) - - // Ensure we have capacity for the transaction with respect to the - // transaction size. - if maxBytes > -1 && totalSize+size > maxBytes { - return txs[:len(txs)-1] - } - - totalSize += size - - // ensure we have capacity for the transaction with respect to total gas - gas := totalGas + wtx.gasWanted - if maxGas > -1 && gas > maxGas { - return txs[:len(txs)-1] - } - - totalGas = gas - } - - return txs -} - -// ReapMaxTxs returns a list of transactions within the provided number of -// transactions bound. Transactions are retrieved in priority order. -// -// NOTE: -// - A read-lock is acquired. -// - Transactions returned are not actually removed from the mempool transaction -// store or indexes. -func (txmp *TxMempool) ReapMaxTxs(max int) types.Txs { - txmp.mtx.RLock() - defer txmp.mtx.RUnlock() - - numTxs := txmp.priorityIndex.NumTxs() - if max < 0 { - max = numTxs - } - - cap := tmmath.MinInt(numTxs, max) - - // wTxs contains a list of *WrappedTx retrieved from the priority queue that - // need to be re-enqueued prior to returning. - wTxs := make([]*WrappedTx, 0, cap) - defer func() { - for _, wtx := range wTxs { - txmp.priorityIndex.PushTx(wtx) - } - }() - - txs := make([]types.Tx, 0, cap) - for txmp.priorityIndex.NumTxs() > 0 && len(txs) < max { - wtx := txmp.priorityIndex.PopTx() - txs = append(txs, wtx.tx) - wTxs = append(wTxs, wtx) - } - - return txs -} - -// Update iterates over all the transactions provided by the caller, i.e. the -// block producer, and removes them from the cache (if applicable) and removes -// the transactions from the main transaction store and associated indexes. -// Finally, if there are transactions remaining in the mempool, we initiate a -// re-CheckTx for them (if applicable), otherwise, we notify the caller more -// transactions are available. -// -// NOTE: -// - The caller must explicitly acquire a write-lock via Lock(). -func (txmp *TxMempool) Update( - blockHeight int64, - blockTxs types.Txs, - deliverTxResponses []*abci.ResponseDeliverTx, - newPreFn mempool.PreCheckFunc, - newPostFn mempool.PostCheckFunc, -) error { - - txmp.height = blockHeight - txmp.notifiedTxsAvailable = false - - if newPreFn != nil { - txmp.preCheck = newPreFn - } - if newPostFn != nil { - txmp.postCheck = newPostFn - } - - for i, tx := range blockTxs { - if deliverTxResponses[i].Code == abci.CodeTypeOK { - // add the valid committed transaction to the cache (if missing) - _ = txmp.cache.Push(tx) - } else if !txmp.config.KeepInvalidTxsInCache { - // allow invalid transactions to be re-submitted - txmp.cache.Remove(tx) - } - - // remove the committed transaction from the transaction store and indexes - if wtx := txmp.txStore.GetTxByHash(tx.Key()); wtx != nil { - txmp.removeTx(wtx, false) - } - } - - txmp.purgeExpiredTxs(blockHeight) - - // If there are any uncommitted transactions left in the mempool, we either - // initiate a re-CheckTx per remaining transaction or notify that - // transactions remain.
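- // (Editorial sketch of the branch below, illustrative only:
- //	Recheck enabled:  CheckTxAsync is re-run on every remaining tx and the
- //	                  availability notification fires later from the
- //	                  recheck callback;
- //	Recheck disabled: notify immediately so a proposer can reap again.)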
- if txmp.Size() > 0 { - if txmp.config.Recheck { - txmp.logger.Debug( - "executing re-CheckTx for all remaining transactions", - "num_txs", txmp.Size(), - "height", blockHeight, - ) - txmp.updateReCheckTxs() - } else { - txmp.notifyTxsAvailable() - } - } - - txmp.metrics.Size.Set(float64(txmp.Size())) - return nil -} - -// initTxCallback performs the initial, i.e. the first, callback after CheckTx -// has been executed by the ABCI application. In other words, initTxCallback is -// called after executing CheckTx when we see a unique transaction for the first -// time. CheckTx can be called again for the same transaction at a later point -// in time when re-checking, however, this callback will not be called. -// -// After the ABCI application executes CheckTx, initTxCallback is called with -// the ABCI *Response object and TxInfo. If postCheck is defined on the mempool, -// we execute that first. If there is no error from postCheck (if defined) and -// the ABCI CheckTx response code is OK, we attempt to insert the transaction. -// -// When attempting to insert the transaction, we first check if there is -// sufficient capacity. If there is sufficient capacity, the transaction is -// inserted into the txStore and indexed across all indexes. Otherwise, if the -// mempool is full, we attempt to find a lower priority transaction to evict in -// place of the new incoming transaction. If no such transaction exists, the -// new incoming transaction is rejected. -// -// If the new incoming transaction fails CheckTx or postCheck fails, we reject -// the new incoming transaction. -// -// NOTE: -// - An explicit lock is NOT required. -func (txmp *TxMempool) initTxCallback(wtx *WrappedTx, res *abci.Response, txInfo mempool.TxInfo) { - checkTxRes, ok := res.Value.(*abci.Response_CheckTx) - if !ok { - return - } - - var err error - if txmp.postCheck != nil { - err = txmp.postCheck(wtx.tx, checkTxRes.CheckTx) - } - - if err != nil || checkTxRes.CheckTx.Code != abci.CodeTypeOK { - // ignore bad transactions - txmp.logger.Info( - "rejected bad transaction", - "priority", wtx.priority, - "tx", fmt.Sprintf("%X", wtx.tx.Hash()), - "peer", txInfo.SenderNodeID, - "code", checkTxRes.CheckTx.Code, - "post_check_err", err, - ) - - txmp.metrics.FailedTxs.Add(1) - - if !txmp.config.KeepInvalidTxsInCache { - txmp.cache.Remove(wtx.tx) - } - if err != nil { - checkTxRes.CheckTx.MempoolError = err.Error() - } - return - } - - sender := checkTxRes.CheckTx.Sender - priority := checkTxRes.CheckTx.Priority - - if len(sender) > 0 { - if wtx := txmp.txStore.GetTxBySender(sender); wtx != nil { - txmp.logger.Error( - "rejected incoming good transaction; tx already exists for sender", - "tx", fmt.Sprintf("%X", wtx.tx.Hash()), - "sender", sender, - ) - txmp.metrics.RejectedTxs.Add(1) - return - } - } - - if err := txmp.canAddTx(wtx); err != nil { - evictTxs := txmp.priorityIndex.GetEvictableTxs( - priority, - int64(wtx.Size()), - txmp.SizeBytes(), - txmp.config.MaxTxsBytes, - ) - if len(evictTxs) == 0 { - // No room for the new incoming transaction so we just remove it from - // the cache. - txmp.cache.Remove(wtx.tx) - txmp.logger.Error( - "rejected incoming good transaction; mempool full", - "tx", fmt.Sprintf("%X", wtx.tx.Hash()), - "err", err.Error(), - ) - txmp.metrics.RejectedTxs.Add(1) - return - } - - // evict an existing transaction(s) - // - // NOTE: - // - The transaction, toEvict, can be removed while a concurrent - // reCheckTx callback is being executed for the same transaction. 
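- // (Editorial example with made-up numbers: with MaxTxsBytes=100 and
- // SizeBytes()=95, an incoming 10-byte tx of priority 8 needs evictable
- // lower-priority txs freeing at least 5 bytes, e.g. {C: priority 1,
- // 7 bytes}; if GetEvictableTxs finds none, the branch above rejects the
- // incoming tx instead.)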
- for _, toEvict := range evictTxs { - txmp.removeTx(toEvict, true) - txmp.logger.Debug( - "evicted existing good transaction; mempool full", - "old_tx", fmt.Sprintf("%X", toEvict.tx.Hash()), - "old_priority", toEvict.priority, - "new_tx", fmt.Sprintf("%X", wtx.tx.Hash()), - "new_priority", wtx.priority, - ) - txmp.metrics.EvictedTxs.Add(1) - } - } - - wtx.gasWanted = checkTxRes.CheckTx.GasWanted - wtx.priority = priority - wtx.sender = sender - wtx.peers = map[uint16]struct{}{ - txInfo.SenderID: {}, - } - - txmp.metrics.TxSizeBytes.Observe(float64(wtx.Size())) - txmp.metrics.Size.Set(float64(txmp.Size())) - - txmp.insertTx(wtx) - txmp.logger.Debug( - "inserted good transaction", - "priority", wtx.priority, - "tx", fmt.Sprintf("%X", wtx.tx.Hash()), - "height", txmp.height, - "num_txs", txmp.Size(), - ) - txmp.notifyTxsAvailable() -} - -// defaultTxCallback performs the default CheckTx application callback. This is -// NOT executed when a transaction is first seen/received. Instead, this callback -// is executed during re-checking transactions (if enabled). A caller, i.e. a -// block proposer, acquires a mempool write-lock via Lock() and when executing -// Update(), if the mempool is non-empty and Recheck is enabled, then all -// remaining transactions will be rechecked via CheckTxAsync. The order in which -// they are rechecked must be the same order in which this callback is called -// per transaction. -func (txmp *TxMempool) defaultTxCallback(req *abci.Request, res *abci.Response) { - if txmp.recheckCursor == nil { - return - } - - txmp.metrics.RecheckTimes.Add(1) - - checkTxRes, ok := res.Value.(*abci.Response_CheckTx) - if !ok { - txmp.logger.Error("received incorrect type in mempool callback", - "expected", reflect.TypeOf(&abci.Response_CheckTx{}).Name(), - "got", reflect.TypeOf(res.Value).Name(), - ) - return - } - tx := req.GetCheckTx().Tx - wtx := txmp.recheckCursor.Value.(*WrappedTx) - - // Search through the remaining list of txs to recheck for a transaction that matches - // the one we received from the ABCI application. - for { - if bytes.Equal(tx, wtx.tx) { - // We've found a tx in the recheck list that matches the tx that we - // received from the ABCI application. - // Break, and use this transaction for further checks. - break - } - - txmp.logger.Error( - "re-CheckTx transaction mismatch", - "got", wtx.tx.Hash(), - "expected", types.Tx(tx).Key(), - ) - - if txmp.recheckCursor == txmp.recheckEnd { - // we reached the end of the recheckTx list without finding a tx - // matching the one we received from the ABCI application. - // Return without processing any tx. - txmp.recheckCursor = nil - return - } - - txmp.recheckCursor = txmp.recheckCursor.Next() - wtx = txmp.recheckCursor.Value.(*WrappedTx) - } - - // Only evaluate transactions that have not been removed. This can happen - // if an existing transaction is evicted during CheckTx and while this - // callback is being executed for the same evicted transaction.
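- // (Editorial sketch of the ordering invariant described above, illustrative
- // only: if the gossip index holds tx1 -> tx2 -> tx3, updateReCheckTxs issues
- // CheckTxAsync(tx1), CheckTxAsync(tx2), CheckTxAsync(tx3) in that order, and
- // this callback must observe the responses in the same order for the cursor
- // walk above to find a match.)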
- if !txmp.txStore.IsTxRemoved(wtx.hash) { - var err error - if txmp.postCheck != nil { - err = txmp.postCheck(tx, checkTxRes.CheckTx) - } - - if checkTxRes.CheckTx.Code == abci.CodeTypeOK && err == nil { - wtx.priority = checkTxRes.CheckTx.Priority - } else { - txmp.logger.Debug( - "existing transaction no longer valid; failed re-CheckTx callback", - "priority", wtx.priority, - "tx", fmt.Sprintf("%X", wtx.tx.Hash()), - "err", err, - "code", checkTxRes.CheckTx.Code, - ) - - if wtx.gossipEl != txmp.recheckCursor { - panic("corrupted reCheckTx cursor") - } - - txmp.removeTx(wtx, !txmp.config.KeepInvalidTxsInCache) - } - } - - // move reCheckTx cursor to next element - if txmp.recheckCursor == txmp.recheckEnd { - txmp.recheckCursor = nil - } else { - txmp.recheckCursor = txmp.recheckCursor.Next() - } - - if txmp.recheckCursor == nil { - txmp.logger.Debug("finished rechecking transactions") - - if txmp.Size() > 0 { - txmp.notifyTxsAvailable() - } - } - - txmp.metrics.Size.Set(float64(txmp.Size())) -} - -// updateReCheckTxs updates the recheck cursors by using the gossipIndex. For -// each transaction, it executes CheckTxAsync. The global callback defined on -// the proxyAppConn will be executed for each transaction after CheckTx is -// executed. -// -// NOTE: -// - The caller must have a write-lock when executing updateReCheckTxs. -func (txmp *TxMempool) updateReCheckTxs() { - if txmp.Size() == 0 { - panic("attempted to update re-CheckTx txs when mempool is empty") - } - - txmp.recheckCursor = txmp.gossipIndex.Front() - txmp.recheckEnd = txmp.gossipIndex.Back() - ctx := context.Background() - - for e := txmp.gossipIndex.Front(); e != nil; e = e.Next() { - wtx := e.Value.(*WrappedTx) - - // Only execute CheckTx if the transaction is not marked as removed which - // could happen if the transaction was evicted. - if !txmp.txStore.IsTxRemoved(wtx.hash) { - _, err := txmp.proxyAppConn.CheckTxAsync(ctx, abci.RequestCheckTx{ - Tx: wtx.tx, - Type: abci.CheckTxType_Recheck, - }) - if err != nil { - // no need in retrying since the tx will be rechecked after the next block - txmp.logger.Error("failed to execute CheckTx during rechecking", "err", err) - } - } - } - - if _, err := txmp.proxyAppConn.FlushAsync(ctx); err != nil { - txmp.logger.Error("failed to flush transactions during rechecking", "err", err) - } -} - -// canAddTx returns an error if we cannot insert the provided *WrappedTx into -// the mempool due to mempool configured constraints. Otherwise, nil is returned -// and the transaction can be inserted into the mempool. -func (txmp *TxMempool) canAddTx(wtx *WrappedTx) error { - var ( - numTxs = txmp.Size() - sizeBytes = txmp.SizeBytes() - ) - - if numTxs >= txmp.config.Size || int64(wtx.Size())+sizeBytes > txmp.config.MaxTxsBytes { - return types.ErrMempoolIsFull{ - NumTxs: numTxs, - MaxTxs: txmp.config.Size, - TxsBytes: sizeBytes, - MaxTxsBytes: txmp.config.MaxTxsBytes, - } - } - - return nil -} - -func (txmp *TxMempool) insertTx(wtx *WrappedTx) { - txmp.txStore.SetTx(wtx) - txmp.priorityIndex.PushTx(wtx) - txmp.heightIndex.Insert(wtx) - txmp.timestampIndex.Insert(wtx) - - // Insert the transaction into the gossip index and mark the reference to the - // linked-list element, which will be needed at a later point when the - // transaction is removed. 
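- // (Editorial cross-reference: removeTx below deletes this same element via
- // txmp.gossipIndex.Remove(wtx.gossipEl) and then DetachPrev()s it, so peer
- // broadcast routines holding older elements cannot keep it reachable.)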
- gossipEl := txmp.gossipIndex.PushBack(wtx) - wtx.gossipEl = gossipEl - - atomic.AddInt64(&txmp.sizeBytes, int64(wtx.Size())) -} - -func (txmp *TxMempool) removeTx(wtx *WrappedTx, removeFromCache bool) { - if txmp.txStore.IsTxRemoved(wtx.hash) { - return - } - - txmp.txStore.RemoveTx(wtx) - txmp.priorityIndex.RemoveTx(wtx) - txmp.heightIndex.Remove(wtx) - txmp.timestampIndex.Remove(wtx) - - // Remove the transaction from the gossip index and clean up the linked-list - // element so it can be garbage collected. - txmp.gossipIndex.Remove(wtx.gossipEl) - wtx.gossipEl.DetachPrev() - - atomic.AddInt64(&txmp.sizeBytes, int64(-wtx.Size())) - - if removeFromCache { - txmp.cache.Remove(wtx.tx) - } -} - -// purgeExpiredTxs removes all transactions that have exceeded their respective -// height and/or time based TTLs from their respective indexes. Every expired -// transaction will be removed from the mempool entirely, except for the cache. -// -// NOTE: purgeExpiredTxs must only be called during TxMempool#Update in which -// the caller has a write-lock on the mempool and so we can safely iterate over -// the height and time based indexes. -func (txmp *TxMempool) purgeExpiredTxs(blockHeight int64) { - now := time.Now() - expiredTxs := make(map[types.TxKey]*WrappedTx) - - if txmp.config.TTLNumBlocks > 0 { - purgeIdx := -1 - for i, wtx := range txmp.heightIndex.txs { - if (blockHeight - wtx.height) > txmp.config.TTLNumBlocks { - expiredTxs[wtx.tx.Key()] = wtx - purgeIdx = i - } else { - // since the index is sorted, we know no other txs can be purged - break - } - } - - if purgeIdx >= 0 { - txmp.heightIndex.txs = txmp.heightIndex.txs[purgeIdx+1:] - } - } - - if txmp.config.TTLDuration > 0 { - purgeIdx := -1 - for i, wtx := range txmp.timestampIndex.txs { - if now.Sub(wtx.timestamp) > txmp.config.TTLDuration { - expiredTxs[wtx.tx.Key()] = wtx - purgeIdx = i - } else { - // since the index is sorted, we know no other txs can be purged - break - } - } - - if purgeIdx >= 0 { - txmp.timestampIndex.txs = txmp.timestampIndex.txs[purgeIdx+1:] - } - } - - for _, wtx := range expiredTxs { - txmp.removeTx(wtx, false) - } -} - -func (txmp *TxMempool) notifyTxsAvailable() { - if txmp.Size() == 0 { - panic("attempt to notify txs available but mempool is empty!") - } - - if txmp.txsAvailable != nil && !txmp.notifiedTxsAvailable { - // channel cap is 1, so this will send once - txmp.notifiedTxsAvailable = true - - select { - case txmp.txsAvailable <- struct{}{}: - default: - } - } -} diff --git a/internal/mempool/v1/mempool_bench_test.go b/internal/mempool/v1/mempool_bench_test.go deleted file mode 100644 index ca23f1479d..0000000000 --- a/internal/mempool/v1/mempool_bench_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package v1 - -import ( - "context" - "fmt" - "math/rand" - "testing" - "time" - - "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/internal/mempool" -) - -func BenchmarkTxMempool_CheckTx(b *testing.B) { - txmp := setup(b, 10000) - rng := rand.New(rand.NewSource(time.Now().UnixNano())) - - b.ResetTimer() - - for n := 0; n < b.N; n++ { - b.StopTimer() - prefix := make([]byte, 20) - _, err := rng.Read(prefix) - require.NoError(b, err) - - priority := int64(rng.Intn(9999-1000) + 1000) - tx := []byte(fmt.Sprintf("%X=%d", prefix, priority)) - b.StartTimer() - - require.NoError(b, txmp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{})) - } -} diff --git a/internal/mempool/v1/reactor_test.go b/internal/mempool/v1/reactor_test.go deleted file mode 100644 index
ce187b578b..0000000000 --- a/internal/mempool/v1/reactor_test.go +++ /dev/null @@ -1,146 +0,0 @@ -package v1 - -import ( - "os" - "strings" - "sync" - "testing" - - "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/abci/example/kvstore" - "github.com/tendermint/tendermint/config" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/mempool" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/p2p/p2ptest" - "github.com/tendermint/tendermint/libs/log" - protomem "github.com/tendermint/tendermint/proto/tendermint/mempool" - "github.com/tendermint/tendermint/types" -) - -type reactorTestSuite struct { - network *p2ptest.Network - logger log.Logger - - reactors map[types.NodeID]*Reactor - mempoolChannels map[types.NodeID]*p2p.Channel - mempools map[types.NodeID]*TxMempool - kvstores map[types.NodeID]*kvstore.Application - - peerChans map[types.NodeID]chan p2p.PeerUpdate - peerUpdates map[types.NodeID]*p2p.PeerUpdates - - nodes []types.NodeID -} - -func setupReactors(t *testing.T, numNodes int, chBuf uint) *reactorTestSuite { - t.Helper() - - cfg, err := config.ResetTestRoot(strings.ReplaceAll(t.Name(), "/", "|")) - require.NoError(t, err) - t.Cleanup(func() { os.RemoveAll(cfg.RootDir) }) - - rts := &reactorTestSuite{ - logger: log.TestingLogger().With("testCase", t.Name()), - network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}), - reactors: make(map[types.NodeID]*Reactor, numNodes), - mempoolChannels: make(map[types.NodeID]*p2p.Channel, numNodes), - mempools: make(map[types.NodeID]*TxMempool, numNodes), - kvstores: make(map[types.NodeID]*kvstore.Application, numNodes), - peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numNodes), - peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numNodes), - } - - chDesc := p2p.ChannelDescriptor{ID: byte(mempool.MempoolChannel)} - rts.mempoolChannels = rts.network.MakeChannelsNoCleanup(t, chDesc, new(protomem.Message), int(chBuf)) - - for nodeID := range rts.network.Nodes { - rts.kvstores[nodeID] = kvstore.NewApplication() - - mempool := setup(t, 0) - rts.mempools[nodeID] = mempool - - rts.peerChans[nodeID] = make(chan p2p.PeerUpdate) - rts.peerUpdates[nodeID] = p2p.NewPeerUpdates(rts.peerChans[nodeID], 1) - rts.network.Nodes[nodeID].PeerManager.Register(rts.peerUpdates[nodeID]) - - rts.reactors[nodeID] = NewReactor( - rts.logger.With("nodeID", nodeID), - cfg.Mempool, - rts.network.Nodes[nodeID].PeerManager, - mempool, - rts.mempoolChannels[nodeID], - rts.peerUpdates[nodeID], - ) - - rts.nodes = append(rts.nodes, nodeID) - - require.NoError(t, rts.reactors[nodeID].Start()) - require.True(t, rts.reactors[nodeID].IsRunning()) - } - - require.Len(t, rts.reactors, numNodes) - - t.Cleanup(func() { - for nodeID := range rts.reactors { - if rts.reactors[nodeID].IsRunning() { - require.NoError(t, rts.reactors[nodeID].Stop()) - require.False(t, rts.reactors[nodeID].IsRunning()) - } - } - }) - - return rts -} - -func (rts *reactorTestSuite) start(t *testing.T) { - t.Helper() - rts.network.Start(t) - require.Len(t, - rts.network.RandomNode().PeerManager.Peers(), - len(rts.nodes)-1, - "network does not have expected number of nodes") -} - -func TestReactorBroadcastDoesNotPanic(t *testing.T) { - numNodes := 2 - rts := setupReactors(t, numNodes, 0) - - observePanic := func(r interface{}) { - t.Fatal("panic detected in reactor") - } - - primary := rts.nodes[0] - secondary := rts.nodes[1] - primaryReactor := 
rts.reactors[primary] - primaryMempool := primaryReactor.mempool - secondaryReactor := rts.reactors[secondary] - - primaryReactor.observePanic = observePanic - secondaryReactor.observePanic = observePanic - - firstTx := &WrappedTx{} - primaryMempool.insertTx(firstTx) - - // run the router - rts.start(t) - - closer := tmsync.NewCloser() - primaryReactor.peerWG.Add(1) - go primaryReactor.broadcastTxRoutine(secondary, closer) - - wg := &sync.WaitGroup{} - for i := 0; i < 50; i++ { - next := &WrappedTx{} - wg.Add(1) - go func() { - defer wg.Done() - primaryMempool.insertTx(next) - }() - } - - err := primaryReactor.Stop() - require.NoError(t, err) - primaryReactor.peerWG.Wait() - wg.Wait() -} diff --git a/internal/mempool/v1/tx.go b/internal/mempool/v1/tx.go deleted file mode 100644 index c5b7ca82f6..0000000000 --- a/internal/mempool/v1/tx.go +++ /dev/null @@ -1,281 +0,0 @@ -package v1 - -import ( - "sort" - "time" - - "github.com/tendermint/tendermint/internal/libs/clist" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/types" -) - -// WrappedTx defines a wrapper around a raw transaction with additional metadata -// that is used for indexing. -type WrappedTx struct { - // tx represents the raw binary transaction data - tx types.Tx - - // hash defines the transaction hash and the primary key used in the mempool - hash types.TxKey - - // height defines the height at which the transaction was validated - height int64 - - // gasWanted defines the amount of gas the transaction sender requires - gasWanted int64 - - // priority defines the transaction's priority as specified by the application - // in the ResponseCheckTx response. - priority int64 - - // sender defines the transaction's sender as specified by the application in - // the ResponseCheckTx response. - sender string - - // timestamp is the time at which the node first received the transaction from - // a peer. It is used as a second dimension in prioritizing transactions when - // two transactions have the same priority. - timestamp time.Time - - // peers records a mapping of all peers that sent a given transaction - peers map[uint16]struct{} - - // heapIndex defines the index of the item in the heap - heapIndex int - - // gossipEl references the linked-list element in the gossip index - gossipEl *clist.CElement - - // removed marks the transaction as removed from the mempool. This is set - // during RemoveTx and is needed due to the fact that a given existing - // transaction in the mempool can be evicted when it is simultaneously having - // a reCheckTx callback executed. - removed bool -} - -func (wtx *WrappedTx) Size() int { - return len(wtx.tx) -} - -// TxStore implements a thread-safe mapping of valid transaction(s). -// -// NOTE: -// - Concurrent read-only access to a *WrappedTx object is OK. However, mutative -// access is not allowed. Regardless, it is not expected for the mempool to -// need mutative access. -type TxStore struct { - mtx tmsync.RWMutex - hashTxs map[types.TxKey]*WrappedTx // primary index - senderTxs map[string]*WrappedTx // sender is defined by the ABCI application -} - -func NewTxStore() *TxStore { - return &TxStore{ - senderTxs: make(map[string]*WrappedTx), - hashTxs: make(map[types.TxKey]*WrappedTx), - } -} - -// Size returns the total number of transactions in the store. -func (txs *TxStore) Size() int { - txs.mtx.RLock() - defer txs.mtx.RUnlock() - - return len(txs.hashTxs) -} - -// GetAllTxs returns all the transactions currently in the store.
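- // Example (editorial sketch using only this file's API; assumes `ts` is a
- // *TxStore and `wtx` a populated *WrappedTx):
- //
- //	ts := NewTxStore()
- //	ts.SetTx(wtx)                    // index by hash and, if set, by sender
- //	_ = ts.GetTxByHash(wtx.tx.Key()) // -> wtx
- //	ts.RemoveTx(wtx)                 // deletes indexes, marks wtx.removed
- //	_ = ts.IsTxRemoved(wtx.hash)     // -> true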
-func (txs *TxStore) GetAllTxs() []*WrappedTx { - txs.mtx.RLock() - defer txs.mtx.RUnlock() - - wTxs := make([]*WrappedTx, len(txs.hashTxs)) - i := 0 - for _, wtx := range txs.hashTxs { - wTxs[i] = wtx - i++ - } - - return wTxs -} - -// GetTxBySender returns a *WrappedTx by the transaction's sender property -// defined by the ABCI application. -func (txs *TxStore) GetTxBySender(sender string) *WrappedTx { - txs.mtx.RLock() - defer txs.mtx.RUnlock() - - return txs.senderTxs[sender] -} - -// GetTxByHash returns a *WrappedTx by the transaction's hash. -func (txs *TxStore) GetTxByHash(hash types.TxKey) *WrappedTx { - txs.mtx.RLock() - defer txs.mtx.RUnlock() - - return txs.hashTxs[hash] -} - -// IsTxRemoved returns true if a transaction by hash is marked as removed and -// false otherwise. -func (txs *TxStore) IsTxRemoved(hash types.TxKey) bool { - txs.mtx.RLock() - defer txs.mtx.RUnlock() - - wtx, ok := txs.hashTxs[hash] - if ok { - return wtx.removed - } - - return false -} - -// SetTx stores a *WrappedTx by it's hash. If the transaction also contains a -// non-empty sender, we additionally store the transaction by the sender as -// defined by the ABCI application. -func (txs *TxStore) SetTx(wtx *WrappedTx) { - txs.mtx.Lock() - defer txs.mtx.Unlock() - - if len(wtx.sender) > 0 { - txs.senderTxs[wtx.sender] = wtx - } - - txs.hashTxs[wtx.tx.Key()] = wtx -} - -// RemoveTx removes a *WrappedTx from the transaction store. It deletes all -// indexes of the transaction. -func (txs *TxStore) RemoveTx(wtx *WrappedTx) { - txs.mtx.Lock() - defer txs.mtx.Unlock() - - if len(wtx.sender) > 0 { - delete(txs.senderTxs, wtx.sender) - } - - delete(txs.hashTxs, wtx.tx.Key()) - wtx.removed = true -} - -// TxHasPeer returns true if a transaction by hash has a given peer ID and false -// otherwise. If the transaction does not exist, false is returned. -func (txs *TxStore) TxHasPeer(hash types.TxKey, peerID uint16) bool { - txs.mtx.RLock() - defer txs.mtx.RUnlock() - - wtx := txs.hashTxs[hash] - if wtx == nil { - return false - } - - _, ok := wtx.peers[peerID] - return ok -} - -// GetOrSetPeerByTxHash looks up a WrappedTx by transaction hash and adds the -// given peerID to the WrappedTx's set of peers that sent us this transaction. -// We return true if we've already recorded the given peer for this transaction -// and false otherwise. If the transaction does not exist by hash, we return -// (nil, false). -func (txs *TxStore) GetOrSetPeerByTxHash(hash types.TxKey, peerID uint16) (*WrappedTx, bool) { - txs.mtx.Lock() - defer txs.mtx.Unlock() - - wtx := txs.hashTxs[hash] - if wtx == nil { - return nil, false - } - - if wtx.peers == nil { - wtx.peers = make(map[uint16]struct{}) - } - - if _, ok := wtx.peers[peerID]; ok { - return wtx, true - } - - wtx.peers[peerID] = struct{}{} - return wtx, false -} - -// WrappedTxList implements a thread-safe list of *WrappedTx objects that can be -// used to build generic transaction indexes in the mempool. It accepts a -// comparator function, less(a, b *WrappedTx) bool, that compares two WrappedTx -// references which is used during Insert in order to determine sorted order. If -// less returns true, a <= b. -type WrappedTxList struct { - mtx tmsync.RWMutex - txs []*WrappedTx - less func(*WrappedTx, *WrappedTx) bool -} - -func NewWrappedTxList(less func(*WrappedTx, *WrappedTx) bool) *WrappedTxList { - return &WrappedTxList{ - txs: make([]*WrappedTx, 0), - less: less, - } -} - -// Size returns the number of WrappedTx objects in the list. 
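// Editor's note: an illustrative sketch, not part of the patch. With the
// sort.Search-based Insert defined below, a comparator of the form "a >= b"
// keeps the list in ascending order; for example, a height index over
// WrappedTx (the helper name exampleHeightIndex is hypothetical):
func exampleHeightIndex() *WrappedTxList {
	return NewWrappedTxList(func(a, b *WrappedTx) bool {
		// Sorts the list ascending by the height at which each tx was validated.
		return a.height >= b.height
	})
}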
-func (wtl *WrappedTxList) Size() int { - wtl.mtx.RLock() - defer wtl.mtx.RUnlock() - - return len(wtl.txs) -} - -// Reset resets the list of transactions to an empty list. -func (wtl *WrappedTxList) Reset() { - wtl.mtx.Lock() - defer wtl.mtx.Unlock() - - wtl.txs = make([]*WrappedTx, 0) -} - -// Insert inserts a WrappedTx reference into the sorted list based on the list's -// comparator function. -func (wtl *WrappedTxList) Insert(wtx *WrappedTx) { - wtl.mtx.Lock() - defer wtl.mtx.Unlock() - - i := sort.Search(len(wtl.txs), func(i int) bool { - return wtl.less(wtl.txs[i], wtx) - }) - - if i == len(wtl.txs) { - // insert at the end - wtl.txs = append(wtl.txs, wtx) - return - } - - // Make space for the inserted element by shifting values at the insertion - // index up one index. - // - // NOTE: The call to append does not allocate memory when cap(wtl.txs) > len(wtl.txs). - wtl.txs = append(wtl.txs[:i+1], wtl.txs[i:]...) - wtl.txs[i] = wtx -} - -// Remove attempts to remove a WrappedTx from the sorted list. -func (wtl *WrappedTxList) Remove(wtx *WrappedTx) { - wtl.mtx.Lock() - defer wtl.mtx.Unlock() - - i := sort.Search(len(wtl.txs), func(i int) bool { - return wtl.less(wtl.txs[i], wtx) - }) - - // Since the list is sorted, we evaluate all elements starting at i. Note, if - // the element does not exist, we may potentially evaluate the entire remainder - // of the list. However, a caller should not be expected to call Remove with a - // non-existing element. - for i < len(wtl.txs) { - if wtl.txs[i] == wtx { - wtl.txs = append(wtl.txs[:i], wtl.txs[i+1:]...) - return - } - - i++ - } -} diff --git a/internal/p2p/README.md b/internal/p2p/README.md index 9ba7303fa1..16ad1d5f68 100644 --- a/internal/p2p/README.md +++ b/internal/p2p/README.md @@ -7,5 +7,5 @@ Docs: - [Connection](https://docs.tendermint.com/master/spec/p2p/connection.html) for details on how connections and multiplexing work - [Peer](https://docs.tendermint.com/master/spec/p2p/node.html) for details on peer ID, handshakes, and peer exchange - [Node](https://docs.tendermint.com/master/spec/p2p/node.html) for details about different types of nodes and how they should work -- [Pex](https://docs.tendermint.com/master/spec/reactors/pex/pex.html) for details on peer discovery and exchange +- [Pex](https://docs.tendermint.com/master/spec/p2p/messages/pex.html) for details on peer discovery and exchange - [Config](https://docs.tendermint.com/master/spec/p2p/config.html) for details on some config option diff --git a/internal/p2p/address.go b/internal/p2p/address.go index 7c084216ec..0f4066fafd 100644 --- a/internal/p2p/address.go +++ b/internal/p2p/address.go @@ -97,7 +97,7 @@ func ParseNodeAddress(urlString string) (NodeAddress, error) { // Resolve resolves a NodeAddress into a set of Endpoints, by expanding // out a DNS hostname to IP addresses. 
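// Editor's note: an illustrative sketch, not part of the patch. Typical use of
// ParseNodeAddress followed by Resolve, which (after this change) returns
// []*Endpoint; the address string is made up for the example, and the fmt and
// context imports are assumed.
func exampleResolve(ctx context.Context) error {
	addr, err := ParseNodeAddress(
		"tcp://00112233445566778899aabbccddeeff00112233@example.com:26657")
	if err != nil {
		return err
	}
	endpoints, err := addr.Resolve(ctx) // expands the hostname via DNS
	if err != nil {
		return err
	}
	for _, e := range endpoints {
		fmt.Printf("endpoint: %v\n", e)
	}
	return nil
}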
-func (a NodeAddress) Resolve(ctx context.Context) ([]Endpoint, error) { +func (a NodeAddress) Resolve(ctx context.Context) ([]*Endpoint, error) { if a.Protocol == "" { return nil, errors.New("address has no protocol") } @@ -109,7 +109,7 @@ func (a NodeAddress) Resolve(ctx context.Context) ([]Endpoint, error) { if a.NodeID == "" { return nil, errors.New("local address has no node ID") } - return []Endpoint{{ + return []*Endpoint{{ Protocol: a.Protocol, Path: string(a.NodeID), }}, nil @@ -119,9 +119,9 @@ func (a NodeAddress) Resolve(ctx context.Context) ([]Endpoint, error) { if err != nil { return nil, err } - endpoints := make([]Endpoint, len(ips)) + endpoints := make([]*Endpoint, len(ips)) for i, ip := range ips { - endpoints[i] = Endpoint{ + endpoints[i] = &Endpoint{ Protocol: a.Protocol, IP: ip, Port: a.Port, diff --git a/internal/p2p/address_test.go b/internal/p2p/address_test.go index a0cc61e9e8..bd1dda75cb 100644 --- a/internal/p2p/address_test.go +++ b/internal/p2p/address_test.go @@ -1,6 +1,7 @@ package p2p_test import ( + "context" "net" "strings" "testing" @@ -203,77 +204,83 @@ func TestParseNodeAddress(t *testing.T) { func TestNodeAddress_Resolve(t *testing.T) { id := types.NodeID("00112233445566778899aabbccddeeff00112233") + bctx, bcancel := context.WithCancel(context.Background()) + defer bcancel() + testcases := []struct { address p2p.NodeAddress - expect p2p.Endpoint + expect *p2p.Endpoint ok bool }{ // Valid networked addresses (with hostname). { p2p.NodeAddress{Protocol: "tcp", Hostname: "127.0.0.1", Port: 80, Path: "/path"}, - p2p.Endpoint{Protocol: "tcp", IP: net.IPv4(127, 0, 0, 1), Port: 80, Path: "/path"}, + &p2p.Endpoint{Protocol: "tcp", IP: net.IPv4(127, 0, 0, 1), Port: 80, Path: "/path"}, true, }, { p2p.NodeAddress{Protocol: "tcp", Hostname: "localhost", Port: 80, Path: "/path"}, - p2p.Endpoint{Protocol: "tcp", IP: net.IPv4(127, 0, 0, 1), Port: 80, Path: "/path"}, + &p2p.Endpoint{Protocol: "tcp", IP: net.IPv4(127, 0, 0, 1), Port: 80, Path: "/path"}, true, }, { p2p.NodeAddress{Protocol: "tcp", Hostname: "localhost", Port: 80, Path: "/path"}, - p2p.Endpoint{Protocol: "tcp", IP: net.IPv6loopback, Port: 80, Path: "/path"}, + &p2p.Endpoint{Protocol: "tcp", IP: net.IPv6loopback, Port: 80, Path: "/path"}, true, }, { p2p.NodeAddress{Protocol: "tcp", Hostname: "127.0.0.1"}, - p2p.Endpoint{Protocol: "tcp", IP: net.IPv4(127, 0, 0, 1)}, + &p2p.Endpoint{Protocol: "tcp", IP: net.IPv4(127, 0, 0, 1)}, true, }, { p2p.NodeAddress{Protocol: "tcp", Hostname: "::1"}, - p2p.Endpoint{Protocol: "tcp", IP: net.IPv6loopback}, + &p2p.Endpoint{Protocol: "tcp", IP: net.IPv6loopback}, true, }, { p2p.NodeAddress{Protocol: "tcp", Hostname: "8.8.8.8"}, - p2p.Endpoint{Protocol: "tcp", IP: net.IPv4(8, 8, 8, 8)}, + &p2p.Endpoint{Protocol: "tcp", IP: net.IPv4(8, 8, 8, 8)}, true, }, { p2p.NodeAddress{Protocol: "tcp", Hostname: "2001:0db8::ff00:0042:8329"}, - p2p.Endpoint{Protocol: "tcp", IP: []byte{ + &p2p.Endpoint{Protocol: "tcp", IP: []byte{ 0x20, 0x01, 0x0d, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0x42, 0x83, 0x29}}, true, }, { p2p.NodeAddress{Protocol: "tcp", Hostname: "some.missing.host.tendermint.com"}, - p2p.Endpoint{}, + &p2p.Endpoint{}, false, }, // Valid non-networked addresses. 
{ p2p.NodeAddress{Protocol: "memory", NodeID: id}, - p2p.Endpoint{Protocol: "memory", Path: string(id)}, + &p2p.Endpoint{Protocol: "memory", Path: string(id)}, true, }, { p2p.NodeAddress{Protocol: "memory", NodeID: id, Path: string(id)}, - p2p.Endpoint{Protocol: "memory", Path: string(id)}, + &p2p.Endpoint{Protocol: "memory", Path: string(id)}, true, }, // Invalid addresses. - {p2p.NodeAddress{}, p2p.Endpoint{}, false}, - {p2p.NodeAddress{Hostname: "127.0.0.1"}, p2p.Endpoint{}, false}, - {p2p.NodeAddress{Protocol: "tcp", Hostname: "127.0.0.1:80"}, p2p.Endpoint{}, false}, - {p2p.NodeAddress{Protocol: "memory"}, p2p.Endpoint{}, false}, - {p2p.NodeAddress{Protocol: "memory", Path: string(id)}, p2p.Endpoint{}, false}, - {p2p.NodeAddress{Protocol: "tcp", Hostname: "💥"}, p2p.Endpoint{}, false}, + {p2p.NodeAddress{}, &p2p.Endpoint{}, false}, + {p2p.NodeAddress{Hostname: "127.0.0.1"}, &p2p.Endpoint{}, false}, + {p2p.NodeAddress{Protocol: "tcp", Hostname: "127.0.0.1:80"}, &p2p.Endpoint{}, false}, + {p2p.NodeAddress{Protocol: "memory"}, &p2p.Endpoint{}, false}, + {p2p.NodeAddress{Protocol: "memory", Path: string(id)}, &p2p.Endpoint{}, false}, + {p2p.NodeAddress{Protocol: "tcp", Hostname: "💥"}, &p2p.Endpoint{}, false}, } for _, tc := range testcases { tc := tc t.Run(tc.address.String(), func(t *testing.T) { + ctx, cancel := context.WithCancel(bctx) + defer cancel() + endpoints, err := tc.address.Resolve(ctx) if !tc.ok { require.Error(t, err) diff --git a/internal/p2p/base_reactor.go b/internal/p2p/base_reactor.go deleted file mode 100644 index 09925caf8d..0000000000 --- a/internal/p2p/base_reactor.go +++ /dev/null @@ -1,74 +0,0 @@ -package p2p - -import ( - "github.com/tendermint/tendermint/internal/p2p/conn" - "github.com/tendermint/tendermint/libs/service" -) - -// Reactor is responsible for handling incoming messages on one or more -// Channel. Switch calls GetChannels when reactor is added to it. When a new -// peer joins our node, InitPeer and AddPeer are called. RemovePeer is called -// when the peer is stopped. Receive is called when a message is received on a -// channel associated with this reactor. -// -// Peer#Send or Peer#TrySend should be used to send the message to a peer. -type Reactor interface { - service.Service // Start, Stop - - // SetSwitch allows setting a switch. - SetSwitch(*Switch) - - // GetChannels returns the list of MConnection.ChannelDescriptor. Make sure - // that each ID is unique across all the reactors added to the switch. - GetChannels() []*conn.ChannelDescriptor - - // InitPeer is called by the switch before the peer is started. Use it to - // initialize data for the peer (e.g. peer state). - // - // NOTE: The switch won't call AddPeer nor RemovePeer if it fails to start - // the peer. Do not store any data associated with the peer in the reactor - // itself unless you don't want to have a state, which is never cleaned up. - InitPeer(peer Peer) Peer - - // AddPeer is called by the switch after the peer is added and successfully - // started. Use it to start goroutines communicating with the peer. - AddPeer(peer Peer) - - // RemovePeer is called by the switch when the peer is stopped (due to error - // or other reason). - RemovePeer(peer Peer, reason interface{}) - - // Receive is called by the switch when msgBytes is received from the peer. - // - // NOTE reactor can not keep msgBytes around after Receive completes without - // copying. - // - // CONTRACT: msgBytes are not nil. - // - // XXX: do not call any methods that can block or incur heavy processing. 
- // https://github.com/tendermint/tendermint/issues/2888 - Receive(chID byte, peer Peer, msgBytes []byte) -} - -//-------------------------------------- - -type BaseReactor struct { - service.BaseService // Provides Start, Stop, .Quit - Switch *Switch -} - -func NewBaseReactor(name string, impl Reactor) *BaseReactor { - return &BaseReactor{ - BaseService: *service.NewBaseService(nil, name, impl), - Switch: nil, - } -} - -func (br *BaseReactor) SetSwitch(sw *Switch) { - br.Switch = sw -} -func (*BaseReactor) GetChannels() []*conn.ChannelDescriptor { return nil } -func (*BaseReactor) AddPeer(peer Peer) {} -func (*BaseReactor) RemovePeer(peer Peer, reason interface{}) {} -func (*BaseReactor) Receive(chID byte, peer Peer, msgBytes []byte) {} -func (*BaseReactor) InitPeer(peer Peer) Peer { return peer } diff --git a/internal/p2p/channel.go b/internal/p2p/channel.go new file mode 100644 index 0000000000..8e6774612e --- /dev/null +++ b/internal/p2p/channel.go @@ -0,0 +1,212 @@ +package p2p + +import ( + "context" + "fmt" + "sync" + + "github.com/gogo/protobuf/proto" + + "github.com/tendermint/tendermint/types" +) + +// Envelope contains a message with sender/receiver routing info. +type Envelope struct { + From types.NodeID // sender (empty if outbound) + To types.NodeID // receiver (empty if inbound) + Broadcast bool // send to all connected peers (ignores To) + Message proto.Message // message payload + ChannelID ChannelID +} + +// Wrapper is a Protobuf message that can contain a variety of inner messages +// (e.g. via oneof fields). If a Channel's message type implements Wrapper, the +// Router will automatically wrap outbound messages and unwrap inbound messages, +// such that reactors do not have to do this themselves. +type Wrapper interface { + proto.Message + + // Wrap will take a message and wrap it in this one if possible. + Wrap(proto.Message) error + + // Unwrap will unwrap the inner message contained in this message. + Unwrap() (proto.Message, error) +} + +// PeerError is a peer error reported via Channel.Error. +// +// FIXME: This currently just disconnects the peer, which is too simplistic. +// For example, some errors should be logged, some should cause disconnects, +// and some should ban the peer. +// +// FIXME: This should probably be replaced by a more general PeerBehavior +// concept that can mark good and bad behavior and contributes to peer scoring. +// It should possibly also allow reactors to request explicit actions, e.g. +// disconnection or banning, in addition to doing this based on aggregates. +type PeerError struct { + NodeID types.NodeID + Err error +} + +func (pe PeerError) Error() string { return fmt.Sprintf("peer=%q: %s", pe.NodeID, pe.Err.Error()) } +func (pe PeerError) Unwrap() error { return pe.Err } + +// Channel is a bidirectional channel to exchange Protobuf messages with peers. +// Each message is wrapped in an Envelope to specify its sender and receiver. +type Channel struct { + ID ChannelID + inCh <-chan Envelope // inbound messages (peers to reactors) + outCh chan<- Envelope // outbound messages (reactors to peers) + errCh chan<- PeerError // peer error reporting + + messageType proto.Message // the channel's message type, used for unmarshaling + name string +} + +// NewChannel creates a new channel. It is primarily for internal and test +// use, reactors should use Router.OpenChannel(). 
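// Editor's note: an illustrative sketch, not part of the patch. It shows the
// intended flow on the Channel type above (Send and SendError are defined just
// below): a reactor broadcasts an envelope and reports a peer error, both
// bounded by ctx; the message and peer ID are placeholders.
func exampleChannelUse(ctx context.Context, ch *Channel, msg proto.Message, peer types.NodeID) error {
	// Broadcast to all connected peers; To is left empty.
	if err := ch.Send(ctx, Envelope{Broadcast: true, Message: msg}); err != nil {
		return err
	}
	// Report a misbehaving peer to the router.
	return ch.SendError(ctx, PeerError{NodeID: peer, Err: fmt.Errorf("invalid message")})
}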
+func NewChannel( + id ChannelID, + messageType proto.Message, + inCh <-chan Envelope, + outCh chan<- Envelope, + errCh chan<- PeerError, +) *Channel { + return &Channel{ + ID: id, + messageType: messageType, + inCh: inCh, + outCh: outCh, + errCh: errCh, + } +} + +// Send blocks until the envelope has been sent, or until ctx ends. +// An error only occurs if the context ends before the send completes. +func (ch *Channel) Send(ctx context.Context, envelope Envelope) error { + select { + case <-ctx.Done(): + return ctx.Err() + case ch.outCh <- envelope: + return nil + } +} + +// SendError blocks until the given error has been sent, or ctx ends. +// An error only occurs if the context ends before the send completes. +func (ch *Channel) SendError(ctx context.Context, pe PeerError) error { + select { + case <-ctx.Done(): + return ctx.Err() + case ch.errCh <- pe: + return nil + } +} + +func (ch *Channel) String() string { return fmt.Sprintf("p2p.Channel<%d:%s>", ch.ID, ch.name) } + +// Receive returns a new unbuffered iterator to receive messages from ch. +// The iterator runs until ctx ends. +func (ch *Channel) Receive(ctx context.Context) *ChannelIterator { + iter := &ChannelIterator{ + pipe: make(chan Envelope), // unbuffered + } + go func() { + defer close(iter.pipe) + iteratorWorker(ctx, ch, iter.pipe) + }() + return iter +} + +// ChannelIterator provides a context-aware path for callers +// (reactors) to process messages from the P2P layer without relying +// on the implementation details of the P2P layer. Channel provides +// access to its Outbound stream as an iterator, and the +// MergedChannelIterator makes it possible to combine multiple +// channels into a single iterator. +type ChannelIterator struct { + pipe chan Envelope + current *Envelope +} + +func iteratorWorker(ctx context.Context, ch *Channel, pipe chan Envelope) { + for { + select { + case <-ctx.Done(): + return + case envelope := <-ch.inCh: + select { + case <-ctx.Done(): + return + case pipe <- envelope: + } + } + } +} + +// Next returns true when the Envelope value has advanced, and false +// when the context is canceled or iteration should stop. If an iterator has returned false, +// it will never return true again. +// In general, use Next, as in: +// +// for iter.Next(ctx) { +// envelope := iter.Envelope() +// // ... do things ... +// } +// +func (iter *ChannelIterator) Next(ctx context.Context) bool { + select { + case <-ctx.Done(): + iter.current = nil + return false + case envelope, ok := <-iter.pipe: + if !ok { + iter.current = nil + return false + } + + iter.current = &envelope + + return true + } +} + +// Envelope returns the current Envelope object held by the +// iterator. When the last call to Next returned true, Envelope will +// return a non-nil object. If Next returned false then Envelope is +// always nil. +func (iter *ChannelIterator) Envelope() *Envelope { return iter.current } + +// MergedChannelIterator produces an iterator that merges the +// messages from the given channels in arbitrary order. +// +// This allows the caller to consume messages from multiple channels +// without needing to manage the concurrency separately. 
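// Editor's note: an illustrative sketch, not part of the patch. Consuming two
// channels through the MergedChannelIterator defined below, using the
// Next/Envelope pattern documented above; chA and chB stand in for channels
// opened via the Router.
func exampleMergedConsume(ctx context.Context, chA, chB *Channel) {
	iter := MergedChannelIterator(ctx, chA, chB)
	for iter.Next(ctx) {
		envelope := iter.Envelope()
		// Envelopes arrive in arbitrary order across the merged channels.
		fmt.Printf("from %v on channel %v\n", envelope.From, envelope.ChannelID)
	}
}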
+func MergedChannelIterator(ctx context.Context, chs ...*Channel) *ChannelIterator { + iter := &ChannelIterator{ + pipe: make(chan Envelope), // unbuffered + } + wg := new(sync.WaitGroup) + + for _, ch := range chs { + wg.Add(1) + go func(ch *Channel) { + defer wg.Done() + iteratorWorker(ctx, ch, iter.pipe) + }(ch) + } + + done := make(chan struct{}) + go func() { defer close(done); wg.Wait() }() + + go func() { + defer close(iter.pipe) + // we could return early if the context is canceled, + // but this is safer because it means the pipe stays + // open until all of the ch worker threads end, which + // should happen very quickly. + <-done + }() + + return iter +} diff --git a/internal/p2p/channel_test.go b/internal/p2p/channel_test.go new file mode 100644 index 0000000000..e06e3e77ea --- /dev/null +++ b/internal/p2p/channel_test.go @@ -0,0 +1,221 @@ +package p2p + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/fortytw2/leaktest" + "github.com/stretchr/testify/require" +) + +type channelInternal struct { + In chan Envelope + Out chan Envelope + Error chan PeerError +} + +func testChannel(size int) (*channelInternal, *Channel) { + in := &channelInternal{ + In: make(chan Envelope, size), + Out: make(chan Envelope, size), + Error: make(chan PeerError, size), + } + ch := &Channel{ + inCh: in.In, + outCh: in.Out, + errCh: in.Error, + } + return in, ch +} + +func TestChannel(t *testing.T) { + t.Cleanup(leaktest.Check(t)) + + bctx, bcancel := context.WithCancel(context.Background()) + defer bcancel() + + testCases := []struct { + Name string + Case func(context.Context, *testing.T) + }{ + { + Name: "Send", + Case: func(ctx context.Context, t *testing.T) { + ins, ch := testChannel(1) + require.NoError(t, ch.Send(ctx, Envelope{From: "kip", To: "merlin"})) + + res, ok := <-ins.Out + require.True(t, ok) + require.EqualValues(t, "kip", res.From) + require.EqualValues(t, "merlin", res.To) + }, + }, + { + Name: "SendError", + Case: func(ctx context.Context, t *testing.T) { + ins, ch := testChannel(1) + require.NoError(t, ch.SendError(ctx, PeerError{NodeID: "kip", Err: errors.New("merlin")})) + + res, ok := <-ins.Error + require.True(t, ok) + require.EqualValues(t, "kip", res.NodeID) + require.EqualValues(t, "merlin", res.Err.Error()) + }, + }, + { + Name: "SendWithCanceledContext", + Case: func(ctx context.Context, t *testing.T) { + _, ch := testChannel(0) + cctx, ccancel := context.WithCancel(ctx) + ccancel() + require.Error(t, ch.Send(cctx, Envelope{From: "kip", To: "merlin"})) + }, + }, + { + Name: "SendErrorWithCanceledContext", + Case: func(ctx context.Context, t *testing.T) { + _, ch := testChannel(0) + cctx, ccancel := context.WithCancel(ctx) + ccancel() + + require.Error(t, ch.SendError(cctx, PeerError{NodeID: "kip", Err: errors.New("merlin")})) + }, + }, + { + Name: "ReceiveEmptyIteratorBlocks", + Case: func(ctx context.Context, t *testing.T) { + _, ch := testChannel(1) + iter := ch.Receive(ctx) + require.NotNil(t, iter) + out := make(chan bool) + go func() { + defer close(out) + select { + case <-ctx.Done(): + case out <- iter.Next(ctx): + } + }() + select { + case <-time.After(10 * time.Millisecond): + case <-out: + require.Fail(t, "iterator should not advance") + } + require.Nil(t, iter.Envelope()) + }, + }, + { + Name: "ReceiveWithData", + Case: func(ctx context.Context, t *testing.T) { + ins, ch := testChannel(1) + ins.In <- Envelope{From: "kip", To: "merlin"} + iter := ch.Receive(ctx) + require.NotNil(t, iter) + require.True(t, iter.Next(ctx)) + + res := 
iter.Envelope() + require.EqualValues(t, "kip", res.From) + require.EqualValues(t, "merlin", res.To) + }, + }, + { + Name: "ReceiveWithCanceledContext", + Case: func(ctx context.Context, t *testing.T) { + _, ch := testChannel(0) + cctx, ccancel := context.WithCancel(ctx) + ccancel() + + iter := ch.Receive(cctx) + require.NotNil(t, iter) + require.False(t, iter.Next(cctx)) + require.Nil(t, iter.Envelope()) + }, + }, + { + Name: "IteratorWithCanceledContext", + Case: func(ctx context.Context, t *testing.T) { + _, ch := testChannel(0) + + iter := ch.Receive(ctx) + require.NotNil(t, iter) + + cctx, ccancel := context.WithCancel(ctx) + ccancel() + require.False(t, iter.Next(cctx)) + require.Nil(t, iter.Envelope()) + }, + }, + { + Name: "IteratorCanceledAfterFirstUseBecomesNil", + Case: func(ctx context.Context, t *testing.T) { + ins, ch := testChannel(1) + + ins.In <- Envelope{From: "kip", To: "merlin"} + iter := ch.Receive(ctx) + require.NotNil(t, iter) + + require.True(t, iter.Next(ctx)) + + res := iter.Envelope() + require.EqualValues(t, "kip", res.From) + require.EqualValues(t, "merlin", res.To) + + cctx, ccancel := context.WithCancel(ctx) + ccancel() + + require.False(t, iter.Next(cctx)) + require.Nil(t, iter.Envelope()) + }, + }, + { + Name: "IteratorMultipleNextCalls", + Case: func(ctx context.Context, t *testing.T) { + ins, ch := testChannel(1) + + ins.In <- Envelope{From: "kip", To: "merlin"} + iter := ch.Receive(ctx) + require.NotNil(t, iter) + + require.True(t, iter.Next(ctx)) + + res := iter.Envelope() + require.EqualValues(t, "kip", res.From) + require.EqualValues(t, "merlin", res.To) + + res1 := iter.Envelope() + require.Equal(t, res, res1) + }, + }, + { + Name: "IteratorProducesNilObjectBeforeNext", + Case: func(ctx context.Context, t *testing.T) { + ins, ch := testChannel(1) + + iter := ch.Receive(ctx) + require.NotNil(t, iter) + require.Nil(t, iter.Envelope()) + + ins.In <- Envelope{From: "kip", To: "merlin"} + require.NotNil(t, iter) + require.True(t, iter.Next(ctx)) + + res := iter.Envelope() + require.NotNil(t, res) + require.EqualValues(t, "kip", res.From) + require.EqualValues(t, "merlin", res.To) + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.Name, func(t *testing.T) { + t.Cleanup(leaktest.Check(t)) + + ctx, cancel := context.WithCancel(bctx) + defer cancel() + + tc.Case(ctx, t) + }) + } +} diff --git a/internal/p2p/conn/conn_go110.go b/internal/p2p/conn/conn_go110.go deleted file mode 100644 index 459c3169b1..0000000000 --- a/internal/p2p/conn/conn_go110.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build go1.10 -// +build go1.10 - -package conn - -// Go1.10 has a proper net.Conn implementation that -// has the SetDeadline method implemented as per -// https://github.com/golang/go/commit/e2dd8ca946be884bb877e074a21727f1a685a706 -// lest we run into problems like -// https://github.com/tendermint/tendermint/issues/851 - -import "net" - -func NetPipe() (net.Conn, net.Conn) { - return net.Pipe() -} diff --git a/internal/p2p/conn/conn_notgo110.go b/internal/p2p/conn/conn_notgo110.go deleted file mode 100644 index 21dffad2c2..0000000000 --- a/internal/p2p/conn/conn_notgo110.go +++ /dev/null @@ -1,33 +0,0 @@ -//go:build !go1.10 -// +build !go1.10 - -package conn - -import ( - "net" - "time" -) - -// Only Go1.10 has a proper net.Conn implementation that -// has the SetDeadline method implemented as per -// https://github.com/golang/go/commit/e2dd8ca946be884bb877e074a21727f1a685a706 -// lest we run into problems like -// https://github.com/tendermint/tendermint/issues/851 
-// so for go versions < Go1.10 use our custom net.Conn creator -// that doesn't return an `Unimplemented error` for net.Conn. -// Before https://github.com/tendermint/tendermint/commit/49faa79bdce5663894b3febbf4955fb1d172df04 -// we hadn't cared about errors from SetDeadline so swallow them up anyways. -type pipe struct { - net.Conn -} - -func (p *pipe) SetDeadline(t time.Time) error { - return nil -} - -func NetPipe() (net.Conn, net.Conn) { - p1, p2 := net.Pipe() - return &pipe{p1}, &pipe{p2} -} - -var _ net.Conn = (*pipe)(nil) diff --git a/internal/p2p/conn/connection.go b/internal/p2p/conn/connection.go index 5bf1873dc0..c8fc211888 100644 --- a/internal/p2p/conn/connection.go +++ b/internal/p2p/conn/connection.go @@ -2,6 +2,7 @@ package conn import ( "bufio" + "context" "errors" "fmt" "io" @@ -15,9 +16,8 @@ import ( "github.com/gogo/protobuf/proto" - flow "github.com/tendermint/tendermint/internal/libs/flowrate" + "github.com/tendermint/tendermint/internal/libs/flowrate" "github.com/tendermint/tendermint/internal/libs/protoio" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/libs/timer" "github.com/tendermint/tendermint/libs/log" tmmath "github.com/tendermint/tendermint/libs/math" @@ -49,11 +49,10 @@ const ( defaultPongTimeout = 90 * time.Second ) -type receiveCbFunc func(chID byte, msgBytes []byte) -type errorCbFunc func(interface{}) +type receiveCbFunc func(ctx context.Context, chID ChannelID, msgBytes []byte) +type errorCbFunc func(context.Context, interface{}) /* -MConnection struct: Each peer has one `MConnection` (multiplex connection) instance. __multiplex__ *noun* a system or signal involving simultaneous transmission of @@ -66,29 +65,26 @@ initialization of the connection. There are two methods for sending messages: func (m MConnection) Send(chID byte, msgBytes []byte) bool {} - func (m MConnection) TrySend(chID byte, msgBytes []byte}) bool {} `Send(chID, msgBytes)` is a blocking call that waits until `msg` is successfully queued for the channel with the given id byte `chID`, or until the request times out. The message `msg` is serialized using Protobuf. -`TrySend(chID, msgBytes)` is a nonblocking call that returns false if the -channel's queue is full. - Inbound message bytes are handled with an onReceive callback function. */ type MConnection struct { service.BaseService + logger log.Logger conn net.Conn bufConnReader *bufio.Reader bufConnWriter *bufio.Writer - sendMonitor *flow.Monitor - recvMonitor *flow.Monitor + sendMonitor *flowrate.Monitor + recvMonitor *flowrate.Monitor send chan struct{} pong chan struct{} - channels []*Channel - channelsIdx map[byte]*Channel + channels []*channel + channelsIdx map[ChannelID]*channel onReceive receiveCbFunc onError errorCbFunc errored uint32 @@ -104,7 +100,9 @@ type MConnection struct { // used to ensure FlushStop and OnStop // are safe to call concurrently. - stopMtx tmsync.Mutex + stopMtx sync.Mutex + + cancel context.CancelFunc flushTimer *timer.ThrottleTimer // flush writes as necessary but throttled. pingTimer *time.Ticker // send pings periodically @@ -138,6 +136,9 @@ type MConnConfig struct { // Maximum wait time for pongs PongTimeout time.Duration `mapstructure:"pong_timeout"` + + // Process/Transport Start time + StartTime time.Time `mapstructure:",omitempty"` } // DefaultMConnConfig returns the default config. 
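// Editor's note: an illustrative sketch, not part of the patch. Wiring up an
// MConnection under the signature introduced below (logger first, explicit
// config, context-aware callbacks); the channel descriptor and callbacks are
// minimal placeholders.
func exampleMConnection(logger log.Logger, conn net.Conn) *MConnection {
	chDescs := []*ChannelDescriptor{{ID: 0x01, Priority: 1, SendQueueCapacity: 1}}
	onReceive := func(ctx context.Context, chID ChannelID, msgBytes []byte) {}
	onError := func(ctx context.Context, r interface{}) {}
	return NewMConnection(logger, conn, chDescs, onReceive, onError, DefaultMConnConfig())
}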
@@ -149,26 +150,13 @@ func DefaultMConnConfig() MConnConfig { FlushThrottle: defaultFlushThrottle, PingInterval: defaultPingInterval, PongTimeout: defaultPongTimeout, + StartTime: time.Now(), } } -// NewMConnection wraps net.Conn and creates multiplex connection +// NewMConnection wraps net.Conn and creates multiplex connection with a config func NewMConnection( - conn net.Conn, - chDescs []*ChannelDescriptor, - onReceive receiveCbFunc, - onError errorCbFunc, -) *MConnection { - return NewMConnectionWithConfig( - conn, - chDescs, - onReceive, - onError, - DefaultMConnConfig()) -} - -// NewMConnectionWithConfig wraps net.Conn and creates multiplex connection with a config -func NewMConnectionWithConfig( + logger log.Logger, conn net.Conn, chDescs []*ChannelDescriptor, onReceive receiveCbFunc, @@ -176,22 +164,26 @@ func NewMConnectionWithConfig( config MConnConfig, ) *MConnection { mconn := &MConnection{ + logger: logger, conn: conn, bufConnReader: bufio.NewReaderSize(conn, minReadBufferSize), bufConnWriter: bufio.NewWriterSize(conn, minWriteBufferSize), - sendMonitor: flow.New(0, 0), - recvMonitor: flow.New(0, 0), + sendMonitor: flowrate.New(config.StartTime, 0, 0), + recvMonitor: flowrate.New(config.StartTime, 0, 0), send: make(chan struct{}, 1), pong: make(chan struct{}, 1), onReceive: onReceive, onError: onError, config: config, created: time.Now(), + cancel: func() {}, } + mconn.BaseService = *service.NewBaseService(logger, "MConnection", mconn) + // Create channels - var channelsIdx = map[byte]*Channel{} - var channels = []*Channel{} + var channelsIdx = map[ChannelID]*channel{} + var channels = []*channel{} for _, desc := range chDescs { channel := newChannel(mconn, *desc) @@ -201,26 +193,14 @@ func NewMConnectionWithConfig( mconn.channels = channels mconn.channelsIdx = channelsIdx - mconn.BaseService = *service.NewBaseService(nil, "MConnection", mconn) - // maxPacketMsgSize() is a bit heavy, so call just once mconn._maxPacketMsgSize = mconn.maxPacketMsgSize() return mconn } -func (c *MConnection) SetLogger(l log.Logger) { - c.BaseService.SetLogger(l) - for _, ch := range c.channels { - ch.SetLogger(l) - } -} - // OnStart implements BaseService -func (c *MConnection) OnStart() error { - if err := c.BaseService.OnStart(); err != nil { - return err - } +func (c *MConnection) OnStart(ctx context.Context) error { c.flushTimer = timer.NewThrottleTimer("flush", c.config.FlushThrottle) c.pingTimer = time.NewTicker(c.config.PingInterval) c.chStatsTimer = time.NewTicker(updateStats) @@ -228,8 +208,8 @@ func (c *MConnection) OnStart() error { c.doneSendRoutine = make(chan struct{}) c.quitRecvRoutine = make(chan struct{}) c.setRecvLastMsgAt(time.Now()) - go c.sendRoutine() - go c.recvRoutine() + go c.sendRoutine(ctx) + go c.recvRoutine(ctx) return nil } @@ -266,7 +246,6 @@ func (c *MConnection) stopServices() (alreadyStopped bool) { default: } - c.BaseService.OnStop() c.flushTimer.Stop() c.pingTimer.Stop() c.chStatsTimer.Stop() @@ -277,43 +256,6 @@ func (c *MConnection) stopServices() (alreadyStopped bool) { return false } -// FlushStop replicates the logic of OnStop. -// It additionally ensures that all successful -// .Send() calls will get flushed before closing -// the connection. -func (c *MConnection) FlushStop() { - if c.stopServices() { - return - } - - // this block is unique to FlushStop - { - // wait until the sendRoutine exits - // so we dont race on calling sendSomePacketMsgs - <-c.doneSendRoutine - - // Send and flush all pending msgs. 
- // Since sendRoutine has exited, we can call this - // safely - eof := c.sendSomePacketMsgs() - for !eof { - eof = c.sendSomePacketMsgs() - } - c.flush() - - // Now we can close the connection - } - - c.conn.Close() - - // We can't close pong safely here because - // recvRoutine may write to it after we've stopped. - // Though it doesn't need to get closed at all, - // we close it @ recvRoutine. - - // c.Stop() -} - // OnStop implements BaseService func (c *MConnection) OnStop() { if c.stopServices() { @@ -329,50 +271,47 @@ func (c *MConnection) OnStop() { } func (c *MConnection) String() string { - if c.conn != nil { - return fmt.Sprintf("MConn{%v}", c.conn.RemoteAddr()) - } - return "MConn{nil}" + return fmt.Sprintf("MConn{%v}", c.conn.RemoteAddr()) } func (c *MConnection) flush() { - // c.Logger.Debug("Flush", "conn", c) + c.logger.Debug("Flush", "conn", c) err := c.bufConnWriter.Flush() if err != nil { - c.Logger.Debug("MConnection flush failed", "err", err) + c.logger.Debug("MConnection flush failed", "err", err) } } // Catch panics, usually caused by remote disconnects. -func (c *MConnection) _recover() { +func (c *MConnection) _recover(ctx context.Context) { if r := recover(); r != nil { - c.Logger.Error("MConnection panicked", "err", r, "stack", string(debug.Stack())) - c.stopForError(fmt.Errorf("recovered from panic: %v", r)) + c.logger.Error("MConnection panicked", "err", r, "stack", string(debug.Stack())) + c.stopForError(ctx, fmt.Errorf("recovered from panic: %v", r)) } } -func (c *MConnection) stopForError(r interface{}) { - if err := c.Stop(); err != nil { - c.Logger.Error("Error stopping connection", "err", err) - } +func (c *MConnection) stopForError(ctx context.Context, r interface{}) { + c.Stop() + if atomic.CompareAndSwapUint32(&c.errored, 0, 1) { if c.onError != nil { - c.onError(r) + c.onError(ctx, r) } } } -// Send queues a message to be sent to channel. -func (c *MConnection) Send(chID byte, msgBytes []byte) bool { +// Queues a message to be sent to channel. +func (c *MConnection) Send(chID ChannelID, msgBytes []byte) bool { if !c.IsRunning() { return false } - // c.Logger.Debug("Send", "channel", chID, "conn", c, "msgBytes", base64.StdEncoding.EncodeToString(msgBytes)) + c.logger.Debug("Send", "channel", chID, "conn", c, "msgBytes", msgBytes) + // Send message to channel. channel, ok := c.channelsIdx[chID] if !ok { - c.Logger.Error(fmt.Sprintf("Cannot send bytes, unknown channel %X", chID)) + c.logger.Error(fmt.Sprintf("Cannot send bytes, unknown channel %X", chID)) return false } @@ -384,57 +323,14 @@ func (c *MConnection) Send(chID byte, msgBytes []byte) bool { default: } } else { - c.Logger.Debug("Send failed", "channel", chID, "conn", c, "msgBytes", fmt.Sprintf("%X", msgBytes)) + c.logger.Debug("Send failed", "channel", chID, "conn", c, "msgBytes", msgBytes) } return success } -// TrySend queues a message to be sent to channel. -// Nonblocking, returns true if successful. -func (c *MConnection) TrySend(chID byte, msgBytes []byte) bool { - if !c.IsRunning() { - return false - } - - // c.Logger.Debug("TrySend", "channel", chID, "conn", c, "msgBytes", fmt.Sprintf("%X", msgBytes)) - - // Send message to channel. 
- channel, ok := c.channelsIdx[chID] - if !ok { - c.Logger.Error(fmt.Sprintf("Cannot send bytes, unknown channel %X", chID)) - return false - } - - ok = channel.trySendBytes(msgBytes) - if ok { - // Wake up sendRoutine if necessary - select { - case c.send <- struct{}{}: - default: - } - } - - return ok -} - -// CanSend returns true if you can send more data onto the chID, false -// otherwise. Use only as a heuristic. -func (c *MConnection) CanSend(chID byte) bool { - if !c.IsRunning() { - return false - } - - channel, ok := c.channelsIdx[chID] - if !ok { - c.Logger.Error(fmt.Sprintf("Unknown channel %X", chID)) - return false - } - return channel.canSend() -} - // sendRoutine polls for packets to send from channels. -func (c *MConnection) sendRoutine() { - defer c._recover() +func (c *MConnection) sendRoutine(ctx context.Context) { + defer c._recover(ctx) protoWriter := protoio.NewDelimitedWriter(c.bufConnWriter) pongTimeout := time.NewTicker(c.config.PongTimeout) @@ -454,23 +350,23 @@ FOR_LOOP: channel.updateStats() } case <-c.pingTimer.C: - c.Logger.Debug("Send Ping") _n, err = protoWriter.WriteMsg(mustWrapPacket(&tmp2p.PacketPing{})) if err != nil { - c.Logger.Error("Failed to send PacketPing", "err", err) + c.logger.Error("Failed to send PacketPing", "err", err) break SELECTION } c.sendMonitor.Update(_n) c.flush() case <-c.pong: - c.Logger.Debug("Send Pong") _n, err = protoWriter.WriteMsg(mustWrapPacket(&tmp2p.PacketPong{})) if err != nil { - c.Logger.Error("Failed to send PacketPong", "err", err) + c.logger.Error("Failed to send PacketPong", "err", err) break SELECTION } c.sendMonitor.Update(_n) c.flush() + case <-ctx.Done(): + break FOR_LOOP case <-c.quitSendRoutine: break FOR_LOOP case <-pongTimeout.C: @@ -483,7 +379,7 @@ FOR_LOOP: break SELECTION case <-c.send: // Send some PacketMsgs - eof := c.sendSomePacketMsgs() + eof := c.sendSomePacketMsgs(ctx) if !eof { // Keep sendRoutine awake. select { @@ -498,8 +394,8 @@ FOR_LOOP: } if err != nil { - c.Logger.Error("Connection failed @ sendRoutine", "conn", c, "err", err) - c.stopForError(err) + c.logger.Error("Connection failed @ sendRoutine", "conn", c, "err", err) + c.stopForError(ctx, err) break FOR_LOOP } if !c.IsRunning() { @@ -513,15 +409,15 @@ FOR_LOOP: // Returns true if messages from channels were exhausted. // Blocks in accordance to .sendMonitor throttling. -func (c *MConnection) sendSomePacketMsgs() bool { +func (c *MConnection) sendSomePacketMsgs(ctx context.Context) bool { // Block until .sendMonitor says we can write. // Once we're ready we send more than we asked for, // but amortized it should even out. - c.sendMonitor.Limit(c._maxPacketMsgSize, atomic.LoadInt64(&c.config.SendRate), true) + c.sendMonitor.Limit(c._maxPacketMsgSize, c.config.SendRate, true) // Now send some PacketMsgs. for i := 0; i < numBatchPacketMsgs; i++ { - if c.sendPacketMsg() { + if c.sendPacketMsg(ctx) { return true } } @@ -529,11 +425,11 @@ func (c *MConnection) sendSomePacketMsgs() bool { } // Returns true if messages from channels were exhausted. -func (c *MConnection) sendPacketMsg() bool { +func (c *MConnection) sendPacketMsg(ctx context.Context) bool { // Choose a channel to create a PacketMsg from. // The chosen channel will be the one whose recentlySent/priority is the least. 
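// Editor's note: an illustrative sketch, not part of the patch. The selection
// rule described above, in isolation: among channels with pending data, pick
// the one with the smallest recentlySent/priority ratio (nil when all are
// drained); the helper name leastRatioChannel is hypothetical.
func leastRatioChannel(channels []*channel) *channel {
	var least *channel
	var leastRatio float32 = math.MaxFloat32
	for _, ch := range channels {
		if !ch.isSendPending() {
			continue // nothing queued on this channel
		}
		ratio := float32(atomic.LoadInt64(&ch.recentlySent)) / float32(ch.desc.Priority)
		if ratio < leastRatio {
			leastRatio = ratio
			least = ch
		}
	}
	return least
}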
var leastRatio float32 = math.MaxFloat32 - var leastChannel *Channel + var leastChannel *channel for _, channel := range c.channels { // If nothing to send, skip this channel if !channel.isSendPending() { @@ -551,13 +447,13 @@ func (c *MConnection) sendPacketMsg() bool { if leastChannel == nil { return true } - // c.Logger.Info("Found a msgPacket to send") + // c.logger.Info("Found a msgPacket to send") // Make & send a PacketMsg from this channel _n, err := leastChannel.writePacketMsgTo(c.bufConnWriter) if err != nil { - c.Logger.Error("Failed to write PacketMsg", "err", err) - c.stopForError(err) + c.logger.Error("Failed to write PacketMsg", "err", err) + c.stopForError(ctx, err) return true } c.sendMonitor.Update(_n) @@ -569,15 +465,15 @@ func (c *MConnection) sendPacketMsg() bool { // After a whole message has been assembled, it's pushed to onReceive(). // Blocks depending on how the connection is throttled. // Otherwise, it never blocks. -func (c *MConnection) recvRoutine() { - defer c._recover() +func (c *MConnection) recvRoutine(ctx context.Context) { + defer c._recover(ctx) protoReader := protoio.NewDelimitedReader(c.bufConnReader, c._maxPacketMsgSize) FOR_LOOP: for { select { - case <-c.quitRecvRoutine: + case <-ctx.Done(): break FOR_LOOP case <-c.doneSendRoutine: break FOR_LOOP @@ -585,7 +481,7 @@ FOR_LOOP: } // Block until .recvMonitor says we can read. - c.recvMonitor.Limit(c._maxPacketMsgSize, atomic.LoadInt64(&c.config.RecvRate), true) + c.recvMonitor.Limit(c._maxPacketMsgSize, c.config.RecvRate, true) // Peek into bufConnReader for debugging /* @@ -594,10 +490,10 @@ FOR_LOOP: if err == nil { // return } else { - c.Logger.Debug("Error peeking connection buffer", "err", err) + c.logger.Debug("error peeking connection buffer", "err", err) // return nil } - c.Logger.Info("Peek connection buffer", "numBytes", numBytes, "bz", bz) + c.logger.Info("Peek connection buffer", "numBytes", numBytes, "bz", bz) } */ @@ -610,6 +506,7 @@ FOR_LOOP: // stopServices was invoked and we are shutting down // receiving is excpected to fail since we will close the connection select { + case <-ctx.Done(): case <-c.quitRecvRoutine: break FOR_LOOP default: @@ -617,11 +514,11 @@ FOR_LOOP: if c.IsRunning() { if err == io.EOF { - c.Logger.Info("Connection is closed @ recvRoutine (likely by the other side)", "conn", c) + c.logger.Info("Connection is closed @ recvRoutine (likely by the other side)", "conn", c) } else { - c.Logger.Debug("Connection failed @ recvRoutine (reading byte)", "conn", c, "err", err) + c.logger.Debug("Connection failed @ recvRoutine (reading byte)", "conn", c, "err", err) } - c.stopForError(err) + c.stopForError(ctx, err) } break FOR_LOOP } @@ -634,7 +531,6 @@ FOR_LOOP: case *tmp2p.Packet_PacketPing: // TODO: prevent abuse, as they cause flush()'s. 
// https://github.com/tendermint/tendermint/issues/1190 - c.Logger.Debug("Receive Ping") select { case c.pong <- struct{}{}: default: @@ -645,32 +541,32 @@ FOR_LOOP: // received" timestamp above, so we can ignore // this message case *tmp2p.Packet_PacketMsg: - channelID := byte(pkt.PacketMsg.ChannelID) + channelID := ChannelID(pkt.PacketMsg.ChannelID) channel, ok := c.channelsIdx[channelID] if pkt.PacketMsg.ChannelID < 0 || pkt.PacketMsg.ChannelID > math.MaxUint8 || !ok || channel == nil { err := fmt.Errorf("unknown channel %X", pkt.PacketMsg.ChannelID) - c.Logger.Debug("Connection failed @ recvRoutine", "conn", c, "err", err) - c.stopForError(err) + c.logger.Debug("Connection failed @ recvRoutine", "conn", c, "err", err) + c.stopForError(ctx, err) break FOR_LOOP } msgBytes, err := channel.recvPacketMsg(*pkt.PacketMsg) if err != nil { if c.IsRunning() { - c.Logger.Debug("Connection failed @ recvRoutine", "conn", c, "err", err) - c.stopForError(err) + c.logger.Debug("Connection failed @ recvRoutine", "conn", c, "err", err) + c.stopForError(ctx, err) } break FOR_LOOP } if msgBytes != nil { - // c.Logger.Debug("Received bytes", "chID", channelID, "msgBytes", base64.StdEncoding.EncodeToString(msgBytes)) + c.logger.Debug("Received bytes", "chID", channelID, "msgBytes", msgBytes) // NOTE: This means the reactor.Receive runs in the same thread as the p2p recv routine - c.onReceive(channelID, msgBytes) + c.onReceive(ctx, channelID, msgBytes) } default: err := fmt.Errorf("unknown message type %v", reflect.TypeOf(packet)) - c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err) - c.stopForError(err) + c.logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err) + c.stopForError(ctx, err) break FOR_LOOP } } @@ -695,13 +591,6 @@ func (c *MConnection) maxPacketMsgSize() int { return len(bz) } -type ConnectionStatus struct { - Duration time.Duration - SendMonitor flow.Status - RecvMonitor flow.Status - Channels []ChannelStatus -} - type ChannelStatus struct { ID byte SendQueueCapacity int @@ -710,30 +599,16 @@ type ChannelStatus struct { RecentlySent int64 } -func (c *MConnection) Status() ConnectionStatus { - var status ConnectionStatus - status.Duration = time.Since(c.created) - status.SendMonitor = c.sendMonitor.Status() - status.RecvMonitor = c.recvMonitor.Status() - status.Channels = make([]ChannelStatus, len(c.channels)) - for i, channel := range c.channels { - status.Channels[i] = ChannelStatus{ - ID: channel.desc.ID, - SendQueueCapacity: cap(channel.sendQueue), - SendQueueSize: int(atomic.LoadInt32(&channel.sendQueueSize)), - Priority: channel.desc.Priority, - RecentlySent: atomic.LoadInt64(&channel.recentlySent), - } - } - return status -} - //----------------------------------------------------------------------------- +// ChannelID is an arbitrary channel ID. +type ChannelID uint16 type ChannelDescriptor struct { - ID byte + ID ChannelID Priority int + MessageType proto.Message + // TODO: Remove once p2p refactor is complete. SendQueueCapacity int RecvMessageCapacity int @@ -742,9 +617,9 @@ type ChannelDescriptor struct { // given p2p Channel queue. RecvBufferCapacity int - // MaxSendBytes defines the maximum number of bytes that can be sent at any - // given moment from a Channel to a peer. - MaxSendBytes uint + // Human readable name of the channel, used in logging and + // diagnostics. 
+ Name string } func (chDesc ChannelDescriptor) FillDefaults() (filled ChannelDescriptor) { @@ -761,9 +636,8 @@ func (chDesc ChannelDescriptor) FillDefaults() (filled ChannelDescriptor) { return } -// TODO: lowercase. // NOTE: not goroutine-safe. -type Channel struct { +type channel struct { // Exponential moving average. // This field must be accessed atomically. // It is first in the struct to ensure correct alignment. @@ -779,31 +653,28 @@ type Channel struct { maxPacketMsgPayloadSize int - Logger log.Logger + logger log.Logger } -func newChannel(conn *MConnection, desc ChannelDescriptor) *Channel { +func newChannel(conn *MConnection, desc ChannelDescriptor) *channel { desc = desc.FillDefaults() if desc.Priority <= 0 { panic("Channel default priority must be a positive integer") } - return &Channel{ + return &channel{ conn: conn, desc: desc, sendQueue: make(chan []byte, desc.SendQueueCapacity), recving: make([]byte, 0, desc.RecvBufferCapacity), maxPacketMsgPayloadSize: conn.config.MaxPacketMsgPayloadSize, + logger: conn.logger, } } -func (ch *Channel) SetLogger(l log.Logger) { - ch.Logger = l -} - // Queues message to send to this channel. // Goroutine-safe // Times out (and returns false) after defaultSendTimeout -func (ch *Channel) sendBytes(bytes []byte) bool { +func (ch *channel) sendBytes(bytes []byte) bool { select { case ch.sendQueue <- bytes: atomic.AddInt32(&ch.sendQueueSize, 1) @@ -813,34 +684,10 @@ func (ch *Channel) sendBytes(bytes []byte) bool { } } -// Queues message to send to this channel. -// Nonblocking, returns true if successful. -// Goroutine-safe -func (ch *Channel) trySendBytes(bytes []byte) bool { - select { - case ch.sendQueue <- bytes: - atomic.AddInt32(&ch.sendQueueSize, 1) - return true - default: - return false - } -} - -// Goroutine-safe -func (ch *Channel) loadSendQueueSize() (size int) { - return int(atomic.LoadInt32(&ch.sendQueueSize)) -} - -// Goroutine-safe -// Use only as a heuristic. -func (ch *Channel) canSend() bool { - return ch.loadSendQueueSize() < defaultSendQueueCapacity -} - // Returns true if any PacketMsgs are pending to be sent. // Call before calling nextPacketMsg() // Goroutine-safe -func (ch *Channel) isSendPending() bool { +func (ch *channel) isSendPending() bool { if len(ch.sending) == 0 { if len(ch.sendQueue) == 0 { return false @@ -852,7 +699,7 @@ func (ch *Channel) isSendPending() bool { // Creates a new PacketMsg to send. // Not goroutine-safe -func (ch *Channel) nextPacketMsg() tmp2p.PacketMsg { +func (ch *channel) nextPacketMsg() tmp2p.PacketMsg { packet := tmp2p.PacketMsg{ChannelID: int32(ch.desc.ID)} maxSize := ch.maxPacketMsgPayloadSize packet.Data = ch.sending[:tmmath.MinInt(maxSize, len(ch.sending))] @@ -869,7 +716,7 @@ func (ch *Channel) nextPacketMsg() tmp2p.PacketMsg { // Writes next PacketMsg to w and updates c.recentlySent. // Not goroutine-safe -func (ch *Channel) writePacketMsgTo(w io.Writer) (n int, err error) { +func (ch *channel) writePacketMsgTo(w io.Writer) (n int, err error) { packet := ch.nextPacketMsg() n, err = protoio.NewDelimitedWriter(w).WriteMsg(mustWrapPacket(&packet)) atomic.AddInt64(&ch.recentlySent, int64(n)) @@ -879,8 +726,8 @@ func (ch *Channel) writePacketMsgTo(w io.Writer) (n int, err error) { // Handles incoming PacketMsgs. It returns a message bytes if message is // complete, which is owned by the caller and will not be modified. 
// Not goroutine-safe -func (ch *Channel) recvPacketMsg(packet tmp2p.PacketMsg) ([]byte, error) { - // ch.Logger.Debug("Read PacketMsg", "conn", ch.conn, "packetData", base64.StdEncoding.EncodeToString(packet.Data)) +func (ch *channel) recvPacketMsg(packet tmp2p.PacketMsg) ([]byte, error) { + ch.logger.Debug("Read PacketMsg", "conn", ch.conn, "packet", packet) var recvCap, recvReceived = ch.desc.RecvMessageCapacity, len(ch.recving) + len(packet.Data) if recvCap < recvReceived { return nil, fmt.Errorf("received message exceeds available capacity: %v < %v", recvCap, recvReceived) @@ -896,7 +743,7 @@ // Call this periodically to update stats for throttling purposes. // Not goroutine-safe -func (ch *Channel) updateStats() { +func (ch *channel) updateStats() { // Exponential decay of stats. // TODO: optimize. atomic.StoreInt64(&ch.recentlySent, int64(float64(atomic.LoadInt64(&ch.recentlySent))*0.8)) diff --git a/internal/p2p/conn/connection_test.go b/internal/p2p/conn/connection_test.go index 09e6fe4b00..4da3901fa0 100644 --- a/internal/p2p/conn/connection_test.go +++ b/internal/p2p/conn/connection_test.go @@ -5,6 +5,7 @@ import ( "encoding/hex" "io" "net" + "sync" "testing" "time" @@ -15,44 +16,48 @@ import ( "github.com/tendermint/tendermint/internal/libs/protoio" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/libs/service" tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p" "github.com/tendermint/tendermint/proto/tendermint/types" ) const maxPingPongPacketSize = 1024 // bytes -func createTestMConnection(conn net.Conn) *MConnection { - onReceive := func(chID byte, msgBytes []byte) { - } - onError := func(r interface{}) { - } - c := createMConnectionWithCallbacks(conn, onReceive, onError) - c.SetLogger(log.TestingLogger()) - return c +func createTestMConnection(logger log.Logger, conn net.Conn) *MConnection { + return createMConnectionWithCallbacks(logger, conn, + // onReceive + func(ctx context.Context, chID ChannelID, msgBytes []byte) { + }, + // onError + func(ctx context.Context, r interface{}) { + }) } func createMConnectionWithCallbacks( + logger log.Logger, conn net.Conn, - onReceive func(chID byte, msgBytes []byte), - onError func(r interface{}), + onReceive func(ctx context.Context, chID ChannelID, msgBytes []byte), + onError func(ctx context.Context, r interface{}), ) *MConnection { cfg := DefaultMConnConfig() cfg.PingInterval = 250 * time.Millisecond cfg.PongTimeout = 500 * time.Millisecond chDescs := []*ChannelDescriptor{{ID: 0x01, Priority: 1, SendQueueCapacity: 1}} - c := NewMConnectionWithConfig(conn, chDescs, onReceive, onError, cfg) - c.SetLogger(log.TestingLogger()) + c := NewMConnection(logger, conn, chDescs, onReceive, onError, cfg) return c } func TestMConnectionSendFlushStop(t *testing.T) { - server, client := NetPipe() + server, client := net.Pipe() t.Cleanup(closeAll(t, client, server)) - clientConn := createTestMConnection(client) - err := clientConn.Start() - require.Nil(t, err) - t.Cleanup(stopAll(t, clientConn)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + clientConn := createTestMConnection(log.NewNopLogger(), client) + err := clientConn.Start(ctx) + require.NoError(t, err) + t.Cleanup(waitAll(clientConn)) msg := []byte("abc") assert.True(t, clientConn.Send(0x01, msg)) @@ -71,9 +76,6 @@ errCh <- err }() - // stop the conn - it should flush all conns - 
clientConn.FlushStop() - timer := time.NewTimer(3 * time.Second) select { case <-errCh: @@ -83,13 +85,16 @@ func TestMConnectionSendFlushStop(t *testing.T) { } func TestMConnectionSend(t *testing.T) { - server, client := NetPipe() + server, client := net.Pipe() t.Cleanup(closeAll(t, client, server)) - mconn := createTestMConnection(client) - err := mconn.Start() - require.Nil(t, err) - t.Cleanup(stopAll(t, mconn)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mconn := createTestMConnection(log.NewNopLogger(), client) + err := mconn.Start(ctx) + require.NoError(t, err) + t.Cleanup(waitAll(mconn)) msg := []byte("Ant-Man") assert.True(t, mconn.Send(0x01, msg)) @@ -99,40 +104,49 @@ func TestMConnectionSend(t *testing.T) { if err != nil { t.Error(err) } - assert.True(t, mconn.CanSend(0x01)) msg = []byte("Spider-Man") - assert.True(t, mconn.TrySend(0x01, msg)) + assert.True(t, mconn.Send(0x01, msg)) _, err = server.Read(make([]byte, len(msg))) if err != nil { t.Error(err) } - assert.False(t, mconn.CanSend(0x05), "CanSend should return false because channel is unknown") assert.False(t, mconn.Send(0x05, []byte("Absorbing Man")), "Send should return false because channel is unknown") } func TestMConnectionReceive(t *testing.T) { - server, client := NetPipe() + server, client := net.Pipe() t.Cleanup(closeAll(t, client, server)) receivedCh := make(chan []byte) errorsCh := make(chan interface{}) - onReceive := func(chID byte, msgBytes []byte) { - receivedCh <- msgBytes + onReceive := func(ctx context.Context, chID ChannelID, msgBytes []byte) { + select { + case receivedCh <- msgBytes: + case <-ctx.Done(): + } } - onError := func(r interface{}) { - errorsCh <- r + onError := func(ctx context.Context, r interface{}) { + select { + case errorsCh <- r: + case <-ctx.Done(): + } } - mconn1 := createMConnectionWithCallbacks(client, onReceive, onError) - err := mconn1.Start() - require.Nil(t, err) - t.Cleanup(stopAll(t, mconn1)) + logger := log.NewNopLogger() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - mconn2 := createTestMConnection(server) - err = mconn2.Start() - require.Nil(t, err) - t.Cleanup(stopAll(t, mconn2)) + mconn1 := createMConnectionWithCallbacks(logger, client, onReceive, onError) + err := mconn1.Start(ctx) + require.NoError(t, err) + t.Cleanup(waitAll(mconn1)) + + mconn2 := createTestMConnection(logger, server) + err = mconn2.Start(ctx) + require.NoError(t, err) + t.Cleanup(waitAll(mconn2)) msg := []byte("Cyclops") assert.True(t, mconn2.Send(0x01, msg)) @@ -147,20 +161,6 @@ func TestMConnectionReceive(t *testing.T) { } } -func TestMConnectionStatus(t *testing.T) { - server, client := NetPipe() - t.Cleanup(closeAll(t, client, server)) - - mconn := createTestMConnection(client) - err := mconn.Start() - require.Nil(t, err) - t.Cleanup(stopAll(t, mconn)) - - status := mconn.Status() - assert.NotNil(t, status) - assert.Zero(t, status.Channels[0].SendQueueSize) -} - func TestMConnectionWillEventuallyTimeout(t *testing.T) { server, client := net.Pipe() t.Cleanup(closeAll(t, client, server)) @@ -168,9 +168,10 @@ func TestMConnectionWillEventuallyTimeout(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - mconn := createMConnectionWithCallbacks(client, nil, nil) - err := mconn.Start() + mconn := createMConnectionWithCallbacks(log.NewNopLogger(), client, nil, nil) + err := mconn.Start(ctx) require.NoError(t, err) + t.Cleanup(waitAll(mconn)) require.True(t, mconn.IsRunning()) go func() { @@ -207,16 
+208,26 @@ func TestMConnectionMultiplePongsInTheBeginning(t *testing.T) { receivedCh := make(chan []byte) errorsCh := make(chan interface{}) - onReceive := func(chID byte, msgBytes []byte) { - receivedCh <- msgBytes + onReceive := func(ctx context.Context, chID ChannelID, msgBytes []byte) { + select { + case receivedCh <- msgBytes: + case <-ctx.Done(): + } } - onError := func(r interface{}) { - errorsCh <- r + onError := func(ctx context.Context, r interface{}) { + select { + case errorsCh <- r: + case <-ctx.Done(): + } } - mconn := createMConnectionWithCallbacks(client, onReceive, onError) - err := mconn.Start() - require.Nil(t, err) - t.Cleanup(stopAll(t, mconn)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mconn := createMConnectionWithCallbacks(log.NewNopLogger(), client, onReceive, onError) + err := mconn.Start(ctx) + require.NoError(t, err) + t.Cleanup(waitAll(mconn)) // sending 3 pongs in a row (abuse) protoWriter := protoio.NewDelimitedWriter(server) @@ -256,16 +267,25 @@ func TestMConnectionMultiplePings(t *testing.T) { receivedCh := make(chan []byte) errorsCh := make(chan interface{}) - onReceive := func(chID byte, msgBytes []byte) { - receivedCh <- msgBytes + onReceive := func(ctx context.Context, chID ChannelID, msgBytes []byte) { + select { + case receivedCh <- msgBytes: + case <-ctx.Done(): + } } - onError := func(r interface{}) { - errorsCh <- r + onError := func(ctx context.Context, r interface{}) { + select { + case errorsCh <- r: + case <-ctx.Done(): + } } - mconn := createMConnectionWithCallbacks(client, onReceive, onError) - err := mconn.Start() - require.Nil(t, err) - t.Cleanup(stopAll(t, mconn)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mconn := createMConnectionWithCallbacks(log.NewNopLogger(), client, onReceive, onError) + err := mconn.Start(ctx) + require.NoError(t, err) + t.Cleanup(waitAll(mconn)) // sending 3 pings in a row (abuse) // see https://github.com/tendermint/tendermint/issues/1190 @@ -306,22 +326,23 @@ func TestMConnectionPingPongs(t *testing.T) { receivedCh := make(chan []byte) errorsCh := make(chan interface{}) - onReceive := func(chID byte, msgBytes []byte) { + onReceive := func(ctx context.Context, chID ChannelID, msgBytes []byte) { select { - case <-ctx.Done(): case receivedCh <- msgBytes: + case <-ctx.Done(): } } - onError := func(r interface{}) { + onError := func(ctx context.Context, r interface{}) { select { case errorsCh <- r: case <-ctx.Done(): } } - mconn := createMConnectionWithCallbacks(client, onReceive, onError) - err := mconn.Start() - require.Nil(t, err) - t.Cleanup(stopAll(t, mconn)) + + mconn := createMConnectionWithCallbacks(log.NewNopLogger(), client, onReceive, onError) + err := mconn.Start(ctx) + require.NoError(t, err) + t.Cleanup(waitAll(mconn)) protoReader := protoio.NewDelimitedReader(server, maxPingPongPacketSize) protoWriter := protoio.NewDelimitedWriter(server) @@ -357,21 +378,30 @@ func TestMConnectionPingPongs(t *testing.T) { } func TestMConnectionStopsAndReturnsError(t *testing.T) { - server, client := NetPipe() + server, client := net.Pipe() t.Cleanup(closeAll(t, client, server)) receivedCh := make(chan []byte) errorsCh := make(chan interface{}) - onReceive := func(chID byte, msgBytes []byte) { - receivedCh <- msgBytes + onReceive := func(ctx context.Context, chID ChannelID, msgBytes []byte) { + select { + case receivedCh <- msgBytes: + case <-ctx.Done(): + } } - onError := func(r interface{}) { - errorsCh <- r + onError := func(ctx 
context.Context, r interface{}) { + select { + case errorsCh <- r: + case <-ctx.Done(): + } } - mconn := createMConnectionWithCallbacks(client, onReceive, onError) - err := mconn.Start() - require.Nil(t, err) - t.Cleanup(stopAll(t, mconn)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mconn := createMConnectionWithCallbacks(log.NewNopLogger(), client, onReceive, onError) + err := mconn.Start(ctx) + require.NoError(t, err) + t.Cleanup(waitAll(mconn)) if err := client.Close(); err != nil { t.Error(err) @@ -388,32 +418,40 @@ func TestMConnectionStopsAndReturnsError(t *testing.T) { } } -func newClientAndServerConnsForReadErrors(t *testing.T, chOnErr chan struct{}) (*MConnection, *MConnection) { - server, client := NetPipe() +func newClientAndServerConnsForReadErrors( + ctx context.Context, + t *testing.T, + chOnErr chan struct{}, +) (*MConnection, *MConnection) { + server, client := net.Pipe() - onReceive := func(chID byte, msgBytes []byte) {} - onError := func(r interface{}) {} + onReceive := func(context.Context, ChannelID, []byte) {} + onError := func(context.Context, interface{}) {} // create client conn with two channels chDescs := []*ChannelDescriptor{ {ID: 0x01, Priority: 1, SendQueueCapacity: 1}, {ID: 0x02, Priority: 1, SendQueueCapacity: 1}, } - mconnClient := NewMConnection(client, chDescs, onReceive, onError) - mconnClient.SetLogger(log.TestingLogger().With("module", "client")) - err := mconnClient.Start() - require.Nil(t, err) + logger := log.NewNopLogger() + + mconnClient := NewMConnection(logger.With("module", "client"), client, chDescs, onReceive, onError, DefaultMConnConfig()) + err := mconnClient.Start(ctx) + require.NoError(t, err) // create server conn with 1 channel // it fires on chOnErr when there's an error - serverLogger := log.TestingLogger().With("module", "server") - onError = func(r interface{}) { - chOnErr <- struct{}{} + serverLogger := logger.With("module", "server") + onError = func(ctx context.Context, r interface{}) { + select { + case <-ctx.Done(): + case chOnErr <- struct{}{}: + } } - mconnServer := createMConnectionWithCallbacks(server, onReceive, onError) - mconnServer.SetLogger(serverLogger) - err = mconnServer.Start() - require.Nil(t, err) + + mconnServer := createMConnectionWithCallbacks(serverLogger, server, onReceive, onError) + err = mconnServer.Start(ctx) + require.NoError(t, err) return mconnClient, mconnServer } @@ -428,8 +466,11 @@ func expectSend(ch chan struct{}) bool { } func TestMConnectionReadErrorBadEncoding(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + chOnErr := make(chan struct{}) - mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr) + mconnClient, mconnServer := newClientAndServerConnsForReadErrors(ctx, t, chOnErr) client := mconnClient.conn @@ -437,12 +478,15 @@ func TestMConnectionReadErrorBadEncoding(t *testing.T) { _, err := client.Write([]byte{1, 2, 3, 4, 5}) require.NoError(t, err) assert.True(t, expectSend(chOnErr), "badly encoded msgPacket") - t.Cleanup(stopAll(t, mconnClient, mconnServer)) + t.Cleanup(waitAll(mconnClient, mconnServer)) } func TestMConnectionReadErrorUnknownChannel(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + chOnErr := make(chan struct{}) - mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr) + mconnClient, mconnServer := newClientAndServerConnsForReadErrors(ctx, t, chOnErr) msg := []byte("Ant-Man") @@ -453,18 +497,24 @@ func 
TestMConnectionReadErrorUnknownChannel(t *testing.T) { // should cause an error assert.True(t, mconnClient.Send(0x02, msg)) assert.True(t, expectSend(chOnErr), "unknown channel") - t.Cleanup(stopAll(t, mconnClient, mconnServer)) + t.Cleanup(waitAll(mconnClient, mconnServer)) } func TestMConnectionReadErrorLongMessage(t *testing.T) { chOnErr := make(chan struct{}) chOnRcv := make(chan struct{}) - mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr) - t.Cleanup(stopAll(t, mconnClient, mconnServer)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mconnClient, mconnServer := newClientAndServerConnsForReadErrors(ctx, t, chOnErr) + t.Cleanup(waitAll(mconnClient, mconnServer)) - mconnServer.onReceive = func(chID byte, msgBytes []byte) { - chOnRcv <- struct{}{} + mconnServer.onReceive = func(ctx context.Context, chID ChannelID, msgBytes []byte) { + select { + case <-ctx.Done(): + case chOnRcv <- struct{}{}: + } } client := mconnClient.conn @@ -494,9 +544,12 @@ func TestMConnectionReadErrorLongMessage(t *testing.T) { } func TestMConnectionReadErrorUnknownMsgType(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + chOnErr := make(chan struct{}) - mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr) - t.Cleanup(stopAll(t, mconnClient, mconnServer)) + mconnClient, mconnServer := newClientAndServerConnsForReadErrors(ctx, t, chOnErr) + t.Cleanup(waitAll(mconnClient, mconnServer)) // send msg with unknown msg type _, err := protoio.NewDelimitedWriter(mconnClient.conn).WriteMsg(&types.Header{ChainID: "x"}) @@ -505,28 +558,27 @@ func TestMConnectionReadErrorUnknownMsgType(t *testing.T) { } func TestMConnectionTrySend(t *testing.T) { - server, client := NetPipe() + server, client := net.Pipe() t.Cleanup(closeAll(t, client, server)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - mconn := createTestMConnection(client) - err := mconn.Start() - require.Nil(t, err) - t.Cleanup(stopAll(t, mconn)) + mconn := createTestMConnection(log.NewNopLogger(), client) + err := mconn.Start(ctx) + require.NoError(t, err) + t.Cleanup(waitAll(mconn)) msg := []byte("Semicolon-Woman") resultCh := make(chan string, 2) - assert.True(t, mconn.TrySend(0x01, msg)) + assert.True(t, mconn.Send(0x01, msg)) _, err = server.Read(make([]byte, len(msg))) require.NoError(t, err) - assert.True(t, mconn.CanSend(0x01)) - assert.True(t, mconn.TrySend(0x01, msg)) - assert.False(t, mconn.CanSend(0x01)) + assert.True(t, mconn.Send(0x01, msg)) go func() { - mconn.TrySend(0x01, msg) + mconn.Send(0x01, msg) resultCh <- "TrySend" }() - assert.False(t, mconn.CanSend(0x01)) - assert.False(t, mconn.TrySend(0x01, msg)) + assert.False(t, mconn.Send(0x01, msg)) assert.Equal(t, "TrySend", <-resultCh) } @@ -557,11 +609,17 @@ func TestMConnectionChannelOverflow(t *testing.T) { chOnErr := make(chan struct{}) chOnRcv := make(chan struct{}) - mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr) - t.Cleanup(stopAll(t, mconnClient, mconnServer)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - mconnServer.onReceive = func(chID byte, msgBytes []byte) { - chOnRcv <- struct{}{} + mconnClient, mconnServer := newClientAndServerConnsForReadErrors(ctx, t, chOnErr) + t.Cleanup(waitAll(mconnClient, mconnServer)) + + mconnServer.onReceive = func(ctx context.Context, chID ChannelID, msgBytes []byte) { + select { + case <-ctx.Done(): + case chOnRcv <- struct{}{}: + } } client := 
mconnClient.conn @@ -583,16 +641,26 @@ func TestMConnectionChannelOverflow(t *testing.T) { } -type stopper interface { - Stop() error -} - -func stopAll(t *testing.T, stoppers ...stopper) func() { +func waitAll(waiters ...service.Service) func() { return func() { - for _, s := range stoppers { - if err := s.Stop(); err != nil { - t.Log(err) + switch len(waiters) { + case 0: + return + case 1: + waiters[0].Wait() + return + default: + wg := &sync.WaitGroup{} + + for _, w := range waiters { + wg.Add(1) + go func(s service.Service) { + defer wg.Done() + s.Wait() + }(w) } + + wg.Wait() } } } diff --git a/internal/p2p/conn/secret_connection.go b/internal/p2p/conn/secret_connection.go index f0a2879229..08554aae8b 100644 --- a/internal/p2p/conn/secret_connection.go +++ b/internal/p2p/conn/secret_connection.go @@ -11,6 +11,7 @@ import ( "io" "math" "net" + "sync" "time" gogotypes "github.com/gogo/protobuf/types" @@ -24,9 +25,8 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/internal/libs/async" "github.com/tendermint/tendermint/internal/libs/protoio" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/libs/async" tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p" ) @@ -76,11 +76,11 @@ type SecretConnection struct { // are independent, so we can use two mtxs. // All .Read are covered by recvMtx, // all .Write are covered by sendMtx. - recvMtx tmsync.Mutex + recvMtx sync.Mutex recvBuffer []byte recvNonce *[aeadNonceSize]byte - sendMtx tmsync.Mutex + sendMtx sync.Mutex sendNonce *[aeadNonceSize]byte } diff --git a/internal/p2p/conn/secret_connection_test.go b/internal/p2p/conn/secret_connection_test.go index 895921dc4f..26082a3325 100644 --- a/internal/p2p/conn/secret_connection_test.go +++ b/internal/p2p/conn/secret_connection_test.go @@ -6,7 +6,6 @@ import ( "flag" "fmt" "io" - "io/ioutil" "log" mrand "math/rand" "os" @@ -21,7 +20,7 @@ import ( "github.com/tendermint/tendermint/crypto/bls12381" "github.com/tendermint/tendermint/crypto/ed25519" - "github.com/tendermint/tendermint/libs/async" + "github.com/tendermint/tendermint/internal/libs/async" tmrand "github.com/tendermint/tendermint/libs/rand" ) @@ -184,8 +183,7 @@ func TestSecretConnectionReadWrite(t *testing.T) { compareWritesReads := func(writes []string, reads []string) { for { // Pop next write & corresponding reads - var read string - var write = writes[0] + var read, write = "", writes[0] var readCount = 0 for _, readChunk := range reads { read += readChunk @@ -218,8 +216,8 @@ func TestDeriveSecretsAndChallengeGolden(t *testing.T) { goldenFilepath := filepath.Join("testdata", t.Name()+".golden") if *update { t.Logf("Updating golden test vector file %s", goldenFilepath) - data := createGoldenTestVectors(t) - require.NoError(t, ioutil.WriteFile(goldenFilepath, []byte(data), 0644)) + data := createGoldenTestVectors() + require.NoError(t, os.WriteFile(goldenFilepath, []byte(data), 0644)) } f, err := os.Open(goldenFilepath) if err != nil { @@ -231,15 +229,15 @@ func TestDeriveSecretsAndChallengeGolden(t *testing.T) { line := scanner.Text() params := strings.Split(line, ",") randSecretVector, err := hex.DecodeString(params[0]) - require.Nil(t, err) + require.NoError(t, err) randSecret := new([32]byte) copy((*randSecret)[:], randSecretVector) locIsLeast, err := strconv.ParseBool(params[1]) - require.Nil(t, err) + require.NoError(t, err) 
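Throughout the rewritten connection tests above, every onReceive/onError callback wraps its channel send in a select against ctx.Done(). The point is leak prevention: a callback blocked on an unbuffered test channel returns as soon as the connection's context is canceled instead of stranding a goroutine. A minimal, hedged restatement of the pattern (the names here are illustrative, not taken from the diff):

import "context"

// deliver blocks until msg is consumed or ctx is canceled, so a
// connection callback can never outlive the connection's shutdown.
func deliver(ctx context.Context, out chan<- []byte, msg []byte) {
	select {
	case out <- msg:
	case <-ctx.Done():
	}
}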
expectedRecvSecret, err := hex.DecodeString(params[2]) - require.Nil(t, err) + require.NoError(t, err) expectedSendSecret, err := hex.DecodeString(params[3]) - require.Nil(t, err) + require.NoError(t, err) recvSecret, sendSecret := deriveSecrets(randSecret, locIsLeast) require.Equal(t, expectedRecvSecret, (*recvSecret)[:], "Recv Secrets aren't equal") @@ -310,7 +308,7 @@ func readLots(t *testing.T, wg *sync.WaitGroup, conn io.Reader, n int) { // Creates the data for a test vector file. // The file format is: // Hex(diffie_hellman_secret), loc_is_least, Hex(recvSecret), Hex(sendSecret), Hex(challenge) -func createGoldenTestVectors(t *testing.T) string { +func createGoldenTestVectors() string { data := "" for i := 0; i < 32; i++ { randSecretVector := tmrand.Bytes(32) diff --git a/internal/p2p/conn_set.go b/internal/p2p/conn_set.go deleted file mode 100644 index 987d9f968a..0000000000 --- a/internal/p2p/conn_set.go +++ /dev/null @@ -1,82 +0,0 @@ -package p2p - -import ( - "net" - - tmsync "github.com/tendermint/tendermint/internal/libs/sync" -) - -// ConnSet is a lookup table for connections and all their ips. -type ConnSet interface { - Has(net.Conn) bool - HasIP(net.IP) bool - Set(net.Conn, []net.IP) - Remove(net.Conn) - RemoveAddr(net.Addr) -} - -type connSetItem struct { - conn net.Conn - ips []net.IP -} - -type connSet struct { - tmsync.RWMutex - - conns map[string]connSetItem -} - -// NewConnSet returns a ConnSet implementation. -func NewConnSet() ConnSet { - return &connSet{ - conns: map[string]connSetItem{}, - } -} - -func (cs *connSet) Has(c net.Conn) bool { - cs.RLock() - defer cs.RUnlock() - - _, ok := cs.conns[c.RemoteAddr().String()] - - return ok -} - -func (cs *connSet) HasIP(ip net.IP) bool { - cs.RLock() - defer cs.RUnlock() - - for _, c := range cs.conns { - for _, known := range c.ips { - if known.Equal(ip) { - return true - } - } - } - - return false -} - -func (cs *connSet) Remove(c net.Conn) { - cs.Lock() - defer cs.Unlock() - - delete(cs.conns, c.RemoteAddr().String()) -} - -func (cs *connSet) RemoveAddr(addr net.Addr) { - cs.Lock() - defer cs.Unlock() - - delete(cs.conns, addr.String()) -} - -func (cs *connSet) Set(c net.Conn, ips []net.IP) { - cs.Lock() - defer cs.Unlock() - - cs.conns[c.RemoteAddr().String()] = connSetItem{ - conn: c, - ips: ips, - } -} diff --git a/internal/p2p/conn_tracker.go b/internal/p2p/conn_tracker.go index 09673c0937..54f9c89801 100644 --- a/internal/p2p/conn_tracker.go +++ b/internal/p2p/conn_tracker.go @@ -26,6 +26,7 @@ func newConnTracker(max uint, window time.Duration) connectionTracker { cache: make(map[string]uint), lastConnect: make(map[string]time.Time), max: max, + window: window, } } @@ -43,7 +44,7 @@ func (rat *connTrackerImpl) AddConn(addr net.IP) error { if num := rat.cache[address]; num >= rat.max { return fmt.Errorf("%q has %d connections [max=%d]", address, num, rat.max) } else if num == 0 { - // if there is already at least connection, check to + // if there is already at least one connection, check to // see if it was established before within the window, // and error if so. 
if last := rat.lastConnect[address]; time.Since(last) < rat.window { diff --git a/internal/p2p/conn_tracker_test.go b/internal/p2p/conn_tracker_test.go index 66656e114a..daa3351f24 100644 --- a/internal/p2p/conn_tracker_test.go +++ b/internal/p2p/conn_tracker_test.go @@ -70,4 +70,15 @@ func TestConnTracker(t *testing.T) { } require.Equal(t, 10, ct.Len()) }) + t.Run("Window", func(t *testing.T) { + const window = 100 * time.Millisecond + ct := newConnTracker(10, window) + ip := randLocalIPv4() + require.NoError(t, ct.AddConn(ip)) + ct.RemoveConn(ip) + require.Error(t, ct.AddConn(ip)) + time.Sleep(window) + require.NoError(t, ct.AddConn(ip)) + }) + } diff --git a/internal/p2p/errors.go b/internal/p2p/errors.go index 648f2cb3a2..d4df287926 100644 --- a/internal/p2p/errors.go +++ b/internal/p2p/errors.go @@ -17,7 +17,7 @@ func (e ErrFilterTimeout) Error() string { // ErrRejected indicates that a Peer was rejected carrying additional // information as to the reason. type ErrRejected struct { - addr NetAddress + addr NodeAddress conn net.Conn err error id types.NodeID @@ -30,7 +30,7 @@ type ErrRejected struct { } // Addr returns the NetAddress for the rejected Peer. -func (e ErrRejected) Addr() NetAddress { +func (e ErrRejected) Addr() NodeAddress { return e.addr } @@ -120,15 +120,15 @@ func (e ErrSwitchDuplicatePeerIP) Error() string { // ErrSwitchConnectToSelf to be raised when trying to connect to itself. type ErrSwitchConnectToSelf struct { - Addr *NetAddress + Addr *NodeAddress } func (e ErrSwitchConnectToSelf) Error() string { - return fmt.Sprintf("connect to self: %v", e.Addr) + return fmt.Sprintf("connect to self: %s", e.Addr) } type ErrSwitchAuthenticationFailure struct { - Dialed *NetAddress + Dialed *NodeAddress Got types.NodeID } diff --git a/internal/p2p/metrics_test.go b/internal/p2p/metrics_test.go index 53b3c47bd8..839786d919 100644 --- a/internal/p2p/metrics_test.go +++ b/internal/p2p/metrics_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/tendermint/tendermint/proto/tendermint/p2p" ) diff --git a/internal/p2p/mock/peer.go b/internal/p2p/mock/peer.go deleted file mode 100644 index cede517685..0000000000 --- a/internal/p2p/mock/peer.go +++ /dev/null @@ -1,70 +0,0 @@ -package mock - -import ( - "net" - - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/p2p/conn" - "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/types" -) - -type Peer struct { - *service.BaseService - ip net.IP - id types.NodeID - addr *p2p.NetAddress - kv map[string]interface{} - Outbound, Persistent bool -} - -// NewPeer creates and starts a new mock peer. If the ip -// is nil, random routable address is used. 
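The new Window subtest above pins down the behavior the `window: window` fix enables: once the last connection from an address is removed, a reconnect is refused until the window has elapsed. A hedged, standalone restatement of that check (the map and parameter names stand in for the tracker's internals):

import (
	"fmt"
	"time"
)

// checkWindow refuses a reconnect from addr if its previous connection
// was established less than window ago.
func checkWindow(lastConnect map[string]time.Time, addr string, window time.Duration) error {
	if last, ok := lastConnect[addr]; ok && time.Since(last) < window {
		return fmt.Errorf("%q reconnected %s ago; window is %s", addr, time.Since(last), window)
	}
	return nil
}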
-func NewPeer(ip net.IP) *Peer { - var netAddr *p2p.NetAddress - if ip == nil { - _, netAddr = p2p.CreateRoutableAddr() - } else { - netAddr = types.NewNetAddressIPPort(ip, 26656) - } - nodeKey := types.GenNodeKey() - netAddr.ID = nodeKey.ID - mp := &Peer{ - ip: ip, - id: nodeKey.ID, - addr: netAddr, - kv: make(map[string]interface{}), - } - mp.BaseService = service.NewBaseService(nil, "MockPeer", mp) - if err := mp.Start(); err != nil { - panic(err) - } - return mp -} - -func (mp *Peer) FlushStop() { mp.Stop() } //nolint:errcheck //ignore error -func (mp *Peer) TrySend(chID byte, msgBytes []byte) bool { return true } -func (mp *Peer) Send(chID byte, msgBytes []byte) bool { return true } -func (mp *Peer) NodeInfo() types.NodeInfo { - return types.NodeInfo{ - NodeID: mp.addr.ID, - ListenAddr: mp.addr.DialString(), - } -} -func (mp *Peer) Status() conn.ConnectionStatus { return conn.ConnectionStatus{} } -func (mp *Peer) ID() types.NodeID { return mp.id } -func (mp *Peer) IsOutbound() bool { return mp.Outbound } -func (mp *Peer) IsPersistent() bool { return mp.Persistent } -func (mp *Peer) Get(key string) interface{} { - if value, ok := mp.kv[key]; ok { - return value - } - return nil -} -func (mp *Peer) Set(key string, value interface{}) { - mp.kv[key] = value -} -func (mp *Peer) RemoteIP() net.IP { return mp.ip } -func (mp *Peer) SocketAddr() *p2p.NetAddress { return mp.addr } -func (mp *Peer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.ip, Port: 8800} } -func (mp *Peer) CloseConn() error { return nil } diff --git a/internal/p2p/mock/reactor.go b/internal/p2p/mock/reactor.go deleted file mode 100644 index 8f29115528..0000000000 --- a/internal/p2p/mock/reactor.go +++ /dev/null @@ -1,25 +0,0 @@ -package mock - -import ( - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/p2p/conn" - "github.com/tendermint/tendermint/libs/log" -) - -type Reactor struct { - p2p.BaseReactor - - Channels []*conn.ChannelDescriptor -} - -func NewReactor() *Reactor { - r := &Reactor{} - r.BaseReactor = *p2p.NewBaseReactor("Mock-PEX", r) - r.SetLogger(log.TestingLogger()) - return r -} - -func (r *Reactor) GetChannels() []*conn.ChannelDescriptor { return r.Channels } -func (r *Reactor) AddPeer(peer p2p.Peer) {} -func (r *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {} -func (r *Reactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {} diff --git a/internal/p2p/mocks/connection.go b/internal/p2p/mocks/connection.go index 6c61741172..73b6cfc3b3 100644 --- a/internal/p2p/mocks/connection.go +++ b/internal/p2p/mocks/connection.go @@ -13,6 +13,8 @@ import ( p2p "github.com/tendermint/tendermint/internal/p2p" + testing "testing" + types "github.com/tendermint/tendermint/types" ) @@ -35,20 +37,6 @@ func (_m *Connection) Close() error { return r0 } -// FlushClose provides a mock function with given fields: -func (_m *Connection) FlushClose() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - // Handshake provides a mock function with given fields: _a0, _a1, _a2 func (_m *Connection) Handshake(_a0 context.Context, _a1 types.NodeInfo, _a2 crypto.PrivKey) (types.NodeInfo, crypto.PubKey, error) { ret := _m.Called(_a0, _a1, _a2) @@ -93,20 +81,20 @@ func (_m *Connection) LocalEndpoint() p2p.Endpoint { return r0 } -// ReceiveMessage provides a mock function with given fields: -func (_m *Connection) ReceiveMessage() (p2p.ChannelID, []byte, error) { - ret := 
_m.Called() +// ReceiveMessage provides a mock function with given fields: _a0 +func (_m *Connection) ReceiveMessage(_a0 context.Context) (conn.ChannelID, []byte, error) { + ret := _m.Called(_a0) - var r0 p2p.ChannelID - if rf, ok := ret.Get(0).(func() p2p.ChannelID); ok { - r0 = rf() + var r0 conn.ChannelID + if rf, ok := ret.Get(0).(func(context.Context) conn.ChannelID); ok { + r0 = rf(_a0) } else { - r0 = ret.Get(0).(p2p.ChannelID) + r0 = ret.Get(0).(conn.ChannelID) } var r1 []byte - if rf, ok := ret.Get(1).(func() []byte); ok { - r1 = rf() + if rf, ok := ret.Get(1).(func(context.Context) []byte); ok { + r1 = rf(_a0) } else { if ret.Get(1) != nil { r1 = ret.Get(1).([]byte) @@ -114,8 +102,8 @@ func (_m *Connection) ReceiveMessage() (p2p.ChannelID, []byte, error) { } var r2 error - if rf, ok := ret.Get(2).(func() error); ok { - r2 = rf() + if rf, ok := ret.Get(2).(func(context.Context) error); ok { + r2 = rf(_a0) } else { r2 = ret.Error(2) } @@ -137,36 +125,15 @@ func (_m *Connection) RemoteEndpoint() p2p.Endpoint { return r0 } -// SendMessage provides a mock function with given fields: _a0, _a1 -func (_m *Connection) SendMessage(_a0 p2p.ChannelID, _a1 []byte) (bool, error) { - ret := _m.Called(_a0, _a1) - - var r0 bool - if rf, ok := ret.Get(0).(func(p2p.ChannelID, []byte) bool); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Get(0).(bool) - } - - var r1 error - if rf, ok := ret.Get(1).(func(p2p.ChannelID, []byte) error); ok { - r1 = rf(_a0, _a1) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Status provides a mock function with given fields: -func (_m *Connection) Status() conn.ConnectionStatus { - ret := _m.Called() +// SendMessage provides a mock function with given fields: _a0, _a1, _a2 +func (_m *Connection) SendMessage(_a0 context.Context, _a1 conn.ChannelID, _a2 []byte) error { + ret := _m.Called(_a0, _a1, _a2) - var r0 conn.ConnectionStatus - if rf, ok := ret.Get(0).(func() conn.ConnectionStatus); ok { - r0 = rf() + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, conn.ChannelID, []byte) error); ok { + r0 = rf(_a0, _a1, _a2) } else { - r0 = ret.Get(0).(conn.ConnectionStatus) + r0 = ret.Error(0) } return r0 @@ -186,23 +153,12 @@ func (_m *Connection) String() string { return r0 } -// TrySendMessage provides a mock function with given fields: _a0, _a1 -func (_m *Connection) TrySendMessage(_a0 p2p.ChannelID, _a1 []byte) (bool, error) { - ret := _m.Called(_a0, _a1) - - var r0 bool - if rf, ok := ret.Get(0).(func(p2p.ChannelID, []byte) bool); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Get(0).(bool) - } +// NewConnection creates a new instance of Connection. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. +func NewConnection(t testing.TB) *Connection { + mock := &Connection{} + mock.Mock.Test(t) - var r1 error - if rf, ok := ret.Get(1).(func(p2p.ChannelID, []byte) error); ok { - r1 = rf(_a0, _a1) - } else { - r1 = ret.Error(1) - } + t.Cleanup(func() { mock.AssertExpectations(t) }) - return r0, r1 + return mock } diff --git a/internal/p2p/mocks/peer.go b/internal/p2p/mocks/peer.go deleted file mode 100644 index b905c1156b..0000000000 --- a/internal/p2p/mocks/peer.go +++ /dev/null @@ -1,334 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. 
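The regenerated Connection mock above also gains a NewConnection constructor that registers AssertExpectations with t.Cleanup, so forgotten expectations fail the test on their own. A hedged usage sketch (the channel ID and payload are arbitrary):

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/internal/p2p/conn"
	"github.com/tendermint/tendermint/internal/p2p/mocks"
)

func TestSendMessageMock(t *testing.T) {
	m := mocks.NewConnection(t) // cleanup asserts all expectations were met
	m.On("SendMessage", mock.Anything, conn.ChannelID(0x01), []byte("hi")).Return(nil)
	require.NoError(t, m.SendMessage(context.Background(), conn.ChannelID(0x01), []byte("hi")))
}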
- -package mocks - -import ( - conn "github.com/tendermint/tendermint/internal/p2p/conn" - log "github.com/tendermint/tendermint/libs/log" - - mock "github.com/stretchr/testify/mock" - - net "net" - - types "github.com/tendermint/tendermint/types" -) - -// Peer is an autogenerated mock type for the Peer type -type Peer struct { - mock.Mock -} - -// CloseConn provides a mock function with given fields: -func (_m *Peer) CloseConn() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// FlushStop provides a mock function with given fields: -func (_m *Peer) FlushStop() { - _m.Called() -} - -// Get provides a mock function with given fields: _a0 -func (_m *Peer) Get(_a0 string) interface{} { - ret := _m.Called(_a0) - - var r0 interface{} - if rf, ok := ret.Get(0).(func(string) interface{}); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(interface{}) - } - } - - return r0 -} - -// ID provides a mock function with given fields: -func (_m *Peer) ID() types.NodeID { - ret := _m.Called() - - var r0 types.NodeID - if rf, ok := ret.Get(0).(func() types.NodeID); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(types.NodeID) - } - - return r0 -} - -// IsOutbound provides a mock function with given fields: -func (_m *Peer) IsOutbound() bool { - ret := _m.Called() - - var r0 bool - if rf, ok := ret.Get(0).(func() bool); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(bool) - } - - return r0 -} - -// IsPersistent provides a mock function with given fields: -func (_m *Peer) IsPersistent() bool { - ret := _m.Called() - - var r0 bool - if rf, ok := ret.Get(0).(func() bool); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(bool) - } - - return r0 -} - -// IsRunning provides a mock function with given fields: -func (_m *Peer) IsRunning() bool { - ret := _m.Called() - - var r0 bool - if rf, ok := ret.Get(0).(func() bool); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(bool) - } - - return r0 -} - -// NodeInfo provides a mock function with given fields: -func (_m *Peer) NodeInfo() types.NodeInfo { - ret := _m.Called() - - var r0 types.NodeInfo - if rf, ok := ret.Get(0).(func() types.NodeInfo); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(types.NodeInfo) - } - - return r0 -} - -// OnReset provides a mock function with given fields: -func (_m *Peer) OnReset() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// OnStart provides a mock function with given fields: -func (_m *Peer) OnStart() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// OnStop provides a mock function with given fields: -func (_m *Peer) OnStop() { - _m.Called() -} - -// Quit provides a mock function with given fields: -func (_m *Peer) Quit() <-chan struct{} { - ret := _m.Called() - - var r0 <-chan struct{} - if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(<-chan struct{}) - } - } - - return r0 -} - -// RemoteAddr provides a mock function with given fields: -func (_m *Peer) RemoteAddr() net.Addr { - ret := _m.Called() - - var r0 net.Addr - if rf, ok := ret.Get(0).(func() net.Addr); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(net.Addr) - } - } - - return r0 -} - -// RemoteIP provides a mock function 
with given fields: -func (_m *Peer) RemoteIP() net.IP { - ret := _m.Called() - - var r0 net.IP - if rf, ok := ret.Get(0).(func() net.IP); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(net.IP) - } - } - - return r0 -} - -// Reset provides a mock function with given fields: -func (_m *Peer) Reset() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Send provides a mock function with given fields: _a0, _a1 -func (_m *Peer) Send(_a0 byte, _a1 []byte) bool { - ret := _m.Called(_a0, _a1) - - var r0 bool - if rf, ok := ret.Get(0).(func(byte, []byte) bool); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Get(0).(bool) - } - - return r0 -} - -// Set provides a mock function with given fields: _a0, _a1 -func (_m *Peer) Set(_a0 string, _a1 interface{}) { - _m.Called(_a0, _a1) -} - -// SetLogger provides a mock function with given fields: _a0 -func (_m *Peer) SetLogger(_a0 log.Logger) { - _m.Called(_a0) -} - -// SocketAddr provides a mock function with given fields: -func (_m *Peer) SocketAddr() *types.NetAddress { - ret := _m.Called() - - var r0 *types.NetAddress - if rf, ok := ret.Get(0).(func() *types.NetAddress); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.NetAddress) - } - } - - return r0 -} - -// Start provides a mock function with given fields: -func (_m *Peer) Start() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Status provides a mock function with given fields: -func (_m *Peer) Status() conn.ConnectionStatus { - ret := _m.Called() - - var r0 conn.ConnectionStatus - if rf, ok := ret.Get(0).(func() conn.ConnectionStatus); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(conn.ConnectionStatus) - } - - return r0 -} - -// Stop provides a mock function with given fields: -func (_m *Peer) Stop() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// String provides a mock function with given fields: -func (_m *Peer) String() string { - ret := _m.Called() - - var r0 string - if rf, ok := ret.Get(0).(func() string); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(string) - } - - return r0 -} - -// TrySend provides a mock function with given fields: _a0, _a1 -func (_m *Peer) TrySend(_a0 byte, _a1 []byte) bool { - ret := _m.Called(_a0, _a1) - - var r0 bool - if rf, ok := ret.Get(0).(func(byte, []byte) bool); ok { - r0 = rf(_a0, _a1) - } else { - r0 = ret.Get(0).(bool) - } - - return r0 -} - -// Wait provides a mock function with given fields: -func (_m *Peer) Wait() { - _m.Called() -} diff --git a/internal/p2p/mocks/transport.go b/internal/p2p/mocks/transport.go index 82bd670cbd..34ebec20e7 100644 --- a/internal/p2p/mocks/transport.go +++ b/internal/p2p/mocks/transport.go @@ -5,8 +5,13 @@ package mocks import ( context "context" + conn "github.com/tendermint/tendermint/internal/p2p/conn" + mock "github.com/stretchr/testify/mock" + p2p "github.com/tendermint/tendermint/internal/p2p" + + testing "testing" ) // Transport is an autogenerated mock type for the Transport type @@ -14,13 +19,13 @@ type Transport struct { mock.Mock } -// Accept provides a mock function with given fields: -func (_m *Transport) Accept() (p2p.Connection, error) { - ret := _m.Called() +// Accept provides a mock function with given fields: _a0 
+func (_m *Transport) Accept(_a0 context.Context) (p2p.Connection, error) { + ret := _m.Called(_a0) var r0 p2p.Connection - if rf, ok := ret.Get(0).(func() p2p.Connection); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(context.Context) p2p.Connection); ok { + r0 = rf(_a0) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(p2p.Connection) @@ -28,8 +33,8 @@ func (_m *Transport) Accept() (p2p.Connection, error) { } var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) } else { r1 = ret.Error(1) } @@ -37,6 +42,11 @@ func (_m *Transport) Accept() (p2p.Connection, error) { return r0, r1 } +// AddChannelDescriptors provides a mock function with given fields: _a0 +func (_m *Transport) AddChannelDescriptors(_a0 []*conn.ChannelDescriptor) { + _m.Called(_a0) +} + // Close provides a mock function with given fields: func (_m *Transport) Close() error { ret := _m.Called() @@ -52,11 +62,11 @@ func (_m *Transport) Close() error { } // Dial provides a mock function with given fields: _a0, _a1 -func (_m *Transport) Dial(_a0 context.Context, _a1 p2p.Endpoint) (p2p.Connection, error) { +func (_m *Transport) Dial(_a0 context.Context, _a1 *p2p.Endpoint) (p2p.Connection, error) { ret := _m.Called(_a0, _a1) var r0 p2p.Connection - if rf, ok := ret.Get(0).(func(context.Context, p2p.Endpoint) p2p.Connection); ok { + if rf, ok := ret.Get(0).(func(context.Context, *p2p.Endpoint) p2p.Connection); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { @@ -65,7 +75,7 @@ func (_m *Transport) Dial(_a0 context.Context, _a1 p2p.Endpoint) (p2p.Connection } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, p2p.Endpoint) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, *p2p.Endpoint) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) @@ -74,19 +84,40 @@ func (_m *Transport) Dial(_a0 context.Context, _a1 p2p.Endpoint) (p2p.Connection return r0, r1 } -// Endpoints provides a mock function with given fields: -func (_m *Transport) Endpoints() []p2p.Endpoint { +// Endpoint provides a mock function with given fields: +func (_m *Transport) Endpoint() (*p2p.Endpoint, error) { ret := _m.Called() - var r0 []p2p.Endpoint - if rf, ok := ret.Get(0).(func() []p2p.Endpoint); ok { + var r0 *p2p.Endpoint + if rf, ok := ret.Get(0).(func() *p2p.Endpoint); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]p2p.Endpoint) + r0 = ret.Get(0).(*p2p.Endpoint) } } + var r1 error + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Listen provides a mock function with given fields: _a0 +func (_m *Transport) Listen(_a0 *p2p.Endpoint) error { + ret := _m.Called(_a0) + + var r0 error + if rf, ok := ret.Get(0).(func(*p2p.Endpoint) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + return r0 } @@ -119,3 +150,13 @@ func (_m *Transport) String() string { return r0 } + +// NewTransport creates a new instance of Transport. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. 
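Taken together, the Transport mock changes sketch the reworked transport lifecycle: Listen binds an endpoint, Accept blocks on a context, and Dial takes a *p2p.Endpoint. A hedged accept-loop sketch against that surface (handleConn is a hypothetical placeholder, not part of the diff):

import (
	"context"

	"github.com/tendermint/tendermint/internal/p2p"
)

// serve listens on ep and accepts connections until ctx ends or
// Accept fails; each connection is handled on its own goroutine.
func serve(ctx context.Context, tr p2p.Transport, ep *p2p.Endpoint) error {
	if err := tr.Listen(ep); err != nil {
		return err
	}
	for {
		c, err := tr.Accept(ctx)
		if err != nil {
			return err
		}
		go handleConn(ctx, c) // hypothetical handler
	}
}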
+func NewTransport(t testing.TB) *Transport { + mock := &Transport{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/internal/p2p/netaddress.go b/internal/p2p/netaddress.go deleted file mode 100644 index 6fce3a7696..0000000000 --- a/internal/p2p/netaddress.go +++ /dev/null @@ -1,11 +0,0 @@ -// Modified for Tendermint -// Originally Copyright (c) 2013-2014 Conformal Systems LLC. -// https://github.com/conformal/btcd/blob/master/LICENSE - -package p2p - -import ( - "github.com/tendermint/tendermint/types" -) - -type NetAddress = types.NetAddress diff --git a/internal/p2p/p2p_test.go b/internal/p2p/p2p_test.go index 6e524d492a..d8657b774c 100644 --- a/internal/p2p/p2p_test.go +++ b/internal/p2p/p2p_test.go @@ -1,25 +1,23 @@ package p2p_test import ( - "context" - "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/internal/p2p" + "github.com/tendermint/tendermint/internal/p2p/p2ptest" "github.com/tendermint/tendermint/types" ) // Common setup for P2P tests. var ( - ctx = context.Background() chID = p2p.ChannelID(1) - chDesc = p2p.ChannelDescriptor{ - ID: byte(chID), + chDesc = &p2p.ChannelDescriptor{ + ID: chID, + MessageType: &p2ptest.Message{}, Priority: 5, SendQueueCapacity: 10, RecvMessageCapacity: 10, - MaxSendBytes: 1000, } selfKey crypto.PrivKey = ed25519.GenPrivKeyFromSecret([]byte{0xf9, 0x1b, 0x08, 0xaa, 0x38, 0xee, 0x34, 0xdd}) diff --git a/internal/p2p/p2ptest/network.go b/internal/p2p/p2ptest/network.go index 4c852341d5..fc4657596d 100644 --- a/internal/p2p/p2ptest/network.go +++ b/internal/p2p/p2ptest/network.go @@ -7,7 +7,6 @@ import ( "testing" "time" - "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" @@ -26,6 +25,7 @@ type Network struct { logger log.Logger memoryNetwork *p2p.MemoryNetwork + cancel context.CancelFunc } // NetworkOptions is an argument structure to parameterize the @@ -50,20 +50,21 @@ func (opts *NetworkOptions) setDefaults() { // MakeNetwork creates a test network with the given number of nodes and // connects them to each other. -func MakeNetwork(t *testing.T, opts NetworkOptions) *Network { +func MakeNetwork(ctx context.Context, t *testing.T, opts NetworkOptions) *Network { opts.setDefaults() - logger := log.TestingLogger() + logger := log.NewNopLogger() network := &Network{ Nodes: map[types.NodeID]*Node{}, logger: logger, memoryNetwork: p2p.NewMemoryNetwork(logger, opts.BufferSize), } + for i := 0; i < opts.NumNodes; i++ { var proTxHash crypto.ProTxHash if i < len(opts.ProTxHashes) { proTxHash = opts.ProTxHashes[i] } - node := network.MakeNode(t, proTxHash, opts.NodeOpts, network.logger.With("validator", i)) + node := network.MakeNode(ctx, t, proTxHash, opts.NodeOpts) network.Nodes[node.NodeID] = node } @@ -73,15 +74,19 @@ func MakeNetwork(t *testing.T, opts NetworkOptions) *Network { // Start starts the network by setting up a list of node addresses to dial in // addition to creating a peer update subscription for each node. Finally, all // nodes are connected to each other. -func (n *Network) Start(t *testing.T) { +func (n *Network) Start(ctx context.Context, t *testing.T) { + ctx, n.cancel = context.WithCancel(ctx) + t.Cleanup(n.cancel) + // Set up a list of node addresses to dial, and a peer update subscription // for each node. 
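With MakeNetwork and Start now context-driven, a test's network lifetime hangs off a single context: cancel it (or let the t.Cleanup-registered cancel fire) and every node shuts down. A hedged usage sketch built from the signatures above:

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

network := p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: 3})
network.Start(ctx, t) // dials every node pair and waits for peer-up updates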
- dialQueue := []p2p.NodeAddress{} + dialQueue := make([]p2p.NodeAddress, 0, len(n.Nodes)) subs := map[types.NodeID]*p2p.PeerUpdates{} + subctx, subcancel := context.WithCancel(ctx) + defer subcancel() for _, node := range n.Nodes { dialQueue = append(dialQueue, node.NodeAddress) - subs[node.NodeID] = node.PeerManager.Subscribe() - defer subs[node.NodeID].Close() + subs[node.NodeID] = node.PeerManager.Subscribe(subctx) } // For each node, dial the nodes that it still doesn't have a connection to @@ -94,29 +99,30 @@ func (n *Network) Start(t *testing.T) { for _, targetAddress := range dialQueue[i+1:] { // nodes = 1) { + require.NoError(t, ctx.Err(), "timed out waiting for message %v", expect) } } // RequireReceiveUnordered requires that the given envelopes are all received on // the channel, ignoring order. -func RequireReceiveUnordered(t *testing.T, channel *p2p.Channel, expect []p2p.Envelope) { - timer := time.NewTimer(time.Second) // not time.After due to goroutine leaks - defer timer.Stop() +func RequireReceiveUnordered(ctx context.Context, t *testing.T, channel *p2p.Channel, expect []*p2p.Envelope) { + ctx, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() - actual := []p2p.Envelope{} - for { - select { - case e, ok := <-channel.In: - require.True(t, ok, "channel %v is closed", channel.ID) - actual = append(actual, e) - if len(actual) == len(expect) { - require.ElementsMatch(t, expect, actual) - return - } - - case <-channel.Done(): - require.Fail(t, "channel %v is closed", channel.ID) + actual := []*p2p.Envelope{} - case <-timer.C: - require.ElementsMatch(t, expect, actual) + iter := channel.Receive(ctx) + for iter.Next(ctx) { + actual = append(actual, iter.Envelope()) + if len(actual) == len(expect) { + require.ElementsMatch(t, expect, actual, "len=%d", len(actual)) return } } + if errors.Is(ctx.Err(), context.DeadlineExceeded) { + require.ElementsMatch(t, expect, actual) + } } // RequireSend requires that the given envelope is sent on the channel. -func RequireSend(t *testing.T, channel *p2p.Channel, envelope p2p.Envelope) { - timer := time.NewTimer(time.Second) // not time.After due to goroutine leaks - defer timer.Stop() - select { - case channel.Out <- envelope: - case <-timer.C: - require.Fail(t, "timed out sending message", "%v on channel %v", envelope, channel.ID) +func RequireSend(ctx context.Context, t *testing.T, channel *p2p.Channel, envelope p2p.Envelope) { + tctx, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() + + err := channel.Send(tctx, envelope) + switch { + case errors.Is(err, context.DeadlineExceeded): + require.Fail(t, "timed out sending message to %q", envelope.To) + default: + require.NoError(t, err, "unexpected error") } } // RequireSendReceive requires that a given Protobuf message is sent to the // given peer, and then that the given response is received back. func RequireSendReceive( + ctx context.Context, t *testing.T, channel *p2p.Channel, peerID types.NodeID, send proto.Message, receive proto.Message, ) { - RequireSend(t, channel, p2p.Envelope{To: peerID, Message: send}) - RequireReceive(t, channel, p2p.Envelope{From: peerID, Message: send}) + RequireSend(ctx, t, channel, p2p.Envelope{To: peerID, Message: send}) + RequireReceive(ctx, t, channel, p2p.Envelope{From: peerID, Message: send}) } // RequireNoUpdates requires that a PeerUpdates subscription is empty. 
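The receive helpers above switch from reading a raw In channel to the iterator API: Receive(ctx) yields an iterator whose Next blocks until an envelope arrives or the context ends. The core consumption loop, restated on its own:

iter := channel.Receive(ctx)
for iter.Next(ctx) {
	e := iter.Envelope()
	// inspect e.From and e.Message here
}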
-func RequireNoUpdates(t *testing.T, peerUpdates *p2p.PeerUpdates) { +func RequireNoUpdates(ctx context.Context, t *testing.T, peerUpdates *p2p.PeerUpdates) { t.Helper() select { case update := <-peerUpdates.Updates(): - require.Fail(t, "unexpected peer updates", "got %v", update) + if ctx.Err() == nil { + require.Fail(t, "unexpected peer updates", "got %v", update) + } + case <-ctx.Done(): default: } } // RequireError requires that the given peer error is submitted for a peer. -func RequireError(t *testing.T, channel *p2p.Channel, peerError p2p.PeerError) { - timer := time.NewTimer(time.Second) // not time.After due to goroutine leaks - defer timer.Stop() - select { - case channel.Error <- peerError: - case <-timer.C: - require.Fail(t, "timed out reporting error", "%v on %v", peerError, channel.ID) +func RequireError(ctx context.Context, t *testing.T, channel *p2p.Channel, peerError p2p.PeerError) { + tctx, tcancel := context.WithTimeout(ctx, time.Second) + defer tcancel() + + err := channel.SendError(tctx, peerError) + switch { + case errors.Is(err, context.DeadlineExceeded): + require.Fail(t, "timed out reporting error", "%v for %q", peerError, channel.String()) + default: + require.NoError(t, err, "unexpected error") } } @@ -122,8 +138,6 @@ func RequireUpdate(t *testing.T, peerUpdates *p2p.PeerUpdates, expect p2p.PeerUp case update := <-peerUpdates.Updates(): require.Equal(t, expect.NodeID, update.NodeID, "node id did not match") require.Equal(t, expect.Status, update.Status, "statuses did not match") - case <-peerUpdates.Done(): - require.Fail(t, "peer updates subscription is closed") case <-timer.C: require.Fail(t, "timed out waiting for peer update", "expected %v", expect) } @@ -149,9 +163,6 @@ func RequireUpdates(t *testing.T, peerUpdates *p2p.PeerUpdates, expect []p2p.Pee return } - case <-peerUpdates.Done(): - require.Fail(t, "peer updates subscription is closed") - case <-timer.C: require.Equal(t, expect, actual, "did not receive expected peer updates") return diff --git a/internal/p2p/p2ptest/util.go b/internal/p2p/p2ptest/util.go index 544e937bbe..e0d18caaee 100644 --- a/internal/p2p/p2ptest/util.go +++ b/internal/p2p/p2ptest/util.go @@ -2,6 +2,7 @@ package p2ptest import ( gogotypes "github.com/gogo/protobuf/types" + "github.com/tendermint/tendermint/types" ) diff --git a/internal/p2p/peer.go b/internal/p2p/peer.go deleted file mode 100644 index 57c4c95a99..0000000000 --- a/internal/p2p/peer.go +++ /dev/null @@ -1,383 +0,0 @@ -package p2p - -import ( - "fmt" - "io" - "net" - "runtime/debug" - "time" - - tmconn "github.com/tendermint/tendermint/internal/p2p/conn" - "github.com/tendermint/tendermint/libs/cmap" - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/types" -) - -//go:generate ../../scripts/mockery_generate.sh Peer - -const metricsTickerDuration = 10 * time.Second - -// Peer is an interface representing a peer connected on a reactor. 
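RequireSend and RequireError above share one shape: run the operation under a one-second context.WithTimeout and convert DeadlineExceeded into a test failure, and any other error into require.NoError. A hedged, generic restatement of that shape:

import (
	"context"
	"errors"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

// requireWithin fails the test if op exceeds d or returns any other error.
func requireWithin(ctx context.Context, t *testing.T, d time.Duration, op func(context.Context) error) {
	t.Helper()
	tctx, cancel := context.WithTimeout(ctx, d)
	defer cancel()
	if err := op(tctx); errors.Is(err, context.DeadlineExceeded) {
		require.Fail(t, "timed out")
	} else {
		require.NoError(t, err)
	}
}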
-type Peer interface { - service.Service - FlushStop() - - ID() types.NodeID // peer's cryptographic ID - RemoteIP() net.IP // remote IP of the connection - RemoteAddr() net.Addr // remote address of the connection - - IsOutbound() bool // did we dial the peer - IsPersistent() bool // do we redial this peer when we disconnect - - CloseConn() error // close original connection - - NodeInfo() types.NodeInfo // peer's info - Status() tmconn.ConnectionStatus - SocketAddr() *NetAddress // actual address of the socket - - Send(byte, []byte) bool - TrySend(byte, []byte) bool - - Set(string, interface{}) - Get(string) interface{} -} - -//---------------------------------------------------------- - -// peerConn contains the raw connection and its config. -type peerConn struct { - outbound bool - persistent bool - conn Connection - ip net.IP // cached RemoteIP() -} - -func newPeerConn(outbound, persistent bool, conn Connection) peerConn { - return peerConn{ - outbound: outbound, - persistent: persistent, - conn: conn, - } -} - -// Return the IP from the connection RemoteAddr -func (pc peerConn) RemoteIP() net.IP { - if pc.ip == nil { - pc.ip = pc.conn.RemoteEndpoint().IP - } - return pc.ip -} - -// peer implements Peer. -// -// Before using a peer, you will need to perform a handshake on connection. -type peer struct { - service.BaseService - - // raw peerConn and the multiplex connection - peerConn - - // peer's node info and the channel it knows about - // channels = nodeInfo.Channels - // cached to avoid copying nodeInfo in hasChannel - nodeInfo types.NodeInfo - channels []byte - reactors map[byte]Reactor - onPeerError func(Peer, interface{}) - - // User data - Data *cmap.CMap - - metrics *Metrics - metricsTicker *time.Ticker -} - -type PeerOption func(*peer) - -func newPeer( - nodeInfo types.NodeInfo, - pc peerConn, - reactorsByCh map[byte]Reactor, - onPeerError func(Peer, interface{}), - options ...PeerOption, -) *peer { - p := &peer{ - peerConn: pc, - nodeInfo: nodeInfo, - channels: nodeInfo.Channels, - reactors: reactorsByCh, - onPeerError: onPeerError, - Data: cmap.NewCMap(), - metricsTicker: time.NewTicker(metricsTickerDuration), - metrics: NopMetrics(), - } - - p.BaseService = *service.NewBaseService(nil, "Peer", p) - for _, option := range options { - option(p) - } - - return p -} - -// onError calls the peer error callback. -func (p *peer) onError(err interface{}) { - p.onPeerError(p, err) -} - -// String representation. -func (p *peer) String() string { - if p.outbound { - return fmt.Sprintf("Peer{%v %v out}", p.conn, p.ID()) - } - - proTxHash := p.NodeInfo().GetProTxHash() - mConnString := fmt.Sprintf("%v", p.conn) - if mConnString == "MConn{pipe}" { - mConnString = "" - } else { - mConnString += " " - } - - if proTxHash != nil { - return fmt.Sprintf("Peer{%sproTxHash:%v}", mConnString, proTxHash.ShortString()) - } - return fmt.Sprintf("Peer{%speerId:%v}", mConnString, p.ID()) -} - -//--------------------------------------------------- -// Implements service.Service - -// SetLogger implements BaseService. -func (p *peer) SetLogger(l log.Logger) { - p.Logger = l -} - -// OnStart implements BaseService. -func (p *peer) OnStart() error { - if err := p.BaseService.OnStart(); err != nil { - return err - } - - go p.processMessages() - go p.metricsReporter() - - return nil -} - -// processMessages processes messages received from the connection. 
-func (p *peer) processMessages() { - defer func() { - if r := recover(); r != nil { - p.Logger.Error("peer message processing panic", "err", r, "stack", string(debug.Stack())) - p.onError(fmt.Errorf("panic during peer message processing: %v", r)) - } - }() - - for { - chID, msg, err := p.conn.ReceiveMessage() - if err != nil { - p.onError(err) - return - } - reactor, ok := p.reactors[byte(chID)] - if !ok { - p.onError(fmt.Errorf("unknown channel %v", chID)) - return - } - reactor.Receive(byte(chID), p, msg) - } -} - -// FlushStop mimics OnStop but additionally ensures that all successful -// .Send() calls will get flushed before closing the connection. -// NOTE: it is not safe to call this method more than once. -func (p *peer) FlushStop() { - p.metricsTicker.Stop() - p.BaseService.OnStop() - if err := p.conn.FlushClose(); err != nil { - p.Logger.Debug("error while stopping peer", "err", err) - } -} - -// OnStop implements BaseService. -func (p *peer) OnStop() { - p.metricsTicker.Stop() - p.BaseService.OnStop() - if err := p.conn.Close(); err != nil { - p.Logger.Debug("error while stopping peer", "err", err) - } -} - -//--------------------------------------------------- -// Implements Peer - -// ID returns the peer's ID - the hex encoded hash of its pubkey. -func (p *peer) ID() types.NodeID { - return p.nodeInfo.ID() -} - -// IsOutbound returns true if the connection is outbound, false otherwise. -func (p *peer) IsOutbound() bool { - return p.peerConn.outbound -} - -// IsPersistent returns true if the peer is persitent, false otherwise. -func (p *peer) IsPersistent() bool { - return p.peerConn.persistent -} - -// NodeInfo returns a copy of the peer's NodeInfo. -func (p *peer) NodeInfo() types.NodeInfo { - return p.nodeInfo -} - -// SocketAddr returns the address of the socket. -// For outbound peers, it's the address dialed (after DNS resolution). -// For inbound peers, it's the address returned by the underlying connection -// (not what's reported in the peer's NodeInfo). -func (p *peer) SocketAddr() *NetAddress { - endpoint := p.peerConn.conn.RemoteEndpoint() - return &NetAddress{ - ID: p.ID(), - IP: endpoint.IP, - Port: endpoint.Port, - } -} - -// Status returns the peer's ConnectionStatus. -func (p *peer) Status() tmconn.ConnectionStatus { - return p.conn.Status() -} - -// Send msg bytes to the channel identified by chID byte. Returns false if the -// send queue is full after timeout, specified by MConnection. -func (p *peer) Send(chID byte, msgBytes []byte) bool { - if !p.IsRunning() { - // see Switch#Broadcast, where we fetch the list of peers and loop over - // them - while we're looping, one peer may be removed and stopped. - return false - } else if !p.hasChannel(chID) { - return false - } - res, err := p.conn.SendMessage(ChannelID(chID), msgBytes) - if err == io.EOF { - return false - } else if err != nil { - p.onError(err) - return false - } - if res { - labels := []string{ - "peer_id", string(p.ID()), - "chID", fmt.Sprintf("%#x", chID), - "message_type", "bytes", - } - p.metrics.PeerSendBytesTotal.With(labels...).Add(float64(len(msgBytes))) - } - return res -} - -// TrySend msg bytes to the channel identified by chID byte. Immediately returns -// false if the send queue is full. 
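The removed processMessages above illustrates a pattern worth noting even as the legacy peer goes away: a long-running receive loop guards itself with defer/recover, turning a panic in a reactor callback into an onError report instead of a process crash. Distilled into a hedged, generic form:

import (
	"fmt"
	"runtime/debug"
)

// runLoop repeatedly calls step until it errors, reporting panics and
// errors through onError rather than crashing the process.
func runLoop(onError func(error), step func() error) {
	defer func() {
		if r := recover(); r != nil {
			onError(fmt.Errorf("panic in loop: %v\n%s", r, debug.Stack()))
		}
	}()
	for {
		if err := step(); err != nil {
			onError(err)
			return
		}
	}
}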
-func (p *peer) TrySend(chID byte, msgBytes []byte) bool { - if !p.IsRunning() { - return false - } else if !p.hasChannel(chID) { - return false - } - res, err := p.conn.TrySendMessage(ChannelID(chID), msgBytes) - if err == io.EOF { - return false - } else if err != nil { - p.onError(err) - return false - } - if res { - labels := []string{ - "peer_id", string(p.ID()), - "chID", fmt.Sprintf("%#x", chID), - } - p.metrics.PeerSendBytesTotal.With(labels...).Add(float64(len(msgBytes))) - } - return res -} - -// Get the data for a given key. -func (p *peer) Get(key string) interface{} { - return p.Data.Get(key) -} - -// Set sets the data for the given key. -func (p *peer) Set(key string, data interface{}) { - p.Data.Set(key, data) -} - -// hasChannel returns true if the peer reported -// knowing about the given chID. -func (p *peer) hasChannel(chID byte) bool { - for _, ch := range p.channels { - if ch == chID { - return true - } - } - // NOTE: probably will want to remove this - // but could be helpful while the feature is new - p.Logger.Debug( - "Unknown channel for peer", - "channel", - chID, - "channels", - p.channels, - ) - return false -} - -// CloseConn closes original connection. Used for cleaning up in cases where the peer had not been started at all. -func (p *peer) CloseConn() error { - return p.peerConn.conn.Close() -} - -//--------------------------------------------------- -// methods only used for testing -// TODO: can we remove these? - -// CloseConn closes the underlying connection -func (pc *peerConn) CloseConn() { - pc.conn.Close() -} - -// RemoteAddr returns peer's remote network address. -func (p *peer) RemoteAddr() net.Addr { - endpoint := p.conn.RemoteEndpoint() - return &net.TCPAddr{ - IP: endpoint.IP, - Port: int(endpoint.Port), - } -} - -//--------------------------------------------------- - -func PeerMetrics(metrics *Metrics) PeerOption { - return func(p *peer) { - p.metrics = metrics - } -} - -func (p *peer) metricsReporter() { - for { - select { - case <-p.metricsTicker.C: - status := p.conn.Status() - var sendQueueSize float64 - for _, chStatus := range status.Channels { - sendQueueSize += float64(chStatus.SendQueueSize) - } - - p.metrics.PeerPendingSendBytes.With("peer_id", string(p.ID())).Set(sendQueueSize) - case <-p.Quit(): - return - } - } -} diff --git a/internal/p2p/peer_set.go b/internal/p2p/peer_set.go deleted file mode 100644 index 8d4ad4939e..0000000000 --- a/internal/p2p/peer_set.go +++ /dev/null @@ -1,149 +0,0 @@ -package p2p - -import ( - "net" - - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/types" -) - -// IPeerSet has a (immutable) subset of the methods of PeerSet. -type IPeerSet interface { - Has(key types.NodeID) bool - HasIP(ip net.IP) bool - Get(key types.NodeID) Peer - List() []Peer - Size() int -} - -//----------------------------------------------------------------------------- - -// PeerSet is a special structure for keeping a table of peers. -// Iteration over the peers is super fast and thread-safe. -type PeerSet struct { - mtx tmsync.Mutex - lookup map[types.NodeID]*peerSetItem - list []Peer -} - -type peerSetItem struct { - peer Peer - index int -} - -// NewPeerSet creates a new peerSet with a list of initial capacity of 256 items. -func NewPeerSet() *PeerSet { - return &PeerSet{ - lookup: make(map[types.NodeID]*peerSetItem), - list: make([]Peer, 0, 256), - } -} - -// Add adds the peer to the PeerSet. -// It returns an error carrying the reason, if the peer is already present. 
-func (ps *PeerSet) Add(peer Peer) error { - ps.mtx.Lock() - defer ps.mtx.Unlock() - - if ps.lookup[peer.ID()] != nil { - return ErrSwitchDuplicatePeerID{peer.ID()} - } - - index := len(ps.list) - // Appending is safe even with other goroutines - // iterating over the ps.list slice. - ps.list = append(ps.list, peer) - ps.lookup[peer.ID()] = &peerSetItem{peer, index} - return nil -} - -// Has returns true if the set contains the peer referred to by this -// peerKey, otherwise false. -func (ps *PeerSet) Has(peerKey types.NodeID) bool { - ps.mtx.Lock() - _, ok := ps.lookup[peerKey] - ps.mtx.Unlock() - return ok -} - -// HasIP returns true if the set contains the peer referred to by this IP -// address, otherwise false. -func (ps *PeerSet) HasIP(peerIP net.IP) bool { - ps.mtx.Lock() - defer ps.mtx.Unlock() - - return ps.hasIP(peerIP) -} - -// hasIP does not acquire a lock so it can be used in public methods which -// already lock. -func (ps *PeerSet) hasIP(peerIP net.IP) bool { - for _, item := range ps.lookup { - if item.peer.RemoteIP().Equal(peerIP) { - return true - } - } - - return false -} - -// Get looks up a peer by the provided peerKey. Returns nil if peer is not -// found. -func (ps *PeerSet) Get(peerKey types.NodeID) Peer { - ps.mtx.Lock() - defer ps.mtx.Unlock() - item, ok := ps.lookup[peerKey] - if ok { - return item.peer - } - return nil -} - -// Remove discards peer by its Key, if the peer was previously memoized. -// Returns true if the peer was removed, and false if it was not found. -// in the set. -func (ps *PeerSet) Remove(peer Peer) bool { - ps.mtx.Lock() - defer ps.mtx.Unlock() - - item := ps.lookup[peer.ID()] - if item == nil { - return false - } - - index := item.index - // Create a new copy of the list but with one less item. - // (we must copy because we'll be mutating the list). - newList := make([]Peer, len(ps.list)-1) - copy(newList, ps.list) - // If it's the last peer, that's an easy special case. - if index == len(ps.list)-1 { - ps.list = newList - delete(ps.lookup, peer.ID()) - return true - } - - // Replace the popped item with the last item in the old list. - lastPeer := ps.list[len(ps.list)-1] - lastPeerKey := lastPeer.ID() - lastPeerItem := ps.lookup[lastPeerKey] - newList[index] = lastPeer - lastPeerItem.index = index - ps.list = newList - delete(ps.lookup, peer.ID()) - return true -} - -// Size returns the number of unique items in the peerSet. -func (ps *PeerSet) Size() int { - ps.mtx.Lock() - defer ps.mtx.Unlock() - return len(ps.list) -} - -// List returns the threadsafe list of peers. 
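The removed Remove above uses the classic copy-and-swap deletion: duplicate the slice one element shorter, then move the final element into the vacated index so the lookup table's indexes stay valid without shifting everything down. A standalone sketch over plain ints (the repo targets Go 1.17, so no generics):

// swapRemove returns a copy of s with the element at i removed by
// moving the last element into its place; order is not preserved.
func swapRemove(s []int, i int) []int {
	out := make([]int, len(s)-1)
	copy(out, s) // copies the first len(s)-1 elements
	if i < len(out) {
		out[i] = s[len(s)-1]
	}
	return out
}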
-func (ps *PeerSet) List() []Peer { - ps.mtx.Lock() - defer ps.mtx.Unlock() - return ps.list -} diff --git a/internal/p2p/peer_set_test.go b/internal/p2p/peer_set_test.go deleted file mode 100644 index 59ff2d6e66..0000000000 --- a/internal/p2p/peer_set_test.go +++ /dev/null @@ -1,189 +0,0 @@ -package p2p - -import ( - "net" - "sync" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/types" -) - -// mockPeer for testing the PeerSet -type mockPeer struct { - service.BaseService - ip net.IP - id types.NodeID -} - -func (mp *mockPeer) FlushStop() { mp.Stop() } //nolint:errcheck // ignore error -func (mp *mockPeer) TrySend(chID byte, msgBytes []byte) bool { return true } -func (mp *mockPeer) Send(chID byte, msgBytes []byte) bool { return true } -func (mp *mockPeer) NodeInfo() types.NodeInfo { return types.NodeInfo{} } -func (mp *mockPeer) Status() ConnectionStatus { return ConnectionStatus{} } -func (mp *mockPeer) ID() types.NodeID { return mp.id } -func (mp *mockPeer) IsOutbound() bool { return false } -func (mp *mockPeer) IsPersistent() bool { return true } -func (mp *mockPeer) Get(s string) interface{} { return s } -func (mp *mockPeer) Set(string, interface{}) {} -func (mp *mockPeer) RemoteIP() net.IP { return mp.ip } -func (mp *mockPeer) SocketAddr() *NetAddress { return nil } -func (mp *mockPeer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.ip, Port: 8800} } -func (mp *mockPeer) CloseConn() error { return nil } - -// Returns a mock peer -func newMockPeer(ip net.IP) *mockPeer { - if ip == nil { - ip = net.IP{127, 0, 0, 1} - } - nodeKey := types.GenNodeKey() - return &mockPeer{ - ip: ip, - id: nodeKey.ID, - } -} - -func TestPeerSetAddRemoveOne(t *testing.T) { - t.Parallel() - - peerSet := NewPeerSet() - - var peerList []Peer - for i := 0; i < 5; i++ { - p := newMockPeer(net.IP{127, 0, 0, byte(i)}) - if err := peerSet.Add(p); err != nil { - t.Error(err) - } - peerList = append(peerList, p) - } - - n := len(peerList) - // 1. Test removing from the front - for i, peerAtFront := range peerList { - removed := peerSet.Remove(peerAtFront) - assert.True(t, removed) - wantSize := n - i - 1 - for j := 0; j < 2; j++ { - assert.Equal(t, false, peerSet.Has(peerAtFront.ID()), "#%d Run #%d: failed to remove peer", i, j) - assert.Equal(t, wantSize, peerSet.Size(), "#%d Run #%d: failed to remove peer and decrement size", i, j) - // Test the route of removing the now non-existent element - removed := peerSet.Remove(peerAtFront) - assert.False(t, removed) - } - } - - // 2. 
Next we are testing removing the peer at the end - // a) Replenish the peerSet - for _, peer := range peerList { - if err := peerSet.Add(peer); err != nil { - t.Error(err) - } - } - - // b) In reverse, remove each element - for i := n - 1; i >= 0; i-- { - peerAtEnd := peerList[i] - removed := peerSet.Remove(peerAtEnd) - assert.True(t, removed) - assert.Equal(t, false, peerSet.Has(peerAtEnd.ID()), "#%d: failed to remove item at end", i) - assert.Equal(t, i, peerSet.Size(), "#%d: differing sizes after peerSet.Remove(atEndPeer)", i) - } -} - -func TestPeerSetAddRemoveMany(t *testing.T) { - t.Parallel() - peerSet := NewPeerSet() - - peers := []Peer{} - N := 100 - for i := 0; i < N; i++ { - peer := newMockPeer(net.IP{127, 0, 0, byte(i)}) - if err := peerSet.Add(peer); err != nil { - t.Errorf("failed to add new peer") - } - if peerSet.Size() != i+1 { - t.Errorf("failed to add new peer and increment size") - } - peers = append(peers, peer) - } - - for i, peer := range peers { - removed := peerSet.Remove(peer) - assert.True(t, removed) - if peerSet.Has(peer.ID()) { - t.Errorf("failed to remove peer") - } - if peerSet.Size() != len(peers)-i-1 { - t.Errorf("failed to remove peer and decrement size") - } - } -} - -func TestPeerSetAddDuplicate(t *testing.T) { - t.Parallel() - peerSet := NewPeerSet() - peer := newMockPeer(nil) - - n := 20 - errsChan := make(chan error) - // Add the same asynchronously to test the - // concurrent guarantees of our APIs, and - // our expectation in the end is that only - // one addition succeeded, but the rest are - // instances of ErrSwitchDuplicatePeer. - for i := 0; i < n; i++ { - go func() { - errsChan <- peerSet.Add(peer) - }() - } - - // Now collect and tally the results - errsTally := make(map[string]int) - for i := 0; i < n; i++ { - err := <-errsChan - - switch err.(type) { - case ErrSwitchDuplicatePeerID: - errsTally["duplicateID"]++ - default: - errsTally["other"]++ - } - } - - // Our next procedure is to ensure that only one addition - // succeeded and that the rest are each ErrSwitchDuplicatePeer. - wantErrCount, gotErrCount := n-1, errsTally["duplicateID"] - assert.Equal(t, wantErrCount, gotErrCount, "invalid ErrSwitchDuplicatePeer count") - - wantNilErrCount, gotNilErrCount := 1, errsTally["other"] - assert.Equal(t, wantNilErrCount, gotNilErrCount, "invalid nil errCount") -} - -func TestPeerSetGet(t *testing.T) { - t.Parallel() - - var ( - peerSet = NewPeerSet() - peer = newMockPeer(nil) - ) - - assert.Nil(t, peerSet.Get(peer.ID()), "expecting a nil lookup, before .Add") - - if err := peerSet.Add(peer); err != nil { - t.Fatalf("Failed to add new peer: %v", err) - } - - var wg sync.WaitGroup - for i := 0; i < 10; i++ { - // Add them asynchronously to test the - // concurrent guarantees of our APIs. 
- wg.Add(1) - go func(i int) { - defer wg.Done() - have, want := peerSet.Get(peer.ID()), peer - assert.Equal(t, have, want, "%d: have %v, want %v", i, have, want) - }(i) - } - wg.Wait() -} diff --git a/internal/p2p/peer_test.go b/internal/p2p/peer_test.go deleted file mode 100644 index b9f42fe84b..0000000000 --- a/internal/p2p/peer_test.go +++ /dev/null @@ -1,239 +0,0 @@ -package p2p - -import ( - "context" - "fmt" - golog "log" - "net" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/crypto/ed25519" - "github.com/tendermint/tendermint/libs/bytes" - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/types" - - "github.com/tendermint/tendermint/config" - tmconn "github.com/tendermint/tendermint/internal/p2p/conn" -) - -func TestPeerBasic(t *testing.T) { - assert, require := assert.New(t), require.New(t) - - // simulate remote peer - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - rp.Start() - t.Cleanup(rp.Stop) - - p, err := createOutboundPeerAndPerformHandshake(rp.Addr(), cfg, tmconn.DefaultMConnConfig()) - require.Nil(err) - - err = p.Start() - require.Nil(err) - t.Cleanup(func() { - if err := p.Stop(); err != nil { - t.Error(err) - } - }) - - assert.True(p.IsRunning()) - assert.True(p.IsOutbound()) - assert.False(p.IsPersistent()) - p.persistent = true - assert.True(p.IsPersistent()) - assert.Equal(rp.Addr().DialString(), p.RemoteAddr().String()) - assert.Equal(rp.ID(), p.ID()) -} - -func TestPeerSend(t *testing.T) { - assert, require := assert.New(t), require.New(t) - - config := cfg - - // simulate remote peer - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: config} - rp.Start() - t.Cleanup(rp.Stop) - - p, err := createOutboundPeerAndPerformHandshake(rp.Addr(), config, tmconn.DefaultMConnConfig()) - require.Nil(err) - - err = p.Start() - require.Nil(err) - - t.Cleanup(func() { - if err := p.Stop(); err != nil { - t.Error(err) - } - }) - - assert.True(p.Send(testCh, []byte("Asylum"))) -} - -func createOutboundPeerAndPerformHandshake( - addr *NetAddress, - config *config.P2PConfig, - mConfig tmconn.MConnConfig, -) (*peer, error) { - chDescs := []*tmconn.ChannelDescriptor{ - {ID: testCh, Priority: 1}, - } - pk := ed25519.GenPrivKey() - ourNodeInfo := testNodeInfo(types.NodeIDFromPubKey(pk.PubKey()), "host_peer", nil) - transport := NewMConnTransport(log.TestingLogger(), mConfig, chDescs, MConnTransportOptions{}) - reactorsByCh := map[byte]Reactor{testCh: NewTestReactor(chDescs, true)} - pc, err := testOutboundPeerConn(transport, addr, config, false, pk) - if err != nil { - return nil, err - } - peerInfo, _, err := pc.conn.Handshake(context.Background(), ourNodeInfo, pk) - if err != nil { - return nil, err - } - - p := newPeer(peerInfo, pc, reactorsByCh, func(p Peer, r interface{}) {}) - p.SetLogger(log.TestingLogger().With("peer", addr)) - return p, nil -} - -func testDial(addr *NetAddress, cfg *config.P2PConfig) (net.Conn, error) { - if cfg.TestDialFail { - return nil, fmt.Errorf("dial err (peerConfig.DialFail == true)") - } - - conn, err := addr.DialTimeout(cfg.DialTimeout) - if err != nil { - return nil, err - } - return conn, nil -} - -func testOutboundPeerConn( - transport *MConnTransport, - addr *NetAddress, - config *config.P2PConfig, - persistent bool, - ourNodePrivKey crypto.PrivKey, -) (peerConn, error) { - - var pc peerConn - conn, err := testDial(addr, config) - if err != nil { - return pc, 
fmt.Errorf("error creating peer: %w", err) - } - - pc, err = testPeerConn(transport, conn, true, persistent) - if err != nil { - if cerr := conn.Close(); cerr != nil { - return pc, fmt.Errorf("%v: %w", cerr.Error(), err) - } - return pc, err - } - - return pc, nil -} - -type remotePeer struct { - PrivKey crypto.PrivKey - Config *config.P2PConfig - Network string - addr *NetAddress - channels bytes.HexBytes - listenAddr string - listener net.Listener -} - -func (rp *remotePeer) Addr() *NetAddress { - return rp.addr -} - -func (rp *remotePeer) ID() types.NodeID { - return types.NodeIDFromPubKey(rp.PrivKey.PubKey()) -} - -func (rp *remotePeer) Start() { - if rp.listenAddr == "" { - rp.listenAddr = "127.0.0.1:0" - } - - l, e := net.Listen("tcp", rp.listenAddr) // any available address - if e != nil { - golog.Fatalf("net.Listen tcp :0: %+v", e) - } - rp.listener = l - rp.addr = types.NewNetAddress(types.NodeIDFromPubKey(rp.PrivKey.PubKey()), l.Addr()) - if rp.channels == nil { - rp.channels = []byte{testCh} - } - go rp.accept() -} - -func (rp *remotePeer) Stop() { - rp.listener.Close() -} - -func (rp *remotePeer) Dial(addr *NetAddress) (net.Conn, error) { - transport := NewMConnTransport(log.TestingLogger(), MConnConfig(rp.Config), - []*ChannelDescriptor{}, MConnTransportOptions{}) - conn, err := addr.DialTimeout(1 * time.Second) - if err != nil { - return nil, err - } - pc, err := testInboundPeerConn(transport, conn) - if err != nil { - return nil, err - } - _, _, err = pc.conn.Handshake(context.Background(), rp.nodeInfo(), rp.PrivKey) - if err != nil { - return nil, err - } - return conn, err -} - -func (rp *remotePeer) accept() { - transport := NewMConnTransport(log.TestingLogger(), MConnConfig(rp.Config), - []*ChannelDescriptor{}, MConnTransportOptions{}) - conns := []net.Conn{} - - for { - conn, err := rp.listener.Accept() - if err != nil { - golog.Printf("Failed to accept conn: %+v", err) - for _, conn := range conns { - _ = conn.Close() - } - return - } - - pc, err := testInboundPeerConn(transport, conn) - if err != nil { - golog.Printf("Failed to create a peer: %+v", err) - } - _, _, err = pc.conn.Handshake(context.Background(), rp.nodeInfo(), rp.PrivKey) - if err != nil { - golog.Printf("Failed to handshake a peer: %+v", err) - } - - conns = append(conns, conn) - } -} - -func (rp *remotePeer) nodeInfo() types.NodeInfo { - ni := types.NodeInfo{ - ProtocolVersion: defaultProtocolVersion, - NodeID: rp.Addr().ID, - ListenAddr: rp.listener.Addr().String(), - Network: "testing", - Version: "1.2.3-rc0-deadbeef", - Channels: rp.channels, - Moniker: "remote_peer", - } - if rp.Network != "" { - ni.Network = rp.Network - } - return ni -} diff --git a/internal/p2p/peermanager.go b/internal/p2p/peermanager.go index 19c3afc921..452e0df96c 100644 --- a/internal/p2p/peermanager.go +++ b/internal/p2p/peermanager.go @@ -42,7 +42,8 @@ const ( type PeerScore uint8 const ( - PeerScorePersistent PeerScore = math.MaxUint8 // persistent peers + PeerScorePersistent PeerScore = math.MaxUint8 // persistent peers + MaxPeerScoreNotPersistent PeerScore = PeerScorePersistent - 1 ) // PeerUpdate is a peer update event sent via PeerUpdates. 
@@ -54,9 +55,9 @@ type PeerUpdate struct { ProTxHash types.ProTxHash } -// SetProTxHash copies `protxhash` into `PeerUpdate.ProTxHash` -func (pu *PeerUpdate) SetProTxHash(protxhash types.ProTxHash) { - pu.ProTxHash = protxhash.Copy() +// SetProTxHash copies `proTxHash` into `PeerUpdate.ProTxHash` +func (pu *PeerUpdate) SetProTxHash(proTxHash types.ProTxHash) { + pu.ProTxHash = proTxHash.Copy() } // PeerUpdates is a peer update subscription with notifications about peer @@ -64,8 +65,6 @@ func (pu *PeerUpdate) SetProTxHash(protxhash types.ProTxHash) { type PeerUpdates struct { routerUpdatesCh chan PeerUpdate reactorUpdatesCh chan PeerUpdate - closeCh chan struct{} - closeOnce sync.Once } // NewPeerUpdates creates a new PeerUpdates subscription. It is primarily for @@ -75,7 +74,6 @@ func NewPeerUpdates(updatesCh chan PeerUpdate, buf int) *PeerUpdates { return &PeerUpdates{ reactorUpdatesCh: updatesCh, routerUpdatesCh: make(chan PeerUpdate, buf), - closeCh: make(chan struct{}), } } @@ -86,28 +84,13 @@ func (pu *PeerUpdates) Updates() <-chan PeerUpdate { // SendUpdate pushes information about a peer into the routing layer, // presumably from a peer. -func (pu *PeerUpdates) SendUpdate(update PeerUpdate) { +func (pu *PeerUpdates) SendUpdate(ctx context.Context, update PeerUpdate) { select { - case <-pu.closeCh: + case <-ctx.Done(): case pu.routerUpdatesCh <- update: } } -// Close closes the peer updates subscription. -func (pu *PeerUpdates) Close() { - pu.closeOnce.Do(func() { - // NOTE: We don't close updatesCh since multiple goroutines may be - // sending on it. The PeerManager senders will select on closeCh as well - // to avoid blocking on a closed subscription. - close(pu.closeCh) - }) -} - -// Done returns a channel that is closed when the subscription is closed. -func (pu *PeerUpdates) Done() <-chan struct{} { - return pu.closeCh -} - // PeerManagerOptions specifies options for a PeerManager. 
type PeerManagerOptions struct { // PersistentPeers are peers that we want to maintain persistent connections @@ -192,8 +175,7 @@ func (o *PeerManagerOptions) Validate() error { if o.MaxPeers > 0 { if o.MaxConnected == 0 || o.MaxConnected+o.MaxConnectedUpgrade > o.MaxPeers { - return fmt.Errorf( - "MaxConnected %v and MaxConnectedUpgrade %v can't exceed MaxPeers %v", + return fmt.Errorf("MaxConnected %v and MaxConnectedUpgrade %v can't exceed MaxPeers %v", o.MaxConnected, o.MaxConnectedUpgrade, o.MaxPeers) } } @@ -213,8 +195,7 @@ func (o *PeerManagerOptions) Validate() error { return errors.New("can't set MaxRetryTimePersistent without MinRetryTime") } if o.MinRetryTime > o.MaxRetryTimePersistent { - return fmt.Errorf( - "MinRetryTime %v is greater than MaxRetryTimePersistent %v", + return fmt.Errorf("MinRetryTime %v is greater than MaxRetryTimePersistent %v", o.MinRetryTime, o.MaxRetryTimePersistent) } } @@ -290,8 +271,6 @@ type PeerManager struct { rand *rand.Rand dialWaker *tmsync.Waker // wakes up DialNext() on relevant peer changes evictWaker *tmsync.Waker // wakes up EvictNext() on relevant peer changes - closeCh chan struct{} // signal channel for Close() - closeOnce sync.Once mtx sync.Mutex store *peerStore @@ -326,7 +305,6 @@ func NewPeerManager(selfID types.NodeID, peerDB dbm.DB, options PeerManagerOptio rand: rand.New(rand.NewSource(time.Now().UnixNano())), // nolint:gosec dialWaker: tmsync.NewWaker(), evictWaker: tmsync.NewWaker(), - closeCh: make(chan struct{}), store: store, dialing: map[types.NodeID]bool{}, @@ -527,7 +505,7 @@ func (m *PeerManager) TryDialNext() (NodeAddress, error) { // for dialing again when appropriate (possibly after a retry timeout). // // FIXME: This should probably delete or mark bad addresses/peers after some time. -func (m *PeerManager) DialFailed(address NodeAddress) error { +func (m *PeerManager) DialFailed(ctx context.Context, address NodeAddress) error { m.mtx.Lock() defer m.mtx.Unlock() @@ -566,7 +544,7 @@ func (m *PeerManager) DialFailed(address NodeAddress) error { select { case <-timer.C: m.dialWaker.Wake() - case <-m.closeCh: + case <-ctx.Done(): } }() } else { @@ -718,7 +696,7 @@ func (m *PeerManager) Accepted(peerID types.NodeID, peerOpts ...func(*peerInfo)) // channels set here are passed in the peer update broadcast to // reactors, which can then mediate their own behavior based on the // capability of the peers. -func (m *PeerManager) Ready(peerID types.NodeID, channels ChannelIDSet) { +func (m *PeerManager) Ready(ctx context.Context, peerID types.NodeID, channels ChannelIDSet) { m.mtx.Lock() defer m.mtx.Unlock() @@ -733,7 +711,7 @@ func (m *PeerManager) Ready(peerID types.NodeID, channels ChannelIDSet) { if ok && len(peer.ProTxHash) > 0 { pu.SetProTxHash(peer.ProTxHash) } - m.broadcast(pu) + m.broadcast(ctx, pu) } } @@ -791,7 +769,7 @@ func (m *PeerManager) TryEvictNext() (types.NodeID, error) { // Disconnected unmarks a peer as connected, allowing it to be dialed or // accepted again as appropriate. 
-func (m *PeerManager) Disconnected(peerID types.NodeID) { +func (m *PeerManager) Disconnected(ctx context.Context, peerID types.NodeID) { m.mtx.Lock() defer m.mtx.Unlock() @@ -812,7 +790,7 @@ func (m *PeerManager) Disconnected(peerID types.NodeID) { if ok && len(peer.ProTxHash) > 0 { pu.SetProTxHash(peer.ProTxHash) } - m.broadcast(pu) + m.broadcast(ctx, pu) } m.dialWaker.Wake() @@ -873,10 +851,15 @@ func (m *PeerManager) Advertise(peerID types.NodeID, limit uint16) []NodeAddress return addresses } +// PeerEventSubscriber describes the type of the subscription method, to assist +// in isolating reactors specific construction and lifecycle from the +// peer manager. +type PeerEventSubscriber func(context.Context) *PeerUpdates + // Subscribe subscribes to peer updates. The caller must consume the peer // updates in a timely fashion and close the subscription when done, otherwise // the PeerManager will halt. -func (m *PeerManager) Subscribe() *PeerUpdates { +func (m *PeerManager) Subscribe(ctx context.Context) *PeerUpdates { // FIXME: We use a size 1 buffer here. When we broadcast a peer update // we have to loop over all of the subscriptions, and we want to avoid // having to block and wait for a context switch before continuing on @@ -884,7 +867,7 @@ func (m *PeerManager) Subscribe() *PeerUpdates { // compounding. Limiting it to 1 means that the subscribers are still // reasonably in sync. However, this should probably be benchmarked. peerUpdates := NewPeerUpdates(make(chan PeerUpdate, 1), 1) - m.Register(peerUpdates) + m.Register(ctx, peerUpdates) return peerUpdates } @@ -896,39 +879,38 @@ func (m *PeerManager) Subscribe() *PeerUpdates { // The caller must consume the peer updates from this PeerUpdates // instance in a timely fashion and close the subscription when done, // otherwise the PeerManager will halt. -func (m *PeerManager) Register(peerUpdates *PeerUpdates) { +func (m *PeerManager) Register(ctx context.Context, peerUpdates *PeerUpdates) { m.mtx.Lock() + defer m.mtx.Unlock() m.subscriptions[peerUpdates] = peerUpdates - m.mtx.Unlock() go func() { for { select { - case <-peerUpdates.closeCh: - return - case <-m.closeCh: + case <-ctx.Done(): return case pu := <-peerUpdates.routerUpdatesCh: - m.processPeerEvent(pu) + m.processPeerEvent(ctx, pu) } } }() go func() { - select { - case <-peerUpdates.Done(): - m.mtx.Lock() - delete(m.subscriptions, peerUpdates) - m.mtx.Unlock() - case <-m.closeCh: - } + <-ctx.Done() + m.mtx.Lock() + defer m.mtx.Unlock() + delete(m.subscriptions, peerUpdates) }() } -func (m *PeerManager) processPeerEvent(pu PeerUpdate) { +func (m *PeerManager) processPeerEvent(ctx context.Context, pu PeerUpdate) { m.mtx.Lock() defer m.mtx.Unlock() + if ctx.Err() != nil { + return + } + if _, ok := m.store.peers[pu.NodeID]; !ok { m.store.peers[pu.NodeID] = &peerInfo{} } @@ -948,29 +930,19 @@ func (m *PeerManager) processPeerEvent(pu PeerUpdate) { // // FIXME: Consider using an internal channel to buffer updates while also // maintaining order if this is a problem. -func (m *PeerManager) broadcast(peerUpdate PeerUpdate) { +func (m *PeerManager) broadcast(ctx context.Context, peerUpdate PeerUpdate) { for _, sub := range m.subscriptions { - // We have to check closeCh separately first, otherwise there's a 50% - // chance the second select will send on a closed subscription. 
- select { - case <-sub.closeCh: - continue - default: + if ctx.Err() != nil { + return } select { + case <-ctx.Done(): + return case sub.reactorUpdatesCh <- peerUpdate: - case <-sub.closeCh: } } } -// Close closes the peer manager, releasing resources (i.e. goroutines). -func (m *PeerManager) Close() { - m.closeOnce.Do(func() { - close(m.closeCh) - }) -} - // Addresses returns all known addresses for a peer, primarily for testing. // The order is arbitrary. func (m *PeerManager) Addresses(peerID types.NodeID) []NodeAddress { @@ -1351,6 +1323,9 @@ func (p *peerInfo) Score() PeerScore { } score := p.MutableScore + if score > int64(MaxPeerScoreNotPersistent) { + score = int64(MaxPeerScoreNotPersistent) + } for _, addr := range p.AddressInfo { // DialFailures is reset when dials succeed, so this @@ -1362,10 +1337,6 @@ func (p *peerInfo) Score() PeerScore { return 0 } - if score >= math.MaxUint8 { - return PeerScore(math.MaxUint8) - } - return PeerScore(score) } diff --git a/internal/p2p/peermanager_scoring_test.go b/internal/p2p/peermanager_scoring_test.go index edb5fc6fc0..a45df0b728 100644 --- a/internal/p2p/peermanager_scoring_test.go +++ b/internal/p2p/peermanager_scoring_test.go @@ -1,6 +1,7 @@ package p2p import ( + "context" "strings" "testing" "time" @@ -21,7 +22,6 @@ func TestPeerScoring(t *testing.T) { db := dbm.NewMemDB() peerManager, err := NewPeerManager(selfID, db, PeerManagerOptions{}) require.NoError(t, err) - defer peerManager.Close() // create a fake node id := types.NodeID(strings.Repeat("a1", 20)) @@ -29,13 +29,16 @@ func TestPeerScoring(t *testing.T) { require.NoError(t, err) require.True(t, added) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + t.Run("Synchronous", func(t *testing.T) { // update the manager and make sure it's correct require.EqualValues(t, 0, peerManager.Scores()[id]) // add a bunch of good status updates and watch things increase. 
for i := 1; i < 10; i++ { - peerManager.processPeerEvent(PeerUpdate{ + peerManager.processPeerEvent(ctx, PeerUpdate{ NodeID: id, Status: PeerStatusGood, }) @@ -44,7 +47,7 @@ func TestPeerScoring(t *testing.T) { // watch the corresponding decreases respond to update for i := 10; i == 0; i-- { - peerManager.processPeerEvent(PeerUpdate{ + peerManager.processPeerEvent(ctx, PeerUpdate{ NodeID: id, Status: PeerStatusBad, }) @@ -53,9 +56,8 @@ func TestPeerScoring(t *testing.T) { }) t.Run("AsynchronousIncrement", func(t *testing.T) { start := peerManager.Scores()[id] - pu := peerManager.Subscribe() - defer pu.Close() - pu.SendUpdate(PeerUpdate{ + pu := peerManager.Subscribe(ctx) + pu.SendUpdate(ctx, PeerUpdate{ NodeID: id, Status: PeerStatusGood, }) @@ -67,9 +69,8 @@ func TestPeerScoring(t *testing.T) { }) t.Run("AsynchronousDecrement", func(t *testing.T) { start := peerManager.Scores()[id] - pu := peerManager.Subscribe() - defer pu.Close() - pu.SendUpdate(PeerUpdate{ + pu := peerManager.Subscribe(ctx) + pu.SendUpdate(ctx, PeerUpdate{ NodeID: id, Status: PeerStatusBad, }) @@ -79,4 +80,20 @@ func TestPeerScoring(t *testing.T) { time.Millisecond, "startAt=%d score=%d", start, peerManager.Scores()[id]) }) + t.Run("TestNonPersistantPeerUpperBound", func(t *testing.T) { + start := int64(peerManager.Scores()[id] + 1) + + for i := start; i <= int64(PeerScorePersistent); i++ { + peerManager.processPeerEvent(ctx, PeerUpdate{ + NodeID: id, + Status: PeerStatusGood, + }) + + if i == int64(PeerScorePersistent) { + require.EqualValues(t, MaxPeerScoreNotPersistent, peerManager.Scores()[id]) + } else { + require.EqualValues(t, i, peerManager.Scores()[id]) + } + } + }) } diff --git a/internal/p2p/peermanager_test.go b/internal/p2p/peermanager_test.go index 2868c14e43..82d1e26932 100644 --- a/internal/p2p/peermanager_test.go +++ b/internal/p2p/peermanager_test.go @@ -156,7 +156,6 @@ func TestNewPeerManager_Persistence(t *testing.T) { PeerScores: map[types.NodeID]p2p.PeerScore{bID: 1}, }) require.NoError(t, err) - defer peerManager.Close() for _, addr := range append(append(aAddresses, bAddresses...), cAddresses...) { added, err := peerManager.Add(addr) @@ -173,8 +172,6 @@ func TestNewPeerManager_Persistence(t *testing.T) { cID: 0, }, peerManager.Scores()) - peerManager.Close() - // Creating a new peer manager with the same database should retain the // peers, but they should have updated scores from the new PersistentPeers // configuration. @@ -183,7 +180,6 @@ func TestNewPeerManager_Persistence(t *testing.T) { PeerScores: map[types.NodeID]p2p.PeerScore{cID: 1}, }) require.NoError(t, err) - defer peerManager.Close() require.ElementsMatch(t, aAddresses, peerManager.Addresses(aID)) require.ElementsMatch(t, bAddresses, peerManager.Addresses(bID)) @@ -210,7 +206,6 @@ func TestNewPeerManager_SelfIDChange(t *testing.T) { require.NoError(t, err) require.True(t, added) require.ElementsMatch(t, []types.NodeID{a.NodeID, b.NodeID}, peerManager.Peers()) - peerManager.Close() // If we change our selfID to one of the peers in the peer store, it // should be removed from the store. 
@@ -275,6 +270,9 @@ func TestPeerManager_Add(t *testing.T) { } func TestPeerManager_DialNext(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) @@ -298,6 +296,9 @@ func TestPeerManager_DialNext(t *testing.T) { } func TestPeerManager_DialNext_Retry(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} options := p2p.PeerManagerOptions{ @@ -313,7 +314,7 @@ func TestPeerManager_DialNext_Retry(t *testing.T) { // Do five dial retries (six dials total). The retry time should double for // each failure. At the forth retry, MaxRetryTime should kick in. - ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + ctx, cancel = context.WithTimeout(ctx, 5*time.Second) defer cancel() for i := 0; i <= 6; i++ { @@ -338,12 +339,14 @@ func TestPeerManager_DialNext_Retry(t *testing.T) { default: t.Fatal("unexpected retry") } - - require.NoError(t, peerManager.DialFailed(a)) + require.NoError(t, peerManager.DialFailed(ctx, a)) } } func TestPeerManager_DialNext_WakeOnAdd(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) @@ -358,7 +361,7 @@ func TestPeerManager_DialNext_WakeOnAdd(t *testing.T) { }() // This will block until peer is added above. - ctx, cancel := context.WithTimeout(ctx, 3*time.Second) + ctx, cancel = context.WithTimeout(ctx, 3*time.Second) defer cancel() dial, err := peerManager.DialNext(ctx) require.NoError(t, err) @@ -366,6 +369,9 @@ func TestPeerManager_DialNext_WakeOnAdd(t *testing.T) { } func TestPeerManager_DialNext_WakeOnDialFailed(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ MaxConnected: 1, }) @@ -391,20 +397,26 @@ func TestPeerManager_DialNext_WakeOnDialFailed(t *testing.T) { require.Zero(t, dial) // Spawn a goroutine to fail a's dial attempt. + sig := make(chan struct{}) go func() { + defer close(sig) time.Sleep(200 * time.Millisecond) - require.NoError(t, peerManager.DialFailed(a)) + require.NoError(t, peerManager.DialFailed(ctx, a)) }() // This should make b available for dialing (not a, retries are disabled). 
- ctx, cancel := context.WithTimeout(ctx, 3*time.Second) - defer cancel() - dial, err = peerManager.DialNext(ctx) + opctx, opcancel := context.WithTimeout(ctx, 3*time.Second) + defer opcancel() + dial, err = peerManager.DialNext(opctx) require.NoError(t, err) require.Equal(t, b, dial) + <-sig } func TestPeerManager_DialNext_WakeOnDialFailedRetry(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + options := p2p.PeerManagerOptions{MinRetryTime: 200 * time.Millisecond} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), options) require.NoError(t, err) @@ -418,12 +430,12 @@ func TestPeerManager_DialNext_WakeOnDialFailedRetry(t *testing.T) { dial, err := peerManager.TryDialNext() require.NoError(t, err) require.Equal(t, a, dial) - require.NoError(t, peerManager.DialFailed(dial)) + require.NoError(t, peerManager.DialFailed(ctx, dial)) failed := time.Now() // The retry timer should unblock DialNext and make a available again after // the retry time passes. - ctx, cancel := context.WithTimeout(ctx, 3*time.Second) + ctx, cancel = context.WithTimeout(ctx, 3*time.Second) defer cancel() dial, err = peerManager.DialNext(ctx) require.NoError(t, err) @@ -432,6 +444,9 @@ func TestPeerManager_DialNext_WakeOnDialFailedRetry(t *testing.T) { } func TestPeerManager_DialNext_WakeOnDisconnected(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) @@ -447,12 +462,14 @@ func TestPeerManager_DialNext_WakeOnDisconnected(t *testing.T) { require.NoError(t, err) require.Zero(t, dial) + dctx, dcancel := context.WithTimeout(ctx, 300*time.Millisecond) + defer dcancel() go func() { time.Sleep(200 * time.Millisecond) - peerManager.Disconnected(a.NodeID) + peerManager.Disconnected(dctx, a.NodeID) }() - ctx, cancel := context.WithTimeout(ctx, 3*time.Second) + ctx, cancel = context.WithTimeout(ctx, 3*time.Second) defer cancel() dial, err = peerManager.DialNext(ctx) require.NoError(t, err) @@ -496,6 +513,9 @@ func TestPeerManager_TryDialNext_MaxConnected(t *testing.T) { } func TestPeerManager_TryDialNext_MaxConnectedUpgrade(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))} @@ -561,7 +581,7 @@ func TestPeerManager_TryDialNext_MaxConnectedUpgrade(t *testing.T) { // Now, if we disconnect a, we should be allowed to dial d because we have a // free upgrade slot. - peerManager.Disconnected(a.NodeID) + peerManager.Disconnected(ctx, a.NodeID) dial, err = peerManager.TryDialNext() require.NoError(t, err) require.Equal(t, d, dial) @@ -570,7 +590,7 @@ func TestPeerManager_TryDialNext_MaxConnectedUpgrade(t *testing.T) { // However, if we disconnect b (such that only c and d are connected), we // should not be allowed to dial e even though there are upgrade slots, // because there are no lower-scored nodes that can be upgraded. 
- peerManager.Disconnected(b.NodeID) + peerManager.Disconnected(ctx, b.NodeID) added, err = peerManager.Add(e) require.NoError(t, err) require.True(t, added) @@ -664,6 +684,9 @@ func TestPeerManager_TryDialNext_DialingConnected(t *testing.T) { } func TestPeerManager_TryDialNext_Multiple(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + aID := types.NodeID(strings.Repeat("a", 40)) bID := types.NodeID(strings.Repeat("b", 40)) addresses := []p2p.NodeAddress{ @@ -688,7 +711,7 @@ func TestPeerManager_TryDialNext_Multiple(t *testing.T) { address, err := peerManager.TryDialNext() require.NoError(t, err) require.NotZero(t, address) - require.NoError(t, peerManager.DialFailed(address)) + require.NoError(t, peerManager.DialFailed(ctx, address)) dial = append(dial, address) } require.ElementsMatch(t, dial, addresses) @@ -713,13 +736,16 @@ func TestPeerManager_DialFailed(t *testing.T) { require.NoError(t, err) require.True(t, added) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Dialing and then calling DialFailed with a different address (same // NodeID) should unmark as dialing and allow us to dial the other address // again, but not register the failed address. dial, err := peerManager.TryDialNext() require.NoError(t, err) require.Equal(t, a, dial) - require.NoError(t, peerManager.DialFailed(p2p.NodeAddress{ + require.NoError(t, peerManager.DialFailed(ctx, p2p.NodeAddress{ Protocol: "tcp", NodeID: aID, Hostname: "localhost"})) require.Equal(t, []p2p.NodeAddress{a}, peerManager.Addresses(aID)) @@ -728,15 +754,18 @@ func TestPeerManager_DialFailed(t *testing.T) { require.Equal(t, a, dial) // Calling DialFailed on same address twice should be fine. - require.NoError(t, peerManager.DialFailed(a)) - require.NoError(t, peerManager.DialFailed(a)) + require.NoError(t, peerManager.DialFailed(ctx, a)) + require.NoError(t, peerManager.DialFailed(ctx, a)) // DialFailed on an unknown peer shouldn't error or add it. - require.NoError(t, peerManager.DialFailed(b)) + require.NoError(t, peerManager.DialFailed(ctx, b)) require.Equal(t, []types.NodeID{aID}, peerManager.Peers()) } func TestPeerManager_DialFailed_UnreservePeer(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))} @@ -776,7 +805,7 @@ func TestPeerManager_DialFailed_UnreservePeer(t *testing.T) { require.Empty(t, dial) // Failing b's dial will now make c available for dialing. - require.NoError(t, peerManager.DialFailed(b)) + require.NoError(t, peerManager.DialFailed(ctx, b)) dial, err = peerManager.TryDialNext() require.NoError(t, err) require.Equal(t, c, dial) @@ -943,6 +972,9 @@ func TestPeerManager_Dialed_Upgrade(t *testing.T) { } func TestPeerManager_Dialed_UpgradeEvenLower(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))} @@ -982,7 +1014,7 @@ func TestPeerManager_Dialed_UpgradeEvenLower(t *testing.T) { // In the meanwhile, a disconnects and d connects. 
d is even lower-scored // than b (1 vs 2), which is currently being upgraded. - peerManager.Disconnected(a.NodeID) + peerManager.Disconnected(ctx, a.NodeID) added, err = peerManager.Add(d) require.NoError(t, err) require.True(t, added) @@ -997,6 +1029,9 @@ func TestPeerManager_Dialed_UpgradeEvenLower(t *testing.T) { } func TestPeerManager_Dialed_UpgradeNoEvict(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))} @@ -1032,7 +1067,7 @@ func TestPeerManager_Dialed_UpgradeNoEvict(t *testing.T) { require.Equal(t, c, dial) // In the meanwhile, b disconnects. - peerManager.Disconnected(b.NodeID) + peerManager.Disconnected(ctx, b.NodeID) // Once c completes the upgrade of b, there is no longer a need to // evict anything since we're at capacity. @@ -1165,6 +1200,9 @@ func TestPeerManager_Accepted_MaxConnectedUpgrade(t *testing.T) { } func TestPeerManager_Accepted_Upgrade(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))} @@ -1201,7 +1239,7 @@ func TestPeerManager_Accepted_Upgrade(t *testing.T) { evict, err := peerManager.TryEvictNext() require.NoError(t, err) require.Equal(t, a.NodeID, evict) - peerManager.Disconnected(a.NodeID) + peerManager.Disconnected(ctx, a.NodeID) // c still cannot get accepted, since it's not scored above b. require.Error(t, peerManager.Accepted(c.NodeID)) @@ -1258,11 +1296,13 @@ func TestPeerManager_Ready(t *testing.T) { a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - sub := peerManager.Subscribe() - defer sub.Close() + sub := peerManager.Subscribe(ctx) // Connecting to a should still have it as status down. added, err := peerManager.Add(a) @@ -1272,7 +1312,7 @@ func TestPeerManager_Ready(t *testing.T) { require.Equal(t, p2p.PeerStatusDown, peerManager.Status(a.NodeID)) // Marking a as ready should transition it to PeerStatusUp and send an update. 
- peerManager.Ready(a.NodeID, nil) + peerManager.Ready(ctx, a.NodeID, nil) require.Equal(t, p2p.PeerStatusUp, peerManager.Status(a.NodeID)) require.Equal(t, p2p.PeerUpdate{ NodeID: a.NodeID, @@ -1284,16 +1324,19 @@ func TestPeerManager_Ready(t *testing.T) { require.NoError(t, err) require.True(t, added) require.Equal(t, p2p.PeerStatusDown, peerManager.Status(b.NodeID)) - peerManager.Ready(b.NodeID, nil) + peerManager.Ready(ctx, b.NodeID, nil) require.Equal(t, p2p.PeerStatusDown, peerManager.Status(b.NodeID)) require.Empty(t, sub.Updates()) } func TestPeerManager_Ready_Channels(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + pm, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - sub := pm.Subscribe() + sub := pm.Subscribe(ctx) a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} added, err := pm.Add(a) @@ -1301,7 +1344,7 @@ func TestPeerManager_Ready_Channels(t *testing.T) { require.True(t, added) require.NoError(t, pm.Accepted(a.NodeID)) - pm.Ready(a.NodeID, p2p.ChannelIDSet{42: struct{}{}}) + pm.Ready(ctx, a.NodeID, p2p.ChannelIDSet{42: struct{}{}}) require.NotEmpty(t, sub.Updates()) update := <-sub.Updates() assert.Equal(t, a.NodeID, update.NodeID) @@ -1311,6 +1354,9 @@ func TestPeerManager_Ready_Channels(t *testing.T) { // See TryEvictNext for most tests, this just tests blocking behavior. func TestPeerManager_EvictNext(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) @@ -1320,7 +1366,7 @@ func TestPeerManager_EvictNext(t *testing.T) { require.NoError(t, err) require.True(t, added) require.NoError(t, peerManager.Accepted(a.NodeID)) - peerManager.Ready(a.NodeID, nil) + peerManager.Ready(ctx, a.NodeID, nil) // Since there are no peers to evict, EvictNext should block until timeout. timeoutCtx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) @@ -1344,6 +1390,9 @@ func TestPeerManager_EvictNext(t *testing.T) { } func TestPeerManager_EvictNext_WakeOnError(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) @@ -1353,7 +1402,7 @@ func TestPeerManager_EvictNext_WakeOnError(t *testing.T) { require.NoError(t, err) require.True(t, added) require.NoError(t, peerManager.Accepted(a.NodeID)) - peerManager.Ready(a.NodeID, nil) + peerManager.Ready(ctx, a.NodeID, nil) // Spawn a goroutine to error a peer after a delay. go func() { @@ -1362,7 +1411,7 @@ func TestPeerManager_EvictNext_WakeOnError(t *testing.T) { }() // This will block until peer errors above. 
- ctx, cancel := context.WithTimeout(ctx, 3*time.Second) + ctx, cancel = context.WithTimeout(ctx, 3*time.Second) defer cancel() evict, err := peerManager.EvictNext(ctx) require.NoError(t, err) @@ -1370,6 +1419,9 @@ func TestPeerManager_EvictNext_WakeOnError(t *testing.T) { } func TestPeerManager_EvictNext_WakeOnUpgradeDialed(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} @@ -1385,7 +1437,7 @@ func TestPeerManager_EvictNext_WakeOnUpgradeDialed(t *testing.T) { require.NoError(t, err) require.True(t, added) require.NoError(t, peerManager.Accepted(a.NodeID)) - peerManager.Ready(a.NodeID, nil) + peerManager.Ready(ctx, a.NodeID, nil) // Spawn a goroutine to upgrade to b with a delay. go func() { @@ -1400,7 +1452,7 @@ func TestPeerManager_EvictNext_WakeOnUpgradeDialed(t *testing.T) { }() // This will block until peer is upgraded above. - ctx, cancel := context.WithTimeout(ctx, 3*time.Second) + ctx, cancel = context.WithTimeout(ctx, 3*time.Second) defer cancel() evict, err := peerManager.EvictNext(ctx) require.NoError(t, err) @@ -1408,6 +1460,9 @@ func TestPeerManager_EvictNext_WakeOnUpgradeDialed(t *testing.T) { } func TestPeerManager_EvictNext_WakeOnUpgradeAccepted(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} @@ -1423,7 +1478,7 @@ func TestPeerManager_EvictNext_WakeOnUpgradeAccepted(t *testing.T) { require.NoError(t, err) require.True(t, added) require.NoError(t, peerManager.Accepted(a.NodeID)) - peerManager.Ready(a.NodeID, nil) + peerManager.Ready(ctx, a.NodeID, nil) // Spawn a goroutine to upgrade b with a delay. go func() { @@ -1432,13 +1487,16 @@ func TestPeerManager_EvictNext_WakeOnUpgradeAccepted(t *testing.T) { }() // This will block until peer is upgraded above. - ctx, cancel := context.WithTimeout(ctx, 3*time.Second) + ctx, cancel = context.WithTimeout(ctx, 3*time.Second) defer cancel() evict, err := peerManager.EvictNext(ctx) require.NoError(t, err) require.Equal(t, a.NodeID, evict) } func TestPeerManager_TryEvictNext(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) @@ -1455,7 +1513,7 @@ func TestPeerManager_TryEvictNext(t *testing.T) { // Connecting to a won't evict anything either. require.NoError(t, peerManager.Accepted(a.NodeID)) - peerManager.Ready(a.NodeID, nil) + peerManager.Ready(ctx, a.NodeID, nil) // But if a errors it should be evicted. peerManager.Errored(a.NodeID, errors.New("foo")) @@ -1480,11 +1538,13 @@ func TestPeerManager_Disconnected(t *testing.T) { peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - sub := peerManager.Subscribe() - defer sub.Close() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sub := peerManager.Subscribe(ctx) // Disconnecting an unknown peer does nothing. 
- peerManager.Disconnected(a.NodeID) + peerManager.Disconnected(ctx, a.NodeID) require.Empty(t, peerManager.Peers()) require.Empty(t, sub.Updates()) @@ -1493,14 +1553,14 @@ func TestPeerManager_Disconnected(t *testing.T) { require.NoError(t, err) require.True(t, added) require.NoError(t, peerManager.Accepted(a.NodeID)) - peerManager.Disconnected(a.NodeID) + peerManager.Disconnected(ctx, a.NodeID) require.Empty(t, sub.Updates()) // Disconnecting a ready peer sends a status update. _, err = peerManager.Add(a) require.NoError(t, err) require.NoError(t, peerManager.Accepted(a.NodeID)) - peerManager.Ready(a.NodeID, nil) + peerManager.Ready(ctx, a.NodeID, nil) require.Equal(t, p2p.PeerStatusUp, peerManager.Status(a.NodeID)) require.NotEmpty(t, sub.Updates()) require.Equal(t, p2p.PeerUpdate{ @@ -1508,7 +1568,7 @@ func TestPeerManager_Disconnected(t *testing.T) { Status: p2p.PeerStatusUp, }, <-sub.Updates()) - peerManager.Disconnected(a.NodeID) + peerManager.Disconnected(ctx, a.NodeID) require.Equal(t, p2p.PeerStatusDown, peerManager.Status(a.NodeID)) require.NotEmpty(t, sub.Updates()) require.Equal(t, p2p.PeerUpdate{ @@ -1522,13 +1582,16 @@ func TestPeerManager_Disconnected(t *testing.T) { require.NoError(t, err) require.Equal(t, a, dial) - peerManager.Disconnected(a.NodeID) + peerManager.Disconnected(ctx, a.NodeID) dial, err = peerManager.TryDialNext() require.NoError(t, err) require.Zero(t, dial) } func TestPeerManager_Errored(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) @@ -1552,7 +1615,7 @@ func TestPeerManager_Errored(t *testing.T) { require.Zero(t, evict) require.NoError(t, peerManager.Accepted(a.NodeID)) - peerManager.Ready(a.NodeID, nil) + peerManager.Ready(ctx, a.NodeID, nil) evict, err = peerManager.TryEvictNext() require.NoError(t, err) require.Zero(t, evict) @@ -1565,14 +1628,16 @@ func TestPeerManager_Errored(t *testing.T) { } func TestPeerManager_Subscribe(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) // This tests all subscription events for full peer lifecycles. 
- sub := peerManager.Subscribe() - defer sub.Close() + sub := peerManager.Subscribe(ctx) added, err := peerManager.Add(a) require.NoError(t, err) @@ -1583,11 +1648,11 @@ func TestPeerManager_Subscribe(t *testing.T) { require.NoError(t, peerManager.Accepted(a.NodeID)) require.Empty(t, sub.Updates()) - peerManager.Ready(a.NodeID, nil) + peerManager.Ready(ctx, a.NodeID, nil) require.NotEmpty(t, sub.Updates()) require.Equal(t, p2p.PeerUpdate{NodeID: a.NodeID, Status: p2p.PeerStatusUp}, <-sub.Updates()) - peerManager.Disconnected(a.NodeID) + peerManager.Disconnected(ctx, a.NodeID) require.NotEmpty(t, sub.Updates()) require.Equal(t, p2p.PeerUpdate{NodeID: a.NodeID, Status: p2p.PeerStatusDown}, <-sub.Updates()) @@ -1600,7 +1665,7 @@ func TestPeerManager_Subscribe(t *testing.T) { require.NoError(t, peerManager.Dialed(a)) require.Empty(t, sub.Updates()) - peerManager.Ready(a.NodeID, nil) + peerManager.Ready(ctx, a.NodeID, nil) require.NotEmpty(t, sub.Updates()) require.Equal(t, p2p.PeerUpdate{NodeID: a.NodeID, Status: p2p.PeerStatusUp}, <-sub.Updates()) @@ -1611,7 +1676,7 @@ func TestPeerManager_Subscribe(t *testing.T) { require.NoError(t, err) require.Equal(t, a.NodeID, evict) - peerManager.Disconnected(a.NodeID) + peerManager.Disconnected(ctx, a.NodeID) require.NotEmpty(t, sub.Updates()) require.Equal(t, p2p.PeerUpdate{NodeID: a.NodeID, Status: p2p.PeerStatusDown}, <-sub.Updates()) @@ -1621,18 +1686,20 @@ func TestPeerManager_Subscribe(t *testing.T) { require.Equal(t, a, dial) require.Empty(t, sub.Updates()) - require.NoError(t, peerManager.DialFailed(a)) + require.NoError(t, peerManager.DialFailed(ctx, a)) require.Empty(t, sub.Updates()) } func TestPeerManager_Subscribe_Close(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - sub := peerManager.Subscribe() - defer sub.Close() + sub := peerManager.Subscribe(ctx) added, err := peerManager.Add(a) require.NoError(t, err) @@ -1640,17 +1707,20 @@ func TestPeerManager_Subscribe_Close(t *testing.T) { require.NoError(t, peerManager.Accepted(a.NodeID)) require.Empty(t, sub.Updates()) - peerManager.Ready(a.NodeID, nil) + peerManager.Ready(ctx, a.NodeID, nil) require.NotEmpty(t, sub.Updates()) require.Equal(t, p2p.PeerUpdate{NodeID: a.NodeID, Status: p2p.PeerStatusUp}, <-sub.Updates()) // Closing the subscription should not send us the disconnected update. - sub.Close() - peerManager.Disconnected(a.NodeID) + cancel() + peerManager.Disconnected(ctx, a.NodeID) require.Empty(t, sub.Updates()) } func TestPeerManager_Subscribe_Broadcast(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + t.Cleanup(leaktest.Check(t)) a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} @@ -1658,19 +1728,19 @@ func TestPeerManager_Subscribe_Broadcast(t *testing.T) { peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - s1 := peerManager.Subscribe() - defer s1.Close() - s2 := peerManager.Subscribe() - defer s2.Close() - s3 := peerManager.Subscribe() - defer s3.Close() + s2ctx, s2cancel := context.WithCancel(ctx) + defer s2cancel() + + s1 := peerManager.Subscribe(ctx) + s2 := peerManager.Subscribe(s2ctx) + s3 := peerManager.Subscribe(ctx) // Connecting to a peer should send updates on all subscriptions. 
added, err := peerManager.Add(a) require.NoError(t, err) require.True(t, added) require.NoError(t, peerManager.Accepted(a.NodeID)) - peerManager.Ready(a.NodeID, nil) + peerManager.Ready(ctx, a.NodeID, nil) expectUp := p2p.PeerUpdate{NodeID: a.NodeID, Status: p2p.PeerStatusUp} require.NotEmpty(t, s1) @@ -1682,8 +1752,9 @@ func TestPeerManager_Subscribe_Broadcast(t *testing.T) { // We now close s2. Disconnecting the peer should only send updates // on s1 and s3. - s2.Close() - peerManager.Disconnected(a.NodeID) + s2cancel() + time.Sleep(250 * time.Millisecond) // give the thread a chance to exit + peerManager.Disconnected(ctx, a.NodeID) expectDown := p2p.PeerUpdate{NodeID: a.NodeID, Status: p2p.PeerStatusDown} require.NotEmpty(t, s1) @@ -1697,6 +1768,9 @@ func TestPeerManager_Close(t *testing.T) { // leaktest will check that spawned goroutines are closed. t.Cleanup(leaktest.CheckTimeout(t, 1*time.Second)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ @@ -1706,7 +1780,7 @@ func TestPeerManager_Close(t *testing.T) { // This subscription isn't closed, but PeerManager.Close() // should reap the spawned goroutine. - _ = peerManager.Subscribe() + _ = peerManager.Subscribe(ctx) // This dial failure will start a retry timer for 10 seconds, which // should be reaped. @@ -1716,10 +1790,7 @@ func TestPeerManager_Close(t *testing.T) { dial, err := peerManager.TryDialNext() require.NoError(t, err) require.Equal(t, a, dial) - require.NoError(t, peerManager.DialFailed(a)) - - // This should clean up the goroutines. - peerManager.Close() + require.NoError(t, peerManager.DialFailed(ctx, a)) } func TestPeerManager_Advertise(t *testing.T) { @@ -1742,7 +1813,6 @@ func TestPeerManager_Advertise(t *testing.T) { PeerScores: map[types.NodeID]p2p.PeerScore{aID: 3, bID: 2, cID: 1}, }) require.NoError(t, err) - defer peerManager.Close() added, err := peerManager.Add(aTCP) require.NoError(t, err) @@ -1792,7 +1862,6 @@ func TestPeerManager_Advertise_Self(t *testing.T) { SelfAddress: self, }) require.NoError(t, err) - defer peerManager.Close() // peer manager should always advertise its SelfAddress. require.ElementsMatch(t, []p2p.NodeAddress{ @@ -1827,7 +1896,6 @@ func TestPeerManager_SetHeight_GetHeight(t *testing.T) { require.ElementsMatch(t, []types.NodeID{a.NodeID, b.NodeID}, peerManager.Peers()) // The heights should not be persisted. - peerManager.Close() peerManager, err = p2p.NewPeerManager(selfID, db, p2p.PeerManagerOptions{}) require.NoError(t, err) diff --git a/internal/p2p/pex/addrbook.go b/internal/p2p/pex/addrbook.go deleted file mode 100644 index 6c5f786637..0000000000 --- a/internal/p2p/pex/addrbook.go +++ /dev/null @@ -1,948 +0,0 @@ -// Modified for Tendermint -// Originally Copyright (c) 2013-2014 Conformal Systems LLC. 
-// https://github.com/conformal/btcd/blob/master/LICENSE - -package pex - -import ( - "encoding/binary" - "fmt" - "hash" - "math" - mrand "math/rand" - "net" - "sync" - "time" - - "github.com/minio/highwayhash" - "github.com/tendermint/tendermint/crypto" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/p2p" - tmmath "github.com/tendermint/tendermint/libs/math" - tmrand "github.com/tendermint/tendermint/libs/rand" - "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/types" -) - -const ( - bucketTypeNew = 0x01 - bucketTypeOld = 0x02 -) - -// AddrBook is an address book used for tracking peers -// so we can gossip about them to others and select -// peers to dial. -// TODO: break this up? -type AddrBook interface { - service.Service - - // Add our own addresses so we don't later add ourselves - AddOurAddress(*p2p.NetAddress) - // Check if it is our address - OurAddress(*p2p.NetAddress) bool - - AddPrivateIDs([]string) - - // Add and remove an address - AddAddress(addr *p2p.NetAddress, src *p2p.NetAddress) error - RemoveAddress(*p2p.NetAddress) - - // Check if the address is in the book - HasAddress(*p2p.NetAddress) bool - - // Do we need more peers? - NeedMoreAddrs() bool - // Is Address Book Empty? Answer should not depend on being in your own - // address book, or private peers - Empty() bool - - // Pick an address to dial - PickAddress(biasTowardsNewAddrs int) *p2p.NetAddress - - // Mark address - MarkGood(types.NodeID) - MarkAttempt(*p2p.NetAddress) - MarkBad(*p2p.NetAddress, time.Duration) // Move peer to bad peers list - // Add bad peers back to addrBook - ReinstateBadPeers() - - IsGood(*p2p.NetAddress) bool - IsBanned(*p2p.NetAddress) bool - - // Send a selection of addresses to peers - GetSelection() []*p2p.NetAddress - // Send a selection of addresses with bias - GetSelectionWithBias(biasTowardsNewAddrs int) []*p2p.NetAddress - - Size() int - - // Persist to disk - Save() -} - -var _ AddrBook = (*addrBook)(nil) - -// addrBook - concurrency safe peer address manager. -// Implements AddrBook. -type addrBook struct { - service.BaseService - - // accessed concurrently - mtx tmsync.Mutex - ourAddrs map[string]struct{} - privateIDs map[types.NodeID]struct{} - addrLookup map[types.NodeID]*knownAddress // new & old - badPeers map[types.NodeID]*knownAddress // blacklisted peers - bucketsOld []map[string]*knownAddress - bucketsNew []map[string]*knownAddress - nOld int - nNew int - - // immutable after creation - filePath string - key string // random prefix for bucket placement - routabilityStrict bool - hasher hash.Hash64 - - wg sync.WaitGroup -} - -func mustNewHasher() hash.Hash64 { - key := crypto.CRandBytes(highwayhash.Size) - hasher, err := highwayhash.New64(key) - if err != nil { - panic(err) - } - return hasher -} - -// NewAddrBook creates a new address book. -// Use Start to begin processing asynchronous address updates. -func NewAddrBook(filePath string, routabilityStrict bool) AddrBook { - am := &addrBook{ - ourAddrs: make(map[string]struct{}), - privateIDs: make(map[types.NodeID]struct{}), - addrLookup: make(map[types.NodeID]*knownAddress), - badPeers: make(map[types.NodeID]*knownAddress), - filePath: filePath, - routabilityStrict: routabilityStrict, - } - am.init() - am.BaseService = *service.NewBaseService(nil, "AddrBook", am) - return am -} - -// Initialize the buckets. 
-// When modifying this, don't forget to update loadFromFile() -func (a *addrBook) init() { - a.key = crypto.CRandHex(24) // 24/2 * 8 = 96 bits - // New addr buckets - a.bucketsNew = make([]map[string]*knownAddress, newBucketCount) - for i := range a.bucketsNew { - a.bucketsNew[i] = make(map[string]*knownAddress) - } - // Old addr buckets - a.bucketsOld = make([]map[string]*knownAddress, oldBucketCount) - for i := range a.bucketsOld { - a.bucketsOld[i] = make(map[string]*knownAddress) - } - a.hasher = mustNewHasher() -} - -// OnStart implements Service. -func (a *addrBook) OnStart() error { - if err := a.BaseService.OnStart(); err != nil { - return err - } - a.loadFromFile(a.filePath) - - // wg.Add to ensure that any invocation of .Wait() - // later on will wait for saveRoutine to terminate. - a.wg.Add(1) - go a.saveRoutine() - - return nil -} - -// OnStop implements Service. -func (a *addrBook) OnStop() { - a.BaseService.OnStop() -} - -func (a *addrBook) Wait() { - a.wg.Wait() -} - -func (a *addrBook) FilePath() string { - return a.filePath -} - -//------------------------------------------------------- - -// AddOurAddress one of our addresses. -func (a *addrBook) AddOurAddress(addr *p2p.NetAddress) { - a.mtx.Lock() - defer a.mtx.Unlock() - - a.Logger.Info("Add our address to book", "addr", addr) - a.ourAddrs[addr.String()] = struct{}{} -} - -// OurAddress returns true if it is our address. -func (a *addrBook) OurAddress(addr *p2p.NetAddress) bool { - a.mtx.Lock() - defer a.mtx.Unlock() - - _, ok := a.ourAddrs[addr.String()] - return ok -} - -func (a *addrBook) AddPrivateIDs(ids []string) { - a.mtx.Lock() - defer a.mtx.Unlock() - - for _, id := range ids { - a.privateIDs[types.NodeID(id)] = struct{}{} - } -} - -// AddAddress implements AddrBook -// Add address to a "new" bucket. If it's already in one, only add it probabilistically. -// Returns error if the addr is non-routable. Does not add self. -// NOTE: addr must not be nil -func (a *addrBook) AddAddress(addr *p2p.NetAddress, src *p2p.NetAddress) error { - a.mtx.Lock() - defer a.mtx.Unlock() - - return a.addAddress(addr, src) -} - -// RemoveAddress implements AddrBook - removes the address from the book. -func (a *addrBook) RemoveAddress(addr *p2p.NetAddress) { - a.mtx.Lock() - defer a.mtx.Unlock() - - a.removeAddress(addr) -} - -// IsGood returns true if peer was ever marked as good and haven't -// done anything wrong since then. -func (a *addrBook) IsGood(addr *p2p.NetAddress) bool { - a.mtx.Lock() - defer a.mtx.Unlock() - - return a.addrLookup[addr.ID].isOld() -} - -// IsBanned returns true if the peer is currently banned -func (a *addrBook) IsBanned(addr *p2p.NetAddress) bool { - a.mtx.Lock() - _, ok := a.badPeers[addr.ID] - a.mtx.Unlock() - - return ok -} - -// HasAddress returns true if the address is in the book. -func (a *addrBook) HasAddress(addr *p2p.NetAddress) bool { - a.mtx.Lock() - defer a.mtx.Unlock() - - ka := a.addrLookup[addr.ID] - return ka != nil -} - -// NeedMoreAddrs implements AddrBook - returns true if there are not have enough addresses in the book. -func (a *addrBook) NeedMoreAddrs() bool { - return a.Size() < needAddressThreshold -} - -// Empty implements AddrBook - returns true if there are no addresses in the address book. -// Does not count the peer appearing in its own address book, or private peers. -func (a *addrBook) Empty() bool { - return a.Size() == 0 -} - -// PickAddress implements AddrBook. It picks an address to connect to. 
-// The address is picked randomly from an old or new bucket according -// to the biasTowardsNewAddrs argument, which must be between [0, 100] (or else is truncated to that range) -// and determines how biased we are to pick an address from a new bucket. -// PickAddress returns nil if the AddrBook is empty or if we try to pick -// from an empty bucket. -// nolint:gosec // G404: Use of weak random number generator -func (a *addrBook) PickAddress(biasTowardsNewAddrs int) *p2p.NetAddress { - a.mtx.Lock() - defer a.mtx.Unlock() - - bookSize := a.size() - if bookSize <= 0 { - if bookSize < 0 { - panic(fmt.Sprintf("Addrbook size %d (new: %d + old: %d) is less than 0", a.nNew+a.nOld, a.nNew, a.nOld)) - } - return nil - } - if biasTowardsNewAddrs > 100 { - biasTowardsNewAddrs = 100 - } - if biasTowardsNewAddrs < 0 { - biasTowardsNewAddrs = 0 - } - - // Bias between new and old addresses. - oldCorrelation := math.Sqrt(float64(a.nOld)) * (100.0 - float64(biasTowardsNewAddrs)) - newCorrelation := math.Sqrt(float64(a.nNew)) * float64(biasTowardsNewAddrs) - - // pick a random peer from a random bucket - var bucket map[string]*knownAddress - pickFromOldBucket := (newCorrelation+oldCorrelation)*mrand.Float64() < oldCorrelation - if (pickFromOldBucket && a.nOld == 0) || - (!pickFromOldBucket && a.nNew == 0) { - return nil - } - // loop until we pick a random non-empty bucket - for len(bucket) == 0 { - if pickFromOldBucket { - bucket = a.bucketsOld[mrand.Intn(len(a.bucketsOld))] - } else { - bucket = a.bucketsNew[mrand.Intn(len(a.bucketsNew))] - } - } - // pick a random index and loop over the map to return that index - randIndex := mrand.Intn(len(bucket)) - for _, ka := range bucket { - if randIndex == 0 { - return ka.Addr - } - randIndex-- - } - return nil -} - -// MarkGood implements AddrBook - it marks the peer as good and -// moves it into an "old" bucket. -func (a *addrBook) MarkGood(id types.NodeID) { - a.mtx.Lock() - defer a.mtx.Unlock() - - ka := a.addrLookup[id] - if ka == nil { - return - } - ka.markGood() - if ka.isNew() { - if err := a.moveToOld(ka); err != nil { - a.Logger.Error("Error moving address to old", "err", err) - } - } -} - -// MarkAttempt implements AddrBook - it marks that an attempt was made to connect to the address. -func (a *addrBook) MarkAttempt(addr *p2p.NetAddress) { - a.mtx.Lock() - defer a.mtx.Unlock() - - ka := a.addrLookup[addr.ID] - if ka == nil { - return - } - ka.markAttempt() -} - -// MarkBad implements AddrBook. Kicks address out from book, places -// the address in the badPeers pool. -func (a *addrBook) MarkBad(addr *p2p.NetAddress, banTime time.Duration) { - a.mtx.Lock() - defer a.mtx.Unlock() - - if a.addBadPeer(addr, banTime) { - a.removeAddress(addr) - } -} - -// ReinstateBadPeers removes bad peers from ban list and places them into a new -// bucket. -func (a *addrBook) ReinstateBadPeers() { - a.mtx.Lock() - defer a.mtx.Unlock() - - for _, ka := range a.badPeers { - if ka.isBanned() { - continue - } - - bucket, err := a.calcNewBucket(ka.Addr, ka.Src) - if err != nil { - a.Logger.Error("Failed to calculate new bucket (bad peer won't be reinstantiated)", - "addr", ka.Addr, "err", err) - continue - } - - if err := a.addToNewBucket(ka, bucket); err != nil { - a.Logger.Error("Error adding peer to new bucket", "err", err) - } - delete(a.badPeers, ka.ID()) - - a.Logger.Info("Reinstated address", "addr", ka.Addr) - } -} - -// GetSelection implements AddrBook. -// It randomly selects some addresses (old & new). Suitable for peer-exchange protocols. 
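The square-root weighting inside PickAddress above is easier to see in isolation. The sketch below uses hypothetical helper names, not part of the deleted API, and estimates how often the old buckets win for a book with 64 old and 256 new addresses at a 30% bias towards new:

package main

import (
	"fmt"
	"math"
	mrand "math/rand"
)

// pickFromOld mirrors the weighting in PickAddress: the chance of drawing
// from an old bucket grows with sqrt(nOld) and with (100 - bias).
func pickFromOld(nOld, nNew, biasTowardsNewAddrs int) bool {
	oldCorrelation := math.Sqrt(float64(nOld)) * (100.0 - float64(biasTowardsNewAddrs))
	newCorrelation := math.Sqrt(float64(nNew)) * float64(biasTowardsNewAddrs)
	return (newCorrelation+oldCorrelation)*mrand.Float64() < oldCorrelation
}

func main() {
	const trials = 100000
	old := 0
	for i := 0; i < trials; i++ {
		if pickFromOld(64, 256, 30) {
			old++
		}
	}
	// sqrt(64)*70 = 560 vs sqrt(256)*30 = 480, so roughly 54% old picks.
	fmt.Printf("old buckets chosen in %.1f%% of picks\n", 100*float64(old)/trials)
}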
-// Must never return a nil address. -func (a *addrBook) GetSelection() []*p2p.NetAddress { - a.mtx.Lock() - defer a.mtx.Unlock() - - bookSize := a.size() - if bookSize <= 0 { - if bookSize < 0 { - panic(fmt.Sprintf("Addrbook size %d (new: %d + old: %d) is less than 0", a.nNew+a.nOld, a.nNew, a.nOld)) - } - return nil - } - - numAddresses := tmmath.MaxInt( - tmmath.MinInt(minGetSelection, bookSize), - bookSize*getSelectionPercent/100) - numAddresses = tmmath.MinInt(maxGetSelection, numAddresses) - - // XXX: instead of making a list of all addresses, shuffling, and slicing a random chunk, - // could we just select a random numAddresses of indexes? - allAddr := make([]*p2p.NetAddress, bookSize) - i := 0 - for _, ka := range a.addrLookup { - allAddr[i] = ka.Addr - i++ - } - - // Fisher-Yates shuffle the array. We only need to do the first - // `numAddresses' since we are throwing the rest. - for i := 0; i < numAddresses; i++ { - // pick a number between current index and the end - // nolint:gosec // G404: Use of weak random number generator - j := mrand.Intn(len(allAddr)-i) + i - allAddr[i], allAddr[j] = allAddr[j], allAddr[i] - } - - // slice off the limit we are willing to share. - return allAddr[:numAddresses] -} - -func percentageOfNum(p, n int) int { - return int(math.Round((float64(p) / float64(100)) * float64(n))) -} - -// GetSelectionWithBias implements AddrBook. -// It randomly selects some addresses (old & new). Suitable for peer-exchange protocols. -// Must never return a nil address. -// -// Each address is picked randomly from an old or new bucket according to the -// biasTowardsNewAddrs argument, which must be between [0, 100] (or else is truncated to -// that range) and determines how biased we are to pick an address from a new -// bucket. -func (a *addrBook) GetSelectionWithBias(biasTowardsNewAddrs int) []*p2p.NetAddress { - a.mtx.Lock() - defer a.mtx.Unlock() - - bookSize := a.size() - if bookSize <= 0 { - if bookSize < 0 { - panic(fmt.Sprintf("Addrbook size %d (new: %d + old: %d) is less than 0", a.nNew+a.nOld, a.nNew, a.nOld)) - } - return nil - } - - if biasTowardsNewAddrs > 100 { - biasTowardsNewAddrs = 100 - } - if biasTowardsNewAddrs < 0 { - biasTowardsNewAddrs = 0 - } - - numAddresses := tmmath.MaxInt( - tmmath.MinInt(minGetSelection, bookSize), - bookSize*getSelectionPercent/100) - numAddresses = tmmath.MinInt(maxGetSelection, numAddresses) - - // number of new addresses that, if possible, should be in the beginning of the selection - // if there are no enough old addrs, will choose new addr instead. - numRequiredNewAdd := tmmath.MaxInt(percentageOfNum(biasTowardsNewAddrs, numAddresses), numAddresses-a.nOld) - selection := a.randomPickAddresses(bucketTypeNew, numRequiredNewAdd) - selection = append(selection, a.randomPickAddresses(bucketTypeOld, numAddresses-len(selection))...) - return selection -} - -//------------------------------------------------ - -// Size returns the number of addresses in the book. -func (a *addrBook) Size() int { - a.mtx.Lock() - defer a.mtx.Unlock() - - return a.size() -} - -func (a *addrBook) size() int { - return a.nNew + a.nOld -} - -//---------------------------------------------------------- - -// Save persists the address book to disk. 
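The shuffle in GetSelection above stops after numAddresses swaps rather than shuffling the whole slice, since everything past that index is discarded anyway. A standalone sketch of the same truncated Fisher-Yates (the helper name is illustrative):

package main

import (
	"fmt"
	mrand "math/rand"
)

// headSample fixes up only the first k positions of items; each of the k
// slots is filled from the not-yet-used remainder, so the head is a
// uniform random sample without paying for a full shuffle.
func headSample(items []string, k int) []string {
	for i := 0; i < k; i++ {
		j := mrand.Intn(len(items)-i) + i // index in [i, len)
		items[i], items[j] = items[j], items[i]
	}
	return items[:k]
}

func main() {
	addrs := []string{"a", "b", "c", "d", "e", "f", "g", "h"}
	fmt.Println(headSample(addrs, 3)) // e.g. [f a d]
}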
-func (a *addrBook) Save() {
-	a.saveToFile(a.filePath) // thread safe
-}
-
-func (a *addrBook) saveRoutine() {
-	defer a.wg.Done()
-
-	saveFileTicker := time.NewTicker(dumpAddressInterval)
-out:
-	for {
-		select {
-		case <-saveFileTicker.C:
-			a.saveToFile(a.filePath)
-		case <-a.Quit():
-			break out
-		}
-	}
-	saveFileTicker.Stop()
-	a.saveToFile(a.filePath)
-}
-
-//----------------------------------------------------------
-
-func (a *addrBook) getBucket(bucketType byte, bucketIdx int) map[string]*knownAddress {
-	switch bucketType {
-	case bucketTypeNew:
-		return a.bucketsNew[bucketIdx]
-	case bucketTypeOld:
-		return a.bucketsOld[bucketIdx]
-	default:
-		panic("Invalid bucket type")
-	}
-}
-
-// Adds ka to a new bucket. Returns an error only if the address is already in an old bucket.
-// NOTE: a full bucket is handled by expiring an entry, so fullness never fails the add.
-func (a *addrBook) addToNewBucket(ka *knownAddress, bucketIdx int) error {
-	// Consistency check to ensure we don't add an already known address
-	if ka.isOld() {
-		return errAddrBookOldAddressNewBucket{ka.Addr, bucketIdx}
-	}
-
-	addrStr := ka.Addr.String()
-	bucket := a.getBucket(bucketTypeNew, bucketIdx)
-
-	// Already exists?
-	if _, ok := bucket[addrStr]; ok {
-		return nil
-	}
-
-	// Enforce max addresses.
-	if len(bucket) > newBucketSize {
-		a.Logger.Info("new bucket is full, expiring new")
-		a.expireNew(bucketIdx)
-	}
-
-	// Add to bucket.
-	bucket[addrStr] = ka
-	// increment nNew if the peer doesn't already exist in a bucket
-	if ka.addBucketRef(bucketIdx) == 1 {
-		a.nNew++
-	}
-
-	// Add it to addrLookup
-	a.addrLookup[ka.ID()] = ka
-	return nil
-}
-
-// Adds ka to an old bucket. Returns false if it couldn't do it because the buckets are full.
-func (a *addrBook) addToOldBucket(ka *knownAddress, bucketIdx int) bool {
-	// Sanity check
-	if ka.isNew() {
-		a.Logger.Error(fmt.Sprintf("Cannot add new address to old bucket: %v", ka))
-		return false
-	}
-	if len(ka.Buckets) != 0 {
-		a.Logger.Error(fmt.Sprintf("Cannot add already old address to another old bucket: %v", ka))
-		return false
-	}
-
-	addrStr := ka.Addr.String()
-	bucket := a.getBucket(bucketTypeOld, bucketIdx)
-
-	// Already exists?
-	if _, ok := bucket[addrStr]; ok {
-		return true
-	}
-
-	// Enforce max addresses.
-	if len(bucket) > oldBucketSize {
-		return false
-	}
-
-	// Add to bucket.
- bucket[addrStr] = ka - if ka.addBucketRef(bucketIdx) == 1 { - a.nOld++ - } - - // Ensure in addrLookup - a.addrLookup[ka.ID()] = ka - - return true -} - -func (a *addrBook) removeFromBucket(ka *knownAddress, bucketType byte, bucketIdx int) { - if ka.BucketType != bucketType { - a.Logger.Error(fmt.Sprintf("Bucket type mismatch: %v", ka)) - return - } - bucket := a.getBucket(bucketType, bucketIdx) - delete(bucket, ka.Addr.String()) - if ka.removeBucketRef(bucketIdx) == 0 { - if bucketType == bucketTypeNew { - a.nNew-- - } else { - a.nOld-- - } - delete(a.addrLookup, ka.ID()) - } -} - -func (a *addrBook) removeFromAllBuckets(ka *knownAddress) { - for _, bucketIdx := range ka.Buckets { - bucket := a.getBucket(ka.BucketType, bucketIdx) - delete(bucket, ka.Addr.String()) - } - ka.Buckets = nil - if ka.BucketType == bucketTypeNew { - a.nNew-- - } else { - a.nOld-- - } - delete(a.addrLookup, ka.ID()) -} - -//---------------------------------------------------------- - -func (a *addrBook) pickOldest(bucketType byte, bucketIdx int) *knownAddress { - bucket := a.getBucket(bucketType, bucketIdx) - var oldest *knownAddress - for _, ka := range bucket { - if oldest == nil || ka.LastAttempt.Before(oldest.LastAttempt) { - oldest = ka - } - } - return oldest -} - -// adds the address to a "new" bucket. if its already in one, -// it only adds it probabilistically -func (a *addrBook) addAddress(addr, src *p2p.NetAddress) error { - if addr == nil || src == nil { - return ErrAddrBookNilAddr{addr, src} - } - - if err := addr.Valid(); err != nil { - return ErrAddrBookInvalidAddr{Addr: addr, AddrErr: err} - } - - if _, ok := a.badPeers[addr.ID]; ok { - return ErrAddressBanned{addr} - } - - if _, ok := a.privateIDs[addr.ID]; ok { - return ErrAddrBookPrivate{addr} - } - - if _, ok := a.privateIDs[src.ID]; ok { - return ErrAddrBookPrivateSrc{src} - } - - // TODO: we should track ourAddrs by ID and by IP:PORT and refuse both. - if _, ok := a.ourAddrs[addr.String()]; ok { - return ErrAddrBookSelf{addr} - } - - if a.routabilityStrict && !addr.Routable() { - return ErrAddrBookNonRoutable{addr} - } - - ka := a.addrLookup[addr.ID] - if ka != nil { - // If its already old and the address ID's are the same, ignore it. - // Thereby avoiding issues with a node on the network attempting to change - // the IP of a known node ID. (Which could yield an eclipse attack on the node) - if ka.isOld() && ka.Addr.ID == addr.ID { - return nil - } - // Already in max new buckets. - if len(ka.Buckets) == maxNewBucketsPerAddress { - return nil - } - // The more entries we have, the less likely we are to add more. 
- factor := int32(2 * len(ka.Buckets)) - // nolint:gosec // G404: Use of weak random number generator - if mrand.Int31n(factor) != 0 { - return nil - } - } else { - ka = newKnownAddress(addr, src) - } - - bucket, err := a.calcNewBucket(addr, src) - if err != nil { - return err - } - return a.addToNewBucket(ka, bucket) -} - -func (a *addrBook) randomPickAddresses(bucketType byte, num int) []*p2p.NetAddress { - var buckets []map[string]*knownAddress - switch bucketType { - case bucketTypeNew: - buckets = a.bucketsNew - case bucketTypeOld: - buckets = a.bucketsOld - default: - panic("unexpected bucketType") - } - total := 0 - for _, bucket := range buckets { - total += len(bucket) - } - addresses := make([]*knownAddress, 0, total) - for _, bucket := range buckets { - for _, ka := range bucket { - addresses = append(addresses, ka) - } - } - selection := make([]*p2p.NetAddress, 0, num) - chosenSet := make(map[string]bool, num) - rand := tmrand.NewRand() - rand.Shuffle(total, func(i, j int) { - addresses[i], addresses[j] = addresses[j], addresses[i] - }) - for _, addr := range addresses { - if chosenSet[addr.Addr.String()] { - continue - } - chosenSet[addr.Addr.String()] = true - selection = append(selection, addr.Addr) - if len(selection) >= num { - return selection - } - } - return selection -} - -// Make space in the new buckets by expiring the really bad entries. -// If no bad entries are available we remove the oldest. -func (a *addrBook) expireNew(bucketIdx int) { - for addrStr, ka := range a.bucketsNew[bucketIdx] { - // If an entry is bad, throw it away - if ka.isBad() { - a.Logger.Info(fmt.Sprintf("expiring bad address %v", addrStr)) - a.removeFromBucket(ka, bucketTypeNew, bucketIdx) - return - } - } - - // If we haven't thrown out a bad entry, throw out the oldest entry - oldest := a.pickOldest(bucketTypeNew, bucketIdx) - a.removeFromBucket(oldest, bucketTypeNew, bucketIdx) -} - -// Promotes an address from new to old. If the destination bucket is full, -// demote the oldest one to a "new" bucket. -// TODO: Demote more probabilistically? -func (a *addrBook) moveToOld(ka *knownAddress) error { - // Sanity check - if ka.isOld() { - a.Logger.Error(fmt.Sprintf("Cannot promote address that is already old %v", ka)) - return nil - } - if len(ka.Buckets) == 0 { - a.Logger.Error(fmt.Sprintf("Cannot promote address that isn't in any new buckets %v", ka)) - return nil - } - - // Remove from all (new) buckets. - a.removeFromAllBuckets(ka) - // It's officially old now. - ka.BucketType = bucketTypeOld - - // Try to add it to its oldBucket destination. - oldBucketIdx, err := a.calcOldBucket(ka.Addr) - if err != nil { - return err - } - added := a.addToOldBucket(ka, oldBucketIdx) - if !added { - // No room; move the oldest to a new bucket - oldest := a.pickOldest(bucketTypeOld, oldBucketIdx) - a.removeFromBucket(oldest, bucketTypeOld, oldBucketIdx) - newBucketIdx, err := a.calcNewBucket(oldest.Addr, oldest.Src) - if err != nil { - return err - } - if err := a.addToNewBucket(oldest, newBucketIdx); err != nil { - a.Logger.Error("Error adding peer to old bucket", "err", err) - } - - // Finally, add our ka to old bucket again. 
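The dampening at the top of this hunk is worth a second look: an address already referenced by n new buckets is re-added with probability 1/(2n), so well-known addresses stop accumulating bucket slots. A small sketch of the same rule (names are illustrative), with an empirical check of the acceptance rate:

package main

import (
	"fmt"
	mrand "math/rand"
)

// shouldAddAgain reproduces the addAddress dampening: accept with
// probability 1/(2*refs) once an address already sits in refs buckets.
func shouldAddAgain(refs int) bool {
	if refs == 0 {
		return true
	}
	return mrand.Int31n(int32(2*refs)) == 0
}

func main() {
	for _, refs := range []int{1, 2, 3} {
		hits := 0
		for i := 0; i < 100000; i++ {
			if shouldAddAgain(refs) {
				hits++
			}
		}
		// Expect roughly 50%, 25%, and 16.7%.
		fmt.Printf("refs=%d accepted %.1f%% of the time\n", refs, float64(hits)/1000)
	}
}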
- added = a.addToOldBucket(ka, oldBucketIdx) - if !added { - a.Logger.Error(fmt.Sprintf("Could not re-add ka %v to oldBucketIdx %v", ka, oldBucketIdx)) - } - } - return nil -} - -func (a *addrBook) removeAddress(addr *p2p.NetAddress) { - ka := a.addrLookup[addr.ID] - if ka == nil { - return - } - a.Logger.Info("Remove address from book", "addr", addr) - a.removeFromAllBuckets(ka) -} - -func (a *addrBook) addBadPeer(addr *p2p.NetAddress, banTime time.Duration) bool { - // check it exists in addrbook - ka := a.addrLookup[addr.ID] - // check address is not already there - if ka == nil { - return false - } - - if _, alreadyBadPeer := a.badPeers[addr.ID]; !alreadyBadPeer { - // add to bad peer list - ka.ban(banTime) - a.badPeers[addr.ID] = ka - a.Logger.Info("Add address to blacklist", "addr", addr) - } - return true -} - -//--------------------------------------------------------------------- -// calculate bucket placements - -// hash(key + sourcegroup + int64(hash(key + group + sourcegroup)) % bucket_per_group) % num_new_buckets -func (a *addrBook) calcNewBucket(addr, src *p2p.NetAddress) (int, error) { - data1 := []byte{} - data1 = append(data1, []byte(a.key)...) - data1 = append(data1, []byte(a.groupKey(addr))...) - data1 = append(data1, []byte(a.groupKey(src))...) - hash1, err := a.hash(data1) - if err != nil { - return 0, err - } - hash64 := binary.BigEndian.Uint64(hash1) - hash64 %= newBucketsPerGroup - var hashbuf [8]byte - binary.BigEndian.PutUint64(hashbuf[:], hash64) - data2 := []byte{} - data2 = append(data2, []byte(a.key)...) - data2 = append(data2, a.groupKey(src)...) - data2 = append(data2, hashbuf[:]...) - - hash2, err := a.hash(data2) - if err != nil { - return 0, err - } - result := int(binary.BigEndian.Uint64(hash2) % newBucketCount) - return result, nil -} - -// hash(key + group + int64(hash(key + addr)) % buckets_per_group) % num_old_buckets -func (a *addrBook) calcOldBucket(addr *p2p.NetAddress) (int, error) { - data1 := []byte{} - data1 = append(data1, []byte(a.key)...) - data1 = append(data1, []byte(addr.String())...) - hash1, err := a.hash(data1) - if err != nil { - return 0, err - } - hash64 := binary.BigEndian.Uint64(hash1) - hash64 %= oldBucketsPerGroup - var hashbuf [8]byte - binary.BigEndian.PutUint64(hashbuf[:], hash64) - data2 := []byte{} - data2 = append(data2, []byte(a.key)...) - data2 = append(data2, a.groupKey(addr)...) - data2 = append(data2, hashbuf[:]...) - - hash2, err := a.hash(data2) - if err != nil { - return 0, err - } - result := int(binary.BigEndian.Uint64(hash2) % oldBucketCount) - return result, nil -} - -// Return a string representing the network group of this address. -// This is the /16 for IPv4 (e.g. 1.2.0.0), the /32 (/36 for he.net) for IPv6, the string -// "local" for a local address and the string "unroutable" for an unroutable -// address. 
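calcNewBucket and calcOldBucket above implement the bucket derivation bitcoind uses: an inner hash confines each source group to a handful of buckets, and an outer hash spreads those choices across the table, which limits how much of the book a single peer can fill. The standalone sketch below swaps the keyed HighwayHash for FNV-1a purely to stay dependency-free; in the real book the secret key is what makes placement unpredictable, so FNV is not a production substitute.

package main

import (
	"encoding/binary"
	"fmt"
	"hash/fnv"
)

const (
	newBucketsPerGroup = 32
	newBucketCount     = 256
)

// hash64 stands in for the keyed HighwayHash; unkeyed hashing would let an
// attacker aim addresses at specific buckets.
func hash64(b []byte) uint64 {
	h := fnv.New64a()
	h.Write(b)
	return h.Sum64()
}

// calcNewBucket reproduces the two-level derivation:
// hash(key + srcGroup + hash(key + addrGroup + srcGroup) % newBucketsPerGroup) % newBucketCount
func calcNewBucket(key, addrGroup, srcGroup string) int {
	inner := hash64([]byte(key+addrGroup+srcGroup)) % newBucketsPerGroup
	var buf [8]byte
	binary.BigEndian.PutUint64(buf[:], inner)
	outer := hash64(append([]byte(key+srcGroup), buf[:]...))
	return int(outer % newBucketCount)
}

func main() {
	fmt.Println(calcNewBucket("secret", "12.1.0.0", "173.1.0.0"))
}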
-func (a *addrBook) groupKey(na *p2p.NetAddress) string { - return groupKeyFor(na, a.routabilityStrict) -} - -func groupKeyFor(na *p2p.NetAddress, routabilityStrict bool) string { - if routabilityStrict && na.Local() { - return "local" - } - if routabilityStrict && !na.Routable() { - return "unroutable" - } - - if ipv4 := na.IP.To4(); ipv4 != nil { - return na.IP.Mask(net.CIDRMask(16, 32)).String() - } - - if na.RFC6145() || na.RFC6052() { - // last four bytes are the ip address - ip := na.IP[12:16] - return ip.Mask(net.CIDRMask(16, 32)).String() - } - - if na.RFC3964() { - ip := na.IP[2:6] - return ip.Mask(net.CIDRMask(16, 32)).String() - } - - if na.RFC4380() { - // teredo tunnels have the last 4 bytes as the v4 address XOR - // 0xff. - ip := net.IP(make([]byte, 4)) - for i, byte := range na.IP[12:16] { - ip[i] = byte ^ 0xff - } - return ip.Mask(net.CIDRMask(16, 32)).String() - } - - if na.OnionCatTor() { - // group is keyed off the first 4 bits of the actual onion key. - return fmt.Sprintf("tor:%d", na.IP[6]&((1<<4)-1)) - } - - // OK, so now we know ourselves to be a IPv6 address. - // bitcoind uses /32 for everything, except for Hurricane Electric's - // (he.net) IP range, which it uses /36 for. - bits := 32 - heNet := &net.IPNet{IP: net.ParseIP("2001:470::"), Mask: net.CIDRMask(32, 128)} - if heNet.Contains(na.IP) { - bits = 36 - } - ipv6Mask := net.CIDRMask(bits, 128) - return na.IP.Mask(ipv6Mask).String() -} - -func (a *addrBook) hash(b []byte) ([]byte, error) { - a.hasher.Reset() - a.hasher.Write(b) - return a.hasher.Sum(nil), nil -} diff --git a/internal/p2p/pex/addrbook_test.go b/internal/p2p/pex/addrbook_test.go deleted file mode 100644 index 3d21314ad8..0000000000 --- a/internal/p2p/pex/addrbook_test.go +++ /dev/null @@ -1,777 +0,0 @@ -package pex - -import ( - "encoding/hex" - "fmt" - "io/ioutil" - "math" - mrand "math/rand" - "net" - "os" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/libs/log" - tmmath "github.com/tendermint/tendermint/libs/math" - tmrand "github.com/tendermint/tendermint/libs/rand" - "github.com/tendermint/tendermint/types" -) - -// FIXME These tests should not rely on .(*addrBook) assertions - -func TestAddrBookPickAddress(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - - // 0 addresses - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - assert.Zero(t, book.Size()) - - addr := book.PickAddress(50) - assert.Nil(t, addr, "expected no address") - - randAddrs := randNetAddressPairs(t, 1) - addrSrc := randAddrs[0] - err := book.AddAddress(addrSrc.addr, addrSrc.src) - require.NoError(t, err) - - // pick an address when we only have new address - addr = book.PickAddress(0) - assert.NotNil(t, addr, "expected an address") - addr = book.PickAddress(50) - assert.NotNil(t, addr, "expected an address") - addr = book.PickAddress(100) - assert.NotNil(t, addr, "expected an address") - - // pick an address when we only have old address - book.MarkGood(addrSrc.addr.ID) - addr = book.PickAddress(0) - assert.NotNil(t, addr, "expected an address") - addr = book.PickAddress(50) - assert.NotNil(t, addr, "expected an address") - - // in this case, nNew==0 but we biased 100% to new, so we return nil - addr = book.PickAddress(100) - assert.Nil(t, addr, "did not expected an address") -} - -func TestAddrBookSaveLoad(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - - // 0 addresses - 
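For ordinary addresses, the effect of groupKeyFor above is simply CIDR masking: all peers in one /16 (IPv4) or /32 (IPv6) share a group and therefore compete for the same few buckets. A minimal sketch of that core case (the special ranges handled by the function above are omitted):

package main

import (
	"fmt"
	"net"
)

// groupOf masks an address down to its network group, mirroring the plain
// IPv4/IPv6 paths of groupKeyFor.
func groupOf(ip string) string {
	parsed := net.ParseIP(ip)
	if v4 := parsed.To4(); v4 != nil {
		return parsed.Mask(net.CIDRMask(16, 32)).String()
	}
	return parsed.Mask(net.CIDRMask(32, 128)).String()
}

func main() {
	fmt.Println(groupOf("12.1.2.3"))    // 12.1.0.0
	fmt.Println(groupOf("12.1.99.7"))   // 12.1.0.0 - same group
	fmt.Println(groupOf("2602:100::1")) // 2602:100::
}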
book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - book.Save() - - book = NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - err := book.Start() - require.NoError(t, err) - - assert.True(t, book.Empty()) - - // 100 addresses - randAddrs := randNetAddressPairs(t, 100) - - for _, addrSrc := range randAddrs { - err := book.AddAddress(addrSrc.addr, addrSrc.src) - require.NoError(t, err) - } - - assert.Equal(t, 100, book.Size()) - book.Save() - - book = NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - err = book.Start() - require.NoError(t, err) - - assert.Equal(t, 100, book.Size()) -} - -func TestAddrBookLookup(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - randAddrs := randNetAddressPairs(t, 100) - - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - for _, addrSrc := range randAddrs { - addr := addrSrc.addr - src := addrSrc.src - err := book.AddAddress(addr, src) - require.NoError(t, err) - - ka := book.HasAddress(addr) - assert.True(t, ka, "Expected to find KnownAddress %v but wasn't there.", addr) - } -} - -func TestAddrBookPromoteToOld(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - randAddrs := randNetAddressPairs(t, 100) - - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - for _, addrSrc := range randAddrs { - err := book.AddAddress(addrSrc.addr, addrSrc.src) - require.NoError(t, err) - } - - // Attempt all addresses. - for _, addrSrc := range randAddrs { - book.MarkAttempt(addrSrc.addr) - } - - // Promote half of them - for i, addrSrc := range randAddrs { - if i%2 == 0 { - book.MarkGood(addrSrc.addr.ID) - } - } - - // TODO: do more testing :) - - selection := book.GetSelection() - t.Logf("selection: %v", selection) - - if len(selection) > book.Size() { - t.Errorf("selection could not be bigger than the book") - } - - selection = book.GetSelectionWithBias(30) - t.Logf("selection: %v", selection) - - if len(selection) > book.Size() { - t.Errorf("selection with bias could not be bigger than the book") - } - - assert.Equal(t, book.Size(), 100, "expecting book size to be 100") -} - -func TestAddrBookHandlesDuplicates(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - book := NewAddrBook(fname, true) - - book.SetLogger(log.TestingLogger()) - - randAddrs := randNetAddressPairs(t, 100) - - differentSrc := randIPv4Address(t) - for _, addrSrc := range randAddrs { - err := book.AddAddress(addrSrc.addr, addrSrc.src) - require.NoError(t, err) - err = book.AddAddress(addrSrc.addr, addrSrc.src) // duplicate - require.NoError(t, err) - err = book.AddAddress(addrSrc.addr, differentSrc) // different src - require.NoError(t, err) - } - - assert.Equal(t, 100, book.Size()) -} - -type netAddressPair struct { - addr *p2p.NetAddress - src *p2p.NetAddress -} - -func randNetAddressPairs(t *testing.T, n int) []netAddressPair { - randAddrs := make([]netAddressPair, n) - for i := 0; i < n; i++ { - randAddrs[i] = netAddressPair{addr: randIPv4Address(t), src: randIPv4Address(t)} - } - return randAddrs -} - -func randIPv4Address(t *testing.T) *p2p.NetAddress { - for { - ip := fmt.Sprintf("%v.%v.%v.%v", - mrand.Intn(254)+1, - mrand.Intn(255), - mrand.Intn(255), - mrand.Intn(255), - ) - port := mrand.Intn(65535-1) + 1 - id := types.NodeID(hex.EncodeToString(tmrand.Bytes(types.NodeIDByteLength))) - idAddr := id.AddressString(fmt.Sprintf("%v:%v", ip, port)) - addr, err := types.NewNetAddressString(idAddr) - assert.Nil(t, err, "error generating rand network address") - 
if addr.Routable() { - return addr - } - } -} - -func TestAddrBookRemoveAddress(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - - addr := randIPv4Address(t) - err := book.AddAddress(addr, addr) - require.NoError(t, err) - assert.Equal(t, 1, book.Size()) - - book.RemoveAddress(addr) - assert.Equal(t, 0, book.Size()) - - nonExistingAddr := randIPv4Address(t) - book.RemoveAddress(nonExistingAddr) - assert.Equal(t, 0, book.Size()) -} - -func TestAddrBookGetSelectionWithOneMarkedGood(t *testing.T) { - // create a book with 10 addresses, 1 good/old and 9 new - book, _ := createAddrBookWithMOldAndNNewAddrs(t, 1, 9) - addrs := book.GetSelectionWithBias(biasToSelectNewPeers) - assert.NotNil(t, addrs) - assertMOldAndNNewAddrsInSelection(t, 1, 9, addrs, book) -} - -func TestAddrBookGetSelectionWithOneNotMarkedGood(t *testing.T) { - // create a book with 10 addresses, 9 good/old and 1 new - book, _ := createAddrBookWithMOldAndNNewAddrs(t, 9, 1) - addrs := book.GetSelectionWithBias(biasToSelectNewPeers) - assert.NotNil(t, addrs) - assertMOldAndNNewAddrsInSelection(t, 9, 1, addrs, book) -} - -func TestAddrBookGetSelectionReturnsNilWhenAddrBookIsEmpty(t *testing.T) { - book, _ := createAddrBookWithMOldAndNNewAddrs(t, 0, 0) - addrs := book.GetSelectionWithBias(biasToSelectNewPeers) - assert.Nil(t, addrs) -} - -func TestAddrBookGetSelection(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - - // 1) empty book - assert.Empty(t, book.GetSelection()) - - // 2) add one address - addr := randIPv4Address(t) - err := book.AddAddress(addr, addr) - require.NoError(t, err) - - assert.Equal(t, 1, len(book.GetSelection())) - assert.Equal(t, addr, book.GetSelection()[0]) - - // 3) add a bunch of addresses - randAddrs := randNetAddressPairs(t, 100) - for _, addrSrc := range randAddrs { - err := book.AddAddress(addrSrc.addr, addrSrc.src) - require.NoError(t, err) - } - - // check there is no duplicates - addrs := make(map[string]*p2p.NetAddress) - selection := book.GetSelection() - for _, addr := range selection { - if dup, ok := addrs[addr.String()]; ok { - t.Fatalf("selection %v contains duplicates %v", selection, dup) - } - addrs[addr.String()] = addr - } - - if len(selection) > book.Size() { - t.Errorf("selection %v could not be bigger than the book", selection) - } -} - -func TestAddrBookGetSelectionWithBias(t *testing.T) { - const biasTowardsNewAddrs = 30 - - fname := createTempFileName(t, "addrbook_test") - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - - // 1) empty book - selection := book.GetSelectionWithBias(biasTowardsNewAddrs) - assert.Empty(t, selection) - - // 2) add one address - addr := randIPv4Address(t) - err := book.AddAddress(addr, addr) - require.NoError(t, err) - - selection = book.GetSelectionWithBias(biasTowardsNewAddrs) - assert.Equal(t, 1, len(selection)) - assert.Equal(t, addr, selection[0]) - - // 3) add a bunch of addresses - randAddrs := randNetAddressPairs(t, 100) - for _, addrSrc := range randAddrs { - err := book.AddAddress(addrSrc.addr, addrSrc.src) - require.NoError(t, err) - } - - // check there is no duplicates - addrs := make(map[string]*p2p.NetAddress) - selection = book.GetSelectionWithBias(biasTowardsNewAddrs) - for _, addr := range selection { - if dup, ok := addrs[addr.String()]; ok { - t.Fatalf("selection %v contains duplicates %v", selection, dup) - } - 
addrs[addr.String()] = addr - } - - if len(selection) > book.Size() { - t.Fatalf("selection %v could not be bigger than the book", selection) - } - - // 4) mark 80% of the addresses as good - randAddrsLen := len(randAddrs) - for i, addrSrc := range randAddrs { - if int((float64(i)/float64(randAddrsLen))*100) >= 20 { - book.MarkGood(addrSrc.addr.ID) - } - } - - selection = book.GetSelectionWithBias(biasTowardsNewAddrs) - - // check that ~70% of addresses returned are good - good := 0 - for _, addr := range selection { - if book.IsGood(addr) { - good++ - } - } - - got, expected := int((float64(good)/float64(len(selection)))*100), 100-biasTowardsNewAddrs - - // compute some slack to protect against small differences due to rounding: - slack := int(math.Round(float64(100) / float64(len(selection)))) - if got > expected+slack { - t.Fatalf( - "got more good peers (%% got: %d, %% expected: %d, number of good addrs: %d, total: %d)", - got, - expected, - good, - len(selection), - ) - } - if got < expected-slack { - t.Fatalf( - "got fewer good peers (%% got: %d, %% expected: %d, number of good addrs: %d, total: %d)", - got, - expected, - good, - len(selection), - ) - } -} - -func TestAddrBookHasAddress(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - addr := randIPv4Address(t) - err := book.AddAddress(addr, addr) - require.NoError(t, err) - - assert.True(t, book.HasAddress(addr)) - - book.RemoveAddress(addr) - - assert.False(t, book.HasAddress(addr)) -} - -func testCreatePrivateAddrs(t *testing.T, numAddrs int) ([]*p2p.NetAddress, []string) { - t.Helper() - addrs := make([]*p2p.NetAddress, numAddrs) - for i := 0; i < numAddrs; i++ { - addrs[i] = randIPv4Address(t) - } - - private := make([]string, numAddrs) - for i, addr := range addrs { - private[i] = string(addr.ID) - } - return addrs, private -} - -func TestBanBadPeers(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - - addr := randIPv4Address(t) - _ = book.AddAddress(addr, addr) - - book.MarkBad(addr, 1*time.Second) - // addr should not reachable - assert.False(t, book.HasAddress(addr)) - assert.True(t, book.IsBanned(addr)) - - err := book.AddAddress(addr, addr) - // book should not add address from the blacklist - assert.Error(t, err) - - time.Sleep(1 * time.Second) - book.ReinstateBadPeers() - // address should be reinstated in the new bucket - assert.EqualValues(t, 1, book.Size()) - assert.True(t, book.HasAddress(addr)) - assert.False(t, book.IsGood(addr)) -} - -func TestAddrBookEmpty(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - // Check that empty book is empty - require.True(t, book.Empty()) - // Check that book with our address is empty - book.AddOurAddress(randIPv4Address(t)) - require.True(t, book.Empty()) - // Check that book with private addrs is empty - _, privateIds := testCreatePrivateAddrs(t, 5) - book.AddPrivateIDs(privateIds) - require.True(t, book.Empty()) - - // Check that book with address is not empty - err := book.AddAddress(randIPv4Address(t), randIPv4Address(t)) - require.NoError(t, err) - require.False(t, book.Empty()) -} - -func TestPrivatePeers(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - - addrs, private := testCreatePrivateAddrs(t, 10) - 
book.AddPrivateIDs(private) - - // private addrs must not be added - for _, addr := range addrs { - err := book.AddAddress(addr, addr) - if assert.Error(t, err) { - _, ok := err.(ErrAddrBookPrivate) - assert.True(t, ok) - } - } - - // addrs coming from private peers must not be added - err := book.AddAddress(randIPv4Address(t), addrs[0]) - if assert.Error(t, err) { - _, ok := err.(ErrAddrBookPrivateSrc) - assert.True(t, ok) - } -} - -func testAddrBookAddressSelection(t *testing.T, bookSize int) { - // generate all combinations of old (m) and new addresses - for nBookOld := 0; nBookOld <= bookSize; nBookOld++ { - nBookNew := bookSize - nBookOld - dbgStr := fmt.Sprintf("book of size %d (new %d, old %d)", bookSize, nBookNew, nBookOld) - - // create book and get selection - book, _ := createAddrBookWithMOldAndNNewAddrs(t, nBookOld, nBookNew) - addrs := book.GetSelectionWithBias(biasToSelectNewPeers) - assert.NotNil(t, addrs, "%s - expected a non-nil selection", dbgStr) - nAddrs := len(addrs) - assert.NotZero(t, nAddrs, "%s - expected at least one address in selection", dbgStr) - - // check there's no nil addresses - for _, addr := range addrs { - if addr == nil { - t.Fatalf("%s - got nil address in selection %v", dbgStr, addrs) - } - } - - // XXX: shadowing - nOld, nNew := countOldAndNewAddrsInSelection(addrs, book) - - // Given: - // n - num new addrs, m - num old addrs - // k - num new addrs expected in the beginning (based on bias %) - // i=min(n, max(k,r-m)), aka expNew - // j=min(m, r-i), aka expOld - // - // We expect this layout: - // indices: 0...i-1 i...i+j-1 - // addresses: N0..Ni-1 O0..Oj-1 - // - // There is at least one partition and at most three. - var ( - k = percentageOfNum(biasToSelectNewPeers, nAddrs) - expNew = tmmath.MinInt(nNew, tmmath.MaxInt(k, nAddrs-nBookOld)) - expOld = tmmath.MinInt(nOld, nAddrs-expNew) - ) - - // Verify that the number of old and new addresses are as expected - if nNew != expNew { - t.Fatalf("%s - expected new addrs %d, got %d", dbgStr, expNew, nNew) - } - if nOld != expOld { - t.Fatalf("%s - expected old addrs %d, got %d", dbgStr, expOld, nOld) - } - - // Verify that the order of addresses is as expected - // Get the sequence types and lengths of the selection - seqLens, seqTypes, err := analyseSelectionLayout(book, addrs) - assert.NoError(t, err, "%s", dbgStr) - - // Build a list with the expected lengths of partitions and another with the expected types, e.g.: - // expSeqLens = [10, 22], expSeqTypes = [1, 2] - // means we expect 10 new (type 1) addresses followed by 22 old (type 2) addresses. 
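The expNew/expOld arithmetic explained above compacts nicely into a helper. The sketch below (hypothetical names, with Go 1.17-style min/max helpers) computes the expected split for 10 selected addresses at a 30% bias from a book holding 3 old and 20 new addresses:

package main

import (
	"fmt"
	"math"
)

func minInt(a, b int) int {
	if a < b {
		return a
	}
	return b
}

func maxInt(a, b int) int {
	if a > b {
		return a
	}
	return b
}

// expectedSplit mirrors the layout maths above: the bias owes k new
// addresses, but when the book has fewer than r-k old addresses the
// selection tops up with new ones instead.
func expectedSplit(r, biasPct, bookOld, bookNew int) (expNew, expOld int) {
	k := int(math.Round(float64(biasPct) / 100 * float64(r)))
	expNew = minInt(bookNew, maxInt(k, r-bookOld))
	expOld = minInt(bookOld, r-expNew)
	return
}

func main() {
	n, o := expectedSplit(10, 30, 3, 20)
	fmt.Printf("expect %d new then %d old\n", n, o) // expect 7 new then 3 old
}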
-	var expSeqLens []int
-	var expSeqTypes []int
-
-	switch {
-	case expOld == 0: // all new addresses
-		expSeqLens = []int{nAddrs}
-		expSeqTypes = []int{1}
-	case expNew == 0: // all old addresses
-		expSeqLens = []int{nAddrs}
-		expSeqTypes = []int{2}
-	case nAddrs-expNew-expOld == 0: // new addresses, old addresses
-		expSeqLens = []int{expNew, expOld}
-		expSeqTypes = []int{1, 2}
-	}
-
-	assert.Equal(t, expSeqLens, seqLens,
-		"%s - expected sequence lengths of old/new %v, got %v",
-		dbgStr, expSeqLens, seqLens)
-	assert.Equal(t, expSeqTypes, seqTypes,
-		"%s - expected sequence types (1-new, 2-old) was %v, got %v",
-		dbgStr, expSeqTypes, seqTypes)
-	}
-}
-
-func TestMultipleAddrBookAddressSelection(t *testing.T) {
-	// test books with smaller size, < N
-	const N = 32
-	for bookSize := 1; bookSize < N; bookSize++ {
-		testAddrBookAddressSelection(t, bookSize)
-	}
-
-	// Test for two books with sizes from following ranges
-	ranges := [...][]int{{33, 100}, {100, 175}}
-	bookSizes := make([]int, 0, len(ranges))
-	for _, r := range ranges {
-		bookSizes = append(bookSizes, mrand.Intn(r[1]-r[0])+r[0])
-	}
-	t.Logf("Testing address selection for the following book sizes %v\n", bookSizes)
-	for _, bookSize := range bookSizes {
-		testAddrBookAddressSelection(t, bookSize)
-	}
-}
-
-func TestAddrBookAddDoesNotOverwriteOldIP(t *testing.T) {
-	fname := createTempFileName(t, "addrbook_test")
-
-	// This test adds a peer to the address book and marks it good.
-	// It then attempts to override the peer's IP, by adding a peer with the same ID
-	// but different IP. We distinguish the IPs by "RealIP" and "OverrideAttemptIP"
-	peerID := "678503e6c8f50db7279c7da3cb9b072aac4bc0d5"
-	peerRealIP := "1.1.1.1:26656"
-	peerOverrideAttemptIP := "2.2.2.2:26656"
-	SrcAddr := "b0dd378c3fbc4c156cd6d302a799f0d2e4227201@159.89.121.174:26656"
-
-	// There is a chance that AddAddress will ignore the new peer it's given.
-	// So we repeat trying to override the peer several times,
-	// to ensure we aren't in a case that got probabilistically ignored
-	numOverrideAttempts := 10
-
-	peerRealAddr, err := types.NewNetAddressString(peerID + "@" + peerRealIP)
-	require.Nil(t, err)
-
-	peerOverrideAttemptAddr, err := types.NewNetAddressString(peerID + "@" + peerOverrideAttemptIP)
-	require.Nil(t, err)
-
-	src, err := types.NewNetAddressString(SrcAddr)
-	require.Nil(t, err)
-
-	book := NewAddrBook(fname, true)
-	book.SetLogger(log.TestingLogger())
-	err = book.AddAddress(peerRealAddr, src)
-	require.Nil(t, err)
-	book.MarkAttempt(peerRealAddr)
-	book.MarkGood(peerRealAddr.ID)
-
-	// Double check that adding a peer again doesn't error
-	err = book.AddAddress(peerRealAddr, src)
-	require.Nil(t, err)
-
-	// Try changing ip but keeping the same node id. (change 1.1.1.1 to 2.2.2.2)
-	// This should just be ignored, and not error.
-	for i := 0; i < numOverrideAttempts; i++ {
-		err = book.AddAddress(peerOverrideAttemptAddr, src)
-		require.Nil(t, err)
-	}
-	// Now check that the IP was not overridden.
-	// This is done by sampling several peers from the addr book
-	// and ensuring they all have the correct IP.
-	// In the expected functionality the book only contains 1 peer, hence this passes.
-	for i := 0; i < numOverrideAttempts; i++ {
-		selection := book.GetSelection()
-		for _, addr := range selection {
-			require.Equal(t, addr.IP, peerRealAddr.IP)
-		}
-	}
-}
-
-func TestAddrBookGroupKey(t *testing.T) {
-	// non-strict routability
-	testCases := []struct {
-		name   string
-		ip     string
-		expKey string
-	}{
-		// IPv4 normal.
- {"ipv4 normal class a", "12.1.2.3", "12.1.0.0"}, - {"ipv4 normal class b", "173.1.2.3", "173.1.0.0"}, - {"ipv4 normal class c", "196.1.2.3", "196.1.0.0"}, - - // IPv6/IPv4 translations. - {"ipv6 rfc3964 with ipv4 encap", "2002:0c01:0203::", "12.1.0.0"}, - {"ipv6 rfc4380 toredo ipv4", "2001:0:1234::f3fe:fdfc", "12.1.0.0"}, - {"ipv6 rfc6052 well-known prefix with ipv4", "64:ff9b::0c01:0203", "12.1.0.0"}, - {"ipv6 rfc6145 translated ipv4", "::ffff:0:0c01:0203", "12.1.0.0"}, - - // Tor. - {"ipv6 tor onioncat", "fd87:d87e:eb43:1234::5678", "tor:2"}, - {"ipv6 tor onioncat 2", "fd87:d87e:eb43:1245::6789", "tor:2"}, - {"ipv6 tor onioncat 3", "fd87:d87e:eb43:1345::6789", "tor:3"}, - - // IPv6 normal. - {"ipv6 normal", "2602:100::1", "2602:100::"}, - {"ipv6 normal 2", "2602:0100::1234", "2602:100::"}, - {"ipv6 hurricane electric", "2001:470:1f10:a1::2", "2001:470:1000::"}, - {"ipv6 hurricane electric 2", "2001:0470:1f10:a1::2", "2001:470:1000::"}, - } - - for i, tc := range testCases { - nip := net.ParseIP(tc.ip) - key := groupKeyFor(types.NewNetAddressIPPort(nip, 26656), false) - assert.Equal(t, tc.expKey, key, "#%d", i) - } - - // strict routability - testCases = []struct { - name string - ip string - expKey string - }{ - // Local addresses. - {"ipv4 localhost", "127.0.0.1", "local"}, - {"ipv6 localhost", "::1", "local"}, - {"ipv4 zero", "0.0.0.0", "local"}, - {"ipv4 first octet zero", "0.1.2.3", "local"}, - - // Unroutable addresses. - {"ipv4 invalid bcast", "255.255.255.255", "unroutable"}, - {"ipv4 rfc1918 10/8", "10.1.2.3", "unroutable"}, - {"ipv4 rfc1918 172.16/12", "172.16.1.2", "unroutable"}, - {"ipv4 rfc1918 192.168/16", "192.168.1.2", "unroutable"}, - {"ipv6 rfc3849 2001:db8::/32", "2001:db8::1234", "unroutable"}, - {"ipv4 rfc3927 169.254/16", "169.254.1.2", "unroutable"}, - {"ipv6 rfc4193 fc00::/7", "fc00::1234", "unroutable"}, - {"ipv6 rfc4843 2001:10::/28", "2001:10::1234", "unroutable"}, - {"ipv6 rfc4862 fe80::/64", "fe80::1234", "unroutable"}, - } - - for i, tc := range testCases { - nip := net.ParseIP(tc.ip) - key := groupKeyFor(types.NewNetAddressIPPort(nip, 26656), true) - assert.Equal(t, tc.expKey, key, "#%d", i) - } -} - -func assertMOldAndNNewAddrsInSelection(t *testing.T, m, n int, addrs []*p2p.NetAddress, book *addrBook) { - nOld, nNew := countOldAndNewAddrsInSelection(addrs, book) - assert.Equal(t, m, nOld, "old addresses") - assert.Equal(t, n, nNew, "new addresses") -} - -func createTempFileName(t *testing.T, prefix string) string { - t.Helper() - f, err := ioutil.TempFile("", prefix) - if err != nil { - panic(err) - } - - fname := f.Name() - if err := f.Close(); err != nil { - t.Fatal(err) - } - - t.Cleanup(func() { _ = os.Remove(fname) }) - - return fname -} - -func createAddrBookWithMOldAndNNewAddrs(t *testing.T, nOld, nNew int) (book *addrBook, fname string) { - t.Helper() - fname = createTempFileName(t, "addrbook_test") - - book = NewAddrBook(fname, true).(*addrBook) - book.SetLogger(log.TestingLogger()) - assert.Zero(t, book.Size()) - - randAddrs := randNetAddressPairs(t, nOld) - for _, addr := range randAddrs { - err := book.AddAddress(addr.addr, addr.src) - require.NoError(t, err) - book.MarkGood(addr.addr.ID) - } - - randAddrs = randNetAddressPairs(t, nNew) - for _, addr := range randAddrs { - err := book.AddAddress(addr.addr, addr.src) - require.NoError(t, err) - } - - return -} - -func countOldAndNewAddrsInSelection(addrs []*p2p.NetAddress, book *addrBook) (nOld, nNew int) { - for _, addr := range addrs { - if book.IsGood(addr) { - nOld++ - } else { - nNew++ - 
-		}
-	}
-	return
-}
-
-// Analyze the layout of the selection specified by 'addrs'
-// Returns:
-// - seqLens  - the lengths of the sequences of addresses of same type
-// - seqTypes - the types of sequences in selection
-func analyseSelectionLayout(book *addrBook, addrs []*p2p.NetAddress) (seqLens, seqTypes []int, err error) {
-	// address types are: 0 - nil, 1 - new, 2 - old
-	var (
-		prevType      = 0
-		currentSeqLen = 0
-	)
-
-	for _, addr := range addrs {
-		addrType := 0
-		if book.IsGood(addr) {
-			addrType = 2
-		} else {
-			addrType = 1
-		}
-		if addrType != prevType && prevType != 0 {
-			seqLens = append(seqLens, currentSeqLen)
-			seqTypes = append(seqTypes, prevType)
-			currentSeqLen = 0
-		}
-		currentSeqLen++
-		prevType = addrType
-	}
-
-	seqLens = append(seqLens, currentSeqLen)
-	seqTypes = append(seqTypes, prevType)
-
-	return
-}
diff --git a/internal/p2p/pex/bench_test.go b/internal/p2p/pex/bench_test.go
deleted file mode 100644
index 37019f60a0..0000000000
--- a/internal/p2p/pex/bench_test.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package pex
-
-import (
-	"testing"
-
-	"github.com/tendermint/tendermint/types"
-)
-
-func BenchmarkAddrBook_hash(b *testing.B) {
-	book := &addrBook{
-		ourAddrs:          make(map[string]struct{}),
-		privateIDs:        make(map[types.NodeID]struct{}),
-		addrLookup:        make(map[types.NodeID]*knownAddress),
-		badPeers:          make(map[types.NodeID]*knownAddress),
-		filePath:          "",
-		routabilityStrict: true,
-	}
-	book.init()
-	msg := []byte(`foobar`)
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		_, _ = book.hash(msg)
-	}
-}
diff --git a/internal/p2p/pex/doc.go b/internal/p2p/pex/doc.go
index dc4f5d37a5..70a5f61743 100644
--- a/internal/p2p/pex/doc.go
+++ b/internal/p2p/pex/doc.go
@@ -7,19 +7,14 @@ The PEX reactor is a continuous service which periodically requests
 addresses and serves addresses to other peers. There are two versions of this
 service aligning with the two p2p frameworks that Tendermint currently
 supports.
 
-V1 is coupled with the Switch (which handles peer connections and routing of
-messages) and, alongside exchanging peer information in the form of port/IP
-pairs, also has the responsibility of dialing peers and ensuring that a
-node has a sufficient amount of peers connected.
-
-V2 is embedded with the new p2p stack and uses the peer manager to advertise
+The reactor is embedded with the new p2p stack and uses the peer manager to advertise
 peers as well as add new peers to the peer store. The V2 reactor passes a
 different set of proto messages which include a list of
 [urls](https://golang.org/pkg/net/url/#URL).These can be used to save a set of
 endpoints that each peer uses. The V2 reactor has backwards compatibility with
 V1. It can also handle V1 messages.
 
-The V2 reactor is able to tweak the intensity of it's search by decreasing or
+The reactor is able to tweak the intensity of its search by decreasing or
 increasing the interval between each request. It tracks connected peers via
 a linked list, sending a request to the node at the front of the list and adding
 it to the back of the list once a response is received.
Using this method, a diff --git a/internal/p2p/pex/errors.go b/internal/p2p/pex/errors.go deleted file mode 100644 index 275e71bf9e..0000000000 --- a/internal/p2p/pex/errors.go +++ /dev/null @@ -1,89 +0,0 @@ -package pex - -import ( - "errors" - "fmt" - - "github.com/tendermint/tendermint/internal/p2p" -) - -type ErrAddrBookNonRoutable struct { - Addr *p2p.NetAddress -} - -func (err ErrAddrBookNonRoutable) Error() string { - return fmt.Sprintf("Cannot add non-routable address %v", err.Addr) -} - -type errAddrBookOldAddressNewBucket struct { - Addr *p2p.NetAddress - BucketID int -} - -func (err errAddrBookOldAddressNewBucket) Error() string { - return fmt.Sprintf("failed consistency check!"+ - " Cannot add pre-existing address %v into new bucket %v", - err.Addr, err.BucketID) -} - -type ErrAddrBookSelf struct { - Addr *p2p.NetAddress -} - -func (err ErrAddrBookSelf) Error() string { - return fmt.Sprintf("Cannot add ourselves with address %v", err.Addr) -} - -type ErrAddrBookPrivate struct { - Addr *p2p.NetAddress -} - -func (err ErrAddrBookPrivate) Error() string { - return fmt.Sprintf("Cannot add private peer with address %v", err.Addr) -} - -func (err ErrAddrBookPrivate) PrivateAddr() bool { - return true -} - -type ErrAddrBookPrivateSrc struct { - Src *p2p.NetAddress -} - -func (err ErrAddrBookPrivateSrc) Error() string { - return fmt.Sprintf("Cannot add peer coming from private peer with address %v", err.Src) -} - -func (err ErrAddrBookPrivateSrc) PrivateAddr() bool { - return true -} - -type ErrAddrBookNilAddr struct { - Addr *p2p.NetAddress - Src *p2p.NetAddress -} - -func (err ErrAddrBookNilAddr) Error() string { - return fmt.Sprintf("Cannot add a nil address. Got (addr, src) = (%v, %v)", err.Addr, err.Src) -} - -type ErrAddrBookInvalidAddr struct { - Addr *p2p.NetAddress - AddrErr error -} - -func (err ErrAddrBookInvalidAddr) Error() string { - return fmt.Sprintf("Cannot add invalid address %v: %v", err.Addr, err.AddrErr) -} - -// ErrAddressBanned is thrown when the address has been banned and therefore cannot be used -type ErrAddressBanned struct { - Addr *p2p.NetAddress -} - -func (err ErrAddressBanned) Error() string { - return fmt.Sprintf("Address: %v is currently banned", err.Addr) -} - -// ErrUnsolicitedList is thrown when a peer provides a list of addresses that have not been asked for. -var ErrUnsolicitedList = errors.New("unsolicited pexAddrsMessage") diff --git a/internal/p2p/pex/file.go b/internal/p2p/pex/file.go deleted file mode 100644 index ce65f7d4d3..0000000000 --- a/internal/p2p/pex/file.go +++ /dev/null @@ -1,83 +0,0 @@ -package pex - -import ( - "encoding/json" - "fmt" - "os" - - "github.com/tendermint/tendermint/internal/libs/tempfile" -) - -/* Loading & Saving */ - -type addrBookJSON struct { - Key string `json:"key"` - Addrs []*knownAddress `json:"addrs"` -} - -func (a *addrBook) saveToFile(filePath string) { - a.mtx.Lock() - defer a.mtx.Unlock() - - a.Logger.Info("Saving AddrBook to file", "size", a.size()) - - addrs := make([]*knownAddress, 0, len(a.addrLookup)) - for _, ka := range a.addrLookup { - addrs = append(addrs, ka) - } - aJSON := &addrBookJSON{ - Key: a.key, - Addrs: addrs, - } - - jsonBytes, err := json.MarshalIndent(aJSON, "", "\t") - if err != nil { - a.Logger.Error("Failed to save AddrBook to file", "err", err) - return - } - err = tempfile.WriteFileAtomic(filePath, jsonBytes, 0644) - if err != nil { - a.Logger.Error("Failed to save AddrBook to file", "file", filePath, "err", err) - } -} - -// Returns false if file does not exist. 
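saveToFile above delegates the actual write to tempfile.WriteFileAtomic, an internal helper. The essence of that pattern, sketched here with only the standard library as a simplified stand-in for the real helper: write to a temp file in the same directory, then rename over the target, so a crash mid-write can never leave a truncated address book.

package main

import (
	"os"
	"path/filepath"
)

// writeFileAtomic is a simplified stand-in for the internal helper; the
// rename is atomic on POSIX filesystems when source and target share a
// directory.
func writeFileAtomic(path string, data []byte, perm os.FileMode) error {
	tmp, err := os.CreateTemp(filepath.Dir(path), ".tmp-*")
	if err != nil {
		return err
	}
	defer os.Remove(tmp.Name()) // a no-op once the rename has succeeded

	if _, err := tmp.Write(data); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Chmod(perm); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Close(); err != nil {
		return err
	}
	return os.Rename(tmp.Name(), path)
}

func main() {
	if err := writeFileAtomic("/tmp/addrbook.json", []byte(`{"key":""}`), 0644); err != nil {
		panic(err)
	}
}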
-// cmn.Panics if file is corrupt. -func (a *addrBook) loadFromFile(filePath string) bool { - // If doesn't exist, do nothing. - _, err := os.Stat(filePath) - if os.IsNotExist(err) { - return false - } - - // Load addrBookJSON{} - r, err := os.Open(filePath) - if err != nil { - panic(fmt.Sprintf("Error opening file %s: %v", filePath, err)) - } - defer r.Close() - aJSON := &addrBookJSON{} - dec := json.NewDecoder(r) - err = dec.Decode(aJSON) - if err != nil { - panic(fmt.Sprintf("Error reading file %s: %v", filePath, err)) - } - - // Restore all the fields... - // Restore the key - a.key = aJSON.Key - // Restore .bucketsNew & .bucketsOld - for _, ka := range aJSON.Addrs { - for _, bucketIndex := range ka.Buckets { - bucket := a.getBucket(ka.BucketType, bucketIndex) - bucket[ka.Addr.String()] = ka - } - a.addrLookup[ka.ID()] = ka - if ka.BucketType == bucketTypeNew { - a.nNew++ - } else { - a.nOld++ - } - } - return true -} diff --git a/internal/p2p/pex/known_address.go b/internal/p2p/pex/known_address.go deleted file mode 100644 index 2a2ebe0386..0000000000 --- a/internal/p2p/pex/known_address.go +++ /dev/null @@ -1,141 +0,0 @@ -package pex - -import ( - "time" - - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/types" -) - -// knownAddress tracks information about a known network address -// that is used to determine how viable an address is. -type knownAddress struct { - Addr *p2p.NetAddress `json:"addr"` - Src *p2p.NetAddress `json:"src"` - Buckets []int `json:"buckets"` - Attempts int32 `json:"attempts"` - BucketType byte `json:"bucket_type"` - LastAttempt time.Time `json:"last_attempt"` - LastSuccess time.Time `json:"last_success"` - LastBanTime time.Time `json:"last_ban_time"` -} - -func newKnownAddress(addr *p2p.NetAddress, src *p2p.NetAddress) *knownAddress { - return &knownAddress{ - Addr: addr, - Src: src, - Attempts: 0, - LastAttempt: time.Now(), - BucketType: bucketTypeNew, - Buckets: nil, - } -} - -func (ka *knownAddress) ID() types.NodeID { - return ka.Addr.ID -} - -func (ka *knownAddress) isOld() bool { - return ka.BucketType == bucketTypeOld -} - -func (ka *knownAddress) isNew() bool { - return ka.BucketType == bucketTypeNew -} - -func (ka *knownAddress) markAttempt() { - now := time.Now() - ka.LastAttempt = now - ka.Attempts++ -} - -func (ka *knownAddress) markGood() { - now := time.Now() - ka.LastAttempt = now - ka.Attempts = 0 - ka.LastSuccess = now -} - -func (ka *knownAddress) ban(banTime time.Duration) { - if ka.LastBanTime.Before(time.Now().Add(banTime)) { - ka.LastBanTime = time.Now().Add(banTime) - } -} - -func (ka *knownAddress) isBanned() bool { - return ka.LastBanTime.After(time.Now()) -} - -func (ka *knownAddress) addBucketRef(bucketIdx int) int { - for _, bucket := range ka.Buckets { - if bucket == bucketIdx { - // TODO refactor to return error? - // log.Warn(Fmt("Bucket already exists in ka.Buckets: %v", ka)) - return -1 - } - } - ka.Buckets = append(ka.Buckets, bucketIdx) - return len(ka.Buckets) -} - -func (ka *knownAddress) removeBucketRef(bucketIdx int) int { - buckets := []int{} - for _, bucket := range ka.Buckets { - if bucket != bucketIdx { - buckets = append(buckets, bucket) - } - } - if len(buckets) != len(ka.Buckets)-1 { - // TODO refactor to return error? 
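A detail that is easy to misread in ban above: despite its name, LastBanTime stores the ban's expiry, and the Before check means a new ban can only push the expiry further out, never pull it in. In miniature (hypothetical type):

package main

import (
	"fmt"
	"time"
)

type banRecord struct{ expiry time.Time }

// ban extends the expiry but never shortens it, matching knownAddress.ban.
func (b *banRecord) ban(d time.Duration) {
	if until := time.Now().Add(d); b.expiry.Before(until) {
		b.expiry = until
	}
}

func (b *banRecord) isBanned() bool { return b.expiry.After(time.Now()) }

func main() {
	var r banRecord
	r.ban(24 * time.Hour)
	r.ban(time.Minute) // ignored: would shorten the ban
	fmt.Println("banned:", r.isBanned(), "until:", r.expiry.Format(time.RFC3339))
}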
- // log.Warn(Fmt("bucketIdx not found in ka.Buckets: %v", ka)) - return -1 - } - ka.Buckets = buckets - return len(ka.Buckets) -} - -/* - An address is bad if the address in question is a New address, has not been tried in the last - minute, and meets one of the following criteria: - - 1) It claims to be from the future - 2) It hasn't been seen in over a week - 3) It has failed at least three times and never succeeded - 4) It has failed ten times in the last week - - All addresses that meet these criteria are assumed to be worthless and not - worth keeping hold of. - -*/ -func (ka *knownAddress) isBad() bool { - // Is Old --> good - if ka.BucketType == bucketTypeOld { - return false - } - - // Has been attempted in the last minute --> good - if ka.LastAttempt.After(time.Now().Add(-1 * time.Minute)) { - return false - } - - // TODO: From the future? - - // Too old? - // TODO: should be a timestamp of last seen, not just last attempt - if ka.LastAttempt.Before(time.Now().Add(-1 * numMissingDays * time.Hour * 24)) { - return true - } - - // Never succeeded? - if ka.LastSuccess.IsZero() && ka.Attempts >= numRetries { - return true - } - - // Hasn't succeeded in too long? - if ka.LastSuccess.Before(time.Now().Add(-1*minBadDays*time.Hour*24)) && - ka.Attempts >= maxFailures { - return true - } - - return false -} diff --git a/internal/p2p/pex/params.go b/internal/p2p/pex/params.go deleted file mode 100644 index 29b4d45ab2..0000000000 --- a/internal/p2p/pex/params.go +++ /dev/null @@ -1,55 +0,0 @@ -package pex - -import "time" - -const ( - // addresses under which the address manager will claim to need more addresses. - needAddressThreshold = 1000 - - // interval used to dump the address cache to disk for future use. - dumpAddressInterval = time.Minute * 2 - - // max addresses in each old address bucket. - oldBucketSize = 64 - - // buckets we split old addresses over. - oldBucketCount = 64 - - // max addresses in each new address bucket. - newBucketSize = 64 - - // buckets that we spread new addresses over. - newBucketCount = 256 - - // old buckets over which an address group will be spread. - oldBucketsPerGroup = 4 - - // new buckets over which a source address group will be spread. - newBucketsPerGroup = 32 - - // buckets a frequently seen new address may end up in. - maxNewBucketsPerAddress = 4 - - // days before which we assume an address has vanished - // if we have not seen it announced in that long. - numMissingDays = 7 - - // tries without a single success before we assume an address is bad. - numRetries = 3 - - // max failures we will accept without a success before considering an address bad. - maxFailures = 10 // ? - - // days since the last success before we will consider evicting an address. - minBadDays = 7 - - // % of total addresses known returned by GetSelection. - getSelectionPercent = 23 - - // min addresses that must be returned by GetSelection. Useful for bootstrapping. 
-	minGetSelection = 32
-
-	// max addresses returned by GetSelection
-	// NOTE: this must match "maxMsgSize"
-	maxGetSelection = 250
-)
diff --git a/internal/p2p/pex/pex_reactor.go b/internal/p2p/pex/pex_reactor.go
deleted file mode 100644
index fa5319bfb9..0000000000
--- a/internal/p2p/pex/pex_reactor.go
+++ /dev/null
@@ -1,886 +0,0 @@
-package pex
-
-import (
-	"errors"
-	"fmt"
-	"math/rand"
-	"net"
-	"sync"
-	"time"
-
-	"github.com/gogo/protobuf/proto"
-
-	"github.com/tendermint/tendermint/internal/p2p"
-	"github.com/tendermint/tendermint/internal/p2p/conn"
-	"github.com/tendermint/tendermint/libs/cmap"
-	tmmath "github.com/tendermint/tendermint/libs/math"
-	tmrand "github.com/tendermint/tendermint/libs/rand"
-	"github.com/tendermint/tendermint/libs/service"
-	tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p"
-	"github.com/tendermint/tendermint/types"
-)
-
-type Peer = p2p.Peer
-
-const (
-	// PexChannel is a channel for PEX messages
-	PexChannel = byte(0x00)
-
-	// over-estimate of max NetAddress size
-	// hexID (40) + IP (16) + Port (2) + Name (100) ...
-	// NOTE: don't use a massive DNS name ..
-	maxAddressSize = 256
-
-	// NOTE: amplification factor!
-	// small request results in up to maxMsgSize response
-	maxMsgSize = maxAddressSize * maxGetSelection
-
-	// ensure we have enough peers
-	defaultEnsurePeersPeriod = 30 * time.Second
-
-	// Seed/Crawler constants
-
-	// minTimeBetweenCrawls is a minimum time between attempts to crawl a peer.
-	minTimeBetweenCrawls = 2 * time.Minute
-
-	// crawl some peers every crawlPeerPeriod
-	crawlPeerPeriod = 30 * time.Second
-
-	// try to connect to at least 1 peer every seedConnectMaxDelayPeriod
-	seedConnectMaxDelayPeriod = 5 * time.Second
-	// limit number of retries to dial other seeds during initialization
-	seedInitMaxAttemptToDial = 12
-
-	maxAttemptsToDial = 16 // ~ 35h in total (last attempt - 18h)
-
-	// if node connects to seed, it does not have any trusted peers.
-	// Especially in the beginning, node should have more trusted peers than
-	// untrusted.
-	biasToSelectNewPeers = 30 // 70 to select good peers
-
-	// if a peer is marked bad, it will be banned for at least this time period
-	defaultBanTime = 24 * time.Hour
-)
-
-type errMaxAttemptsToDial struct {
-}
-
-func (e errMaxAttemptsToDial) Error() string {
-	return fmt.Sprintf("reached max attempts %d to dial", maxAttemptsToDial)
-}
-
-type errTooEarlyToDial struct {
-	backoffDuration time.Duration
-	lastDialed      time.Time
-}
-
-func (e errTooEarlyToDial) Error() string {
-	return fmt.Sprintf(
-		"too early to dial (backoff duration: %d, last dialed: %v, time since: %v)",
-		e.backoffDuration, e.lastDialed, time.Since(e.lastDialed))
-}
-
-// Reactor handles PEX (peer exchange) and ensures that an
-// adequate number of peers are connected to the switch.
-//
-// It uses `AddrBook` (address book) to store `NetAddress`es of the peers.
-//
-// ## Preventing abuse
-//
-// Only accept pexAddrsMsg from peers we sent a corresponding pexRequestMsg to.
-// Only accept one pexRequestMsg every ~defaultEnsurePeersPeriod.
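Put together, the params.go constants above fix GetSelection's output size: 23% of the book, floored at 32 (or the whole book when it is smaller) and capped at 250 to match maxMsgSize. A quick check of the arithmetic:

package main

import "fmt"

const (
	getSelectionPercent = 23  // % of the book returned
	minGetSelection     = 32  // floor, useful when bootstrapping
	maxGetSelection     = 250 // cap, matches maxMsgSize
)

func minInt(a, b int) int {
	if a < b {
		return a
	}
	return b
}

// selectionSize reproduces GetSelection's sizing rule.
func selectionSize(bookSize int) int {
	n := bookSize * getSelectionPercent / 100
	if floor := minInt(minGetSelection, bookSize); n < floor {
		n = floor
	}
	return minInt(maxGetSelection, n)
}

func main() {
	for _, size := range []int{10, 100, 500, 2000} {
		fmt.Printf("book=%4d -> selection=%d\n", size, selectionSize(size))
	}
	// book=  10 -> selection=10
	// book= 100 -> selection=32
	// book= 500 -> selection=115
	// book=2000 -> selection=250
}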
-type Reactor struct { - p2p.BaseReactor - - book AddrBook - config *ReactorConfig - ensurePeersPeriod time.Duration // TODO: should go in the config - - // maps to prevent abuse - requestsSent *cmap.CMap // ID->struct{}: unanswered send requests - lastReceivedRequests *cmap.CMap // ID->time.Time: last time peer requested from us - - seedAddrs []*p2p.NetAddress - - attemptsToDial sync.Map // address (string) -> {number of attempts (int), last time dialed (time.Time)} - - // seed/crawled mode fields - crawlPeerInfos map[types.NodeID]crawlPeerInfo -} - -func (r *Reactor) minReceiveRequestInterval() time.Duration { - // NOTE: must be less than ensurePeersPeriod, otherwise we'll request - // peers too quickly from others and they'll think we're bad! - return r.ensurePeersPeriod / 3 -} - -// ReactorConfig holds reactor specific configuration data. -type ReactorConfig struct { - // Seed/Crawler mode - SeedMode bool - - // We want seeds to only advertise good peers. Therefore they should wait at - // least as long as we expect it to take for a peer to become good before - // disconnecting. - SeedDisconnectWaitPeriod time.Duration - - // Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) - PersistentPeersMaxDialPeriod time.Duration - - // Seeds is a list of addresses reactor may use - // if it can't connect to peers in the addrbook. - Seeds []string -} - -type _attemptsToDial struct { - number int - lastDialed time.Time -} - -// NewReactor creates new PEX reactor. -func NewReactor(b AddrBook, config *ReactorConfig) *Reactor { - r := &Reactor{ - book: b, - config: config, - ensurePeersPeriod: defaultEnsurePeersPeriod, - requestsSent: cmap.NewCMap(), - lastReceivedRequests: cmap.NewCMap(), - crawlPeerInfos: make(map[types.NodeID]crawlPeerInfo), - } - r.BaseReactor = *p2p.NewBaseReactor("PEX", r) - return r -} - -// OnStart implements BaseService -func (r *Reactor) OnStart() error { - err := r.book.Start() - if err != nil && err != service.ErrAlreadyStarted { - return err - } - - numOnline, seedAddrs, err := r.checkSeeds() - if err != nil { - return err - } else if numOnline == 0 && r.book.Empty() { - return errors.New("address book is empty and couldn't resolve any seed nodes") - } - - r.seedAddrs = seedAddrs - - // Check if this node should run - // in seed/crawler mode - if r.config.SeedMode { - go r.crawlPeersRoutine() - } else { - go r.ensurePeersRoutine() - } - return nil -} - -// OnStop implements BaseService -func (r *Reactor) OnStop() { - if err := r.book.Stop(); err != nil { - r.Logger.Error("Error stopping address book", "err", err) - } -} - -// GetChannels implements Reactor -func (r *Reactor) GetChannels() []*conn.ChannelDescriptor { - return []*conn.ChannelDescriptor{ - { - ID: PexChannel, - Priority: 1, - SendQueueCapacity: 10, - RecvMessageCapacity: maxMsgSize, - - MaxSendBytes: 200, - }, - } -} - -// AddPeer implements Reactor by adding peer to the address book (if inbound) -// or by requesting more addresses (if outbound). -func (r *Reactor) AddPeer(p Peer) { - if p.IsOutbound() { - // For outbound peers, the address is already in the books - - // either via DialPeersAsync or r.Receive. - // Ask it for more peers if we need. - if r.book.NeedMoreAddrs() { - r.RequestAddrs(p) - } - } else { - // inbound peer is its own source - addr, err := p.NodeInfo().NetAddress() - if err != nil { - r.Logger.Error("Failed to get peer NetAddress", "err", err, "peer", p) - return - } - - // Make it explicit that addr and src are the same for an inbound peer. 
- src := addr - - // add to book. dont RequestAddrs right away because - // we don't trust inbound as much - let ensurePeersRoutine handle it. - err = r.book.AddAddress(addr, src) - r.logErrAddrBook(err) - } -} - -// RemovePeer implements Reactor by resetting peer's requests info. -func (r *Reactor) RemovePeer(p Peer, reason interface{}) { - id := string(p.ID()) - r.requestsSent.Delete(id) - r.lastReceivedRequests.Delete(id) -} - -func (r *Reactor) logErrAddrBook(err error) { - if err != nil { - switch err.(type) { - case ErrAddrBookNilAddr: - r.Logger.Error("Failed to add new address", "err", err) - default: - // non-routable, self, full book, private, etc. - r.Logger.Debug("Failed to add new address", "err", err) - } - } -} - -// Receive implements Reactor by handling incoming PEX messages. -// XXX: do not call any methods that can block or incur heavy processing. -// https://github.com/tendermint/tendermint/issues/2888 -func (r *Reactor) Receive(chID byte, src Peer, msgBytes []byte) { - msg, err := decodeMsg(msgBytes) - if err != nil { - r.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err) - r.Switch.StopPeerForError(src, err) - return - } - // r.Logger.Debug("Received message", "src", src, "chId", chID, "msg", msg) - - switch msg := msg.(type) { - case *tmp2p.PexRequest: - - // NOTE: this is a prime candidate for amplification attacks, - // so it's important we - // 1) restrict how frequently peers can request - // 2) limit the output size - - // If we're a seed and this is an inbound peer, - // respond once and disconnect. - if r.config.SeedMode && !src.IsOutbound() { - id := string(src.ID()) - v := r.lastReceivedRequests.Get(id) - if v != nil { - // FlushStop/StopPeer are already - // running in a go-routine. - return - } - r.lastReceivedRequests.Set(id, time.Now()) - - // Send addrs and disconnect - r.SendAddrs(src, r.book.GetSelectionWithBias(biasToSelectNewPeers)) - go func() { - // In a go-routine so it doesn't block .Receive. - src.FlushStop() - r.Switch.StopPeerGracefully(src) - }() - - } else { - // Check we're not receiving requests too frequently. - if err := r.receiveRequest(src); err != nil { - r.Switch.StopPeerForError(src, err) - r.book.MarkBad(src.SocketAddr(), defaultBanTime) - return - } - r.SendAddrs(src, r.book.GetSelection()) - } - - case *tmp2p.PexResponse: - // If we asked for addresses, add them to the book - addrs, err := NetAddressesFromProto(msg.Addresses) - if err != nil { - r.Switch.StopPeerForError(src, err) - r.book.MarkBad(src.SocketAddr(), defaultBanTime) - return - } - err = r.ReceiveAddrs(addrs, src) - if err != nil { - r.Switch.StopPeerForError(src, err) - if err == ErrUnsolicitedList { - r.book.MarkBad(src.SocketAddr(), defaultBanTime) - } - return - } - - default: - r.Logger.Error(fmt.Sprintf("Unknown message type %T", msg)) - } -} - -// enforces a minimum amount of time between requests -func (r *Reactor) receiveRequest(src Peer) error { - id := string(src.ID()) - v := r.lastReceivedRequests.Get(id) - if v == nil { - // initialize with empty time - lastReceived := time.Time{} - r.lastReceivedRequests.Set(id, lastReceived) - return nil - } - - lastReceived := v.(time.Time) - if lastReceived.Equal(time.Time{}) { - // first time gets a free pass. 
then we start tracking the time - lastReceived = time.Now() - r.lastReceivedRequests.Set(id, lastReceived) - return nil - } - - now := time.Now() - minInterval := r.minReceiveRequestInterval() - if now.Sub(lastReceived) < minInterval { - return fmt.Errorf( - "peer (%v) sent next PEX request too soon. lastReceived: %v, now: %v, minInterval: %v. Disconnecting", - src.ID(), - lastReceived, - now, - minInterval, - ) - } - r.lastReceivedRequests.Set(id, now) - return nil -} - -// RequestAddrs asks peer for more addresses if we do not already have a -// request out for this peer. -func (r *Reactor) RequestAddrs(p Peer) { - id := string(p.ID()) - if _, exists := r.requestsSent.GetOrSet(id, struct{}{}); exists { - return - } - r.Logger.Debug("Request addrs", "from", p) - p.Send(PexChannel, mustEncode(&tmp2p.PexRequest{})) -} - -// ReceiveAddrs adds the given addrs to the addrbook if theres an open -// request for this peer and deletes the open request. -// If there's no open request for the src peer, it returns an error. -func (r *Reactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error { - id := string(src.ID()) - if !r.requestsSent.Has(id) { - return ErrUnsolicitedList - } - r.requestsSent.Delete(id) - - srcAddr, err := src.NodeInfo().NetAddress() - if err != nil { - return err - } - - srcIsSeed := false - for _, seedAddr := range r.seedAddrs { - if seedAddr.Equals(srcAddr) { - srcIsSeed = true - break - } - } - - for _, netAddr := range addrs { - // NOTE: we check netAddr validity and routability in book#AddAddress. - err = r.book.AddAddress(netAddr, srcAddr) - if err != nil { - r.logErrAddrBook(err) - // XXX: should we be strict about incoming data and disconnect from a - // peer here too? - continue - } - - // If this address came from a seed node, try to connect to it without - // waiting (#2093) - if srcIsSeed { - r.Logger.Info("Will dial address, which came from seed", "addr", netAddr, "seed", srcAddr) - go func(addr *p2p.NetAddress) { - err := r.dialPeer(addr) - if err != nil { - switch err.(type) { - case errMaxAttemptsToDial, errTooEarlyToDial, p2p.ErrCurrentlyDialingOrExistingAddress: - r.Logger.Debug(err.Error(), "addr", addr) - default: - r.Logger.Error(err.Error(), "addr", addr) - } - } - }(netAddr) - } - } - - return nil -} - -// SendAddrs sends addrs to the peer. -func (r *Reactor) SendAddrs(p Peer, netAddrs []*p2p.NetAddress) { - p.Send(PexChannel, mustEncode(&tmp2p.PexResponse{Addresses: NetAddressesToProto(netAddrs)})) -} - -// SetEnsurePeersPeriod sets period to ensure peers connected. -func (r *Reactor) SetEnsurePeersPeriod(d time.Duration) { - r.ensurePeersPeriod = d -} - -// Ensures that sufficient peers are connected. (continuous) -func (r *Reactor) ensurePeersRoutine() { - var ( - seed = tmrand.NewRand() - jitter = seed.Int63n(r.ensurePeersPeriod.Nanoseconds()) - ) - - // Randomize first round of communication to avoid thundering herd. - // If no peers are present directly start connecting so we guarantee swift - // setup with the help of configured seeds. - if r.nodeHasSomePeersOrDialingAny() { - time.Sleep(time.Duration(jitter)) - } - - // fire once immediately. - // ensures we dial the seeds right away if the book is empty - r.ensurePeers() - - // fire periodically - ticker := time.NewTicker(r.ensurePeersPeriod) - for { - select { - case <-ticker.C: - r.ensurePeers() - case <-r.Quit(): - ticker.Stop() - return - } - } -} - -// ensurePeers ensures that sufficient peers are connected. 
(once) -// -// heuristic that we haven't perfected yet, or, perhaps is manually edited by -// the node operator. It should not be used to compute what addresses are -// already connected or not. -func (r *Reactor) ensurePeers() { - var ( - out, in, dial = r.Switch.NumPeers() - numToDial = r.Switch.MaxNumOutboundPeers() - (out + dial) - ) - r.Logger.Info( - "Ensure peers", - "numOutPeers", out, - "numInPeers", in, - "numDialing", dial, - "numToDial", numToDial, - ) - - if numToDial <= 0 { - return - } - - // bias to prefer more vetted peers when we have fewer connections. - // not perfect, but somewhate ensures that we prioritize connecting to more-vetted - // NOTE: range here is [10, 90]. Too high ? - newBias := tmmath.MinInt(out, 8)*10 + 10 - - toDial := make(map[types.NodeID]*p2p.NetAddress) - // Try maxAttempts times to pick numToDial addresses to dial - maxAttempts := numToDial * 3 - - for i := 0; i < maxAttempts && len(toDial) < numToDial; i++ { - try := r.book.PickAddress(newBias) - if try == nil { - continue - } - if _, selected := toDial[try.ID]; selected { - continue - } - if r.Switch.IsDialingOrExistingAddress(try) { - continue - } - // TODO: consider moving some checks from toDial into here - // so we don't even consider dialing peers that we want to wait - // before dialing again, or have dialed too many times already - r.Logger.Info("Will dial address", "addr", try) - toDial[try.ID] = try - } - - // Dial picked addresses - for _, addr := range toDial { - go func(addr *p2p.NetAddress) { - err := r.dialPeer(addr) - if err != nil { - switch err.(type) { - case errMaxAttemptsToDial, errTooEarlyToDial: - r.Logger.Debug(err.Error(), "addr", addr) - default: - r.Logger.Error(err.Error(), "addr", addr) - } - } - }(addr) - } - - if r.book.NeedMoreAddrs() { - // Check if banned nodes can be reinstated - r.book.ReinstateBadPeers() - } - - if r.book.NeedMoreAddrs() { - - // 1) Pick a random peer and ask for more. - peers := r.Switch.Peers().List() - peersCount := len(peers) - if peersCount > 0 { - rand := tmrand.NewRand() - peer := peers[rand.Int()%peersCount] - r.Logger.Info("We need more addresses. Sending pexRequest to random peer", "peer", peer) - r.RequestAddrs(peer) - } - - // 2) Dial seeds if we are not dialing anyone. - // This is done in addition to asking a peer for addresses to work-around - // peers not participating in PEX. - if len(toDial) == 0 { - r.Logger.Info("No addresses to dial. 
Falling back to seeds") - r.dialSeeds() - } - } -} - -func (r *Reactor) dialAttemptsInfo(addr *p2p.NetAddress) (attempts int, lastDialed time.Time) { - _attempts, ok := r.attemptsToDial.Load(addr.DialString()) - if !ok { - return - } - atd := _attempts.(_attemptsToDial) - return atd.number, atd.lastDialed -} - -func (r *Reactor) dialPeer(addr *p2p.NetAddress) error { - attempts, lastDialed := r.dialAttemptsInfo(addr) - if !r.Switch.IsPeerPersistent(addr) && attempts > maxAttemptsToDial { - r.book.MarkBad(addr, defaultBanTime) - return errMaxAttemptsToDial{} - } - - // exponential backoff if it's not our first attempt to dial given address - if attempts > 0 { - rand := tmrand.NewRand() - jitter := time.Duration(rand.Float64() * float64(time.Second)) // 1s == (1e9 ns) - backoffDuration := jitter + ((1 << uint(attempts)) * time.Second) - backoffDuration = r.maxBackoffDurationForPeer(addr, backoffDuration) - sinceLastDialed := time.Since(lastDialed) - if sinceLastDialed < backoffDuration { - return errTooEarlyToDial{backoffDuration, lastDialed} - } - } - - err := r.Switch.DialPeerWithAddress(addr) - if err != nil { - if _, ok := err.(p2p.ErrCurrentlyDialingOrExistingAddress); ok { - return err - } - - markAddrInBookBasedOnErr(addr, r.book, err) - switch err.(type) { - case p2p.ErrSwitchAuthenticationFailure: - // NOTE: addr is removed from addrbook in markAddrInBookBasedOnErr - r.attemptsToDial.Delete(addr.DialString()) - default: - r.attemptsToDial.Store(addr.DialString(), _attemptsToDial{attempts + 1, time.Now()}) - } - return fmt.Errorf("dialing failed (attempts: %d): %w", attempts+1, err) - } - - // cleanup any history - r.attemptsToDial.Delete(addr.DialString()) - r.Logger.Debug("dialing success", "address", addr.DialString()) - return nil -} - -// maxBackoffDurationForPeer caps the backoff duration for persistent peers. -func (r *Reactor) maxBackoffDurationForPeer(addr *p2p.NetAddress, planned time.Duration) time.Duration { - if r.config.PersistentPeersMaxDialPeriod > 0 && - planned > r.config.PersistentPeersMaxDialPeriod && - r.Switch.IsPeerPersistent(addr) { - return r.config.PersistentPeersMaxDialPeriod - } - return planned -} - -// checkSeeds checks that addresses are well formed. -// Returns number of seeds we can connect to, along with all seeds addrs. -// return err if user provided any badly formatted seed addresses. -// Doesn't error if the seed node can't be reached. -// numOnline returns -1 if no seed nodes were in the initial configuration. 
-func (r *Reactor) checkSeeds() (numOnline int, netAddrs []*p2p.NetAddress, err error) { - lSeeds := len(r.config.Seeds) - if lSeeds == 0 { - return -1, nil, nil - } - netAddrs, errs := p2p.NewNetAddressStrings(r.config.Seeds) - numOnline = lSeeds - len(errs) - for _, err := range errs { - switch e := err.(type) { - case types.ErrNetAddressLookup: - r.Logger.Error("Connecting to seed failed", "err", e) - default: - return 0, nil, fmt.Errorf("seed node configuration has error: %w", e) - } - } - return numOnline, netAddrs, nil -} - -// randomly dial seeds until we connect to one or exhaust them -func (r *Reactor) dialSeeds() bool { - rand := tmrand.NewRand() - perm := rand.Perm(len(r.seedAddrs)) - // perm := r.Switch.rng.Perm(lSeeds) - for _, i := range perm { - // dial a random seed - seedAddr := r.seedAddrs[i] - err := r.Switch.DialPeerWithAddress(seedAddr) - - switch err.(type) { - case nil, p2p.ErrCurrentlyDialingOrExistingAddress: - return true - } - r.Switch.Logger.Error("Error dialing seed", "err", err, "seed", seedAddr) - } - // do not write error message if there were no seeds specified in config - if len(r.seedAddrs) > 0 { - r.Switch.Logger.Error("Couldn't connect to any seeds") - } - return false -} - -// AttemptsToDial returns the number of attempts to dial specific address. It -// returns 0 if never attempted or successfully connected. -func (r *Reactor) AttemptsToDial(addr *p2p.NetAddress) int { - lAttempts, attempted := r.attemptsToDial.Load(addr.DialString()) - if attempted { - return lAttempts.(_attemptsToDial).number - } - return 0 -} - -//---------------------------------------------------------- - -// Explores the network searching for more peers. (continuous) -// Seed/Crawler Mode causes this node to quickly disconnect -// from peers, except other seed nodes. -func (r *Reactor) crawlPeersRoutine() { - // If we have any seed nodes, consult them first - if len(r.seedAddrs) > 0 { - for try := 0; try < seedInitMaxAttemptToDial; try++ { - // we sleep a few (random) secs to avoid connection storm when whole network restarts - delay := time.Duration(rand.Int63n(seedConnectMaxDelayPeriod.Nanoseconds())) // nolint:gosec - time.Sleep(delay) - - if r.dialSeeds() { - break - } - } - } else { - // Do an initial crawl - r.crawlPeers(r.book.GetSelection()) - } - - // Fire periodically - ticker := time.NewTicker(crawlPeerPeriod) - - for { - select { - case <-ticker.C: - r.attemptDisconnects() - - // If we got disconnected from the whole network, we reconnect to seeds - out, in, dial := r.Switch.NumPeers() - if out+in+dial < 1 { - r.Logger.Info("All peers disconnected, dialing seeds") - r.dialSeeds() - } - - r.crawlPeers(r.book.GetSelection()) - r.cleanupCrawlPeerInfos() - case <-r.Quit(): - return - } - } -} - -// nodeHasSomePeersOrDialingAny returns true if the node is connected to some -// peers or dialing them currently. -func (r *Reactor) nodeHasSomePeersOrDialingAny() bool { - out, in, dial := r.Switch.NumPeers() - return out+in+dial > 0 -} - -// crawlPeerInfo handles temporary data needed for the network crawling -// performed during seed/crawler mode. -type crawlPeerInfo struct { - Addr *p2p.NetAddress `json:"addr"` - // The last time we crawled the peer or attempted to do so. - LastCrawled time.Time `json:"last_crawled"` -} - -// crawlPeers will crawl the network looking for new peer addresses. 
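// Illustrative sketch, not from this patch: crawlPeers below throttles
// itself by remembering when each address was last crawled and skipping
// anything seen within minTimeBetweenCrawls. The core bookkeeping, reduced
// to a standalone form (crawlLog and shouldCrawl are hypothetical names):

package main

import (
	"fmt"
	"time"
)

const minTimeBetweenCrawls = 2 * time.Minute

type crawlLog map[string]time.Time // node ID -> last crawl attempt

// shouldCrawl reports whether id may be crawled now, recording the attempt
// when it is allowed.
func (c crawlLog) shouldCrawl(id string, now time.Time) bool {
	if last, ok := c[id]; ok && now.Sub(last) < minTimeBetweenCrawls {
		return false // crawled too recently; try again later
	}
	c[id] = now
	return true
}

func main() {
	log := crawlLog{}
	now := time.Now()
	fmt.Println(log.shouldCrawl("node1", now))                     // true
	fmt.Println(log.shouldCrawl("node1", now.Add(30*time.Second))) // false: within the 2m window
	fmt.Println(log.shouldCrawl("node1", now.Add(3*time.Minute)))  // true again
}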
-func (r *Reactor) crawlPeers(addrs []*p2p.NetAddress) { - now := time.Now() - - for _, addr := range addrs { - peerInfo, ok := r.crawlPeerInfos[addr.ID] - - // Do not attempt to connect with peers we recently crawled. - if ok && now.Sub(peerInfo.LastCrawled) < minTimeBetweenCrawls { - continue - } - - // Record crawling attempt. - r.crawlPeerInfos[addr.ID] = crawlPeerInfo{ - Addr: addr, - LastCrawled: now, - } - - err := r.dialPeer(addr) - if err != nil { - switch err.(type) { - case errMaxAttemptsToDial, errTooEarlyToDial, p2p.ErrCurrentlyDialingOrExistingAddress: - r.Logger.Debug(err.Error(), "addr", addr) - default: - r.Logger.Error(err.Error(), "addr", addr) - } - continue - } - - peer := r.Switch.Peers().Get(addr.ID) - if peer != nil { - r.RequestAddrs(peer) - } - } -} - -func (r *Reactor) cleanupCrawlPeerInfos() { - for id, info := range r.crawlPeerInfos { - // If we did not crawl a peer for 24 hours, it means the peer was removed - // from the addrbook => remove - // - // 10000 addresses / maxGetSelection = 40 cycles to get all addresses in - // the ideal case, - // 40 * crawlPeerPeriod ~ 20 minutes - if time.Since(info.LastCrawled) > 24*time.Hour { - delete(r.crawlPeerInfos, id) - } - } -} - -// attemptDisconnects checks if we've been with each peer long enough to disconnect -func (r *Reactor) attemptDisconnects() { - for _, peer := range r.Switch.Peers().List() { - if peer.Status().Duration < r.config.SeedDisconnectWaitPeriod { - continue - } - if peer.IsPersistent() { - continue - } - r.Switch.StopPeerGracefully(peer) - } -} - -func markAddrInBookBasedOnErr(addr *p2p.NetAddress, book AddrBook, err error) { - // TODO: detect more "bad peer" scenarios - switch err.(type) { - case p2p.ErrSwitchAuthenticationFailure: - book.MarkBad(addr, defaultBanTime) - default: - book.MarkAttempt(addr) - } -} - -//----------------------------------------------------------------------------- -// Messages - -// mustEncode proto encodes a tmp2p.Message -func mustEncode(pb proto.Message) []byte { - msg := tmp2p.PexMessage{} - switch pb := pb.(type) { - case *tmp2p.PexRequest: - msg.Sum = &tmp2p.PexMessage_PexRequest{PexRequest: pb} - case *tmp2p.PexResponse: - msg.Sum = &tmp2p.PexMessage_PexResponse{PexResponse: pb} - default: - panic(fmt.Sprintf("Unknown message type %T", pb)) - } - - bz, err := msg.Marshal() - if err != nil { - panic(fmt.Errorf("unable to marshal %T: %w", pb, err)) - } - return bz -} - -func decodeMsg(bz []byte) (proto.Message, error) { - pb := &tmp2p.PexMessage{} - - err := pb.Unmarshal(bz) - if err != nil { - return nil, err - } - - switch msg := pb.Sum.(type) { - case *tmp2p.PexMessage_PexRequest: - return msg.PexRequest, nil - case *tmp2p.PexMessage_PexResponse: - return msg.PexResponse, nil - default: - return nil, fmt.Errorf("unknown message: %T", msg) - } -} - -//----------------------------------------------------------------------------- -// address converters - -// NetAddressFromProto converts a Protobuf PexAddress into a native struct. -func NetAddressFromProto(pb tmp2p.PexAddress) (*types.NetAddress, error) { - ip := net.ParseIP(pb.IP) - if ip == nil { - return nil, fmt.Errorf("invalid IP address %v", pb.IP) - } - if pb.Port >= 1<<16 { - return nil, fmt.Errorf("invalid port number %v", pb.Port) - } - return &types.NetAddress{ - ID: types.NodeID(pb.ID), - IP: ip, - Port: uint16(pb.Port), - }, nil -} - -// NetAddressesFromProto converts a slice of Protobuf PexAddresses into a native slice. 
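// Illustrative sketch, not from this patch: NetAddressFromProto above
// rejects a wire-format address unless the IP string parses and the uint32
// port fits in 16 bits. The same two checks in isolation (standalone;
// validateWireAddress is a hypothetical name):

package main

import (
	"fmt"
	"net"
)

// validateWireAddress applies the checks from NetAddressFromProto: the IP
// string must parse and the uint32 port must fit in a uint16.
func validateWireAddress(ipStr string, port uint32) error {
	if ip := net.ParseIP(ipStr); ip == nil {
		return fmt.Errorf("invalid IP address %v", ipStr)
	}
	if port >= 1<<16 {
		return fmt.Errorf("invalid port number %v", port)
	}
	return nil
}

func main() {
	fmt.Println(validateWireAddress("127.0.0.1", 26656)) // <nil>
	fmt.Println(validateWireAddress("127.0.0.1", 70000)) // invalid port number 70000
	fmt.Println(validateWireAddress("not-an-ip", 26656)) // invalid IP address not-an-ip
}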
-func NetAddressesFromProto(pbs []tmp2p.PexAddress) ([]*types.NetAddress, error) { - nas := make([]*types.NetAddress, 0, len(pbs)) - for _, pb := range pbs { - na, err := NetAddressFromProto(pb) - if err != nil { - return nil, err - } - nas = append(nas, na) - } - return nas, nil -} - -// NetAddressesToProto converts a slice of NetAddresses into a Protobuf PexAddress slice. -func NetAddressesToProto(nas []*types.NetAddress) []tmp2p.PexAddress { - pbs := make([]tmp2p.PexAddress, 0, len(nas)) - for _, na := range nas { - if na != nil { - pbs = append(pbs, tmp2p.PexAddress{ - ID: string(na.ID), - IP: na.IP.String(), - Port: uint32(na.Port), - }) - } - } - return pbs -} diff --git a/internal/p2p/pex/pex_reactor_test.go b/internal/p2p/pex/pex_reactor_test.go deleted file mode 100644 index e2e32b4046..0000000000 --- a/internal/p2p/pex/pex_reactor_test.go +++ /dev/null @@ -1,682 +0,0 @@ -package pex - -import ( - "encoding/hex" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "testing" - "time" - - "github.com/gogo/protobuf/proto" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/p2p/mock" - "github.com/tendermint/tendermint/libs/log" - tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p" - "github.com/tendermint/tendermint/types" -) - -var ( - cfg *config.P2PConfig -) - -func init() { - cfg = config.DefaultP2PConfig() - cfg.PexReactor = true - cfg.AllowDuplicateIP = true -} - -func TestPEXReactorBasic(t *testing.T) { - r, _ := createReactor(t, &ReactorConfig{}) - - assert.NotNil(t, r) - assert.NotEmpty(t, r.GetChannels()) -} - -func TestPEXReactorAddRemovePeer(t *testing.T) { - r, book := createReactor(t, &ReactorConfig{}) - - size := book.Size() - peer := p2p.CreateRandomPeer(false) - - r.AddPeer(peer) - assert.Equal(t, size+1, book.Size()) - - r.RemovePeer(peer, "peer not available") - - outboundPeer := p2p.CreateRandomPeer(true) - - r.AddPeer(outboundPeer) - assert.Equal(t, size+1, book.Size(), "outbound peers should not be added to the address book") - - r.RemovePeer(outboundPeer, "peer not available") -} - -// --- FAIL: TestPEXReactorRunning (11.10s) -// pex_reactor_test.go:411: expected all switches to be connected to at -// least one peer (switches: 0 => {outbound: 1, inbound: 0}, 1 => -// {outbound: 0, inbound: 1}, 2 => {outbound: 0, inbound: 0}, ) -// -// EXPLANATION: peers are getting rejected because in switch#addPeer we check -// if any peer (who we already connected to) has the same IP. Even though local -// peers have different IP addresses, they all have the same underlying remote -// IP: 127.0.0.1. 
-// -func TestPEXReactorRunning(t *testing.T) { - N := 3 - switches := make([]*p2p.Switch, N) - - // directory to store address books - dir := tempDir(t) - - books := make([]AddrBook, N) - logger := log.TestingLogger() - - // create switches - for i := 0; i < N; i++ { - switches[i] = p2p.MakeSwitch(cfg, i, "testing", "123.123.123", nil, func(i int, sw *p2p.Switch) *p2p.Switch { - books[i] = NewAddrBook(filepath.Join(dir, fmt.Sprintf("addrbook%d.json", i)), false) - books[i].SetLogger(logger.With("pex", i)) - sw.SetAddrBook(books[i]) - - sw.SetLogger(logger.With("pex", i)) - - r := NewReactor(books[i], &ReactorConfig{}) - r.SetLogger(logger.With("pex", i)) - r.SetEnsurePeersPeriod(250 * time.Millisecond) - sw.AddReactor("pex", r) - - return sw - }, logger) - } - - for _, sw := range switches { - err := sw.Start() // start switch and reactors - require.Nil(t, err) - } - - addOtherNodeAddrToAddrBook := func(switchIndex, otherSwitchIndex int) { - addr := switches[otherSwitchIndex].NetAddress() - err := books[switchIndex].AddAddress(addr, addr) - require.NoError(t, err) - } - - addOtherNodeAddrToAddrBook(0, 1) - addOtherNodeAddrToAddrBook(1, 0) - addOtherNodeAddrToAddrBook(2, 1) - - assertPeersWithTimeout(t, switches, 10*time.Millisecond, 10*time.Second, N-1) - - // stop them - for _, s := range switches { - err := s.Stop() - require.NoError(t, err) - } -} - -func TestPEXReactorReceive(t *testing.T) { - r, book := createReactor(t, &ReactorConfig{}) - peer := p2p.CreateRandomPeer(false) - - // we have to send a request to receive responses - r.RequestAddrs(peer) - - size := book.Size() - na, err := peer.NodeInfo().NetAddress() - require.NoError(t, err) - msg := mustEncode(&tmp2p.PexResponse{Addresses: NetAddressesToProto([]*types.NetAddress{na})}) - r.Receive(PexChannel, peer, msg) - assert.Equal(t, size+1, book.Size()) - - msg = mustEncode(&tmp2p.PexRequest{}) - r.Receive(PexChannel, peer, msg) // should not panic. 
-} - -func TestPEXReactorRequestMessageAbuse(t *testing.T) { - r, book := createReactor(t, &ReactorConfig{}) - sw := createSwitchAndAddReactors(r) - sw.SetAddrBook(book) - - peer := mock.NewPeer(nil) - peerAddr := peer.SocketAddr() - p2p.AddPeerToSwitchPeerSet(sw, peer) - assert.True(t, sw.Peers().Has(peer.ID())) - err := book.AddAddress(peerAddr, peerAddr) - require.NoError(t, err) - require.True(t, book.HasAddress(peerAddr)) - - id := string(peer.ID()) - msg := mustEncode(&tmp2p.PexRequest{}) - - // first time creates the entry - r.Receive(PexChannel, peer, msg) - assert.True(t, r.lastReceivedRequests.Has(id)) - assert.True(t, sw.Peers().Has(peer.ID())) - - // next time sets the last time value - r.Receive(PexChannel, peer, msg) - assert.True(t, r.lastReceivedRequests.Has(id)) - assert.True(t, sw.Peers().Has(peer.ID())) - - // third time is too many too soon - peer is removed - r.Receive(PexChannel, peer, msg) - assert.False(t, r.lastReceivedRequests.Has(id)) - assert.False(t, sw.Peers().Has(peer.ID())) - assert.True(t, book.IsBanned(peerAddr)) -} - -func TestPEXReactorAddrsMessageAbuse(t *testing.T) { - r, book := createReactor(t, &ReactorConfig{}) - sw := createSwitchAndAddReactors(r) - sw.SetAddrBook(book) - - peer := mock.NewPeer(nil) - p2p.AddPeerToSwitchPeerSet(sw, peer) - assert.True(t, sw.Peers().Has(peer.ID())) - - id := string(peer.ID()) - - // request addrs from the peer - r.RequestAddrs(peer) - assert.True(t, r.requestsSent.Has(id)) - assert.True(t, sw.Peers().Has(peer.ID())) - - msg := mustEncode(&tmp2p.PexResponse{Addresses: NetAddressesToProto([]*types.NetAddress{peer.SocketAddr()})}) - - // receive some addrs. should clear the request - r.Receive(PexChannel, peer, msg) - assert.False(t, r.requestsSent.Has(id)) - assert.True(t, sw.Peers().Has(peer.ID())) - - // receiving more unsolicited addrs causes a disconnect and ban - r.Receive(PexChannel, peer, msg) - assert.False(t, sw.Peers().Has(peer.ID())) - assert.True(t, book.IsBanned(peer.SocketAddr())) -} - -func TestCheckSeeds(t *testing.T) { - // directory to store address books - dir := tempDir(t) - - // 1. test creating peer with no seeds works - peerSwitch := testCreateDefaultPeer(dir, 0) - require.Nil(t, peerSwitch.Start()) - peerSwitch.Stop() // nolint:errcheck // ignore for tests - - // 2. create seed - seed := testCreateSeed(dir, 1, []*p2p.NetAddress{}, []*p2p.NetAddress{}) - - // 3. test create peer with online seed works - peerSwitch = testCreatePeerWithSeed(dir, 2, seed) - require.Nil(t, peerSwitch.Start()) - peerSwitch.Stop() // nolint:errcheck // ignore for tests - - // 4. test create peer with all seeds having unresolvable DNS fails - badPeerConfig := &ReactorConfig{ - Seeds: []string{"ed3dfd27bfc4af18f67a49862f04cc100696e84d@bad.network.addr:26657", - "d824b13cb5d40fa1d8a614e089357c7eff31b670@anotherbad.network.addr:26657"}, - } - peerSwitch = testCreatePeerWithConfig(dir, 2, badPeerConfig) - require.Error(t, peerSwitch.Start()) - peerSwitch.Stop() // nolint:errcheck // ignore for tests - - // 5. 
test create peer with one good seed address succeeds - badPeerConfig = &ReactorConfig{ - Seeds: []string{"ed3dfd27bfc4af18f67a49862f04cc100696e84d@bad.network.addr:26657", - "d824b13cb5d40fa1d8a614e089357c7eff31b670@anotherbad.network.addr:26657", - seed.NetAddress().String()}, - } - peerSwitch = testCreatePeerWithConfig(dir, 2, badPeerConfig) - require.Nil(t, peerSwitch.Start()) - peerSwitch.Stop() // nolint:errcheck // ignore for tests -} - -func TestPEXReactorUsesSeedsIfNeeded(t *testing.T) { - // directory to store address books - dir := tempDir(t) - - // 1. create seed - seed := testCreateSeed(dir, 0, []*p2p.NetAddress{}, []*p2p.NetAddress{}) - require.Nil(t, seed.Start()) - t.Cleanup(func() { _ = seed.Stop() }) - - // 2. create usual peer with only seed configured. - peer := testCreatePeerWithSeed(dir, 1, seed) - require.Nil(t, peer.Start()) - t.Cleanup(func() { _ = peer.Stop() }) - - // 3. check that the peer connects to seed immediately - assertPeersWithTimeout(t, []*p2p.Switch{peer}, 10*time.Millisecond, 3*time.Second, 1) -} - -func TestConnectionSpeedForPeerReceivedFromSeed(t *testing.T) { - // directory to store address books - dir := tempDir(t) - - // 1. create peer - peerSwitch := testCreateDefaultPeer(dir, 1) - require.Nil(t, peerSwitch.Start()) - t.Cleanup(func() { _ = peerSwitch.Stop() }) - - // 2. Create seed which knows about the peer - peerAddr := peerSwitch.NetAddress() - seed := testCreateSeed(dir, 2, []*p2p.NetAddress{peerAddr}, []*p2p.NetAddress{peerAddr}) - require.Nil(t, seed.Start()) - t.Cleanup(func() { _ = seed.Stop() }) - - // 3. create another peer with only seed configured. - secondPeer := testCreatePeerWithSeed(dir, 3, seed) - require.Nil(t, secondPeer.Start()) - t.Cleanup(func() { _ = secondPeer.Stop() }) - - // 4. check that the second peer connects to seed immediately - assertPeersWithTimeout(t, []*p2p.Switch{secondPeer}, 10*time.Millisecond, 3*time.Second, 1) - - // 5. check that the second peer connects to the first peer immediately - assertPeersWithTimeout(t, []*p2p.Switch{secondPeer}, 10*time.Millisecond, 1*time.Second, 2) -} - -func TestPEXReactorSeedMode(t *testing.T) { - // directory to store address books - dir := tempDir(t) - - pexRConfig := &ReactorConfig{SeedMode: true, SeedDisconnectWaitPeriod: 10 * time.Millisecond} - pexR, book := createReactor(t, pexRConfig) - sw := createSwitchAndAddReactors(pexR) - - sw.SetAddrBook(book) - require.NoError(t, sw.Start()) - t.Cleanup(func() { _ = sw.Stop() }) - - assert.Zero(t, sw.Peers().Size()) - - peerSwitch := testCreateDefaultPeer(dir, 1) - require.NoError(t, peerSwitch.Start()) - t.Cleanup(func() { _ = peerSwitch.Stop() }) - - // 1. Test crawlPeers dials the peer - pexR.crawlPeers([]*p2p.NetAddress{peerSwitch.NetAddress()}) - assert.Equal(t, 1, sw.Peers().Size()) - assert.True(t, sw.Peers().Has(peerSwitch.NodeInfo().ID())) - - // 2. attemptDisconnects should not disconnect because of wait period - pexR.attemptDisconnects() - assert.Equal(t, 1, sw.Peers().Size()) - - // sleep for SeedDisconnectWaitPeriod - time.Sleep(pexRConfig.SeedDisconnectWaitPeriod + 1*time.Millisecond) - - // 3. 
attemptDisconnects should disconnect after wait period - pexR.attemptDisconnects() - assert.Equal(t, 0, sw.Peers().Size()) -} - -func TestPEXReactorDoesNotDisconnectFromPersistentPeerInSeedMode(t *testing.T) { - // directory to store address books - dir := tempDir(t) - - pexRConfig := &ReactorConfig{SeedMode: true, SeedDisconnectWaitPeriod: 1 * time.Millisecond} - pexR, book := createReactor(t, pexRConfig) - sw := createSwitchAndAddReactors(pexR) - - sw.SetAddrBook(book) - require.NoError(t, sw.Start()) - t.Cleanup(func() { _ = sw.Stop() }) - - assert.Zero(t, sw.Peers().Size()) - - peerSwitch := testCreatePeerWithConfig(dir, 1, pexRConfig) - require.NoError(t, peerSwitch.Start()) - t.Cleanup(func() { _ = peerSwitch.Stop() }) - - require.NoError(t, sw.AddPersistentPeers([]string{peerSwitch.NetAddress().String()})) - - // 1. Test crawlPeers dials the peer - pexR.crawlPeers([]*p2p.NetAddress{peerSwitch.NetAddress()}) - assert.Equal(t, 1, sw.Peers().Size()) - assert.True(t, sw.Peers().Has(peerSwitch.NodeInfo().ID())) - - // sleep for SeedDisconnectWaitPeriod - time.Sleep(pexRConfig.SeedDisconnectWaitPeriod + 1*time.Millisecond) - - // 2. attemptDisconnects should not disconnect because the peer is persistent - pexR.attemptDisconnects() - assert.Equal(t, 1, sw.Peers().Size()) -} - -func TestPEXReactorDialsPeerUpToMaxAttemptsInSeedMode(t *testing.T) { - // directory to store address books - pexR, book := createReactor(t, &ReactorConfig{SeedMode: true}) - sw := createSwitchAndAddReactors(pexR) - - sw.SetAddrBook(book) - // No need to start sw since crawlPeers is called manually here. - - peer := mock.NewPeer(nil) - addr := peer.SocketAddr() - - require.NoError(t, book.AddAddress(addr, addr)) - - assert.True(t, book.HasAddress(addr)) - - // imitate maxAttemptsToDial reached - pexR.attemptsToDial.Store(addr.DialString(), _attemptsToDial{maxAttemptsToDial + 1, time.Now()}) - pexR.crawlPeers([]*p2p.NetAddress{addr}) - - assert.False(t, book.HasAddress(addr)) -} - -// connect a peer to a seed, wait a bit, then stop it. -// this should give it time to request addrs and for the seed -// to call FlushStop, and allows us to test calling Stop concurrently -// with FlushStop. Before a fix, this non-deterministically reproduced -// https://github.com/tendermint/tendermint/issues/3231. 
-func TestPEXReactorSeedModeFlushStop(t *testing.T) { - t.Skip("flaky test, will be replaced by new P2P stack") - N := 2 - switches := make([]*p2p.Switch, N) - - // directory to store address books - dir := tempDir(t) - - books := make([]AddrBook, N) - logger := log.TestingLogger() - - // create switches - for i := 0; i < N; i++ { - switches[i] = p2p.MakeSwitch(cfg, i, "testing", "123.123.123", nil, func(i int, sw *p2p.Switch) *p2p.Switch { - books[i] = NewAddrBook(filepath.Join(dir, fmt.Sprintf("addrbook%d.json", i)), false) - books[i].SetLogger(logger.With("pex", i)) - sw.SetAddrBook(books[i]) - - sw.SetLogger(logger.With("pex", i)) - - config := &ReactorConfig{} - if i == 0 { - // first one is a seed node - config = &ReactorConfig{SeedMode: true} - } - r := NewReactor(books[i], config) - r.SetLogger(logger.With("pex", i)) - r.SetEnsurePeersPeriod(250 * time.Millisecond) - sw.AddReactor("pex", r) - - return sw - }, logger) - } - - for _, sw := range switches { - err := sw.Start() // start switch and reactors - require.Nil(t, err) - } - - reactor := switches[0].Reactors()["pex"].(*Reactor) - peerID := switches[1].NodeInfo().ID() - - assert.NoError(t, switches[1].DialPeerWithAddress(switches[0].NetAddress())) - - // sleep up to a second while waiting for the peer to send us a message. - // this isn't perfect since it's possible the peer sends us a msg and we FlushStop - // before this loop catches it. but non-deterministically it works pretty well. - for i := 0; i < 1000; i++ { - v := reactor.lastReceivedRequests.Get(string(peerID)) - if v != nil { - break - } - time.Sleep(time.Millisecond) - } - - // by now the FlushStop should have happened. Try stopping the peer. - // it should be safe to do this. - peers := switches[0].Peers().List() - for _, peer := range peers { - err := peer.Stop() - require.NoError(t, err) - } - - // stop the switches - for _, s := range switches { - err := s.Stop() - require.NoError(t, err) - } -} - -func TestPEXReactorDoesNotAddPrivatePeersToAddrBook(t *testing.T) { - peer := p2p.CreateRandomPeer(false) - - pexR, book := createReactor(t, &ReactorConfig{}) - book.AddPrivateIDs([]string{string(peer.NodeInfo().ID())}) - - // we have to send a request to receive responses - pexR.RequestAddrs(peer) - - size := book.Size() - na, err := peer.NodeInfo().NetAddress() - require.NoError(t, err) - - msg := mustEncode(&tmp2p.PexResponse{Addresses: NetAddressesToProto([]*types.NetAddress{na})}) - pexR.Receive(PexChannel, peer, msg) - assert.Equal(t, size, book.Size()) - - pexR.AddPeer(peer) - assert.Equal(t, size, book.Size()) -} - -func TestPEXReactorDialPeer(t *testing.T) { - pexR, book := createReactor(t, &ReactorConfig{}) - sw := createSwitchAndAddReactors(pexR) - - sw.SetAddrBook(book) - - peer := mock.NewPeer(nil) - addr := peer.SocketAddr() - - assert.Equal(t, 0, pexR.AttemptsToDial(addr)) - - // 1st unsuccessful attempt - err := pexR.dialPeer(addr) - require.Error(t, err) - - assert.Equal(t, 1, pexR.AttemptsToDial(addr)) - - // 2nd unsuccessful attempt - err = pexR.dialPeer(addr) - require.Error(t, err) - - // must be skipped because it is too early - assert.Equal(t, 1, pexR.AttemptsToDial(addr)) - - if !testing.Short() { - time.Sleep(3 * time.Second) - - // 3rd attempt - err = pexR.dialPeer(addr) - require.Error(t, err) - - assert.Equal(t, 2, pexR.AttemptsToDial(addr)) - } -} - -func assertPeersWithTimeout( - t *testing.T, - switches []*p2p.Switch, - checkPeriod, timeout time.Duration, - nPeers int, -) { - var ( - ticker = time.NewTicker(checkPeriod) - remaining = 
timeout - ) - - for { - select { - case <-ticker.C: - // check peers are connected - allGood := true - for _, s := range switches { - outbound, inbound, _ := s.NumPeers() - if outbound+inbound < nPeers { - allGood = false - break - } - } - remaining -= checkPeriod - if remaining < 0 { - remaining = 0 - } - if allGood { - return - } - case <-time.After(remaining): - numPeersStr := "" - for i, s := range switches { - outbound, inbound, _ := s.NumPeers() - numPeersStr += fmt.Sprintf("%d => {outbound: %d, inbound: %d}, ", i, outbound, inbound) - } - t.Errorf( - "expected all switches to be connected to at least %d peer(s) (switches: %s)", - nPeers, numPeersStr, - ) - return - } - } -} - -// Creates a peer with the provided config -func testCreatePeerWithConfig(dir string, id int, config *ReactorConfig) *p2p.Switch { - peer := p2p.MakeSwitch( - cfg, - id, - "127.0.0.1", - "123.123.123", - nil, - func(i int, sw *p2p.Switch) *p2p.Switch { - book := NewAddrBook(filepath.Join(dir, fmt.Sprintf("addrbook%d.json", id)), false) - book.SetLogger(log.TestingLogger()) - sw.SetAddrBook(book) - - sw.SetLogger(log.TestingLogger()) - - r := NewReactor( - book, - config, - ) - r.SetLogger(log.TestingLogger()) - sw.AddReactor("pex", r) - return sw - }, - log.TestingLogger(), - ) - return peer -} - -// Creates a peer with the default config -func testCreateDefaultPeer(dir string, id int) *p2p.Switch { - return testCreatePeerWithConfig(dir, id, &ReactorConfig{}) -} - -// Creates a seed which knows about the provided addresses / source address pairs. -// Starting and stopping the seed is left to the caller -func testCreateSeed(dir string, id int, knownAddrs, srcAddrs []*p2p.NetAddress) *p2p.Switch { - seed := p2p.MakeSwitch( - cfg, - id, - "127.0.0.1", - "123.123.123", - nil, - func(i int, sw *p2p.Switch) *p2p.Switch { - book := NewAddrBook(filepath.Join(dir, "addrbookSeed.json"), false) - book.SetLogger(log.TestingLogger()) - for j := 0; j < len(knownAddrs); j++ { - book.AddAddress(knownAddrs[j], srcAddrs[j]) // nolint:errcheck // ignore for tests - book.MarkGood(knownAddrs[j].ID) - } - sw.SetAddrBook(book) - - sw.SetLogger(log.TestingLogger()) - - r := NewReactor(book, &ReactorConfig{}) - r.SetLogger(log.TestingLogger()) - sw.AddReactor("pex", r) - return sw - }, - log.TestingLogger(), - ) - return seed -} - -// Creates a peer which knows about the provided seed. 
-// Starting and stopping the peer is left to the caller -func testCreatePeerWithSeed(dir string, id int, seed *p2p.Switch) *p2p.Switch { - conf := &ReactorConfig{ - Seeds: []string{seed.NetAddress().String()}, - } - return testCreatePeerWithConfig(dir, id, conf) -} - -func createReactor(t *testing.T, conf *ReactorConfig) (r *Reactor, book AddrBook) { - // directory to store address book - book = NewAddrBook(filepath.Join(tempDir(t), "addrbook.json"), true) - book.SetLogger(log.TestingLogger()) - - r = NewReactor(book, conf) - r.SetLogger(log.TestingLogger()) - return -} - -func createSwitchAndAddReactors(reactors ...p2p.Reactor) *p2p.Switch { - sw := p2p.MakeSwitch(cfg, 0, "127.0.0.1", "123.123.123", nil, func(i int, sw *p2p.Switch) *p2p.Switch { - for _, r := range reactors { - sw.AddReactor(r.String(), r) - } - return sw - }, log.TestingLogger()) - return sw -} - -func TestPexVectors(t *testing.T) { - addr := tmp2p.PexAddress{ - ID: "1", - IP: "127.0.0.1", - Port: 9090, - } - - testCases := []struct { - testName string - msg proto.Message - expBytes string - }{ - {"PexRequest", &tmp2p.PexRequest{}, "0a00"}, - {"PexAddrs", &tmp2p.PexResponse{Addresses: []tmp2p.PexAddress{addr}}, "12130a110a013112093132372e302e302e31188247"}, - } - - for _, tc := range testCases { - tc := tc - - bz := mustEncode(tc.msg) - - require.Equal(t, tc.expBytes, hex.EncodeToString(bz), tc.testName) - } -} - -// FIXME: This function is used in place of testing.TB.TempDir() -// as the latter seems to cause test cases to fail when it is -// unable to remove the temporary directory once the test case -// execution terminates. This seems to happen often with pex -// reactor test cases. -// -// References: -// https://github.com/tendermint/tendermint/pull/5733 -// https://github.com/tendermint/tendermint/issues/5732 -func tempDir(t *testing.T) string { - t.Helper() - dir, err := ioutil.TempDir("", "") - require.NoError(t, err) - t.Cleanup(func() { _ = os.RemoveAll(dir) }) - return dir -} diff --git a/internal/p2p/pex/reactor.go b/internal/p2p/pex/reactor.go index 8cff2f95b8..bd47373265 100644 --- a/internal/p2p/pex/reactor.go +++ b/internal/p2p/pex/reactor.go @@ -3,35 +3,44 @@ package pex import ( "context" "fmt" - "runtime/debug" "sync" "time" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/conn" "github.com/tendermint/tendermint/libs/log" - tmmath "github.com/tendermint/tendermint/libs/math" "github.com/tendermint/tendermint/libs/service" protop2p "github.com/tendermint/tendermint/proto/tendermint/p2p" "github.com/tendermint/tendermint/types" ) var ( - _ service.Service = (*ReactorV2)(nil) + _ service.Service = (*Reactor)(nil) _ p2p.Wrapper = (*protop2p.PexMessage)(nil) ) -// TODO: Consolidate with params file. -// See https://github.com/tendermint/tendermint/issues/6371 const ( + // PexChannel is a channel for PEX messages + PexChannel = 0x00 + + // over-estimate of max NetAddress size + // hexID (40) + IP (16) + Port (2) + Name (100) ... + // NOTE: dont use massive DNS name .. + maxAddressSize = 256 + + // max addresses returned by GetSelection + // NOTE: this must match "maxMsgSize" + maxGetSelection = 250 + + // NOTE: amplification factor! 
+ // small request results in up to maxMsgSize response + maxMsgSize = maxAddressSize * maxGetSelection + // the minimum time one peer can send another request to the same peer minReceiveRequestInterval = 100 * time.Millisecond // the maximum amount of addresses that can be included in a response - maxAddresses uint16 = 100 - - // allocated time to resolve a node address into a set of endpoints - resolveTimeout = 3 * time.Second + maxAddresses = 100 // How long to wait when there are no peers available before trying again noAvailablePeersWaitPeriod = 1 * time.Second @@ -46,22 +55,18 @@ const ( // within each reactor (as they are now) or, considering that the reactor doesn't // really need to care about the channel descriptors, if they should be housed // in the node module. -func ChannelDescriptor() conn.ChannelDescriptor { - return conn.ChannelDescriptor{ +func ChannelDescriptor() *conn.ChannelDescriptor { + return &conn.ChannelDescriptor{ ID: PexChannel, + MessageType: new(protop2p.PexMessage), Priority: 1, SendQueueCapacity: 10, RecvMessageCapacity: maxMsgSize, - RecvBufferCapacity: 32, - MaxSendBytes: 200, + RecvBufferCapacity: 128, + Name: "pex", } } -// ReactorV2 is a PEX reactor for the new P2P stack. The legacy reactor -// is Reactor. -// -// FIXME: Rename this when Reactor is removed, and consider moving to p2p/. -// // The peer exchange or PEX reactor supports the peer manager by sending // requests to other peers for addresses that can be given to the peer manager // and at the same time advertises addresses to peers that need more. @@ -70,14 +75,13 @@ func ChannelDescriptor() conn.ChannelDescriptor { // increasing the interval between each request. It tracks connected peers via // a linked list, sending a request to the node at the front of the list and // adding it to the back of the list once a response is received. -type ReactorV2 struct { +type Reactor struct { service.BaseService + logger log.Logger peerManager *p2p.PeerManager - pexCh *p2p.Channel - peerUpdates *p2p.PeerUpdates - closeCh chan struct{} - + chCreator p2p.ChannelCreator + peerEvents p2p.PeerEventSubscriber // list of available peers to loop through and send peer requests to availablePeers map[types.NodeID]struct{} @@ -94,33 +98,22 @@ type ReactorV2 struct { // minReceiveRequestInterval). lastReceivedRequests map[types.NodeID]time.Time - // the time when another request will be sent - nextRequestTime time.Time - - // keep track of how many new peers to existing peers we have received to - // extrapolate the size of the network - newPeers uint32 - totalPeers uint32 - - // discoveryRatio is the inverse ratio of new peers to old peers squared. - // This is multiplied by the minimum duration to calculate how long to wait - // between each request. - discoveryRatio float32 + // the total number of unique peers added + totalPeers int } // NewReactor returns a reference to a new reactor. 
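// Illustrative sketch, not from this patch: the constants above bound the
// amplification factor of PEX. A request of a few bytes can elicit a
// response of up to maxAddressSize * maxGetSelection bytes, which is why
// maxMsgSize is derived from both (standalone arithmetic only):

package main

import "fmt"

const (
	maxAddressSize  = 256 // over-estimate of one encoded address
	maxGetSelection = 250 // max addresses returned by GetSelection
	maxMsgSize      = maxAddressSize * maxGetSelection
)

func main() {
	// A tiny PexRequest can trigger up to 64000 bytes in response, so the
	// reactor caps both request frequency and response size.
	fmt.Println(maxMsgSize) // 64000
}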
-func NewReactorV2( +func NewReactor( logger log.Logger, peerManager *p2p.PeerManager, - pexCh *p2p.Channel, - peerUpdates *p2p.PeerUpdates, -) *ReactorV2 { - - r := &ReactorV2{ + channelCreator p2p.ChannelCreator, + peerEvents p2p.PeerEventSubscriber, +) *Reactor { + r := &Reactor{ + logger: logger, peerManager: peerManager, - pexCh: pexCh, - peerUpdates: peerUpdates, - closeCh: make(chan struct{}), + chCreator: channelCreator, + peerEvents: peerEvents, availablePeers: make(map[types.NodeID]struct{}), requestsSent: make(map[types.NodeID]struct{}), lastReceivedRequests: make(map[types.NodeID]time.Time), @@ -134,50 +127,82 @@ func NewReactorV2( // envelopes on each. In addition, it also listens for peer updates and handles // messages on that p2p channel accordingly. The caller must be sure to execute // OnStop to ensure the outbound p2p Channels are closed. -func (r *ReactorV2) OnStart() error { - go r.processPexCh() - go r.processPeerUpdates() +func (r *Reactor) OnStart(ctx context.Context) error { + channel, err := r.chCreator(ctx, ChannelDescriptor()) + if err != nil { + return err + } + + peerUpdates := r.peerEvents(ctx) + go r.processPexCh(ctx, channel) + go r.processPeerUpdates(ctx, peerUpdates) return nil } // OnStop stops the reactor by signaling to all spawned goroutines to exit and // blocking until they all exit. -func (r *ReactorV2) OnStop() { - // Close closeCh to signal to all spawned goroutines to gracefully exit. All - // p2p Channels should execute Close(). - close(r.closeCh) - - // Wait for all p2p Channels to be closed before returning. This ensures we - // can easily reason about synchronization of all p2p Channels and ensure no - // panics will occur. - <-r.pexCh.Done() - <-r.peerUpdates.Done() -} +func (r *Reactor) OnStop() {} // processPexCh implements a blocking event loop where we listen for p2p // Envelope messages from the pexCh. -func (r *ReactorV2) processPexCh() { - defer r.pexCh.Close() +func (r *Reactor) processPexCh(ctx context.Context, pexCh *p2p.Channel) { + incoming := make(chan *p2p.Envelope) + go func() { + defer close(incoming) + iter := pexCh.Receive(ctx) + for iter.Next(ctx) { + select { + case <-ctx.Done(): + return + case incoming <- iter.Envelope(): + } + } + }() + + // Initially, we will request peers quickly to bootstrap. This duration + // will be adjusted upward as knowledge of the network grows. + var nextPeerRequest = minReceiveRequestInterval + + timer := time.NewTimer(0) + defer timer.Stop() for { + timer.Reset(nextPeerRequest) + select { - case <-r.closeCh: - r.Logger.Debug("stopped listening on PEX channel; closing...") + case <-ctx.Done(): return - // outbound requests for new peers - case <-r.waitUntilNextRequest(): - r.sendRequestForPeers() + case <-timer.C: + // Send a request for more peer addresses. + if err := r.sendRequestForPeers(ctx, pexCh); err != nil { + return + // TODO(creachadair): Do we really want to stop processing the PEX + // channel just because of an error here? + } + + // Note we do not update the poll timer upon making a request, only + // when we receive an update that updates our priors. 
+ + case envelope, ok := <-incoming: + if !ok { + return // channel closed + } - // inbound requests for new peers or responses to requests sent by this - // reactor - case envelope := <-r.pexCh.In: - if err := r.handleMessage(r.pexCh.ID, envelope); err != nil { - r.Logger.Error("failed to process message", "ch_id", r.pexCh.ID, "envelope", envelope, "err", err) - r.pexCh.Error <- p2p.PeerError{ + // A request from another peer, or a response to one of our requests. + dur, err := r.handlePexMessage(ctx, envelope, pexCh) + if err != nil { + r.logger.Error("failed to process message", + "ch_id", envelope.ChannelID, "envelope", envelope, "err", err) + if serr := pexCh.SendError(ctx, p2p.PeerError{ NodeID: envelope.From, Err: err, + }); serr != nil { + return } + } else if dur != 0 { + // We got a useful result; update the poll timer. + nextPeerRequest = dur } } } @@ -186,109 +211,57 @@ func (r *ReactorV2) processPexCh() { // processPeerUpdates initiates a blocking process where we listen for and handle // PeerUpdate messages. When the reactor is stopped, we will catch the signal and // close the p2p PeerUpdatesCh gracefully. -func (r *ReactorV2) processPeerUpdates() { - defer r.peerUpdates.Close() - +func (r *Reactor) processPeerUpdates(ctx context.Context, peerUpdates *p2p.PeerUpdates) { for { select { - case peerUpdate := <-r.peerUpdates.Updates(): - r.processPeerUpdate(peerUpdate) - - case <-r.closeCh: - r.Logger.Debug("stopped listening on peer updates channel; closing...") + case <-ctx.Done(): return + case peerUpdate := <-peerUpdates.Updates(): + r.processPeerUpdate(peerUpdate) } } } // handlePexMessage handles envelopes sent from peers on the PexChannel. -func (r *ReactorV2) handlePexMessage(envelope p2p.Envelope) error { - logger := r.Logger.With("peer", envelope.From) +// If an update was received, a new polling interval is returned; otherwise the +// duration is 0. +func (r *Reactor) handlePexMessage(ctx context.Context, envelope *p2p.Envelope, pexCh *p2p.Channel) (time.Duration, error) { + logger := r.logger.With("peer", envelope.From) switch msg := envelope.Message.(type) { - case *protop2p.PexRequest: - // Check if the peer hasn't sent a prior request too close to this one - // in time. + // Verify that this peer hasn't sent us another request too recently. 
if err := r.markPeerRequest(envelope.From); err != nil { - return err - } - - // parse and send the legacy PEX addresses - pexAddresses := r.resolve(r.peerManager.Advertise(envelope.From, maxAddresses)) - r.pexCh.Out <- p2p.Envelope{ - To: envelope.From, - Message: &protop2p.PexResponse{Addresses: pexAddresses}, - } - - case *protop2p.PexResponse: - // check if the response matches a request that was made to that peer - if err := r.markPeerResponse(envelope.From); err != nil { - return err - } - - // check the size of the response - if len(msg.Addresses) > int(maxAddresses) { - return fmt.Errorf("peer sent too many addresses (max: %d, got: %d)", - maxAddresses, - len(msg.Addresses), - ) + return 0, err } - for _, pexAddress := range msg.Addresses { - // no protocol is prefixed so we assume the default (mconn) - peerAddress, err := p2p.ParseNodeAddress( - fmt.Sprintf("%s@%s:%d", pexAddress.ID, pexAddress.IP, pexAddress.Port)) - if err != nil { - continue - } - added, err := r.peerManager.Add(peerAddress) - if err != nil { - logger.Error("failed to add PEX address", "address", peerAddress, "err", err) - } - if added { - r.newPeers++ - logger.Debug("added PEX address", "address", peerAddress) - } - r.totalPeers++ - } - - // V2 PEX MESSAGES - case *protop2p.PexRequestV2: - // check if the peer hasn't sent a prior request too close to this one - // in time - if err := r.markPeerRequest(envelope.From); err != nil { - return err - } - - // request peers from the peer manager and parse the NodeAddresses into - // URL strings + // Fetch peers from the peer manager, convert NodeAddresses into URL + // strings, and send them back to the caller. nodeAddresses := r.peerManager.Advertise(envelope.From, maxAddresses) - pexAddressesV2 := make([]protop2p.PexAddressV2, len(nodeAddresses)) + pexAddresses := make([]protop2p.PexAddress, len(nodeAddresses)) for idx, addr := range nodeAddresses { - pexAddressesV2[idx] = protop2p.PexAddressV2{ + pexAddresses[idx] = protop2p.PexAddress{ URL: addr.String(), } } - r.pexCh.Out <- p2p.Envelope{ + return 0, pexCh.Send(ctx, p2p.Envelope{ To: envelope.From, - Message: &protop2p.PexResponseV2{Addresses: pexAddressesV2}, - } + Message: &protop2p.PexResponse{Addresses: pexAddresses}, + }) - case *protop2p.PexResponseV2: - // check if the response matches a request that was made to that peer + case *protop2p.PexResponse: + // Verify that this response corresponds to one of our pending requests. if err := r.markPeerResponse(envelope.From); err != nil { - return err + return 0, err } - // check the size of the response - if len(msg.Addresses) > int(maxAddresses) { - return fmt.Errorf("peer sent too many addresses (max: %d, got: %d)", - maxAddresses, - len(msg.Addresses), - ) + // Verify that the response does not exceed the safety limit. 
+ if len(msg.Addresses) > maxAddresses { + return 0, fmt.Errorf("peer sent too many addresses (%d > maximum %d)", + len(msg.Addresses), maxAddresses) } + var numAdded int for _, pexAddress := range msg.Addresses { peerAddress, err := p2p.ParseNodeAddress(pexAddress.URL) if err != nil { @@ -296,103 +269,26 @@ func (r *ReactorV2) handlePexMessage(envelope p2p.Envelope) error { } added, err := r.peerManager.Add(peerAddress) if err != nil { - logger.Error("failed to add V2 PEX address", "address", peerAddress, "err", err) + logger.Error("failed to add PEX address", "address", peerAddress, "err", err) + continue } if added { - r.newPeers++ - logger.Debug("added V2 PEX address", "address", peerAddress) - } - r.totalPeers++ - } - - default: - return fmt.Errorf("received unknown message: %T", msg) - } - - return nil -} - -// resolve resolves a set of peer addresses into PEX addresses. -// -// FIXME: This is necessary because the current PEX protocol only supports -// IP/port pairs, while the P2P stack uses NodeAddress URLs. The PEX protocol -// should really use URLs too, to exchange DNS names instead of IPs and allow -// different transport protocols (e.g. QUIC and MemoryTransport). -// -// FIXME: We may want to cache and parallelize this, but for now we'll just rely -// on the operating system to cache it for us. -func (r *ReactorV2) resolve(addresses []p2p.NodeAddress) []protop2p.PexAddress { - limit := len(addresses) - pexAddresses := make([]protop2p.PexAddress, 0, limit) - - for _, address := range addresses { - ctx, cancel := context.WithTimeout(context.Background(), resolveTimeout) - endpoints, err := address.Resolve(ctx) - r.Logger.Debug("resolved node address", "endpoints", endpoints) - cancel() - - if err != nil { - r.Logger.Debug("failed to resolve address", "address", address, "err", err) - continue - } - - for _, endpoint := range endpoints { - r.Logger.Debug("checking endpint", "IP", endpoint.IP, "Port", endpoint.Port) - if len(pexAddresses) >= limit { - return pexAddresses - - } else if endpoint.IP != nil { - r.Logger.Debug("appending pex address") - // PEX currently only supports IP-networked transports (as - // opposed to e.g. p2p.MemoryTransport). - // - // FIXME: as the PEX address contains no information about the - // protocol, we jam this into the ID. We won't need to this once - // we support URLs - pexAddresses = append(pexAddresses, protop2p.PexAddress{ - ID: string(address.NodeID), - IP: endpoint.IP.String(), - Port: uint32(endpoint.Port), - }) - } - } - } - return pexAddresses -} - -// handleMessage handles an Envelope sent from a peer on a specific p2p Channel. -// It will handle errors and any possible panics gracefully. A caller can handle -// any error returned by sending a PeerError on the respective channel.
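// Illustrative sketch, not from this patch: the removed handleMessage
// wrapper below used the common recover-to-error pattern, so that a panic
// while processing one peer's message becomes an error attributed to that
// peer instead of crashing the node. The pattern in isolation (handle is a
// hypothetical name):

package main

import "fmt"

// handle converts a panic in fn into an ordinary error via a deferred
// recover, mirroring the structure of the wrapper below.
func handle(fn func()) (err error) {
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf("panic in processing message: %v", e)
		}
	}()
	fn()
	return nil
}

func main() {
	fmt.Println(handle(func() {}))                // <nil>
	fmt.Println(handle(func() { panic("boom") })) // panic in processing message: boom
}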
-func (r *ReactorV2) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) { - defer func() { - if e := recover(); e != nil { - err = fmt.Errorf("panic in processing message: %v", e) - r.Logger.Error( - "recovering from processing message panic", - "err", err, - "stack", string(debug.Stack()), - ) - } - }() - - r.Logger.Debug("received PEX message", "peer", envelope.From) - - switch chID { - case p2p.ChannelID(PexChannel): - err = r.handlePexMessage(envelope) + return r.calculateNextRequestTime(numAdded), nil default: - err = fmt.Errorf("unknown channel ID (%d) for envelope (%v)", chID, envelope) + return 0, fmt.Errorf("received unknown message: %T", msg) } - - return err } // processPeerUpdate processes a PeerUpdate. For added peers, PeerStatusUp, we // send a request for addresses. -func (r *ReactorV2) processPeerUpdate(peerUpdate p2p.PeerUpdate) { - r.Logger.Debug("received PEX peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status) +func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { + r.logger.Debug("received PEX peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status) r.mtx.Lock() defer r.mtx.Unlock() @@ -408,112 +304,94 @@ func (r *ReactorV2) processPeerUpdate(peerUpdate p2p.PeerUpdate) { } } -func (r *ReactorV2) waitUntilNextRequest() <-chan time.Time { - return time.After(time.Until(r.nextRequestTime)) -} - -// sendRequestForPeers pops the first peerID off the list and sends the -// peer a request for more peer addresses. The function then moves the -// peer into the requestsSent bucket and calculates when the next request -// time should be -func (r *ReactorV2) sendRequestForPeers() { +// sendRequestForPeers chooses a peer from the set of available peers and sends +// that peer a request for more peer addresses. The chosen peer is moved into +// the requestsSent bucket so that we will not attempt to contact them again +// until they've replied or updated. +func (r *Reactor) sendRequestForPeers(ctx context.Context, pexCh *p2p.Channel) error { r.mtx.Lock() defer r.mtx.Unlock() if len(r.availablePeers) == 0 { // no peers are available - r.Logger.Debug("no available peers to send request to, waiting...") - r.nextRequestTime = time.Now().Add(noAvailablePeersWaitPeriod) - - return + r.logger.Debug("no available peers to send a PEX request to (retrying)") + return nil } - var peerID types.NodeID - // use range to get a random peer. + // Select an arbitrary peer from the available set. + var peerID types.NodeID for peerID = range r.availablePeers { break } - // The node accommodates for both pex systems - if r.isLegacyPeer(peerID) { - r.pexCh.Out <- p2p.Envelope{ - To: peerID, - Message: &protop2p.PexRequest{}, - } - } else { - r.pexCh.Out <- p2p.Envelope{ - To: peerID, - Message: &protop2p.PexRequestV2{}, - } + if err := pexCh.Send(ctx, p2p.Envelope{ + To: peerID, + Message: &protop2p.PexRequest{}, + }); err != nil { + return err } - // remove the peer from the abvailable peers list and mark it in the requestsSent map + // Move the peer from available to pending. delete(r.availablePeers, peerID) r.requestsSent[peerID] = struct{}{} - r.calculateNextRequestTime() - r.Logger.Debug("peer request sent", "next_request_time", r.nextRequestTime) + return nil } -// calculateNextRequestTime implements something of a proportional controller -// to estimate how often the reactor should be requesting new peer addresses. -// The dependent variable in this calculation is the ratio of new peers to -// all peers that the reactor receives. 
The interval is thus calculated as the -// inverse squared. In the beginning, all peers should be new peers. -// We expect this ratio to be near 1 and thus the interval to be as short -// as possible. As the node becomes more familiar with the network the ratio of -// new nodes will plummet to a very small number, meaning the interval expands -// to its upper bound. -// CONTRACT: Must use a write lock as nextRequestTime is updated -func (r *ReactorV2) calculateNextRequestTime() { - // check if the peer store is full. If so then there is no need - // to send peer requests too often +// calculateNextRequestTime selects how long we should wait before attempting +// to send out another request for peer addresses. +// +// This implements a simplified proportional control mechanism to poll more +// often when our knowledge of the network is incomplete, and less often as our +// knowledge grows. To estimate our knowledge of the network, we use the +// fraction of "new" peers (addresses we have not previously seen) to the total +// so far observed. When we first join the network, this fraction will be close +// to 1, meaning most new peers are "new" to us, and as we discover more peers, +// the fraction will go toward zero. +// +// The minimum interval will be minReceiveRequestInterval to ensure we will not +// request from any peer more often than we would allow them to do from us. +func (r *Reactor) calculateNextRequestTime(added int) time.Duration { + r.mtx.Lock() + defer r.mtx.Unlock() + + r.totalPeers += added + + // If the peer store is nearly full, wait the maximum interval. if ratio := r.peerManager.PeerRatio(); ratio >= 0.95 { - r.Logger.Debug("peer manager near full ratio, sleeping...", + r.logger.Debug("Peer manager is nearly full", "sleep_period", fullCapacityInterval, "ratio", ratio) - r.nextRequestTime = time.Now().Add(fullCapacityInterval) - return + return fullCapacityInterval } - // baseTime represents the shortest interval that we can send peer requests - // in. For example if we have 10 peers and we can't send a message to the - // same peer every 500ms, then we can send a request every 50ms. In practice - // we use a safety margin of 2, ergo 100ms - peers := tmmath.MinInt(len(r.availablePeers), 50) - baseTime := minReceiveRequestInterval - if peers > 0 { - baseTime = minReceiveRequestInterval * 2 / time.Duration(peers) + // If there are no available peers to query, poll less aggressively. + if len(r.availablePeers) == 0 { + r.logger.Debug("No available peers to send a PEX request", + "sleep_period", noAvailablePeersWaitPeriod) + return noAvailablePeersWaitPeriod } - if r.totalPeers > 0 || r.discoveryRatio == 0 { - // find the ratio of new peers. NOTE: We add 1 to both sides to avoid - // divide by zero problems - ratio := float32(r.totalPeers+1) / float32(r.newPeers+1) - // square the ratio in order to get non linear time intervals - // NOTE: The longest possible interval for a network with 100 or more peers - // where a node is connected to 50 of them is 2 minutes. - r.discoveryRatio = ratio * ratio - r.newPeers = 0 - r.totalPeers = 0 - } - // NOTE: As ratio is always >= 1, discovery ratio is >= 1. Therefore we don't need to worry - // about the next request time being less than the minimum time - r.nextRequestTime = time.Now().Add(baseTime * time.Duration(r.discoveryRatio)) + // Reaching here, there are available peers to query and the peer store + // still has space. Estimate our knowledge of the network from the latest + // update and choose a new interval. 
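+	//
+	// As a rough illustration (hypothetical numbers, not taken from this
+	// change): with minReceiveRequestInterval at 1s and 10 available peers,
+	// base below works out to 100ms. Having already observed 50 peers in
+	// total, a response that adds 4 new addresses gives
+	// multiplier = 51/5 = 10.2, so the next request waits about
+	// 100ms*10.2^2 + 1s, roughly 11.4s. Early on, when nearly every address
+	// is new, the multiplier stays near 1 and the wait stays close to
+	// base + minReceiveRequestInterval.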
+ base := float64(minReceiveRequestInterval) / float64(len(r.availablePeers)) + multiplier := float64(r.totalPeers+1) / float64(added+1) // +1 to avert zero division + return time.Duration(base*multiplier*multiplier) + minReceiveRequestInterval } -func (r *ReactorV2) markPeerRequest(peer types.NodeID) error { +func (r *Reactor) markPeerRequest(peer types.NodeID) error { r.mtx.Lock() defer r.mtx.Unlock() if lastRequestTime, ok := r.lastReceivedRequests[peer]; ok { - if time.Now().Before(lastRequestTime.Add(minReceiveRequestInterval)) { - return fmt.Errorf("peer sent a request too close after a prior one. Minimum interval: %v", - minReceiveRequestInterval) + if d := time.Since(lastRequestTime); d < minReceiveRequestInterval { + return fmt.Errorf("peer %v sent PEX request too soon (%v < minimum %v)", + peer, d, minReceiveRequestInterval) } } r.lastReceivedRequests[peer] = time.Now() return nil } -func (r *ReactorV2) markPeerResponse(peer types.NodeID) error { +func (r *Reactor) markPeerResponse(peer types.NodeID) error { r.mtx.Lock() defer r.mtx.Unlock() // check if a request to this peer was sent @@ -527,14 +405,3 @@ func (r *ReactorV2) markPeerResponse(peer types.NodeID) error { r.availablePeers[peer] = struct{}{} return nil } - -// all addresses must use a MCONN protocol for the peer to be considered part of the -// legacy p2p pex system -func (r *ReactorV2) isLegacyPeer(peer types.NodeID) bool { - for _, addr := range r.peerManager.Addresses(peer) { - if addr.Protocol != p2p.MConnProtocol { - return false - } - } - return true -} diff --git a/internal/p2p/pex/reactor_test.go b/internal/p2p/pex/reactor_test.go index f97fcecd31..840562ab43 100644 --- a/internal/p2p/pex/reactor_test.go +++ b/internal/p2p/pex/reactor_test.go @@ -1,11 +1,9 @@ -// Temporarily disabled pending ttps://github.com/tendermint/tendermint/issues/7626. 
-//go:build issue7626 -//+build issue7626 - +//nolint:unused package pex_test import ( "context" + "errors" "strings" "testing" "time" @@ -25,42 +23,46 @@ import ( const ( checkFrequency = 500 * time.Millisecond defaultBufferSize = 2 - shortWait = 10 * time.Second - longWait = 60 * time.Second + shortWait = 5 * time.Second + longWait = 20 * time.Second firstNode = 0 secondNode = 1 thirdNode = 2 - fourthNode = 3 ) func TestReactorBasic(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // start a network with one mock reactor and one "real" reactor - testNet := setupNetwork(t, testOptions{ + testNet := setupNetwork(ctx, t, testOptions{ MockNodes: 1, TotalNodes: 2, }) - testNet.connectAll(t) - testNet.start(t) + testNet.connectAll(ctx, t) + testNet.start(ctx, t) // assert that the mock node receives a request from the real node - testNet.listenForRequest(t, secondNode, firstNode, shortWait) + testNet.listenForRequest(ctx, t, secondNode, firstNode, shortWait) // assert that when a mock node sends a request it receives a response (and // the correct one) - testNet.sendRequest(t, firstNode, secondNode, true) - testNet.listenForResponse(t, secondNode, firstNode, shortWait, []p2pproto.PexAddressV2(nil)) + testNet.sendRequest(ctx, t, firstNode, secondNode) + testNet.listenForResponse(ctx, t, secondNode, firstNode, shortWait, []p2pproto.PexAddress(nil)) } func TestReactorConnectFullNetwork(t *testing.T) { - testNet := setupNetwork(t, testOptions{ + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testNet := setupNetwork(ctx, t, testOptions{ TotalNodes: 4, }) // make every node be only connected with one other node (it actually ends up // being two because of two way connections but oh well) - testNet.connectN(t, 1) - testNet.start(t) + testNet.connectN(ctx, t, 1) + testNet.start(ctx, t) // assert that all nodes add each other in the network for idx := 0; idx < len(testNet.nodes); idx++ { @@ -69,59 +71,70 @@ func TestReactorConnectFullNetwork(t *testing.T) { } func TestReactorSendsRequestsTooOften(t *testing.T) { - r := setupSingle(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + r := setupSingle(ctx, t) badNode := newNodeID(t, "b") r.pexInCh <- p2p.Envelope{ From: badNode, - Message: &p2pproto.PexRequestV2{}, + Message: &p2pproto.PexRequest{}, } resp := <-r.pexOutCh - msg, ok := resp.Message.(*p2pproto.PexResponseV2) + msg, ok := resp.Message.(*p2pproto.PexResponse) require.True(t, ok) require.Empty(t, msg.Addresses) r.pexInCh <- p2p.Envelope{ From: badNode, - Message: &p2pproto.PexRequestV2{}, + Message: &p2pproto.PexRequest{}, } peerErr := <-r.pexErrCh require.Error(t, peerErr.Err) require.Empty(t, r.pexOutCh) - require.Contains(t, peerErr.Err.Error(), "peer sent a request too close after a prior one") + require.Contains(t, peerErr.Err.Error(), "sent PEX request too soon") require.Equal(t, badNode, peerErr.NodeID) } func TestReactorSendsResponseWithoutRequest(t *testing.T) { - testNet := setupNetwork(t, testOptions{ + t.Skip("This test needs updated https://github.com/tendermint/tendermint/issue/7634") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testNet := setupNetwork(ctx, t, testOptions{ MockNodes: 1, TotalNodes: 3, }) - testNet.connectAll(t) - testNet.start(t) + testNet.connectAll(ctx, t) + testNet.start(ctx, t) // firstNode sends the secondNode an unrequested response // NOTE: secondNode will send a request by default during startup so we send // two 
responses to counter that. - testNet.sendResponse(t, firstNode, secondNode, []int{thirdNode}, true) - testNet.sendResponse(t, firstNode, secondNode, []int{thirdNode}, true) + testNet.sendResponse(ctx, t, firstNode, secondNode, []int{thirdNode}) + testNet.sendResponse(ctx, t, firstNode, secondNode, []int{thirdNode}) // secondNode should evict the firstNode - testNet.listenForPeerUpdate(t, secondNode, firstNode, p2p.PeerStatusDown, shortWait) + testNet.listenForPeerUpdate(ctx, t, secondNode, firstNode, p2p.PeerStatusDown, shortWait) } func TestReactorNeverSendsTooManyPeers(t *testing.T) { - testNet := setupNetwork(t, testOptions{ + t.Skip("This test needs updated https://github.com/tendermint/tendermint/issue/7634") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testNet := setupNetwork(ctx, t, testOptions{ MockNodes: 1, TotalNodes: 2, }) - testNet.connectAll(t) - testNet.start(t) + testNet.connectAll(ctx, t) + testNet.start(ctx, t) - testNet.addNodes(t, 110) + testNet.addNodes(ctx, t, 110) nodes := make([]int, 110) for i := 0; i < len(nodes); i++ { nodes[i] = i + 2 @@ -130,20 +143,23 @@ func TestReactorNeverSendsTooManyPeers(t *testing.T) { // first we check that even although we have 110 peers, honest pex reactors // only send 100 (test if secondNode sends firstNode 100 addresses) - testNet.pingAndlistenForNAddresses(t, secondNode, firstNode, shortWait, 100) + testNet.pingAndlistenForNAddresses(ctx, t, secondNode, firstNode, shortWait, 100) } func TestReactorErrorsOnReceivingTooManyPeers(t *testing.T) { - r := setupSingle(t) - peer := p2p.NodeAddress{Protocol: p2p.MemoryProtocol, NodeID: randomNodeID(t)} + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + r := setupSingle(ctx, t) + peer := p2p.NodeAddress{Protocol: p2p.MemoryProtocol, NodeID: randomNodeID()} added, err := r.manager.Add(peer) require.NoError(t, err) require.True(t, added) - addresses := make([]p2pproto.PexAddressV2, 101) + addresses := make([]p2pproto.PexAddress, 101) for i := 0; i < len(addresses); i++ { - nodeAddress := p2p.NodeAddress{Protocol: p2p.MemoryProtocol, NodeID: randomNodeID(t)} - addresses[i] = p2pproto.PexAddressV2{ + nodeAddress := p2p.NodeAddress{Protocol: p2p.MemoryProtocol, NodeID: randomNodeID()} + addresses[i] = p2pproto.PexAddress{ URL: nodeAddress.String(), } } @@ -156,12 +172,12 @@ func TestReactorErrorsOnReceivingTooManyPeers(t *testing.T) { select { // wait for a request and then send a response with too many addresses case req := <-r.pexOutCh: - if _, ok := req.Message.(*p2pproto.PexRequestV2); !ok { + if _, ok := req.Message.(*p2pproto.PexRequest); !ok { t.Fatal("expected v2 pex request") } r.pexInCh <- p2p.Envelope{ From: peer.NodeID, - Message: &p2pproto.PexResponseV2{ + Message: &p2pproto.PexResponse{ Addresses: addresses, }, } @@ -178,33 +194,40 @@ func TestReactorErrorsOnReceivingTooManyPeers(t *testing.T) { } func TestReactorSmallPeerStoreInALargeNetwork(t *testing.T) { - testNet := setupNetwork(t, testOptions{ + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testNet := setupNetwork(ctx, t, testOptions{ TotalNodes: 8, MaxPeers: 4, MaxConnected: 3, BufferSize: 8, }) - testNet.connectN(t, 1) - testNet.start(t) + testNet.connectN(ctx, t, 1) + testNet.start(ctx, t) // test that all nodes reach full capacity for _, nodeID := range testNet.nodes { require.Eventually(t, func() bool { // nolint:scopelint return testNet.network.Nodes[nodeID].PeerManager.PeerRatio() >= 0.9 - }, longWait, checkFrequency) + }, 
longWait, checkFrequency, + "peer ratio is: %f", testNet.network.Nodes[nodeID].PeerManager.PeerRatio()) } } func TestReactorLargePeerStoreInASmallNetwork(t *testing.T) { - testNet := setupNetwork(t, testOptions{ + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testNet := setupNetwork(ctx, t, testOptions{ TotalNodes: 3, MaxPeers: 25, MaxConnected: 25, BufferSize: 5, }) - testNet.connectN(t, 1) - testNet.start(t) + testNet.connectN(ctx, t, 1) + testNet.start(ctx, t) // assert that all nodes add each other in the network for idx := 0; idx < len(testNet.nodes); idx++ { @@ -213,12 +236,16 @@ func TestReactorLargePeerStoreInASmallNetwork(t *testing.T) { } func TestReactorWithNetworkGrowth(t *testing.T) { - testNet := setupNetwork(t, testOptions{ + t.Skip("This test needs updated https://github.com/tendermint/tendermint/issue/7634") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testNet := setupNetwork(ctx, t, testOptions{ TotalNodes: 5, BufferSize: 5, }) - testNet.connectAll(t) - testNet.start(t) + testNet.connectAll(ctx, t) + testNet.start(ctx, t) // assert that all nodes add each other in the network for idx := 0; idx < len(testNet.nodes); idx++ { @@ -226,14 +253,14 @@ func TestReactorWithNetworkGrowth(t *testing.T) { } // now we inject 10 more nodes - testNet.addNodes(t, 10) + testNet.addNodes(ctx, t, 10) for i := 5; i < testNet.total; i++ { node := testNet.nodes[i] - require.NoError(t, testNet.reactors[node].Start()) + require.NoError(t, testNet.reactors[node].Start(ctx)) require.True(t, testNet.reactors[node].IsRunning()) // we connect all new nodes to a single entry point and check that the // node can distribute the addresses to all the others - testNet.connectPeers(t, 0, i) + testNet.connectPeers(ctx, t, 0, i) } require.Len(t, testNet.reactors, 15) @@ -243,40 +270,8 @@ func TestReactorWithNetworkGrowth(t *testing.T) { } } -func TestReactorIntegrationWithLegacyHandleRequest(t *testing.T) { - testNet := setupNetwork(t, testOptions{ - MockNodes: 1, - TotalNodes: 3, - }) - testNet.connectAll(t) - testNet.start(t) - t.Log(testNet.nodes) - - // mock node sends a V1 Pex message to the second node - testNet.sendRequest(t, firstNode, secondNode, false) - addrs := testNet.getAddressesFor(t, []int{thirdNode}) - testNet.listenForLegacyResponse(t, secondNode, firstNode, shortWait, addrs) -} - -func TestReactorIntegrationWithLegacyHandleResponse(t *testing.T) { - testNet := setupNetwork(t, testOptions{ - MockNodes: 1, - TotalNodes: 4, - BufferSize: 4, - }) - testNet.connectPeers(t, firstNode, secondNode) - testNet.connectPeers(t, firstNode, thirdNode) - testNet.connectPeers(t, firstNode, fourthNode) - testNet.start(t) - - testNet.listenForRequest(t, secondNode, firstNode, shortWait) - // send a v1 response instead - testNet.sendResponse(t, firstNode, secondNode, []int{thirdNode, fourthNode}, false) - testNet.requireNumberOfPeers(t, secondNode, len(testNet.nodes)-1, shortWait) -} - type singleTestReactor struct { - reactor *pex.ReactorV2 + reactor *pex.Reactor pexInCh chan p2p.Envelope pexOutCh chan p2p.Envelope pexErrCh chan p2p.PeerError @@ -285,7 +280,7 @@ type singleTestReactor struct { manager *p2p.PeerManager } -func setupSingle(t *testing.T) *singleTestReactor { +func setupSingle(ctx context.Context, t *testing.T) *singleTestReactor { t.Helper() nodeID := newNodeID(t, "a") chBuf := 2 @@ -305,16 +300,14 @@ func setupSingle(t *testing.T) *singleTestReactor { peerManager, err := p2p.NewPeerManager(nodeID, dbm.NewMemDB(), 
p2p.PeerManagerOptions{})
 	require.NoError(t, err)

-	reactor := pex.NewReactorV2(log.TestingLogger(), peerManager, pexCh, peerUpdates)
-	require.NoError(t, reactor.Start())
-	t.Cleanup(func() {
-		err := reactor.Stop()
-		if err != nil {
-			t.Fatal(err)
-		}
-		pexCh.Close()
-		peerUpdates.Close()
-	})
+	chCreator := func(context.Context, *p2p.ChannelDescriptor) (*p2p.Channel, error) {
+		return pexCh, nil
+	}
+
+	reactor := pex.NewReactor(log.NewNopLogger(), peerManager, chCreator, func(_ context.Context) *p2p.PeerUpdates { return peerUpdates })
+
+	require.NoError(t, reactor.Start(ctx))
+	t.Cleanup(reactor.Wait)

 	return &singleTestReactor{
 		reactor: reactor,
@@ -331,7 +324,7 @@ type reactorTestSuite struct {
 	network *p2ptest.Network
 	logger  log.Logger

-	reactors    map[types.NodeID]*pex.ReactorV2
+	reactors    map[types.NodeID]*pex.Reactor
 	pexChannels map[types.NodeID]*p2p.Channel

 	peerChans   map[types.NodeID]chan p2p.PeerUpdate
@@ -353,7 +346,7 @@ type testOptions struct {

 // setup setups a test suite with a network of nodes. Mocknodes represent the
 // hollow nodes that the test can listen and send on
-func setupNetwork(t *testing.T, opts testOptions) *reactorTestSuite {
+func setupNetwork(ctx context.Context, t *testing.T, opts testOptions) *reactorTestSuite {
 	t.Helper()

 	require.Greater(t, opts.TotalNodes, opts.MockNodes)
@@ -372,9 +365,9 @@ func setupNetwork(t *testing.T, opts testOptions) *reactorTestSuite {
 	realNodes := opts.TotalNodes - opts.MockNodes

 	rts := &reactorTestSuite{
-		logger:      log.TestingLogger().With("testCase", t.Name()),
-		network:     p2ptest.MakeNetwork(t, networkOpts),
-		reactors:    make(map[types.NodeID]*pex.ReactorV2, realNodes),
+		logger:      log.NewNopLogger().With("testCase", t.Name()),
+		network:     p2ptest.MakeNetwork(ctx, t, networkOpts),
+		reactors:    make(map[types.NodeID]*pex.Reactor, realNodes),
 		pexChannels: make(map[types.NodeID]*p2p.Channel, opts.TotalNodes),
 		peerChans:   make(map[types.NodeID]chan p2p.PeerUpdate, opts.TotalNodes),
 		peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, opts.TotalNodes),
@@ -384,25 +377,31 @@ func setupNetwork(t *testing.T, opts testOptions) *reactorTestSuite {

 	// NOTE: we don't assert that the channels get drained after stopping the
 	// reactor
-	rts.pexChannels = rts.network.MakeChannelsNoCleanup(
-		t, pex.ChannelDescriptor(), new(p2pproto.PexMessage), chBuf,
-	)
+	rts.pexChannels = rts.network.MakeChannelsNoCleanup(ctx, t, pex.ChannelDescriptor())

 	idx := 0
 	for nodeID := range rts.network.Nodes {
+		// make a copy to avoid capturing the loop variable by
+		// reference:
+		nodeID := nodeID
+
 		rts.peerChans[nodeID] = make(chan p2p.PeerUpdate, chBuf)
 		rts.peerUpdates[nodeID] = p2p.NewPeerUpdates(rts.peerChans[nodeID], chBuf)
-		rts.network.Nodes[nodeID].PeerManager.Register(rts.peerUpdates[nodeID])
+		rts.network.Nodes[nodeID].PeerManager.Register(ctx, rts.peerUpdates[nodeID])
+
+		chCreator := func(context.Context, *p2p.ChannelDescriptor) (*p2p.Channel, error) {
+			return rts.pexChannels[nodeID], nil
+		}

 		// the first nodes in the array are always mock nodes
 		if idx < opts.MockNodes {
 			rts.mocks = append(rts.mocks, nodeID)
 		} else {
-			rts.reactors[nodeID] = pex.NewReactorV2(
+			rts.reactors[nodeID] = pex.NewReactor(
 				rts.logger.With("nodeID", nodeID),
 				rts.network.Nodes[nodeID].PeerManager,
-				rts.pexChannels[nodeID],
-				rts.peerUpdates[nodeID],
+				chCreator,
+				func(_ context.Context) *p2p.PeerUpdates { return rts.peerUpdates[nodeID] },
 			)
 		}
 		rts.nodes = append(rts.nodes, nodeID)
@@ -413,17 +412,11 @@
require.Len(t, rts.reactors, realNodes) t.Cleanup(func() { - for nodeID, reactor := range rts.reactors { + for _, reactor := range rts.reactors { if reactor.IsRunning() { - require.NoError(t, reactor.Stop()) + reactor.Wait() require.False(t, reactor.IsRunning()) } - rts.pexChannels[nodeID].Close() - rts.peerUpdates[nodeID].Close() - } - for _, nodeID := range rts.mocks { - rts.pexChannels[nodeID].Close() - rts.peerUpdates[nodeID].Close() } }) @@ -431,36 +424,41 @@ func setupNetwork(t *testing.T, opts testOptions) *reactorTestSuite { } // starts up the pex reactors for each node -func (r *reactorTestSuite) start(t *testing.T) { +func (r *reactorTestSuite) start(ctx context.Context, t *testing.T) { t.Helper() - for _, reactor := range r.reactors { - require.NoError(t, reactor.Start()) + for name, reactor := range r.reactors { + require.NoError(t, reactor.Start(ctx)) require.True(t, reactor.IsRunning()) + t.Log("started", name) } } -func (r *reactorTestSuite) addNodes(t *testing.T, nodes int) { +func (r *reactorTestSuite) addNodes(ctx context.Context, t *testing.T, nodes int) { t.Helper() for i := 0; i < nodes; i++ { - node := r.network.MakeNode(t, nil, p2ptest.NodeOptions{ + nodeCtx := p2ptest.WithLoggerAttrs(ctx, "validator", i) + node := r.network.MakeNode(nodeCtx, t, nil, p2ptest.NodeOptions{ MaxPeers: r.opts.MaxPeers, MaxConnected: r.opts.MaxConnected, - }, r.logger.With("validator", i)) + }) r.network.Nodes[node.NodeID] = node nodeID := node.NodeID - r.pexChannels[nodeID] = node.MakeChannelNoCleanup( - t, pex.ChannelDescriptor(), new(p2pproto.PexMessage), r.opts.BufferSize, - ) + r.pexChannels[nodeID] = node.MakeChannelNoCleanup(ctx, t, pex.ChannelDescriptor()) r.peerChans[nodeID] = make(chan p2p.PeerUpdate, r.opts.BufferSize) r.peerUpdates[nodeID] = p2p.NewPeerUpdates(r.peerChans[nodeID], r.opts.BufferSize) - r.network.Nodes[nodeID].PeerManager.Register(r.peerUpdates[nodeID]) - r.reactors[nodeID] = pex.NewReactorV2( + r.network.Nodes[nodeID].PeerManager.Register(ctx, r.peerUpdates[nodeID]) + + chCreator := func(context.Context, *p2p.ChannelDescriptor) (*p2p.Channel, error) { + return r.pexChannels[nodeID], nil + } + + r.reactors[nodeID] = pex.NewReactor( r.logger.With("nodeID", nodeID), r.network.Nodes[nodeID].PeerManager, - r.pexChannels[nodeID], - r.peerUpdates[nodeID], + chCreator, + func(_ context.Context) *p2p.PeerUpdates { return r.peerUpdates[nodeID] }, ) r.nodes = append(r.nodes, nodeID) r.total++ @@ -468,54 +466,59 @@ func (r *reactorTestSuite) addNodes(t *testing.T, nodes int) { } func (r *reactorTestSuite) listenFor( + ctx context.Context, t *testing.T, node types.NodeID, - conditional func(msg p2p.Envelope) bool, - assertion func(t *testing.T, msg p2p.Envelope) bool, + conditional func(msg *p2p.Envelope) bool, + assertion func(t *testing.T, msg *p2p.Envelope) bool, waitPeriod time.Duration, ) { - timesUp := time.After(waitPeriod) - for { - select { - case envelope := <-r.pexChannels[node].In: - if conditional(envelope) && assertion(t, envelope) { - return - } - case <-timesUp: - require.Fail(t, "timed out waiting for message", - "node=%v, waitPeriod=%s", node, waitPeriod) + ctx, cancel := context.WithTimeout(ctx, waitPeriod) + defer cancel() + iter := r.pexChannels[node].Receive(ctx) + for iter.Next(ctx) { + envelope := iter.Envelope() + if conditional(envelope) && assertion(t, envelope) { + return } } + + if errors.Is(ctx.Err(), context.DeadlineExceeded) { + require.Fail(t, "timed out waiting for message", + "node=%v, waitPeriod=%s", node, waitPeriod) + } + } -func (r 
*reactorTestSuite) listenForRequest(t *testing.T, fromNode, toNode int, waitPeriod time.Duration) { - r.logger.Info("Listening for request", "from", fromNode, "to", toNode) +func (r *reactorTestSuite) listenForRequest(ctx context.Context, t *testing.T, fromNode, toNode int, waitPeriod time.Duration) { to, from := r.checkNodePair(t, toNode, fromNode) - conditional := func(msg p2p.Envelope) bool { - _, ok := msg.Message.(*p2pproto.PexRequestV2) + conditional := func(msg *p2p.Envelope) bool { + _, ok := msg.Message.(*p2pproto.PexRequest) return ok && msg.From == from } - assertion := func(t *testing.T, msg p2p.Envelope) bool { - require.Equal(t, &p2pproto.PexRequestV2{}, msg.Message) + assertion := func(t *testing.T, msg *p2p.Envelope) bool { + require.Equal(t, &p2pproto.PexRequest{}, msg.Message) return true } - r.listenFor(t, to, conditional, assertion, waitPeriod) + r.listenFor(ctx, t, to, conditional, assertion, waitPeriod) } func (r *reactorTestSuite) pingAndlistenForNAddresses( + ctx context.Context, t *testing.T, fromNode, toNode int, waitPeriod time.Duration, addresses int, ) { - r.logger.Info("Listening for addresses", "from", fromNode, "to", toNode) + t.Helper() + to, from := r.checkNodePair(t, toNode, fromNode) - conditional := func(msg p2p.Envelope) bool { - _, ok := msg.Message.(*p2pproto.PexResponseV2) + conditional := func(msg *p2p.Envelope) bool { + _, ok := msg.Message.(*p2pproto.PexResponse) return ok && msg.From == from } - assertion := func(t *testing.T, msg p2p.Envelope) bool { - m, ok := msg.Message.(*p2pproto.PexResponseV2) + assertion := func(t *testing.T, msg *p2p.Envelope) bool { + m, ok := msg.Message.(*p2pproto.PexResponse) if !ok { require.Fail(t, "expected pex response v2") return true @@ -527,64 +530,47 @@ func (r *reactorTestSuite) pingAndlistenForNAddresses( // if we didn't get the right length, we wait and send the // request again time.Sleep(300 * time.Millisecond) - r.sendRequest(t, toNode, fromNode, true) + r.sendRequest(ctx, t, toNode, fromNode) return false } - r.sendRequest(t, toNode, fromNode, true) - r.listenFor(t, to, conditional, assertion, waitPeriod) + r.sendRequest(ctx, t, toNode, fromNode) + r.listenFor(ctx, t, to, conditional, assertion, waitPeriod) } func (r *reactorTestSuite) listenForResponse( - t *testing.T, - fromNode, toNode int, - waitPeriod time.Duration, - addresses []p2pproto.PexAddressV2, -) { - r.logger.Info("Listening for response", "from", fromNode, "to", toNode) - to, from := r.checkNodePair(t, toNode, fromNode) - conditional := func(msg p2p.Envelope) bool { - _, ok := msg.Message.(*p2pproto.PexResponseV2) - r.logger.Info("message", msg, "ok", ok) - return ok && msg.From == from - } - assertion := func(t *testing.T, msg p2p.Envelope) bool { - require.Equal(t, &p2pproto.PexResponseV2{Addresses: addresses}, msg.Message) - return true - } - r.listenFor(t, to, conditional, assertion, waitPeriod) -} - -func (r *reactorTestSuite) listenForLegacyResponse( + ctx context.Context, t *testing.T, fromNode, toNode int, waitPeriod time.Duration, addresses []p2pproto.PexAddress, ) { - r.logger.Info("Listening for response", "from", fromNode, "to", toNode) to, from := r.checkNodePair(t, toNode, fromNode) - conditional := func(msg p2p.Envelope) bool { + conditional := func(msg *p2p.Envelope) bool { _, ok := msg.Message.(*p2pproto.PexResponse) return ok && msg.From == from } - assertion := func(t *testing.T, msg p2p.Envelope) bool { + assertion := func(t *testing.T, msg *p2p.Envelope) bool { require.Equal(t, &p2pproto.PexResponse{Addresses: 
addresses}, msg.Message) return true } - r.listenFor(t, to, conditional, assertion, waitPeriod) + r.listenFor(ctx, t, to, conditional, assertion, waitPeriod) } func (r *reactorTestSuite) listenForPeerUpdate( + ctx context.Context, t *testing.T, onNode, withNode int, status p2p.PeerStatus, waitPeriod time.Duration, ) { on, with := r.checkNodePair(t, onNode, withNode) - sub := r.network.Nodes[on].PeerManager.Subscribe() - defer sub.Close() + sub := r.network.Nodes[on].PeerManager.Subscribe(ctx) timesUp := time.After(waitPeriod) for { select { + case <-ctx.Done(): + require.Fail(t, "operation canceled") + return case peerUpdate := <-sub.Updates(): if peerUpdate.NodeID == with { require.Equal(t, status, peerUpdate.Status) @@ -599,73 +585,41 @@ func (r *reactorTestSuite) listenForPeerUpdate( } } -func (r *reactorTestSuite) getV2AddressesFor(nodes []int) []p2pproto.PexAddressV2 { - addresses := make([]p2pproto.PexAddressV2, len(nodes)) - for idx, node := range nodes { - nodeID := r.nodes[node] - addresses[idx] = p2pproto.PexAddressV2{ - URL: r.network.Nodes[nodeID].NodeAddress.String(), - } - } - return addresses -} - -func (r *reactorTestSuite) getAddressesFor(t *testing.T, nodes []int) []p2pproto.PexAddress { +func (r *reactorTestSuite) getAddressesFor(nodes []int) []p2pproto.PexAddress { addresses := make([]p2pproto.PexAddress, len(nodes)) for idx, node := range nodes { nodeID := r.nodes[node] - nodeAddrs := r.network.Nodes[nodeID].NodeAddress - endpoints, err := nodeAddrs.Resolve(context.Background()) - require.NoError(t, err) - require.Len(t, endpoints, 1) addresses[idx] = p2pproto.PexAddress{ - ID: string(nodeAddrs.NodeID), - IP: endpoints[0].IP.String(), - Port: uint32(endpoints[0].Port), + URL: r.network.Nodes[nodeID].NodeAddress.String(), } } return addresses } -func (r *reactorTestSuite) sendRequest(t *testing.T, fromNode, toNode int, v2 bool) { +func (r *reactorTestSuite) sendRequest(ctx context.Context, t *testing.T, fromNode, toNode int) { + t.Helper() to, from := r.checkNodePair(t, toNode, fromNode) - if v2 { - r.pexChannels[from].Out <- p2p.Envelope{ - To: to, - Message: &p2pproto.PexRequestV2{}, - } - } else { - r.pexChannels[from].Out <- p2p.Envelope{ - To: to, - Message: &p2pproto.PexRequest{}, - } - } + require.NoError(t, r.pexChannels[from].Send(ctx, p2p.Envelope{ + To: to, + Message: &p2pproto.PexRequest{}, + })) } func (r *reactorTestSuite) sendResponse( + ctx context.Context, t *testing.T, fromNode, toNode int, withNodes []int, - v2 bool, ) { + t.Helper() from, to := r.checkNodePair(t, fromNode, toNode) - if v2 { - addrs := r.getV2AddressesFor(withNodes) - r.pexChannels[from].Out <- p2p.Envelope{ - To: to, - Message: &p2pproto.PexResponseV2{ - Addresses: addrs, - }, - } - } else { - addrs := r.getAddressesFor(t, withNodes) - r.pexChannels[from].Out <- p2p.Envelope{ - To: to, - Message: &p2pproto.PexResponse{ - Addresses: addrs, - }, - } - } + addrs := r.getAddressesFor(withNodes) + require.NoError(t, r.pexChannels[from].Send(ctx, p2p.Envelope{ + To: to, + Message: &p2pproto.PexResponse{ + Addresses: addrs, + }, + })) } func (r *reactorTestSuite) requireNumberOfPeers( @@ -684,28 +638,27 @@ func (r *reactorTestSuite) requireNumberOfPeers( ) } -func (r *reactorTestSuite) connectAll(t *testing.T) { - r.connectN(t, r.total-1) +func (r *reactorTestSuite) connectAll(ctx context.Context, t *testing.T) { + r.connectN(ctx, t, r.total-1) } // connects all nodes with n other nodes -func (r *reactorTestSuite) connectN(t *testing.T, n int) { +func (r *reactorTestSuite) connectN(ctx 
context.Context, t *testing.T, n int) { if n >= r.total { require.Fail(t, "connectN: n must be less than the size of the network - 1") } for i := 0; i < r.total; i++ { for j := 0; j < n; j++ { - r.connectPeers(t, i, (i+j+1)%r.total) + r.connectPeers(ctx, t, i, (i+j+1)%r.total) } } } // connects node1 to node2 -func (r *reactorTestSuite) connectPeers(t *testing.T, sourceNode, targetNode int) { +func (r *reactorTestSuite) connectPeers(ctx context.Context, t *testing.T, sourceNode, targetNode int) { t.Helper() node1, node2 := r.checkNodePair(t, sourceNode, targetNode) - r.logger.Info("connecting peers", "sourceNode", sourceNode, "targetNode", targetNode) n1 := r.network.Nodes[node1] if n1 == nil { @@ -719,44 +672,31 @@ func (r *reactorTestSuite) connectPeers(t *testing.T, sourceNode, targetNode int return } - sourceSub := n1.PeerManager.Subscribe() - defer sourceSub.Close() - targetSub := n2.PeerManager.Subscribe() - defer targetSub.Close() + sourceSub := n1.PeerManager.Subscribe(ctx) + targetSub := n2.PeerManager.Subscribe(ctx) sourceAddress := n1.NodeAddress - r.logger.Debug("source address", "address", sourceAddress) targetAddress := n2.NodeAddress - r.logger.Debug("target address", "address", targetAddress) added, err := n1.PeerManager.Add(targetAddress) require.NoError(t, err) if !added { - r.logger.Debug("nodes already know about one another", - "sourceNode", sourceNode, "targetNode", targetNode) return } select { case peerUpdate := <-targetSub.Updates(): - require.Equal(t, p2p.PeerUpdate{ - NodeID: node1, - Status: p2p.PeerStatusUp, - }, peerUpdate) - r.logger.Debug("target connected with source") + require.Equal(t, peerUpdate.NodeID, node1) + require.Equal(t, peerUpdate.Status, p2p.PeerStatusUp) case <-time.After(2 * time.Second): require.Fail(t, "timed out waiting for peer", "%v accepting %v", targetNode, sourceNode) } - select { case peerUpdate := <-sourceSub.Updates(): - require.Equal(t, p2p.PeerUpdate{ - NodeID: node2, - Status: p2p.PeerStatusUp, - }, peerUpdate) - r.logger.Debug("source connected with target") + require.Equal(t, peerUpdate.NodeID, node2) + require.Equal(t, peerUpdate.Status, p2p.PeerStatusUp) case <-time.After(2 * time.Second): require.Fail(t, "timed out waiting for peer", "%v dialing %v", sourceNode, targetNode) @@ -767,32 +707,6 @@ func (r *reactorTestSuite) connectPeers(t *testing.T, sourceNode, targetNode int require.True(t, added) } -// nolint: unused -func (r *reactorTestSuite) pexAddresses(t *testing.T, nodeIndices []int) []p2pproto.PexAddress { - var addresses []p2pproto.PexAddress - for _, i := range nodeIndices { - if i < len(r.nodes) { - require.Fail(t, "index for pex address is greater than number of nodes") - } - nodeAddrs := r.network.Nodes[r.nodes[i]].NodeAddress - ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) - endpoints, err := nodeAddrs.Resolve(ctx) - cancel() - require.NoError(t, err) - for _, endpoint := range endpoints { - if endpoint.IP != nil { - addresses = append(addresses, p2pproto.PexAddress{ - ID: string(nodeAddrs.NodeID), - IP: endpoint.IP.String(), - Port: uint32(endpoint.Port), - }) - } - } - - } - return addresses -} - func (r *reactorTestSuite) checkNodePair(t *testing.T, first, second int) (types.NodeID, types.NodeID) { require.NotEqual(t, first, second) require.Less(t, first, r.total) @@ -817,6 +731,6 @@ func newNodeID(t *testing.T, id string) types.NodeID { return nodeID } -func randomNodeID(t *testing.T) types.NodeID { +func randomNodeID() types.NodeID { return 
types.NodeIDFromPubKey(ed25519.GenPrivKey().PubKey()) } diff --git a/internal/p2p/pqueue.go b/internal/p2p/pqueue.go index d5051325f9..21c950dfb0 100644 --- a/internal/p2p/pqueue.go +++ b/internal/p2p/pqueue.go @@ -2,12 +2,14 @@ package p2p import ( "container/heap" + "context" "sort" "strconv" + "sync" "time" "github.com/gogo/protobuf/proto" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/libs/log" ) @@ -71,25 +73,27 @@ type pqScheduler struct { size uint sizes map[uint]uint // cumulative priority sizes pq *priorityQueue - chDescs []ChannelDescriptor + chDescs []*ChannelDescriptor capacity uint chPriorities map[ChannelID]uint enqueueCh chan Envelope dequeueCh chan Envelope - closer *tmsync.Closer - done *tmsync.Closer + + closeFn func() + closeCh <-chan struct{} + done chan struct{} } func newPQScheduler( logger log.Logger, m *Metrics, - chDescs []ChannelDescriptor, + chDescs []*ChannelDescriptor, enqueueBuf, dequeueBuf, capacity uint, ) *pqScheduler { // copy each ChannelDescriptor and sort them by ascending channel priority - chDescsCopy := make([]ChannelDescriptor, len(chDescs)) + chDescsCopy := make([]*ChannelDescriptor, len(chDescs)) copy(chDescsCopy, chDescs) sort.Slice(chDescsCopy, func(i, j int) bool { return chDescsCopy[i].Priority < chDescsCopy[j].Priority }) @@ -99,7 +103,7 @@ func newPQScheduler( ) for _, chDesc := range chDescsCopy { - chID := ChannelID(chDesc.ID) + chID := chDesc.ID chPriorities[chID] = uint(chDesc.Priority) sizes[uint(chDesc.Priority)] = 0 } @@ -107,6 +111,9 @@ func newPQScheduler( pq := make(priorityQueue, 0) heap.Init(&pq) + closeCh := make(chan struct{}) + once := &sync.Once{} + return &pqScheduler{ logger: logger.With("router", "scheduler"), metrics: m, @@ -117,32 +124,18 @@ func newPQScheduler( sizes: sizes, enqueueCh: make(chan Envelope, enqueueBuf), dequeueCh: make(chan Envelope, dequeueBuf), - closer: tmsync.NewCloser(), - done: tmsync.NewCloser(), + closeFn: func() { once.Do(func() { close(closeCh) }) }, + closeCh: closeCh, + done: make(chan struct{}), } } -func (s *pqScheduler) enqueue() chan<- Envelope { - return s.enqueueCh -} - -func (s *pqScheduler) dequeue() <-chan Envelope { - return s.dequeueCh -} - -func (s *pqScheduler) close() { - s.closer.Close() - <-s.done.Done() -} - -func (s *pqScheduler) closed() <-chan struct{} { - return s.closer.Done() -} - // start starts non-blocking process that starts the priority queue scheduler. -func (s *pqScheduler) start() { - go s.process() -} +func (s *pqScheduler) start(ctx context.Context) { go s.process(ctx) } +func (s *pqScheduler) enqueue() chan<- Envelope { return s.enqueueCh } +func (s *pqScheduler) dequeue() <-chan Envelope { return s.dequeueCh } +func (s *pqScheduler) close() { s.closeFn() } +func (s *pqScheduler) closed() <-chan struct{} { return s.done } // process starts a block process where we listen for Envelopes to enqueue. If // there is sufficient capacity, it will be enqueued into the priority queue, @@ -153,27 +146,26 @@ func (s *pqScheduler) start() { // // After we attempt to enqueue the incoming Envelope, if the priority queue is // non-empty, we pop the top Envelope and send it on the dequeueCh. 
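 //
 // For example (illustrative numbers only): with a capacity of 100 bytes, a
 // queued low-priority envelope of 60 bytes, and an incoming higher-priority
 // envelope of 50 bytes, the scheduler evicts the 60-byte envelope to make
 // room, since 60+50 exceeds the capacity; if the incoming envelope were
 // instead the lowest priority and no space could be freed, it would be the
 // one dropped.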
-func (s *pqScheduler) process() { - defer s.done.Close() +func (s *pqScheduler) process(ctx context.Context) { + defer close(s.done) for { select { case e := <-s.enqueueCh: - chIDStr := strconv.Itoa(int(e.channelID)) + chIDStr := strconv.Itoa(int(e.ChannelID)) pqEnv := &pqEnvelope{ envelope: e, size: uint(proto.Size(e.Message)), - priority: s.chPriorities[e.channelID], + priority: s.chPriorities[e.ChannelID], timestamp: time.Now().UTC(), } - s.metrics.PeerPendingSendBytes.With("peer_id", string(pqEnv.envelope.To)).Add(float64(pqEnv.size)) - // enqueue // Check if we have sufficient capacity to simply enqueue the incoming // Envelope. if s.size+pqEnv.size <= s.capacity { + s.metrics.PeerPendingSendBytes.With("peer_id", string(pqEnv.envelope.To)).Add(float64(pqEnv.size)) // enqueue the incoming Envelope s.push(pqEnv) } else { @@ -203,7 +195,7 @@ func (s *pqScheduler) process() { if tmpSize+pqEnv.size <= s.capacity { canEnqueue = true } else { - pqEnvTmpChIDStr := strconv.Itoa(int(pqEnvTmp.envelope.channelID)) + pqEnvTmpChIDStr := strconv.Itoa(int(pqEnvTmp.envelope.ChannelID)) s.metrics.PeerQueueDroppedMsgs.With("ch_id", pqEnvTmpChIDStr).Add(1) s.logger.Debug( "dropped envelope", @@ -213,6 +205,8 @@ func (s *pqScheduler) process() { "capacity", s.capacity, ) + s.metrics.PeerPendingSendBytes.With("peer_id", string(pqEnvTmp.envelope.To)).Add(float64(-pqEnvTmp.size)) + // dequeue/drop from the priority queue heap.Remove(s.pq, pqEnvTmp.index) @@ -258,21 +252,24 @@ func (s *pqScheduler) process() { "chID", chIDStr, "peer_id", string(pqEnv.envelope.To), "message_type", s.metrics.ValueToMetricLabel(pqEnv.envelope.Message)).Add(float64(pqEnv.size)) + s.metrics.PeerPendingSendBytes.With( + "peer_id", string(pqEnv.envelope.To)).Add(float64(-pqEnv.size)) select { case s.dequeueCh <- pqEnv.envelope: - case <-s.closer.Done(): + case <-s.closeCh: return } } - - case <-s.closer.Done(): + case <-ctx.Done(): + return + case <-s.closeCh: return } } } func (s *pqScheduler) push(pqEnv *pqEnvelope) { - chIDStr := strconv.Itoa(int(pqEnv.envelope.channelID)) + chIDStr := strconv.Itoa(int(pqEnv.envelope.ChannelID)) // enqueue the incoming Envelope heap.Push(s.pq, pqEnv) diff --git a/internal/p2p/pqueue_test.go b/internal/p2p/pqueue_test.go index ddb7addbe6..22ecbcecb5 100644 --- a/internal/p2p/pqueue_test.go +++ b/internal/p2p/pqueue_test.go @@ -1,27 +1,35 @@ package p2p import ( + "context" "testing" "time" + gogotypes "github.com/gogo/protobuf/types" + "github.com/tendermint/tendermint/libs/log" ) +type testMessage = gogotypes.StringValue + func TestCloseWhileDequeueFull(t *testing.T) { enqueueLength := 5 - chDescs := []ChannelDescriptor{ - {ID: 0x01, Priority: 1, MaxSendBytes: 4}, + chDescs := []*ChannelDescriptor{ + {ID: 0x01, Priority: 1}, } pqueue := newPQScheduler(log.NewNopLogger(), NopMetrics(), chDescs, uint(enqueueLength), 1, 120) for i := 0; i < enqueueLength; i++ { pqueue.enqueue() <- Envelope{ - channelID: 0x01, + ChannelID: 0x01, Message: &testMessage{Value: "foo"}, // 5 bytes } } - go pqueue.process() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + go pqueue.process(ctx) // sleep to allow context switch for process() to run time.Sleep(10 * time.Millisecond) diff --git a/internal/p2p/queue.go b/internal/p2p/queue.go index cf36d3ca67..2ce2f23fe8 100644 --- a/internal/p2p/queue.go +++ b/internal/p2p/queue.go @@ -1,7 +1,7 @@ package p2p import ( - tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "sync" ) // default capacity for the size of a queue @@ -32,28 
+32,22 @@ type queue interface { // in the order they were received, and blocks until message is received. type fifoQueue struct { queueCh chan Envelope - closer *tmsync.Closer + closeFn func() + closeCh <-chan struct{} } func newFIFOQueue(size int) queue { + closeCh := make(chan struct{}) + once := &sync.Once{} + return &fifoQueue{ queueCh: make(chan Envelope, size), - closer: tmsync.NewCloser(), + closeFn: func() { once.Do(func() { close(closeCh) }) }, + closeCh: closeCh, } } -func (q *fifoQueue) enqueue() chan<- Envelope { - return q.queueCh -} - -func (q *fifoQueue) dequeue() <-chan Envelope { - return q.queueCh -} - -func (q *fifoQueue) close() { - q.closer.Close() -} - -func (q *fifoQueue) closed() <-chan struct{} { - return q.closer.Done() -} +func (q *fifoQueue) enqueue() chan<- Envelope { return q.queueCh } +func (q *fifoQueue) dequeue() <-chan Envelope { return q.queueCh } +func (q *fifoQueue) close() { q.closeFn() } +func (q *fifoQueue) closed() <-chan struct{} { return q.closeCh } diff --git a/internal/p2p/router.go b/internal/p2p/router.go index a9e68cb861..4a5461fc12 100644 --- a/internal/p2p/router.go +++ b/internal/p2p/router.go @@ -21,126 +21,6 @@ import ( const queueBufferDefault = 32 -var ( - // ErrPeerChannelClosed is for when the send/receive an envelope but a peer was already disconnected - ErrPeerChannelClosed = errors.New("a peer channel is closed") -) - -// ChannelID is an arbitrary channel ID. -type ChannelID uint16 - -// Envelope contains a message with sender/receiver routing info. -type Envelope struct { - From types.NodeID // sender (empty if outbound) - To types.NodeID // receiver (empty if inbound) - Broadcast bool // send to all connected peers (ignores To) - Message proto.Message // message payload - - // channelID is for internal Router use, set on outbound messages to inform - // the sendPeer() goroutine which transport channel to use. - // - // FIXME: If we migrate the Transport API to a byte-oriented multi-stream - // API, this will no longer be necessary since each channel will be mapped - // onto a stream during channel/peer setup. See: - // https://github.com/tendermint/spec/pull/227 - channelID ChannelID -} - -// PeerError is a peer error reported via Channel.Error. -// -// FIXME: This currently just disconnects the peer, which is too simplistic. -// For example, some errors should be logged, some should cause disconnects, -// and some should ban the peer. -// -// FIXME: This should probably be replaced by a more general PeerBehavior -// concept that can mark good and bad behavior and contributes to peer scoring. -// It should possibly also allow reactors to request explicit actions, e.g. -// disconnection or banning, in addition to doing this based on aggregates. -type PeerError struct { - NodeID types.NodeID - Err error -} - -// Channel is a bidirectional channel to exchange Protobuf messages with peers, -// wrapped in Envelope to specify routing info (i.e. sender/receiver). -type Channel struct { - ID ChannelID - In <-chan Envelope // inbound messages (peers to reactors) - Out chan<- Envelope // outbound messages (reactors to peers) - Error chan<- PeerError // peer error reporting - - mtx sync.RWMutex - messageType proto.Message // the channel's message type, used for unmarshaling - closeCh chan struct{} -} - -// NewChannel creates a new channel. It is primarily for internal and test -// use, reactors should use Router.OpenChannel(). 
-func NewChannel(
-	id ChannelID,
-	messageType proto.Message,
-	inCh <-chan Envelope,
-	outCh chan<- Envelope,
-	errCh chan<- PeerError,
-) *Channel {
-	return &Channel{
-		ID:          id,
-		messageType: messageType,
-		In:          inCh,
-		Out:         outCh,
-		Error:       errCh,
-		closeCh:     make(chan struct{}),
-	}
-}
-
-// Send sends an envelope to a peer through a channel
-func (c *Channel) Send(e Envelope) error {
-	c.mtx.RLock()
-	defer c.mtx.RUnlock()
-	select {
-	case <-c.closeCh:
-		return ErrPeerChannelClosed
-	default:
-		c.Out <- e
-	}
-	return nil
-}
-
-// Close closes the channel. Future sends on Out and Error will panic. The In
-// channel remains open to avoid having to synchronize Router senders, which
-// should use Done() to detect channel closure instead.
-func (c *Channel) Close() {
-	c.mtx.Lock()
-	defer c.mtx.Unlock()
-	select {
-	case <-c.closeCh:
-		return
-	default:
-		close(c.closeCh)
-		close(c.Out)
-		close(c.Error)
-	}
-}
-
-// Done returns a channel that's closed when Channel.Close() is called.
-func (c *Channel) Done() <-chan struct{} {
-	return c.closeCh
-}
-
-// Wrapper is a Protobuf message that can contain a variety of inner messages
-// (e.g. via oneof fields). If a Channel's message type implements Wrapper, the
-// Router will automatically wrap outbound messages and unwrap inbound messages,
-// such that reactors do not have to do this themselves.
-type Wrapper interface {
-	proto.Message
-
-	// Wrap will take a message and wrap it in this one if possible.
-	Wrap(proto.Message) error
-
-	// Unwrap will unwrap the inner message contained in this message.
-	Unwrap() (proto.Message, error)
-}
-
 // RouterOptions specifies options for a Router.
 type RouterOptions struct {
 	// ResolveTimeout is the timeout for resolving NodeAddress URLs.
@@ -154,8 +34,8 @@ type RouterOptions struct {
 	// no timeout.
 	HandshakeTimeout time.Duration

-	// QueueType must be "wdrr" (Weighed Deficit Round Robin), "priority", or
-	// "fifo". Defaults to "fifo".
+	// QueueType must be "priority" or "fifo". Defaults to
+	// "fifo".
 	QueueType string

 	// MaxIncomingConnectionAttempts rate limits the number of incoming connection
@@ -197,7 +77,6 @@ type RouterOptions struct {
 const (
 	queueTypeFifo     = "fifo"
 	queueTypePriority = "priority"
-	queueTypeWDRR     = "wdrr"
 )

 // Validate validates router options.
@@ -205,8 +84,8 @@ func (o *RouterOptions) Validate() error {
 	switch o.QueueType {
 	case "":
 		o.QueueType = queueTypeFifo
-	case queueTypeFifo, queueTypeWDRR, queueTypePriority:
-		// passI me
+	case queueTypeFifo, queueTypePriority:
+		// pass
 	default:
 		return fmt.Errorf("queue type %q is not supported", o.QueueType)
 	}
@@ -267,24 +146,23 @@
 // quality of service.
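// A minimal sketch (hypothetical values, error handling elided) of picking
// the priority scheduler through the options above; Validate falls back to
// "fifo" when QueueType is empty and rejects unsupported values:
//
//	opts := RouterOptions{QueueType: "priority"}
//	if err := opts.Validate(); err != nil {
//		// an unsupported QueueType (or other invalid option) lands here
//	}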
type Router struct { *service.BaseService + logger log.Logger - logger log.Logger - metrics *Metrics - options RouterOptions - nodeInfo types.NodeInfo - privKey crypto.PrivKey - peerManager *PeerManager - chDescs []ChannelDescriptor - transports []Transport - connTracker connectionTracker - protocolTransports map[Protocol]Transport - stopCh chan struct{} // signals Router shutdown + metrics *Metrics + options RouterOptions + privKey crypto.PrivKey + peerManager *PeerManager + chDescs []*ChannelDescriptor + transport Transport + endpoint *Endpoint + connTracker connectionTracker peerMtx sync.RWMutex peerQueues map[types.NodeID]queue // outbound messages per peer for all channels // the channels that the peer queue has open - peerChannels map[types.NodeID]ChannelIDSet - queueFactory func(int) queue + peerChannels map[types.NodeID]ChannelIDSet + queueFactory func(int) queue + nodeInfoProducer func() *types.NodeInfo // FIXME: We don't strictly need to use a mutex for this if we seal the // channels on router start. This depends on whether we want to allow @@ -300,10 +178,11 @@ type Router struct { func NewRouter( logger log.Logger, metrics *Metrics, - nodeInfo types.NodeInfo, privKey crypto.PrivKey, peerManager *PeerManager, - transports []Transport, + nodeInfoProducer func() *types.NodeInfo, + transport Transport, + endpoint *Endpoint, options RouterOptions, ) (*Router, error) { @@ -312,47 +191,31 @@ func NewRouter( } router := &Router{ - logger: logger, - metrics: metrics, - nodeInfo: nodeInfo, - privKey: privKey, + logger: logger, + metrics: metrics, + privKey: privKey, + nodeInfoProducer: nodeInfoProducer, connTracker: newConnTracker( options.MaxIncomingConnectionAttempts, options.IncomingConnectionWindow, ), - chDescs: make([]ChannelDescriptor, 0), - transports: transports, - protocolTransports: map[Protocol]Transport{}, - peerManager: peerManager, - options: options, - stopCh: make(chan struct{}), - channelQueues: map[ChannelID]queue{}, - channelMessages: map[ChannelID]proto.Message{}, - peerQueues: map[types.NodeID]queue{}, - peerChannels: make(map[types.NodeID]ChannelIDSet), + chDescs: make([]*ChannelDescriptor, 0), + transport: transport, + endpoint: endpoint, + peerManager: peerManager, + options: options, + channelQueues: map[ChannelID]queue{}, + channelMessages: map[ChannelID]proto.Message{}, + peerQueues: map[types.NodeID]queue{}, + peerChannels: make(map[types.NodeID]ChannelIDSet), } router.BaseService = service.NewBaseService(logger, "router", router) - qf, err := router.createQueueFactory() - if err != nil { - return nil, err - } - - router.queueFactory = qf - - for _, transport := range transports { - for _, protocol := range transport.Protocols() { - if _, ok := router.protocolTransports[protocol]; !ok { - router.protocolTransports[protocol] = transport - } - } - } - return router, nil } -func (r *Router) createQueueFactory() (func(int) queue, error) { +func (r *Router) createQueueFactory(ctx context.Context) (func(int) queue, error) { switch r.options.QueueType { case queueTypeFifo: return newFIFOQueue, nil @@ -364,18 +227,7 @@ func (r *Router) createQueueFactory() (func(int) queue, error) { } q := newPQScheduler(r.logger, r.metrics, r.chDescs, uint(size)/2, uint(size)/2, defaultCapacity) - q.start() - return q - }, nil - - case queueTypeWDRR: - return func(size int) queue { - if size%2 != 0 { - size++ - } - - q := newWDRRScheduler(r.logger, r.metrics, r.chDescs, uint(size)/2, uint(size)/2, defaultCapacity) - q.start() + q.start(ctx) return q }, nil @@ -384,26 +236,34 @@ 
func (r *Router) createQueueFactory() (func(int) queue, error) {
 	}
 }

+// ChannelCreator allows routers to construct their own channels,
+// either by receiving a reference to Router.OpenChannel or using some
+// kind of shim for testing purposes.
+type ChannelCreator func(context.Context, *ChannelDescriptor) (*Channel, error)
+
 // OpenChannel opens a new channel for the given message type. The caller must
 // close the channel when done, before stopping the Router. messageType is the
 // type of message passed through the channel (used for unmarshaling), which can
 // implement Wrapper to automatically (un)wrap multiple message types in a
 // wrapper message. The caller may provide a size to make the channel buffered,
 // which internally makes the inbound, outbound, and error channel buffered.
-func (r *Router) OpenChannel(chDesc ChannelDescriptor, messageType proto.Message, size int) (*Channel, error) {
+func (r *Router) OpenChannel(ctx context.Context, chDesc *ChannelDescriptor) (*Channel, error) {
 	r.channelMtx.Lock()
 	defer r.channelMtx.Unlock()

-	id := ChannelID(chDesc.ID)
+	id := chDesc.ID
 	if _, ok := r.channelQueues[id]; ok {
 		return nil, fmt.Errorf("channel %v already exists", id)
 	}
 	r.chDescs = append(r.chDescs, chDesc)

-	queue := r.queueFactory(size)
-	outCh := make(chan Envelope, size)
-	errCh := make(chan PeerError, size)
+	messageType := chDesc.MessageType
+
+	queue := r.queueFactory(chDesc.RecvBufferCapacity)
+	outCh := make(chan Envelope, chDesc.RecvBufferCapacity)
+	errCh := make(chan PeerError, chDesc.RecvBufferCapacity)
 	channel := NewChannel(id, messageType, queue.dequeue(), outCh, errCh)
+	channel.name = chDesc.Name

 	var wrapper Wrapper
 	if w, ok := messageType.(Wrapper); ok {
@@ -414,7 +274,9 @@ func (r *Router) OpenChannel(chDesc ChannelDescriptor, messageType proto.Message
 	r.channelMessages[id] = messageType

 	// add the channel to the nodeInfo if it's not already there.
-	r.nodeInfo.AddChannel(uint16(chDesc.ID))
+	r.nodeInfoProducer().AddChannel(uint16(chDesc.ID))
+
+	r.transport.AddChannelDescriptors([]*ChannelDescriptor{chDesc})

 	go func() {
 		defer func() {
@@ -425,7 +287,7 @@ func (r *Router) OpenChannel(chDesc ChannelDescriptor, messageType proto.Message
 			queue.close()
 		}()

-		r.routeChannel(id, outCh, errCh, wrapper)
+		r.routeChannel(ctx, id, outCh, errCh, wrapper)
 	}()

 	return channel, nil
@@ -437,6 +299,7 @@ func (r *Router) OpenChannel(chDesc ChannelDescriptor, messageType proto.Message
 // closed, or the Router is stopped. wrapper is an optional message wrapper
 // for messages, see Wrapper for details.
 func (r *Router) routeChannel(
+	ctx context.Context,
 	chID ChannelID,
 	outCh <-chan Envelope,
 	errCh <-chan PeerError,
@@ -451,13 +314,13 @@ func (r *Router) routeChannel(
 			// Mark the envelope with the channel ID to allow sendPeer() to pass
 			// it on to Transport.SendMessage().
- envelope.channelID = chID + envelope.ChannelID = chID // wrap the message in a wrapper message, if requested if wrapper != nil { msg := proto.Clone(wrapper) if err := msg.(Wrapper).Wrap(envelope.Message); err != nil { - r.Logger.Error("failed to wrap message", "channel", chID, "err", err) + r.logger.Error("failed to wrap message", "channel", chID, "err", err) continue } @@ -520,7 +383,7 @@ func (r *Router) routeChannel( case <-q.closed(): r.logger.Debug("dropping message for unconnected peer", "peer", envelope.To, "channel", chID) - case <-r.stopCh: + case <-ctx.Done(): return } } @@ -533,8 +396,7 @@ func (r *Router) routeChannel( r.logger.Error("peer error, evicting", "peer", peerError.NodeID, "err", peerError.Err) r.peerManager.Errored(peerError.NodeID, peerError.Err) - - case <-r.stopCh: + case <-ctx.Done(): return } } @@ -566,8 +428,15 @@ func (r *Router) filterPeersID(ctx context.Context, id types.NodeID) error { func (r *Router) dialSleep(ctx context.Context) { if r.options.DialSleep == nil { + const ( + maxDialerInterval = 3000 + minDialerInterval = 250 + ) + // nolint:gosec // G404: Use of weak random number generator - timer := time.NewTimer(time.Duration(rand.Int63n(dialRandomizerIntervalMilliseconds)) * time.Millisecond) + dur := time.Duration(rand.Int63n(maxDialerInterval-minDialerInterval+1) + minDialerInterval) + + timer := time.NewTimer(dur * time.Millisecond) defer timer.Stop() select { @@ -583,11 +452,9 @@ func (r *Router) dialSleep(ctx context.Context) { // acceptPeers accepts inbound connections from peers on the given transport, // and spawns goroutines that route messages to/from them. -func (r *Router) acceptPeers(transport Transport) { - r.logger.Debug("starting accept routine", "transport", transport) - ctx := r.stopCtx() +func (r *Router) acceptPeers(ctx context.Context, transport Transport) { for { - conn, err := transport.Accept() + conn, err := transport.Accept(ctx) switch err { case nil: case io.EOF: @@ -643,7 +510,7 @@ func (r *Router) openConnection(ctx context.Context, conn Connection) { // The Router should do the handshake and have a final ack/fail // message to make sure both ends have accepted the connection, such // that it can be coordinated with the peer manager. - peerInfo, _, err := r.handshakePeer(ctx, conn, "") + peerInfo, err := r.handshakePeer(ctx, conn, "") switch { case errors.Is(err, context.Canceled): return @@ -663,14 +530,11 @@ func (r *Router) openConnection(ctx context.Context, conn Connection) { return } - r.routePeer(peerInfo.NodeID, conn, toChannelIDs(peerInfo.Channels)) + r.routePeer(ctx, peerInfo.NodeID, conn, toChannelIDs(peerInfo.Channels)) } // dialPeers maintains outbound connections to peers by dialing them. 
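// Illustrative flow, simplified from the code below: DialNext blocks until
// the peer manager proposes an address, the address is handed to a dial
// worker over the addresses channel, and connectPeer dials, handshakes, and
// finally hands the connection off to routePeer.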
-func (r *Router) dialPeers() { - r.logger.Debug("starting dial routine") - ctx := r.stopCtx() - +func (r *Router) dialPeers(ctx context.Context) { addresses := make(chan NodeAddress) wg := &sync.WaitGroup{} @@ -701,7 +565,6 @@ LOOP: address, err := r.peerManager.DialNext(ctx) switch { case errors.Is(err, context.Canceled): - r.logger.Debug("stopping dial routine") break LOOP case err != nil: r.logger.Error("failed to find next peer to dial", "err", err) @@ -732,20 +595,20 @@ func (r *Router) connectPeer(ctx context.Context, address NodeAddress) { return case err != nil: r.logger.Error("failed to dial peer", "peer", address, "err", err) - if err = r.peerManager.DialFailed(address); err != nil { + if err = r.peerManager.DialFailed(ctx, address); err != nil { r.logger.Error("failed to report dial failure", "peer", address, "err", err) } return } - peerInfo, _, err := r.handshakePeer(ctx, conn, address.NodeID) + peerInfo, err := r.handshakePeer(ctx, conn, address.NodeID) switch { case errors.Is(err, context.Canceled): conn.Close() return case err != nil: r.logger.Error("failed to handshake with peer", "peer", address, "err", err) - if err = r.peerManager.DialFailed(address); err != nil { + if err = r.peerManager.DialFailed(ctx, address); err != nil { r.logger.Error("failed to report dial failure", "peer", address, "err", err) } conn.Close() @@ -761,7 +624,7 @@ func (r *Router) connectPeer(ctx context.Context, address NodeAddress) { } // routePeer (also) calls connection close - go r.routePeer(address.NodeID, conn, toChannelIDs(peerInfo.Channels)) + go r.routePeer(ctx, address.NodeID, conn, toChannelIDs(peerInfo.Channels)) } func (r *Router) getOrMakeQueue(peerID types.NodeID, channels ChannelIDSet) queue { @@ -797,12 +660,6 @@ func (r *Router) dialPeer(ctx context.Context, address NodeAddress) (Connection, } for _, endpoint := range endpoints { - transport, ok := r.protocolTransports[endpoint.Protocol] - if !ok { - r.logger.Error("no transport found for protocol", "endpoint", endpoint) - continue - } - dialCtx := ctx if r.options.DialTimeout > 0 { var cancel context.CancelFunc @@ -817,7 +674,7 @@ func (r *Router) dialPeer(ctx context.Context, address NodeAddress) (Connection, // by the peer's endpoint, since e.g. a peer on 192.168.0.0 can reach us // on a private address on this endpoint, but a peer on the public // Internet can't and needs a different public address. 
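dialPeers now receives its context from the caller and fans addresses out to a fixed set of dial workers. Reduced to its shape as a sketch, with all names ours: next stands in for peerManager.DialNext and connect for the router's connectPeer.

```go
package example

import (
	"context"
	"sync"
)

// dialLoop fans addresses out to n workers and waits for them to drain
// when the producer loop exits, as dialPeers does above.
func dialLoop(
	ctx context.Context,
	n int,
	next func(context.Context) (string, error),
	connect func(context.Context, string),
) {
	addresses := make(chan string)
	var wg sync.WaitGroup

	for i := 0; i < n; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for addr := range addresses {
				connect(ctx, addr)
			}
		}()
	}

	for {
		addr, err := next(ctx)
		if err != nil {
			break // context canceled or no more peers to dial
		}
		select {
		case addresses <- addr:
		case <-ctx.Done():
		}
	}

	close(addresses)
	wg.Wait()
}
```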
- conn, err := transport.Dial(dialCtx, endpoint) + conn, err := r.transport.Dial(dialCtx, endpoint) if err != nil { r.logger.Error("failed to dial endpoint", "peer", address.NodeID, "endpoint", endpoint, "err", err) } else { @@ -834,7 +691,7 @@ func (r *Router) handshakePeer( ctx context.Context, conn Connection, expectID types.NodeID, -) (types.NodeInfo, crypto.PubKey, error) { +) (types.NodeInfo, error) { if r.options.HandshakeTimeout > 0 { var cancel context.CancelFunc @@ -842,29 +699,30 @@ func (r *Router) handshakePeer( defer cancel() } - peerInfo, peerKey, err := conn.Handshake(ctx, r.nodeInfo, r.privKey) + nodeInfo := r.nodeInfoProducer() + peerInfo, peerKey, err := conn.Handshake(ctx, *nodeInfo, r.privKey) if err != nil { - return peerInfo, peerKey, err + return peerInfo, err } if err = peerInfo.Validate(); err != nil { - return peerInfo, peerKey, fmt.Errorf("invalid handshake NodeInfo: %w", err) + return peerInfo, fmt.Errorf("invalid handshake NodeInfo: %w", err) } if types.NodeIDFromPubKey(peerKey) != peerInfo.NodeID { - return peerInfo, peerKey, fmt.Errorf("peer's public key did not match its node ID %q (expected %q)", + return peerInfo, fmt.Errorf("peer's public key did not match its node ID %q (expected %q)", peerInfo.NodeID, types.NodeIDFromPubKey(peerKey)) } if expectID != "" && expectID != peerInfo.NodeID { - return peerInfo, peerKey, fmt.Errorf("expected to connect with peer %q, got %q", + return peerInfo, fmt.Errorf("expected to connect with peer %q, got %q", expectID, peerInfo.NodeID) } - if err := r.nodeInfo.CompatibleWith(peerInfo); err != nil { - return peerInfo, peerKey, ErrRejected{ + if err := nodeInfo.CompatibleWith(peerInfo); err != nil { + return peerInfo, ErrRejected{ err: err, id: peerInfo.ID(), isIncompatible: true, } } - return peerInfo, peerKey, nil + return peerInfo, nil } func (r *Router) runWithPeerMutex(fn func() error) error { @@ -876,9 +734,9 @@ func (r *Router) runWithPeerMutex(fn func() error) error { // routePeer routes inbound and outbound messages between a peer and the reactor // channels. It will close the given connection and send queue when done, or if // they are closed elsewhere it will cause this method to shut down and return. -func (r *Router) routePeer(peerID types.NodeID, conn Connection, channels ChannelIDSet) { +func (r *Router) routePeer(ctx context.Context, peerID types.NodeID, conn Connection, channels ChannelIDSet) { r.metrics.Peers.Add(1) - r.peerManager.Ready(peerID, channels) + r.peerManager.Ready(ctx, peerID, channels) sendQueue := r.getOrMakeQueue(peerID, channels) defer func() { @@ -889,7 +747,7 @@ func (r *Router) routePeer(peerID types.NodeID, conn Connection, channels Channe sendQueue.close() - r.peerManager.Disconnected(peerID) + r.peerManager.Disconnected(ctx, peerID) r.metrics.Peers.Add(-1) }() @@ -898,27 +756,46 @@ func (r *Router) routePeer(peerID types.NodeID, conn Connection, channels Channe errCh := make(chan error, 2) go func() { - errCh <- r.receivePeer(peerID, conn) + select { + case errCh <- r.receivePeer(ctx, peerID, conn): + case <-ctx.Done(): + } }() go func() { - errCh <- r.sendPeer(peerID, conn, sendQueue) + select { + case errCh <- r.sendPeer(ctx, peerID, conn, sendQueue): + case <-ctx.Done(): + } }() - err := <-errCh + var err error + select { + case err = <-errCh: + case <-ctx.Done(): + } + _ = conn.Close() sendQueue.close() - if e := <-errCh; err == nil { + select { + case <-ctx.Done(): + case e := <-errCh: // The first err was nil, so we update it with the second err, which may // or may not be nil. 
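The identity check that handshakePeer keeps (now without returning the public key to its caller) amounts to deriving a node ID from the peer's key and comparing it against the claimed one. Isolated here as a sketch, assuming types.NodeIDFromPubKey is the only derivation involved, as the code above suggests:

```go
package example

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto"
	"github.com/tendermint/tendermint/types"
)

// verifyPeerIdentity rejects a handshake whose public key does not
// derive to the node ID the peer claims, mirroring handshakePeer.
func verifyPeerIdentity(claimed types.NodeID, peerKey crypto.PubKey) error {
	if derived := types.NodeIDFromPubKey(peerKey); derived != claimed {
		return fmt.Errorf("peer's public key did not match its node ID %q (expected %q)",
			claimed, derived)
	}
	return nil
}
```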
+ if err == nil { + err = e + } + } + + // if the context was canceled + if e := ctx.Err(); err == nil && e != nil { err = e } switch err { case nil, io.EOF: r.logger.Info("peer disconnected", "peer", peerID, "endpoint", conn) - default: r.logger.Error("peer failure", "peer", peerID, "endpoint", conn, "err", err) } @@ -926,9 +803,9 @@ func (r *Router) routePeer(peerID types.NodeID, conn Connection, channels Channe // receivePeer receives inbound messages from a peer, deserializes them and // passes them on to the appropriate channel. -func (r *Router) receivePeer(peerID types.NodeID, conn Connection) error { +func (r *Router) receivePeer(ctx context.Context, peerID types.NodeID, conn Connection) error { for { - chID, bz, err := conn.ReceiveMessage() + chID, bz, err := conn.ReceiveMessage(ctx) if err != nil { return err } @@ -960,7 +837,7 @@ func (r *Router) receivePeer(peerID types.NodeID, conn Connection) error { start := time.Now().UTC() select { - case queue.enqueue() <- Envelope{From: peerID, Message: msg}: + case queue.enqueue() <- Envelope{From: peerID, Message: msg, ChannelID: chID}: r.metrics.PeerReceiveBytesTotal.With( "chID", fmt.Sprint(chID), "peer_id", string(peerID), @@ -971,14 +848,14 @@ func (r *Router) receivePeer(peerID types.NodeID, conn Connection) error { case <-queue.closed(): r.logger.Debug("channel closed, dropping message", "peer", peerID, "channel", chID) - case <-r.stopCh: + case <-ctx.Done(): return nil } } } // sendPeer sends queued messages to a peer. -func (r *Router) sendPeer(peerID types.NodeID, conn Connection, peerQueue queue) error { +func (r *Router) sendPeer(ctx context.Context, peerID types.NodeID, conn Connection, peerQueue queue) error { for { start := time.Now().UTC() @@ -996,8 +873,7 @@ func (r *Router) sendPeer(peerID types.NodeID, conn Connection, peerQueue queue) continue } - _, err = conn.SendMessage(envelope.channelID, bz) - if err != nil { + if err = conn.SendMessage(ctx, envelope.ChannelID, bz); err != nil { return err } @@ -1006,25 +882,20 @@ func (r *Router) sendPeer(peerID types.NodeID, conn Connection, peerQueue queue) case <-peerQueue.closed(): return nil - case <-r.stopCh: + case <-ctx.Done(): return nil } } } // evictPeers evicts connected peers as requested by the peer manager. -func (r *Router) evictPeers() { - r.logger.Debug("starting evict routine") - ctx := r.stopCtx() - +func (r *Router) evictPeers(ctx context.Context) { for { peerID, err := r.peerManager.EvictNext(ctx) switch { case errors.Is(err, context.Canceled): - r.logger.Debug("stopping evict routine") return - case err != nil: r.logger.Error("failed to find next peer to evict", "err", err) return @@ -1042,29 +913,30 @@ func (r *Router) evictPeers() { } } -// NodeInfo returns a copy of the current NodeInfo. Used for testing. -func (r *Router) NodeInfo() types.NodeInfo { - return r.nodeInfo.Copy() +func (r *Router) setupQueueFactory(ctx context.Context) error { + qf, err := r.createQueueFactory(ctx) + if err != nil { + return err + } + + r.queueFactory = qf + return nil } // OnStart implements service.Service. 
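routePeer's shutdown logic above is dense when read inline; the underlying pattern is two goroutines racing to deliver their error while every blocking point also honors ctx. A reduced sketch of that pattern (routePeer additionally drains the second error and closes the connection and send queue, which this sketch omits):

```go
package example

import "context"

// firstError runs recv and send concurrently and returns the first error,
// falling back to the context error when cancellation wins the race, as
// routePeer does above.
func firstError(ctx context.Context, recv, send func(context.Context) error) error {
	errCh := make(chan error, 2)

	go func() {
		select {
		case errCh <- recv(ctx):
		case <-ctx.Done():
		}
	}()
	go func() {
		select {
		case errCh <- send(ctx):
		case <-ctx.Done():
		}
	}()

	var err error
	select {
	case err = <-errCh:
	case <-ctx.Done():
	}

	if err == nil {
		err = ctx.Err()
	}
	return err
}
```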
-func (r *Router) OnStart() error { - netAddr, _ := r.nodeInfo.NetAddress() - r.Logger.Info( - "starting router", - "node_id", r.nodeInfo.NodeID, - "channels", r.nodeInfo.Channels, - "listen_addr", r.nodeInfo.ListenAddr, - "net_addr", netAddr, - ) - - go r.dialPeers() - go r.evictPeers() - - for _, transport := range r.transports { - go r.acceptPeers(transport) +func (r *Router) OnStart(ctx context.Context) error { + if err := r.setupQueueFactory(ctx); err != nil { + return err + } + + if err := r.transport.Listen(r.endpoint); err != nil { + return err } + go r.dialPeers(ctx) + go r.evictPeers(ctx) + go r.acceptPeers(ctx, r.transport) + return nil } @@ -1075,14 +947,9 @@ func (r *Router) OnStart() error { // here, since that would cause any reactor senders to panic, so it is the // sender's responsibility. func (r *Router) OnStop() { - // Signal router shutdown. - close(r.stopCh) - // Close transport listeners (unblocks Accept calls). - for _, transport := range r.transports { - if err := transport.Close(); err != nil { - r.logger.Error("failed to close transport", "transport", transport, "err", err) - } + if err := r.transport.Close(); err != nil { + r.logger.Error("failed to close transport", "err", err) } // Collect all remaining queues, and wait for them to close. @@ -1101,22 +968,11 @@ func (r *Router) OnStop() { r.peerMtx.RUnlock() for _, q := range queues { + q.close() <-q.closed() } } -// stopCtx returns a new context that is canceled when the router stops. -func (r *Router) stopCtx() context.Context { - ctx, cancel := context.WithCancel(context.Background()) - - go func() { - <-r.stopCh - cancel() - }() - - return ctx -} - type ChannelIDSet map[ChannelID]struct{} func (cs ChannelIDSet) Contains(id ChannelID) bool { diff --git a/internal/p2p/router_filter_test.go b/internal/p2p/router_filter_test.go index 4082dc9281..217be8d322 100644 --- a/internal/p2p/router_filter_test.go +++ b/internal/p2p/router_filter_test.go @@ -8,14 +8,14 @@ import ( "time" "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/libs/log" ) func TestConnectionFiltering(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - logger := log.TestingLogger() + logger := log.NewNopLogger() filterByIPCount := 0 router := &Router{ @@ -29,6 +29,6 @@ func TestConnectionFiltering(t *testing.T) { }, } require.Equal(t, 0, filterByIPCount) - router.openConnection(ctx, &MemoryConnection{logger: logger, closer: sync.NewCloser()}) + router.openConnection(ctx, &MemoryConnection{logger: logger, closeFn: func() {}}) require.Equal(t, 1, filterByIPCount) } diff --git a/internal/p2p/router_init_test.go b/internal/p2p/router_init_test.go index 3622c0cc12..20c3cb6dc9 100644 --- a/internal/p2p/router_init_test.go +++ b/internal/p2p/router_init_test.go @@ -1,15 +1,20 @@ package p2p import ( + "context" "os" "testing" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/types" ) func TestRouter_ConstructQueueFactory(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + t.Run("ValidateOptionsPopulatesDefaultQueue", func(t *testing.T) { opts := RouterOptions{} require.NoError(t, opts.Validate()) @@ -18,37 +23,35 @@ func TestRouter_ConstructQueueFactory(t *testing.T) { t.Run("Default", func(t *testing.T) { require.Zero(t, os.Getenv("TM_P2P_QUEUE")) opts := RouterOptions{} - r, err := NewRouter(log.NewNopLogger(), nil, 
types.NodeInfo{}, nil, nil, nil, opts) + r, err := NewRouter(log.NewNopLogger(), nil, nil, nil, func() *types.NodeInfo { return &types.NodeInfo{} }, nil, nil, opts) require.NoError(t, err) + require.NoError(t, r.setupQueueFactory(ctx)) + _, ok := r.queueFactory(1).(*fifoQueue) require.True(t, ok) }) t.Run("Fifo", func(t *testing.T) { opts := RouterOptions{QueueType: queueTypeFifo} - r, err := NewRouter(log.NewNopLogger(), nil, types.NodeInfo{}, nil, nil, nil, opts) + r, err := NewRouter(log.NewNopLogger(), nil, nil, nil, func() *types.NodeInfo { return &types.NodeInfo{} }, nil, nil, opts) require.NoError(t, err) + require.NoError(t, r.setupQueueFactory(ctx)) + _, ok := r.queueFactory(1).(*fifoQueue) require.True(t, ok) }) t.Run("Priority", func(t *testing.T) { opts := RouterOptions{QueueType: queueTypePriority} - r, err := NewRouter(log.NewNopLogger(), nil, types.NodeInfo{}, nil, nil, nil, opts) + r, err := NewRouter(log.NewNopLogger(), nil, nil, nil, func() *types.NodeInfo { return &types.NodeInfo{} }, nil, nil, opts) require.NoError(t, err) + require.NoError(t, r.setupQueueFactory(ctx)) + q, ok := r.queueFactory(1).(*pqScheduler) require.True(t, ok) defer q.close() }) - t.Run("WDRR", func(t *testing.T) { - opts := RouterOptions{QueueType: queueTypeWDRR} - r, err := NewRouter(log.NewNopLogger(), nil, types.NodeInfo{}, nil, nil, nil, opts) - require.NoError(t, err) - q, ok := r.queueFactory(1).(*wdrrScheduler) - require.True(t, ok) - defer q.close() - }) t.Run("NonExistant", func(t *testing.T) { opts := RouterOptions{QueueType: "fast"} - _, err := NewRouter(log.NewNopLogger(), nil, types.NodeInfo{}, nil, nil, nil, opts) + _, err := NewRouter(log.NewNopLogger(), nil, nil, nil, func() *types.NodeInfo { return &types.NodeInfo{} }, nil, nil, opts) require.Error(t, err) require.Contains(t, err.Error(), "fast") }) @@ -56,7 +59,7 @@ func TestRouter_ConstructQueueFactory(t *testing.T) { r := &Router{} require.Zero(t, r.options.QueueType) - fn, err := r.createQueueFactory() + fn, err := r.createQueueFactory(ctx) require.Error(t, err) require.Nil(t, fn) }) diff --git a/internal/p2p/router_test.go b/internal/p2p/router_test.go index 436e3f0045..663e6b81c9 100644 --- a/internal/p2p/router_test.go +++ b/internal/p2p/router_test.go @@ -19,7 +19,6 @@ import ( dbm "github.com/tendermint/tm-db" "github.com/tendermint/tendermint/crypto" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/mocks" "github.com/tendermint/tendermint/internal/p2p/p2ptest" @@ -27,67 +26,69 @@ import ( "github.com/tendermint/tendermint/types" ) -func echoReactor(channel *p2p.Channel) { - for { - select { - case envelope := <-channel.In: - value := envelope.Message.(*p2ptest.Message).Value - channel.Out <- p2p.Envelope{ - To: envelope.From, - Message: &p2ptest.Message{Value: value}, - } - - case <-channel.Done(): +func echoReactor(ctx context.Context, channel *p2p.Channel) { + iter := channel.Receive(ctx) + for iter.Next(ctx) { + envelope := iter.Envelope() + value := envelope.Message.(*p2ptest.Message).Value + if err := channel.Send(ctx, p2p.Envelope{ + To: envelope.From, + Message: &p2ptest.Message{Value: value}, + }); err != nil { return } } } func TestRouter_Network(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + t.Cleanup(leaktest.Check(t)) // Create a test network and open a channel where all peers run echoReactor. 
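echoReactor above is the canonical consumer of the new iterator-based receive API: Receive(ctx) yields an iterator, Next(ctx) blocks until a message or cancellation, and Envelope() returns the current message. Generalized into a sketch, with handle as a hypothetical caller-supplied callback:

```go
package example

import (
	"context"

	"github.com/tendermint/tendermint/internal/p2p"
)

// consume drains a channel with the iterator API until ctx is canceled
// or the channel closes, passing each envelope to handle.
func consume(ctx context.Context, ch *p2p.Channel, handle func(*p2p.Envelope) error) error {
	iter := ch.Receive(ctx)
	for iter.Next(ctx) {
		if err := handle(iter.Envelope()); err != nil {
			return err
		}
	}
	return nil
}
```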
- network := p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: 8}) + network := p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: 8}) local := network.RandomNode() peers := network.Peers(local.NodeID) - channels := network.MakeChannels(t, chDesc, &p2ptest.Message{}, 0) + channels := network.MakeChannels(ctx, t, chDesc) - network.Start(t) + network.Start(ctx, t) channel := channels[local.NodeID] for _, peer := range peers { - go echoReactor(channels[peer.NodeID]) + go echoReactor(ctx, channels[peer.NodeID]) } // Sending a message to each peer should work. for _, peer := range peers { - p2ptest.RequireSendReceive(t, channel, peer.NodeID, + p2ptest.RequireSendReceive(ctx, t, channel, peer.NodeID, &p2ptest.Message{Value: "foo"}, &p2ptest.Message{Value: "foo"}, ) } // Sending a broadcast should return back a message from all peers. - p2ptest.RequireSend(t, channel, p2p.Envelope{ + p2ptest.RequireSend(ctx, t, channel, p2p.Envelope{ Broadcast: true, Message: &p2ptest.Message{Value: "bar"}, }) - expect := []p2p.Envelope{} + expect := []*p2p.Envelope{} for _, peer := range peers { - expect = append(expect, p2p.Envelope{ - From: peer.NodeID, - Message: &p2ptest.Message{Value: "bar"}, + expect = append(expect, &p2p.Envelope{ + From: peer.NodeID, + ChannelID: 1, + Message: &p2ptest.Message{Value: "bar"}, }) } - p2ptest.RequireReceiveUnordered(t, channel, expect) + p2ptest.RequireReceiveUnordered(ctx, t, channel, expect) // We then submit an error for a peer, and watch it get disconnected and // then reconnected as the router retries it. - peerUpdates := local.MakePeerUpdatesNoRequireEmpty(t) - channel.Error <- p2p.PeerError{ + peerUpdates := local.MakePeerUpdatesNoRequireEmpty(ctx, t) + require.NoError(t, channel.SendError(ctx, p2p.PeerError{ NodeID: peers[0].NodeID, Err: errors.New("boom"), - } + })) p2ptest.RequireUpdates(t, peerUpdates, []p2p.PeerUpdate{ {NodeID: peers[0].NodeID, Status: p2p.PeerStatusDown}, {NodeID: peers[0].NodeID, Status: p2p.PeerStatusUp}, @@ -97,181 +98,203 @@ func TestRouter_Network(t *testing.T) { func TestRouter_Channel_Basic(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Set up a router with no transports (so no peers). peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - defer peerManager.Close() + + testnet := p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: 1}) router, err := p2p.NewRouter( - log.TestingLogger(), + log.NewNopLogger(), p2p.NopMetrics(), - selfInfo, selfKey, peerManager, - nil, + func() *types.NodeInfo { return &selfInfo }, + testnet.RandomNode().Transport, + &p2p.Endpoint{}, p2p.RouterOptions{}, ) require.NoError(t, err) - require.NoError(t, router.Start()) - t.Cleanup(func() { - require.NoError(t, router.Stop()) - }) + require.NoError(t, router.Start(ctx)) + t.Cleanup(router.Wait) // Opening a channel should work. - channel, err := router.OpenChannel(chDesc, &p2ptest.Message{}, 0) + chctx, chcancel := context.WithCancel(ctx) + defer chcancel() + + channel, err := router.OpenChannel(chctx, chDesc) require.NoError(t, err) - require.Contains(t, router.NodeInfo().Channels, chDesc.ID) + require.NotNil(t, channel) // Opening the same channel again should fail. - _, err = router.OpenChannel(chDesc, &p2ptest.Message{}, 0) + _, err = router.OpenChannel(ctx, chDesc) require.Error(t, err) // Opening a different channel should work. 
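The tests above exercise the new lifecycle: Start takes a context, Stop is fire-and-forget, and Wait blocks until shutdown completes. A hedged usage sketch outside of tests, assuming only the Start/Stop/Wait semantics shown here:

```go
package example

import (
	"context"

	"github.com/tendermint/tendermint/internal/p2p"
)

// runRouter starts the router under ctx and blocks until the caller
// cancels, then shuts it down. Canceling ctx unwinds the dial, accept,
// and evict goroutines; Stop closes the transport and drains queues.
func runRouter(ctx context.Context, router *p2p.Router) error {
	if err := router.Start(ctx); err != nil {
		return err
	}

	<-ctx.Done() // run until the caller cancels

	router.Stop()
	router.Wait()
	return nil
}
```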
- chDesc2 := p2p.ChannelDescriptor{ID: byte(2)} - _, err = router.OpenChannel(chDesc2, &p2ptest.Message{}, 0) + chDesc2 := &p2p.ChannelDescriptor{ID: 2, MessageType: &p2ptest.Message{}} + _, err = router.OpenChannel(ctx, chDesc2) require.NoError(t, err) - require.Contains(t, router.NodeInfo().Channels, chDesc2.ID) // Closing the channel, then opening it again should be fine. - channel.Close() - time.Sleep(100 * time.Millisecond) // yes yes, but Close() is async... + chcancel() + time.Sleep(200 * time.Millisecond) // yes yes, but Close() is async... - channel, err = router.OpenChannel(chDesc, &p2ptest.Message{}, 0) + channel, err = router.OpenChannel(ctx, chDesc) require.NoError(t, err) // We should be able to send on the channel, even though there are no peers. - p2ptest.RequireSend(t, channel, p2p.Envelope{ + p2ptest.RequireSend(ctx, t, channel, p2p.Envelope{ To: types.NodeID(strings.Repeat("a", 40)), Message: &p2ptest.Message{Value: "foo"}, }) // A message to ourselves should be dropped. - p2ptest.RequireSend(t, channel, p2p.Envelope{ + p2ptest.RequireSend(ctx, t, channel, p2p.Envelope{ To: selfID, Message: &p2ptest.Message{Value: "self"}, }) - p2ptest.RequireEmpty(t, channel) + p2ptest.RequireEmpty(ctx, t, channel) } // Channel tests are hairy to mock, so we use an in-memory network instead. func TestRouter_Channel_SendReceive(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + t.Cleanup(leaktest.Check(t)) // Create a test network and open a channel on all nodes. - network := p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: 3}) + network := p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: 3}) ids := network.NodeIDs() aID, bID, cID := ids[0], ids[1], ids[2] - channels := network.MakeChannels(t, chDesc, &p2ptest.Message{}, 0) + channels := network.MakeChannels(ctx, t, chDesc) a, b, c := channels[aID], channels[bID], channels[cID] - otherChannels := network.MakeChannels(t, p2ptest.MakeChannelDesc(9), &p2ptest.Message{}, 0) + otherChannels := network.MakeChannels(ctx, t, p2ptest.MakeChannelDesc(9)) - network.Start(t) + network.Start(ctx, t) // Sending a message a->b should work, and not send anything // further to a, b, or c. - p2ptest.RequireSend(t, a, p2p.Envelope{To: bID, Message: &p2ptest.Message{Value: "foo"}}) - p2ptest.RequireReceive(t, b, p2p.Envelope{From: aID, Message: &p2ptest.Message{Value: "foo"}}) - p2ptest.RequireEmpty(t, a, b, c) + p2ptest.RequireSend(ctx, t, a, p2p.Envelope{To: bID, Message: &p2ptest.Message{Value: "foo"}}) + p2ptest.RequireReceive(ctx, t, b, p2p.Envelope{From: aID, Message: &p2ptest.Message{Value: "foo"}}) + p2ptest.RequireEmpty(ctx, t, a, b, c) // Sending a nil message a->b should be dropped. - p2ptest.RequireSend(t, a, p2p.Envelope{To: bID, Message: nil}) - p2ptest.RequireEmpty(t, a, b, c) + p2ptest.RequireSend(ctx, t, a, p2p.Envelope{To: bID, Message: nil}) + p2ptest.RequireEmpty(ctx, t, a, b, c) // Sending a different message type should be dropped. - p2ptest.RequireSend(t, a, p2p.Envelope{To: bID, Message: &gogotypes.BoolValue{Value: true}}) - p2ptest.RequireEmpty(t, a, b, c) + p2ptest.RequireSend(ctx, t, a, p2p.Envelope{To: bID, Message: &gogotypes.BoolValue{Value: true}}) + p2ptest.RequireEmpty(ctx, t, a, b, c) // Sending to an unknown peer should be dropped. 
- p2ptest.RequireSend(t, a, p2p.Envelope{ + p2ptest.RequireSend(ctx, t, a, p2p.Envelope{ To: types.NodeID(strings.Repeat("a", 40)), Message: &p2ptest.Message{Value: "a"}, }) - p2ptest.RequireEmpty(t, a, b, c) + p2ptest.RequireEmpty(ctx, t, a, b, c) // Sending without a recipient should be dropped. - p2ptest.RequireSend(t, a, p2p.Envelope{Message: &p2ptest.Message{Value: "noto"}}) - p2ptest.RequireEmpty(t, a, b, c) + p2ptest.RequireSend(ctx, t, a, p2p.Envelope{Message: &p2ptest.Message{Value: "noto"}}) + p2ptest.RequireEmpty(ctx, t, a, b, c) // Sending to self should be dropped. - p2ptest.RequireSend(t, a, p2p.Envelope{To: aID, Message: &p2ptest.Message{Value: "self"}}) - p2ptest.RequireEmpty(t, a, b, c) + p2ptest.RequireSend(ctx, t, a, p2p.Envelope{To: aID, Message: &p2ptest.Message{Value: "self"}}) + p2ptest.RequireEmpty(ctx, t, a, b, c) // Removing b and sending to it should be dropped. - network.Remove(t, bID) - p2ptest.RequireSend(t, a, p2p.Envelope{To: bID, Message: &p2ptest.Message{Value: "nob"}}) - p2ptest.RequireEmpty(t, a, b, c) + network.Remove(ctx, t, bID) + p2ptest.RequireSend(ctx, t, a, p2p.Envelope{To: bID, Message: &p2ptest.Message{Value: "nob"}}) + p2ptest.RequireEmpty(ctx, t, a, b, c) // After all this, sending a message c->a should work. - p2ptest.RequireSend(t, c, p2p.Envelope{To: aID, Message: &p2ptest.Message{Value: "bar"}}) - p2ptest.RequireReceive(t, a, p2p.Envelope{From: cID, Message: &p2ptest.Message{Value: "bar"}}) - p2ptest.RequireEmpty(t, a, b, c) + p2ptest.RequireSend(ctx, t, c, p2p.Envelope{To: aID, Message: &p2ptest.Message{Value: "bar"}}) + p2ptest.RequireReceive(ctx, t, a, p2p.Envelope{From: cID, Message: &p2ptest.Message{Value: "bar"}}) + p2ptest.RequireEmpty(ctx, t, a, b, c) // None of these messages should have made it onto the other channels. for _, other := range otherChannels { - p2ptest.RequireEmpty(t, other) + p2ptest.RequireEmpty(ctx, t, other) } } func TestRouter_Channel_Broadcast(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Create a test network and open a channel on all nodes. - network := p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: 4}) + network := p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: 4}) ids := network.NodeIDs() aID, bID, cID, dID := ids[0], ids[1], ids[2], ids[3] - channels := network.MakeChannels(t, chDesc, &p2ptest.Message{}, 0) + channels := network.MakeChannels(ctx, t, chDesc) a, b, c, d := channels[aID], channels[bID], channels[cID], channels[dID] - network.Start(t) + network.Start(ctx, t) // Sending a broadcast from b should work. 
- p2ptest.RequireSend(t, b, p2p.Envelope{Broadcast: true, Message: &p2ptest.Message{Value: "foo"}}) - p2ptest.RequireReceive(t, a, p2p.Envelope{From: bID, Message: &p2ptest.Message{Value: "foo"}}) - p2ptest.RequireReceive(t, c, p2p.Envelope{From: bID, Message: &p2ptest.Message{Value: "foo"}}) - p2ptest.RequireReceive(t, d, p2p.Envelope{From: bID, Message: &p2ptest.Message{Value: "foo"}}) - p2ptest.RequireEmpty(t, a, b, c, d) + p2ptest.RequireSend(ctx, t, b, p2p.Envelope{Broadcast: true, Message: &p2ptest.Message{Value: "foo"}}) + p2ptest.RequireReceive(ctx, t, a, p2p.Envelope{From: bID, Message: &p2ptest.Message{Value: "foo"}}) + p2ptest.RequireReceive(ctx, t, c, p2p.Envelope{From: bID, Message: &p2ptest.Message{Value: "foo"}}) + p2ptest.RequireReceive(ctx, t, d, p2p.Envelope{From: bID, Message: &p2ptest.Message{Value: "foo"}}) + p2ptest.RequireEmpty(ctx, t, a, b, c, d) // Removing one node from the network shouldn't prevent broadcasts from working. - network.Remove(t, dID) - p2ptest.RequireSend(t, a, p2p.Envelope{Broadcast: true, Message: &p2ptest.Message{Value: "bar"}}) - p2ptest.RequireReceive(t, b, p2p.Envelope{From: aID, Message: &p2ptest.Message{Value: "bar"}}) - p2ptest.RequireReceive(t, c, p2p.Envelope{From: aID, Message: &p2ptest.Message{Value: "bar"}}) - p2ptest.RequireEmpty(t, a, b, c, d) + network.Remove(ctx, t, dID) + p2ptest.RequireSend(ctx, t, a, p2p.Envelope{Broadcast: true, Message: &p2ptest.Message{Value: "bar"}}) + p2ptest.RequireReceive(ctx, t, b, p2p.Envelope{From: aID, Message: &p2ptest.Message{Value: "bar"}}) + p2ptest.RequireReceive(ctx, t, c, p2p.Envelope{From: aID, Message: &p2ptest.Message{Value: "bar"}}) + p2ptest.RequireEmpty(ctx, t, a, b, c, d) } func TestRouter_Channel_Wrapper(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Create a test network and open a channel on all nodes. - network := p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: 2}) + network := p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: 2}) ids := network.NodeIDs() aID, bID := ids[0], ids[1] - channels := network.MakeChannels(t, chDesc, &wrapperMessage{}, 0) + chDesc := &p2p.ChannelDescriptor{ + ID: chID, + MessageType: &wrapperMessage{}, + Priority: 5, + SendQueueCapacity: 10, + RecvMessageCapacity: 10, + } + + channels := network.MakeChannels(ctx, t, chDesc) a, b := channels[aID], channels[bID] - network.Start(t) + network.Start(ctx, t) // Since wrapperMessage implements p2p.Wrapper and handles Message, it // should automatically wrap and unwrap sent messages -- we prepend the // wrapper actions to the message value to signal this. - p2ptest.RequireSend(t, a, p2p.Envelope{To: bID, Message: &p2ptest.Message{Value: "foo"}}) - p2ptest.RequireReceive(t, b, p2p.Envelope{From: aID, Message: &p2ptest.Message{Value: "unwrap:wrap:foo"}}) + p2ptest.RequireSend(ctx, t, a, p2p.Envelope{To: bID, Message: &p2ptest.Message{Value: "foo"}}) + p2ptest.RequireReceive(ctx, t, b, p2p.Envelope{From: aID, Message: &p2ptest.Message{Value: "unwrap:wrap:foo"}}) // If we send a different message that can't be wrapped, it should be dropped. 
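For readers new to the Wrapper mechanism this test exercises: a wrapper is a proto message that can absorb an inner message before marshaling (Wrap) and yield it back after unmarshaling (Unwrap), letting one wire type multiplex several payloads. A sketch of the contract's shape only; wrapperMessage in the test below is the real example, and this passthrough version skips the wrap:/unwrap: markers the test uses to observe the calls:

```go
package example

import (
	"errors"

	"github.com/gogo/protobuf/proto"

	"github.com/tendermint/tendermint/internal/p2p/p2ptest"
)

// wrapper is a hypothetical envelope type showing the Wrapper contract.
// Embedding a concrete proto message makes *wrapper a proto.Message too.
type wrapper struct {
	p2ptest.Message
}

// Wrap absorbs a supported inner message before marshaling.
func (w *wrapper) Wrap(msg proto.Message) error {
	inner, ok := msg.(*p2ptest.Message)
	if !ok {
		return errors.New("unsupported message type")
	}
	w.Message = *inner
	return nil
}

// Unwrap yields the inner message on the receive side.
func (w *wrapper) Unwrap() (proto.Message, error) {
	return &w.Message, nil
}
```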
- p2ptest.RequireSend(t, a, p2p.Envelope{To: bID, Message: &gogotypes.BoolValue{Value: true}}) - p2ptest.RequireEmpty(t, b) + p2ptest.RequireSend(ctx, t, a, p2p.Envelope{To: bID, Message: &gogotypes.BoolValue{Value: true}}) + p2ptest.RequireEmpty(ctx, t, b) // If we send the wrapper message itself, it should also be passed through // since WrapperMessage supports it, and should only be unwrapped at the receiver. - p2ptest.RequireSend(t, a, p2p.Envelope{ + p2ptest.RequireSend(ctx, t, a, p2p.Envelope{ To: bID, Message: &wrapperMessage{Message: p2ptest.Message{Value: "foo"}}, }) - p2ptest.RequireReceive(t, b, p2p.Envelope{ + p2ptest.RequireReceive(ctx, t, b, p2p.Envelope{ From: aID, Message: &p2ptest.Message{Value: "unwrap:foo"}, }) @@ -304,18 +327,21 @@ func (w *wrapperMessage) Unwrap() (proto.Message, error) { func TestRouter_Channel_Error(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Create a test network and open a channel on all nodes. - network := p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: 3}) - network.Start(t) + network := p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: 3}) + network.Start(ctx, t) ids := network.NodeIDs() aID, bID := ids[0], ids[1] - channels := network.MakeChannels(t, chDesc, &p2ptest.Message{}, 0) + channels := network.MakeChannels(ctx, t, chDesc) a := channels[aID] // Erroring b should cause it to be disconnected. It will reconnect shortly after. - sub := network.Nodes[aID].MakePeerUpdates(t) - p2ptest.RequireError(t, a, p2p.PeerError{NodeID: bID, Err: errors.New("boom")}) + sub := network.Nodes[aID].MakePeerUpdates(ctx, t) + p2ptest.RequireError(ctx, t, a, p2p.PeerError{NodeID: bID, Err: errors.New("boom")}) p2ptest.RequireUpdates(t, sub, []p2p.PeerUpdate{ {NodeID: bID, Status: p2p.PeerStatusDown}, {NodeID: bID, Status: p2p.PeerStatusUp}, @@ -343,51 +369,55 @@ func TestRouter_AcceptPeers(t *testing.T) { false, }, } + + bctx, bcancel := context.WithCancel(context.Background()) + defer bcancel() + for name, tc := range testcases { tc := tc t.Run(name, func(t *testing.T) { + ctx, cancel := context.WithCancel(bctx) + defer cancel() + t.Cleanup(leaktest.Check(t)) // Set up a mock transport that handshakes. - closer := tmsync.NewCloser() + connCtx, connCancel := context.WithCancel(context.Background()) mockConnection := &mocks.Connection{} mockConnection.On("String").Maybe().Return("mock") mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey). Return(tc.peerInfo, tc.peerKey, nil) - mockConnection.On("Close").Run(func(_ mock.Arguments) { closer.Close() }).Return(nil) + mockConnection.On("Close").Run(func(_ mock.Arguments) { connCancel() }).Return(nil).Maybe() mockConnection.On("RemoteEndpoint").Return(p2p.Endpoint{}) if tc.ok { - // without the sleep after RequireUpdate this method isn't - // always called. Consider making this call optional. 
- mockConnection.On("ReceiveMessage").Return(chID, nil, io.EOF) + mockConnection.On("ReceiveMessage", mock.Anything).Return(chID, nil, io.EOF).Maybe() } mockTransport := &mocks.Transport{} mockTransport.On("String").Maybe().Return("mock") - mockTransport.On("Protocols").Return([]p2p.Protocol{"mock"}) - mockTransport.On("Close").Return(nil) - mockTransport.On("Accept").Once().Return(mockConnection, nil) - mockTransport.On("Accept").Once().Return(nil, io.EOF) + mockTransport.On("Close").Return(nil).Maybe() + mockTransport.On("Accept", mock.Anything).Once().Return(mockConnection, nil) + mockTransport.On("Accept", mock.Anything).Maybe().Return(nil, io.EOF) + mockTransport.On("Listen", mock.Anything).Return(nil) // Set up and start the router. peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - defer peerManager.Close() - sub := peerManager.Subscribe() - defer sub.Close() + sub := peerManager.Subscribe(ctx) router, err := p2p.NewRouter( - log.TestingLogger(), + log.NewNopLogger(), p2p.NopMetrics(), - selfInfo, selfKey, peerManager, - []p2p.Transport{mockTransport}, + func() *types.NodeInfo { return &selfInfo }, + mockTransport, + nil, p2p.RouterOptions{}, ) require.NoError(t, err) - require.NoError(t, router.Start()) + require.NoError(t, router.Start(ctx)) if tc.ok { p2ptest.RequireUpdate(t, sub, p2p.PeerUpdate{ @@ -397,16 +427,15 @@ func TestRouter_AcceptPeers(t *testing.T) { // force a context switch so that the // connection is handled. time.Sleep(time.Millisecond) - sub.Close() } else { select { - case <-closer.Done(): + case <-connCtx.Done(): case <-time.After(100 * time.Millisecond): require.Fail(t, "connection not closed") } } - require.NoError(t, router.Stop()) + router.Stop() mockTransport.AssertExpectations(t) mockConnection.AssertExpectations(t) }) @@ -416,33 +445,36 @@ func TestRouter_AcceptPeers(t *testing.T) { func TestRouter_AcceptPeers_Error(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Set up a mock transport that returns an error, which should prevent // the router from calling Accept again. mockTransport := &mocks.Transport{} mockTransport.On("String").Maybe().Return("mock") - mockTransport.On("Protocols").Return([]p2p.Protocol{"mock"}) - mockTransport.On("Accept").Once().Return(nil, errors.New("boom")) + mockTransport.On("Accept", mock.Anything).Once().Return(nil, errors.New("boom")) mockTransport.On("Close").Return(nil) + mockTransport.On("Listen", mock.Anything).Return(nil) // Set up and start the router. 
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - defer peerManager.Close() router, err := p2p.NewRouter( - log.TestingLogger(), + log.NewNopLogger(), p2p.NopMetrics(), - selfInfo, selfKey, peerManager, - []p2p.Transport{mockTransport}, + func() *types.NodeInfo { return &selfInfo }, + mockTransport, + nil, p2p.RouterOptions{}, ) require.NoError(t, err) - require.NoError(t, router.Start()) + require.NoError(t, router.Start(ctx)) time.Sleep(time.Second) - require.NoError(t, router.Stop()) + router.Stop() mockTransport.AssertExpectations(t) } @@ -450,33 +482,36 @@ func TestRouter_AcceptPeers_Error(t *testing.T) { func TestRouter_AcceptPeers_ErrorEOF(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Set up a mock transport that returns io.EOF once, which should prevent // the router from calling Accept again. mockTransport := &mocks.Transport{} mockTransport.On("String").Maybe().Return("mock") - mockTransport.On("Protocols").Return([]p2p.Protocol{"mock"}) - mockTransport.On("Accept").Once().Return(nil, io.EOF) + mockTransport.On("Accept", mock.Anything).Once().Return(nil, io.EOF) mockTransport.On("Close").Return(nil) + mockTransport.On("Listen", mock.Anything).Return(nil) // Set up and start the router. peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - defer peerManager.Close() router, err := p2p.NewRouter( - log.TestingLogger(), + log.NewNopLogger(), p2p.NopMetrics(), - selfInfo, selfKey, peerManager, - []p2p.Transport{mockTransport}, + func() *types.NodeInfo { return &selfInfo }, + mockTransport, + nil, p2p.RouterOptions{}, ) require.NoError(t, err) - require.NoError(t, router.Start()) + require.NoError(t, router.Start(ctx)) time.Sleep(time.Second) - require.NoError(t, router.Stop()) + router.Stop() mockTransport.AssertExpectations(t) } @@ -484,6 +519,9 @@ func TestRouter_AcceptPeers_ErrorEOF(t *testing.T) { func TestRouter_AcceptPeers_HeadOfLineBlocking(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Set up a mock transport that returns a connection that blocks during the // handshake. It should be able to accept several of these in parallel, i.e. // a single connection can't halt other connections being accepted. @@ -499,37 +537,37 @@ func TestRouter_AcceptPeers_HeadOfLineBlocking(t *testing.T) { mockTransport := &mocks.Transport{} mockTransport.On("String").Maybe().Return("mock") - mockTransport.On("Protocols").Return([]p2p.Protocol{"mock"}) mockTransport.On("Close").Return(nil) - mockTransport.On("Accept").Times(3).Run(func(_ mock.Arguments) { + mockTransport.On("Accept", mock.Anything).Times(3).Run(func(_ mock.Arguments) { acceptCh <- true }).Return(mockConnection, nil) - mockTransport.On("Accept").Once().Return(nil, io.EOF) + mockTransport.On("Accept", mock.Anything).Once().Return(nil, io.EOF) + mockTransport.On("Listen", mock.Anything).Return(nil) // Set up and start the router. 
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - defer peerManager.Close() router, err := p2p.NewRouter( - log.TestingLogger(), + log.NewNopLogger(), p2p.NopMetrics(), - selfInfo, selfKey, peerManager, - []p2p.Transport{mockTransport}, + func() *types.NodeInfo { return &selfInfo }, + mockTransport, + nil, p2p.RouterOptions{}, ) require.NoError(t, err) - require.NoError(t, router.Start()) + require.NoError(t, router.Start(ctx)) require.Eventually(t, func() bool { return len(acceptCh) == 3 - }, time.Second, 10*time.Millisecond) + }, time.Second, 10*time.Millisecond, "num", len(acceptCh)) close(closeCh) time.Sleep(100 * time.Millisecond) - require.NoError(t, router.Stop()) + router.Stop() mockTransport.AssertExpectations(t) mockConnection.AssertExpectations(t) } @@ -560,34 +598,39 @@ func TestRouter_DialPeers(t *testing.T) { false, }, } + + bctx, bcancel := context.WithCancel(context.Background()) + defer bcancel() + for name, tc := range testcases { tc := tc t.Run(name, func(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(bctx) + defer cancel() address := p2p.NodeAddress{Protocol: "mock", NodeID: tc.dialID} - endpoint := p2p.Endpoint{Protocol: "mock", Path: string(tc.dialID)} + endpoint := &p2p.Endpoint{Protocol: "mock", Path: string(tc.dialID)} // Set up a mock transport that handshakes. - closer := tmsync.NewCloser() + connCtx, connCancel := context.WithCancel(context.Background()) + defer connCancel() mockConnection := &mocks.Connection{} mockConnection.On("String").Maybe().Return("mock") if tc.dialErr == nil { mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey). Return(tc.peerInfo, tc.peerKey, nil) - mockConnection.On("Close").Run(func(_ mock.Arguments) { closer.Close() }).Return(nil) + mockConnection.On("Close").Run(func(_ mock.Arguments) { connCancel() }).Return(nil).Maybe() } if tc.ok { - // without the sleep after RequireUpdate this method isn't - // always called. Consider making this call optional. - mockConnection.On("ReceiveMessage").Return(chID, nil, io.EOF) + mockConnection.On("ReceiveMessage", mock.Anything).Return(chID, nil, io.EOF).Maybe() } mockTransport := &mocks.Transport{} mockTransport.On("String").Maybe().Return("mock") - mockTransport.On("Protocols").Return([]p2p.Protocol{"mock"}) - mockTransport.On("Close").Return(nil) - mockTransport.On("Accept").Maybe().Return(nil, io.EOF) + mockTransport.On("Close").Return(nil).Maybe() + mockTransport.On("Listen", mock.Anything).Return(nil) + mockTransport.On("Accept", mock.Anything).Maybe().Return(nil, io.EOF) if tc.dialErr == nil { mockTransport.On("Dial", mock.Anything, endpoint).Once().Return(mockConnection, nil) // This handles the retry when a dialed connection gets closed after ReceiveMessage @@ -595,32 +638,31 @@ func TestRouter_DialPeers(t *testing.T) { mockTransport.On("Dial", mock.Anything, endpoint).Maybe().Return(nil, io.EOF) } else { mockTransport.On("Dial", mock.Anything, endpoint).Once(). - Run(func(_ mock.Arguments) { closer.Close() }). + Run(func(_ mock.Arguments) { connCancel() }). Return(nil, tc.dialErr) } // Set up and start the router. 
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - defer peerManager.Close() added, err := peerManager.Add(address) require.NoError(t, err) require.True(t, added) - sub := peerManager.Subscribe() - defer sub.Close() + sub := peerManager.Subscribe(ctx) router, err := p2p.NewRouter( - log.TestingLogger(), + log.NewNopLogger(), p2p.NopMetrics(), - selfInfo, selfKey, peerManager, - []p2p.Transport{mockTransport}, + func() *types.NodeInfo { return &selfInfo }, + mockTransport, + nil, p2p.RouterOptions{}, ) require.NoError(t, err) - require.NoError(t, router.Start()) + require.NoError(t, router.Start(ctx)) if tc.ok { p2ptest.RequireUpdate(t, sub, p2p.PeerUpdate{ @@ -630,16 +672,15 @@ func TestRouter_DialPeers(t *testing.T) { // force a context switch so that the // connection is handled. time.Sleep(time.Millisecond) - sub.Close() } else { select { - case <-closer.Done(): + case <-connCtx.Done(): case <-time.After(100 * time.Millisecond): require.Fail(t, "connection not closed") } } - require.NoError(t, router.Stop()) + router.Stop() mockTransport.AssertExpectations(t) mockConnection.AssertExpectations(t) }) @@ -649,6 +690,9 @@ func TestRouter_DialPeers(t *testing.T) { func TestRouter_DialPeers_Parallel(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + a := p2p.NodeAddress{Protocol: "mock", NodeID: types.NodeID(strings.Repeat("a", 40))} b := p2p.NodeAddress{Protocol: "mock", NodeID: types.NodeID(strings.Repeat("b", 40))} c := p2p.NodeAddress{Protocol: "mock", NodeID: types.NodeID(strings.Repeat("c", 40))} @@ -666,11 +710,11 @@ func TestRouter_DialPeers_Parallel(t *testing.T) { mockTransport := &mocks.Transport{} mockTransport.On("String").Maybe().Return("mock") - mockTransport.On("Protocols").Return([]p2p.Protocol{"mock"}) mockTransport.On("Close").Return(nil) - mockTransport.On("Accept").Once().Return(nil, io.EOF) + mockTransport.On("Listen", mock.Anything).Return(nil) + mockTransport.On("Accept", mock.Anything).Once().Return(nil, io.EOF) for _, address := range []p2p.NodeAddress{a, b, c} { - endpoint := p2p.Endpoint{Protocol: address.Protocol, Path: string(address.NodeID)} + endpoint := &p2p.Endpoint{Protocol: address.Protocol, Path: string(address.NodeID)} mockTransport.On("Dial", mock.Anything, endpoint).Run(func(_ mock.Arguments) { dialCh <- true }).Return(mockConnection, nil) @@ -679,7 +723,6 @@ func TestRouter_DialPeers_Parallel(t *testing.T) { // Set up and start the router. 
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - defer peerManager.Close() added, err := peerManager.Add(a) require.NoError(t, err) @@ -694,12 +737,13 @@ func TestRouter_DialPeers_Parallel(t *testing.T) { require.True(t, added) router, err := p2p.NewRouter( - log.TestingLogger(), + log.NewNopLogger(), p2p.NopMetrics(), - selfInfo, selfKey, peerManager, - []p2p.Transport{mockTransport}, + func() *types.NodeInfo { return &selfInfo }, + mockTransport, + nil, p2p.RouterOptions{ DialSleep: func(_ context.Context) {}, NumConcurrentDials: func() int { @@ -713,7 +757,7 @@ func TestRouter_DialPeers_Parallel(t *testing.T) { ) require.NoError(t, err) - require.NoError(t, router.Start()) + require.NoError(t, router.Start(ctx)) require.Eventually(t, func() bool { @@ -726,7 +770,7 @@ func TestRouter_DialPeers_Parallel(t *testing.T) { close(closeCh) time.Sleep(500 * time.Millisecond) - require.NoError(t, router.Stop()) + router.Stop() mockTransport.AssertExpectations(t) mockConnection.AssertExpectations(t) } @@ -734,6 +778,9 @@ func TestRouter_DialPeers_Parallel(t *testing.T) { func TestRouter_EvictPeers(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Set up a mock transport that we can evict. closeCh := make(chan time.Time) closeOnce := sync.Once{} @@ -742,7 +789,7 @@ func TestRouter_EvictPeers(t *testing.T) { mockConnection.On("String").Maybe().Return("mock") mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey). Return(peerInfo, peerKey.PubKey(), nil) - mockConnection.On("ReceiveMessage").WaitUntil(closeCh).Return(chID, nil, io.EOF) + mockConnection.On("ReceiveMessage", mock.Anything).WaitUntil(closeCh).Return(chID, nil, io.EOF) mockConnection.On("RemoteEndpoint").Return(p2p.Endpoint{}) mockConnection.On("Close").Run(func(_ mock.Arguments) { closeOnce.Do(func() { @@ -752,30 +799,29 @@ func TestRouter_EvictPeers(t *testing.T) { mockTransport := &mocks.Transport{} mockTransport.On("String").Maybe().Return("mock") - mockTransport.On("Protocols").Return([]p2p.Protocol{"mock"}) mockTransport.On("Close").Return(nil) - mockTransport.On("Accept").Once().Return(mockConnection, nil) - mockTransport.On("Accept").Once().Return(nil, io.EOF) + mockTransport.On("Accept", mock.Anything).Once().Return(mockConnection, nil) + mockTransport.On("Accept", mock.Anything).Maybe().Return(nil, io.EOF) + mockTransport.On("Listen", mock.Anything).Return(nil) // Set up and start the router. peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - defer peerManager.Close() - sub := peerManager.Subscribe() - defer sub.Close() + sub := peerManager.Subscribe(ctx) router, err := p2p.NewRouter( - log.TestingLogger(), + log.NewNopLogger(), p2p.NopMetrics(), - selfInfo, selfKey, peerManager, - []p2p.Transport{mockTransport}, + func() *types.NodeInfo { return &selfInfo }, + mockTransport, + nil, p2p.RouterOptions{}, ) require.NoError(t, err) - require.NoError(t, router.Start()) + require.NoError(t, router.Start(ctx)) // Wait for the mock peer to connect, then evict it by reporting an error. 
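TestRouter_DialPeers_Parallel above overrides two dial knobs through RouterOptions. The same options in isolation as a sketch; the values are illustrative only, and the field types are as the test uses them:

```go
package example

import (
	"context"
	"runtime"

	"github.com/tendermint/tendermint/internal/p2p"
)

// fastDialOptions disables the randomized dial sleep and pins the number
// of concurrent dial workers, as the parallel-dial test above does.
func fastDialOptions() p2p.RouterOptions {
	return p2p.RouterOptions{
		DialSleep:          func(context.Context) {}, // no backoff between attempts
		NumConcurrentDials: func() int { return runtime.NumCPU() },
	}
}
```

Making both knobs functions rather than plain values lets tests and callers vary them at runtime without rebuilding the router.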
p2ptest.RequireUpdate(t, sub, p2p.PeerUpdate{ @@ -789,15 +835,16 @@ func TestRouter_EvictPeers(t *testing.T) { NodeID: peerInfo.NodeID, Status: p2p.PeerStatusDown, }) - sub.Close() - require.NoError(t, router.Stop()) + router.Stop() mockTransport.AssertExpectations(t) mockConnection.AssertExpectations(t) } func TestRouter_ChannelCompatability(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() incompatiblePeer := types.NodeInfo{ NodeID: peerID, @@ -816,29 +863,29 @@ func TestRouter_ChannelCompatability(t *testing.T) { mockTransport := &mocks.Transport{} mockTransport.On("String").Maybe().Return("mock") - mockTransport.On("Protocols").Return([]p2p.Protocol{"mock"}) mockTransport.On("Close").Return(nil) - mockTransport.On("Accept").Once().Return(mockConnection, nil) - mockTransport.On("Accept").Once().Return(nil, io.EOF) + mockTransport.On("Accept", mock.Anything).Once().Return(mockConnection, nil) + mockTransport.On("Accept", mock.Anything).Once().Return(nil, io.EOF) + mockTransport.On("Listen", mock.Anything).Return(nil) // Set up and start the router. peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - defer peerManager.Close() router, err := p2p.NewRouter( - log.TestingLogger(), + log.NewNopLogger(), p2p.NopMetrics(), - selfInfo, selfKey, peerManager, - []p2p.Transport{mockTransport}, + func() *types.NodeInfo { return &selfInfo }, + mockTransport, + nil, p2p.RouterOptions{}, ) require.NoError(t, err) - require.NoError(t, router.Start()) + require.NoError(t, router.Start(ctx)) time.Sleep(1 * time.Second) - require.NoError(t, router.Stop()) + router.Stop() require.Empty(t, peerManager.Peers()) mockConnection.AssertExpectations(t) @@ -847,6 +894,8 @@ func TestRouter_ChannelCompatability(t *testing.T) { func TestRouter_DontSendOnInvalidChannel(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() peer := types.NodeInfo{ NodeID: peerID, @@ -862,48 +911,48 @@ func TestRouter_DontSendOnInvalidChannel(t *testing.T) { Return(peer, peerKey.PubKey(), nil) mockConnection.On("RemoteEndpoint").Return(p2p.Endpoint{}) mockConnection.On("Close").Return(nil) - mockConnection.On("ReceiveMessage").Return(chID, nil, io.EOF) + mockConnection.On("ReceiveMessage", mock.Anything).Return(chID, nil, io.EOF) mockTransport := &mocks.Transport{} + mockTransport.On("AddChannelDescriptors", mock.Anything).Return() mockTransport.On("String").Maybe().Return("mock") - mockTransport.On("Protocols").Return([]p2p.Protocol{"mock"}) mockTransport.On("Close").Return(nil) - mockTransport.On("Accept").Once().Return(mockConnection, nil) - mockTransport.On("Accept").Once().Return(nil, io.EOF) + mockTransport.On("Accept", mock.Anything).Once().Return(mockConnection, nil) + mockTransport.On("Accept", mock.Anything).Maybe().Return(nil, io.EOF) + mockTransport.On("Listen", mock.Anything).Return(nil) // Set up and start the router. 
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - defer peerManager.Close() - sub := peerManager.Subscribe() - defer sub.Close() + sub := peerManager.Subscribe(ctx) router, err := p2p.NewRouter( - log.TestingLogger(), + log.NewNopLogger(), p2p.NopMetrics(), - selfInfo, selfKey, peerManager, - []p2p.Transport{mockTransport}, + func() *types.NodeInfo { return &selfInfo }, + mockTransport, + nil, p2p.RouterOptions{}, ) require.NoError(t, err) - require.NoError(t, router.Start()) + require.NoError(t, router.Start(ctx)) p2ptest.RequireUpdate(t, sub, p2p.PeerUpdate{ NodeID: peerInfo.NodeID, Status: p2p.PeerStatusUp, }) - channel, err := router.OpenChannel(chDesc, &p2ptest.Message{}, 0) + channel, err := router.OpenChannel(ctx, chDesc) require.NoError(t, err) - channel.Out <- p2p.Envelope{ + require.NoError(t, channel.Send(ctx, p2p.Envelope{ To: peer.NodeID, Message: &p2ptest.Message{Value: "Hi"}, - } + })) - require.NoError(t, router.Stop()) + router.Stop() mockTransport.AssertExpectations(t) } diff --git a/internal/p2p/shim.go b/internal/p2p/shim.go deleted file mode 100644 index 0998dd6ec3..0000000000 --- a/internal/p2p/shim.go +++ /dev/null @@ -1,341 +0,0 @@ -package p2p - -import ( - "errors" - "sort" - - "github.com/gogo/protobuf/proto" - - "github.com/tendermint/tendermint/libs/log" -) - -// ============================================================================ -// TODO: Types and business logic below are temporary and will be removed once -// the legacy p2p stack is removed in favor of the new model. -// -// ref: https://github.com/tendermint/tendermint/issues/5670 -// ============================================================================ - -var _ Reactor = (*ReactorShim)(nil) - -type ( - messageValidator interface { - Validate() error - } - - // ReactorShim defines a generic shim wrapper around a BaseReactor. It is - // responsible for wiring up legacy p2p behavior to the new p2p semantics - // (e.g. proxying Envelope messages to legacy peers). - ReactorShim struct { - BaseReactor - - Name string - PeerUpdates *PeerUpdates - Channels map[ChannelID]*ChannelShim - } - - // ChannelShim defines a generic shim wrapper around a legacy p2p channel - // and the new p2p Channel. It also includes the raw bi-directional Go channels - // so we can proxy message delivery. - ChannelShim struct { - Descriptor *ChannelDescriptor - Channel *Channel - inCh chan<- Envelope - outCh <-chan Envelope - errCh <-chan PeerError - } - - // ChannelDescriptorShim defines a shim wrapper around a legacy p2p channel - // and the proto.Message the new p2p Channel is responsible for handling. - // A ChannelDescriptorShim is not contained in ReactorShim, but is rather - // used to construct a ReactorShim. 
- ChannelDescriptorShim struct { - MsgType proto.Message - Descriptor *ChannelDescriptor - } -) - -func NewReactorShim(logger log.Logger, name string, descriptors map[ChannelID]*ChannelDescriptorShim) *ReactorShim { - channels := make(map[ChannelID]*ChannelShim) - - for _, cds := range descriptors { - chShim := NewChannelShim(cds, 0) - channels[chShim.Channel.ID] = chShim - } - - rs := &ReactorShim{ - Name: name, - PeerUpdates: NewPeerUpdates(make(chan PeerUpdate), 0), - Channels: channels, - } - - rs.BaseReactor = *NewBaseReactor(name, rs) - rs.SetLogger(logger) - - return rs -} - -func NewChannelShim(cds *ChannelDescriptorShim, buf uint) *ChannelShim { - inCh := make(chan Envelope, buf) - outCh := make(chan Envelope, buf) - errCh := make(chan PeerError, buf) - return &ChannelShim{ - Descriptor: cds.Descriptor, - Channel: NewChannel( - ChannelID(cds.Descriptor.ID), - cds.MsgType, - inCh, - outCh, - errCh, - ), - inCh: inCh, - outCh: outCh, - errCh: errCh, - } -} - -// proxyPeerEnvelopes iterates over each p2p Channel and starts a separate -// go-routine where we listen for outbound envelopes sent during Receive -// executions (or anything else that may send on the Channel) and proxy them to -// the corresponding Peer using the To field from the envelope. -func (rs *ReactorShim) proxyPeerEnvelopes() { - for _, cs := range rs.Channels { - go func(cs *ChannelShim) { - for e := range cs.outCh { - msg := proto.Clone(cs.Channel.messageType) - msg.Reset() - - wrapper, ok := msg.(Wrapper) - if ok { - if err := wrapper.Wrap(e.Message); err != nil { - rs.Logger.Error( - "failed to proxy envelope; failed to wrap message", - "ch_id", cs.Descriptor.ID, - "err", err, - ) - continue - } - } else { - msg = e.Message - } - - bz, err := proto.Marshal(msg) - if err != nil { - rs.Logger.Error( - "failed to proxy envelope; failed to encode message", - "ch_id", cs.Descriptor.ID, - "err", err, - ) - continue - } - - switch { - case e.Broadcast: - rs.Switch.Broadcast(cs.Descriptor.ID, bz) - - case e.To != "": - src := rs.Switch.peers.Get(e.To) - if src == nil { - rs.Logger.Debug( - "failed to proxy envelope; failed to find peer", - "ch_id", cs.Descriptor.ID, - "peer", e.To, - ) - continue - } - - if !src.Send(cs.Descriptor.ID, bz) { - // This usually happens when we try to send across a channel - // that the peer doesn't have open. To avoid bloating the - // logs we set this to be Debug - rs.Logger.Debug( - "failed to proxy message to peer", - "ch_id", cs.Descriptor.ID, - "peer", e.To, - ) - } - - default: - rs.Logger.Error("failed to proxy envelope; missing peer ID", "ch_id", cs.Descriptor.ID) - } - } - }(cs) - } -} - -// handlePeerErrors iterates over each p2p Channel and starts a separate go-routine -// where we listen for peer errors. For each peer error, we find the peer from -// the legacy p2p Switch and execute a StopPeerForError call with the corresponding -// peer error. -func (rs *ReactorShim) handlePeerErrors() { - for _, cs := range rs.Channels { - go func(cs *ChannelShim) { - for pErr := range cs.errCh { - if pErr.NodeID != "" { - peer := rs.Switch.peers.Get(pErr.NodeID) - if peer == nil { - rs.Logger.Error("failed to handle peer error; failed to find peer", "peer", pErr.NodeID) - continue - } - - rs.Switch.StopPeerForError(peer, pErr.Err) - } - } - }(cs) - } -} - -// OnStart executes the reactor shim's OnStart hook where we start all the -// necessary go-routines in order to proxy peer envelopes and errors per p2p -// Channel. 
-func (rs *ReactorShim) OnStart() error { - if rs.Switch == nil { - return errors.New("proxyPeerEnvelopes: reactor shim switch is nil") - } - - // start envelope proxying and peer error handling in separate go routines - rs.proxyPeerEnvelopes() - rs.handlePeerErrors() - - return nil -} - -// GetChannel returns a p2p Channel reference for a given ChannelID. If no -// Channel exists, nil is returned. -func (rs *ReactorShim) GetChannel(cID ChannelID) *Channel { - channelShim, ok := rs.Channels[cID] - if ok { - return channelShim.Channel - } - - return nil -} - -// GetChannels implements the legacy Reactor interface for getting a slice of all -// the supported ChannelDescriptors. -func (rs *ReactorShim) GetChannels() []*ChannelDescriptor { - sortedChIDs := make([]ChannelID, 0, len(rs.Channels)) - for cID := range rs.Channels { - sortedChIDs = append(sortedChIDs, cID) - } - - sort.Slice(sortedChIDs, func(i, j int) bool { return sortedChIDs[i] < sortedChIDs[j] }) - - descriptors := make([]*ChannelDescriptor, len(rs.Channels)) - for i, cID := range sortedChIDs { - descriptors[i] = rs.Channels[cID].Descriptor - } - - return descriptors -} - -// AddPeer sends a PeerUpdate with status PeerStatusUp on the PeerUpdateCh. -// The embedding reactor must be sure to listen for messages on this channel to -// handle adding a peer. -func (rs *ReactorShim) AddPeer(peer Peer) { - proTxHash := peer.NodeInfo().ProTxHash - select { - case rs.PeerUpdates.reactorUpdatesCh <- PeerUpdate{ - NodeID: peer.ID(), - Status: PeerStatusUp, - ProTxHash: proTxHash, - Channels: toChannelIDs(peer.NodeInfo().Channels), - }: - rs.Logger.Debug("sent peer update", "reactor", rs.Name, "peer", peer.ID(), "status", PeerStatusUp) - case <-rs.PeerUpdates.Done(): - // NOTE: We explicitly DO NOT close the PeerUpdatesCh's updateCh go channel. - // This is because there may be numerous spawned goroutines that are - // attempting to send on the updateCh go channel and when the reactor stops - // we do not want to preemptively close the channel as that could result in - // panics sending on a closed channel. This also means that reactors MUST - // be certain there are NO listeners on the updateCh channel when closing or - // stopping. - } -} - -// RemovePeer sends a PeerUpdate with status PeerStatusDown on the PeerUpdateCh. -// The embedding reactor must be sure to listen for messages on this channel to -// handle removing a peer. -func (rs *ReactorShim) RemovePeer(peer Peer, reason interface{}) { - proTxHash := peer.NodeInfo().ProTxHash - select { - case rs.PeerUpdates.reactorUpdatesCh <- PeerUpdate{NodeID: peer.ID(), Status: PeerStatusDown, ProTxHash: proTxHash}: - rs.Logger.Debug( - "sent peer update", - "reactor", rs.Name, - "peer", peer.ID(), - "reason", reason, - "status", PeerStatusDown, - ) - - case <-rs.PeerUpdates.Done(): - // NOTE: We explicitly DO NOT close the PeerUpdatesCh's updateCh go channel. - // This is because there may be numerous spawned goroutines that are - // attempting to send on the updateCh go channel and when the reactor stops - // we do not want to preemptively close the channel as that could result in - // panics sending on a closed channel. This also means that reactors MUST - // be certain there are NO listeners on the updateCh channel when closing or - // stopping. - } -} - -// Receive implements a generic wrapper around implementing the Receive method -// on the legacy Reactor p2p interface. 
If the reactor is running, Receive will -// find the corresponding new p2p Channel, create and decode the appropriate -// proto.Message from the msgBytes, execute any validation and finally construct -// and send a p2p Envelope on the appropriate p2p Channel. -func (rs *ReactorShim) Receive(chID byte, src Peer, msgBytes []byte) { - if !rs.IsRunning() { - return - } - - cID := ChannelID(chID) - channelShim, ok := rs.Channels[cID] - if !ok { - rs.Logger.Error("unexpected channel", "peer", src, "ch_id", chID) - return - } - - msg := proto.Clone(channelShim.Channel.messageType) - msg.Reset() - - if err := proto.Unmarshal(msgBytes, msg); err != nil { - rs.Logger.Error("error decoding message", "peer", src, "ch_id", cID, "err", err) - rs.Switch.StopPeerForError(src, err) - return - } - - validator, ok := msg.(messageValidator) - if ok { - if err := validator.Validate(); err != nil { - rs.Logger.Error("invalid message", "peer", src, "ch_id", cID, "err", err) - rs.Switch.StopPeerForError(src, err) - return - } - } - - wrapper, ok := msg.(Wrapper) - if ok { - var err error - - msg, err = wrapper.Unwrap() - if err != nil { - rs.Logger.Error("failed to unwrap message", "peer", src, "ch_id", chID, "err", err) - return - } - } - - select { - case channelShim.inCh <- Envelope{From: src.ID(), Message: msg}: - rs.Logger.Debug("proxied envelope", "reactor", rs.Name, "ch_id", cID, "peer", src.ID()) - - case <-channelShim.Channel.Done(): - // NOTE: We explicitly DO NOT close the p2p Channel's inbound go channel. - // This is because there may be numerous spawned goroutines that are - // attempting to send on the inbound channel and when the reactor stops we - // do not want to preemptively close the channel as that could result in - // panics sending on a closed channel. This also means that reactors MUST - // be certain there are NO listeners on the inbound channel when closing or - // stopping. 
- } -} diff --git a/internal/p2p/shim_test.go b/internal/p2p/shim_test.go deleted file mode 100644 index 19c6a3b74e..0000000000 --- a/internal/p2p/shim_test.go +++ /dev/null @@ -1,210 +0,0 @@ -package p2p_test - -import ( - "sync" - "testing" - - "github.com/gogo/protobuf/proto" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/p2p" - p2pmocks "github.com/tendermint/tendermint/internal/p2p/mocks" - "github.com/tendermint/tendermint/libs/log" - ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" - "github.com/tendermint/tendermint/types" -) - -var ( - channelID1 = byte(0x01) - channelID2 = byte(0x02) - - p2pCfg = config.DefaultP2PConfig() - - testChannelShims = map[p2p.ChannelID]*p2p.ChannelDescriptorShim{ - p2p.ChannelID(channelID1): { - MsgType: new(ssproto.Message), - Descriptor: &p2p.ChannelDescriptor{ - ID: channelID1, - Priority: 3, - SendQueueCapacity: 10, - RecvMessageCapacity: int(4e6), - }, - }, - p2p.ChannelID(channelID2): { - MsgType: new(ssproto.Message), - Descriptor: &p2p.ChannelDescriptor{ - ID: channelID2, - Priority: 1, - SendQueueCapacity: 4, - RecvMessageCapacity: int(16e6), - }, - }, - } -) - -type reactorShimTestSuite struct { - shim *p2p.ReactorShim - sw *p2p.Switch -} - -func setup(t *testing.T, peers []p2p.Peer) *reactorShimTestSuite { - t.Helper() - - rts := &reactorShimTestSuite{ - shim: p2p.NewReactorShim(log.TestingLogger(), "TestShim", testChannelShims), - } - - rts.sw = p2p.MakeSwitch(p2pCfg, 1, "testing", "123.123.123", nil, func(_ int, sw *p2p.Switch) *p2p.Switch { - for _, peer := range peers { - p2p.AddPeerToSwitchPeerSet(sw, peer) - } - - sw.AddReactor(rts.shim.Name, rts.shim) - return sw - }, log.TestingLogger()) - - // start the reactor shim - require.NoError(t, rts.shim.Start()) - - t.Cleanup(func() { - require.NoError(t, rts.shim.Stop()) - - for _, chs := range rts.shim.Channels { - chs.Channel.Close() - } - }) - - return rts -} - -func simplePeer(t *testing.T, id string) (*p2pmocks.Peer, types.NodeID) { - t.Helper() - - peerID := types.NodeID(id) - peer := &p2pmocks.Peer{} - peer.On("ID").Return(peerID) - peer.On("NodeInfo").Return(types.NodeInfo{NodeID: peerID}).Maybe() - - return peer, peerID -} - -func TestReactorShim_GetChannel(t *testing.T) { - rts := setup(t, nil) - - p2pCh := rts.shim.GetChannel(p2p.ChannelID(channelID1)) - require.NotNil(t, p2pCh) - require.Equal(t, p2pCh.ID, p2p.ChannelID(channelID1)) - - p2pCh = rts.shim.GetChannel(p2p.ChannelID(byte(0x03))) - require.Nil(t, p2pCh) -} - -func TestReactorShim_GetChannels(t *testing.T) { - rts := setup(t, nil) - - p2pChs := rts.shim.GetChannels() - require.Len(t, p2pChs, 2) - require.Equal(t, p2p.ChannelID(p2pChs[0].ID), p2p.ChannelID(channelID1)) - require.Equal(t, p2p.ChannelID(p2pChs[1].ID), p2p.ChannelID(channelID2)) -} - -func TestReactorShim_AddPeer(t *testing.T) { - peerA, peerIDA := simplePeer(t, "aa") - peerA.On("NodeInfo").Return(types.NodeInfo{}) - rts := setup(t, []p2p.Peer{peerA}) - - var wg sync.WaitGroup - wg.Add(1) - - var peerUpdate p2p.PeerUpdate - go func() { - peerUpdate = <-rts.shim.PeerUpdates.Updates() - wg.Done() - }() - - rts.shim.AddPeer(peerA) - wg.Wait() - - require.Equal(t, peerIDA, peerUpdate.NodeID) - require.Equal(t, p2p.PeerStatusUp, peerUpdate.Status) -} - -func TestReactorShim_RemovePeer(t *testing.T) { - peerA, peerIDA := simplePeer(t, "aa") - peerA.On("NodeInfo").Return(types.NodeInfo{}) - rts := setup(t, []p2p.Peer{peerA}) 
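  // Capture the PeerUpdate that RemovePeer emits on a background goroutine
  // before triggering the removal (same pattern as TestReactorShim_AddPeer above).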
- - var wg sync.WaitGroup - wg.Add(1) - - var peerUpdate p2p.PeerUpdate - go func() { - peerUpdate = <-rts.shim.PeerUpdates.Updates() - wg.Done() - }() - - rts.shim.RemovePeer(peerA, "test reason") - wg.Wait() - - require.Equal(t, peerIDA, peerUpdate.NodeID) - require.Equal(t, p2p.PeerStatusDown, peerUpdate.Status) -} - -func TestReactorShim_Receive(t *testing.T) { - peerA, peerIDA := simplePeer(t, "aa") - rts := setup(t, []p2p.Peer{peerA}) - - msg := &ssproto.Message{ - Sum: &ssproto.Message_ChunkRequest{ - ChunkRequest: &ssproto.ChunkRequest{Height: 1, Format: 1, Index: 1}, - }, - } - - bz, err := proto.Marshal(msg) - require.NoError(t, err) - - var wg sync.WaitGroup - - var response *ssproto.Message - peerA.On("Send", channelID1, mock.Anything).Run(func(args mock.Arguments) { - m := &ssproto.Message{} - require.NoError(t, proto.Unmarshal(args[1].([]byte), m)) - - response = m - wg.Done() - }).Return(true) - - p2pCh := rts.shim.Channels[p2p.ChannelID(channelID1)] - - wg.Add(2) - - // Simulate receiving the envelope in some real reactor and replying back with - // the same envelope and then closing the Channel. - go func() { - e := <-p2pCh.Channel.In - require.Equal(t, peerIDA, e.From) - require.NotNil(t, e.Message) - - p2pCh.Channel.Out <- p2p.Envelope{To: e.From, Message: e.Message} - p2pCh.Channel.Close() - wg.Done() - }() - - rts.shim.Receive(channelID1, peerA, bz) - - // wait until the mock peer called Send and we (fake) proxied the envelope - wg.Wait() - require.NotNil(t, response) - - m, err := response.Unwrap() - require.NoError(t, err) - require.Equal(t, msg.GetChunkRequest(), m) - - // Since p2pCh was closed in the simulated reactor above, calling Receive - // should not block. - rts.shim.Receive(channelID1, peerA, bz) - require.Empty(t, p2pCh.Channel.In) - - peerA.AssertExpectations(t) -} diff --git a/internal/p2p/switch.go b/internal/p2p/switch.go deleted file mode 100644 index ea1272354e..0000000000 --- a/internal/p2p/switch.go +++ /dev/null @@ -1,1064 +0,0 @@ -package p2p - -import ( - "context" - "fmt" - "io" - "math" - mrand "math/rand" - "net" - "sync" - "time" - - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/internal/p2p/conn" - "github.com/tendermint/tendermint/libs/cmap" - tmrand "github.com/tendermint/tendermint/libs/rand" - "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/types" -) - -const ( - // wait a random amount of time from this interval - // before dialing peers or reconnecting to help prevent DoS - dialRandomizerIntervalMilliseconds = 3000 - - // repeatedly try to reconnect for a few minutes - // ie. 5 * 20 = 100s - reconnectAttempts = 20 - reconnectInterval = 5 * time.Second - - // then move into exponential backoff mode for ~1day - // ie. 3**10 = 16hrs - reconnectBackOffAttempts = 10 - reconnectBackOffBaseSeconds = 3 - - defaultFilterTimeout = 5 * time.Second -) - -// MConnConfig returns an MConnConfig with fields updated -// from the P2PConfig. -func MConnConfig(cfg *config.P2PConfig) conn.MConnConfig { - mConfig := conn.DefaultMConnConfig() - mConfig.FlushThrottle = cfg.FlushThrottleTimeout - mConfig.SendRate = cfg.SendRate - mConfig.RecvRate = cfg.RecvRate - mConfig.MaxPacketMsgPayloadSize = cfg.MaxPacketMsgPayloadSize - return mConfig -} - -//----------------------------------------------------------------------------- - -// An AddrBook represents an address book from the pex package, which is used -// to store peer addresses. 
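// The Switch touches the address book from several goroutines (e.g. the
// accept routine below), so implementations are assumed to be safe for
// concurrent use; the tests in this package substitute a simple in-memory
// AddrBookMock.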
-type AddrBook interface { - AddAddress(addr *NetAddress, src *NetAddress) error - AddPrivateIDs([]string) - AddOurAddress(*NetAddress) - OurAddress(*NetAddress) bool - MarkGood(types.NodeID) - RemoveAddress(*NetAddress) - HasAddress(*NetAddress) bool - Save() -} - -// ConnFilterFunc is a callback for connection filtering. If it returns an -// error, the connection is rejected. The set of existing connections is passed -// along with the new connection and all resolved IPs. -type ConnFilterFunc func(ConnSet, net.Conn, []net.IP) error - -// PeerFilterFunc to be implemented by filter hooks after a new Peer has been -// fully setup. -type PeerFilterFunc func(IPeerSet, Peer) error - -// ConnDuplicateIPFilter resolves and keeps all ips for an incoming connection -// and refuses new ones if they come from a known ip. -var ConnDuplicateIPFilter ConnFilterFunc = func(cs ConnSet, c net.Conn, ips []net.IP) error { - for _, ip := range ips { - if cs.HasIP(ip) { - return ErrRejected{ - conn: c, - err: fmt.Errorf("ip<%v> already connected", ip), - isDuplicate: true, - } - } - } - return nil -} - -//----------------------------------------------------------------------------- - -// Switch handles peer connections and exposes an API to receive incoming messages -// on `Reactors`. Each `Reactor` is responsible for handling incoming messages of one -// or more `Channels`. So while sending outgoing messages is typically performed on the peer, -// incoming messages are received on the reactor. -type Switch struct { - service.BaseService - - config *config.P2PConfig - reactors map[string]Reactor - chDescs []*conn.ChannelDescriptor - reactorsByCh map[byte]Reactor - peers *PeerSet - dialing *cmap.CMap - reconnecting *cmap.CMap - nodeInfo types.NodeInfo // our node info - nodeKey types.NodeKey // our node privkey - addrBook AddrBook - // peers addresses with whom we'll maintain constant connection - persistentPeersAddrs []*NetAddress - unconditionalPeerIDs map[types.NodeID]struct{} - - transport Transport - - filterTimeout time.Duration - peerFilters []PeerFilterFunc - connFilters []ConnFilterFunc - conns ConnSet - - metrics *Metrics -} - -// NetAddress returns the first address the switch is listening on, -// or nil if no addresses are found. -func (sw *Switch) NetAddress() *NetAddress { - endpoints := sw.transport.Endpoints() - if len(endpoints) == 0 { - return nil - } - return &NetAddress{ - ID: sw.nodeInfo.NodeID, - IP: endpoints[0].IP, - Port: endpoints[0].Port, - } -} - -// SwitchOption sets an optional parameter on the Switch. -type SwitchOption func(*Switch) - -// NewSwitch creates a new Switch with the given config. -func NewSwitch( - cfg *config.P2PConfig, - transport Transport, - options ...SwitchOption, -) *Switch { - sw := &Switch{ - config: cfg, - reactors: make(map[string]Reactor), - chDescs: make([]*conn.ChannelDescriptor, 0), - reactorsByCh: make(map[byte]Reactor), - peers: NewPeerSet(), - dialing: cmap.NewCMap(), - reconnecting: cmap.NewCMap(), - metrics: NopMetrics(), - transport: transport, - persistentPeersAddrs: make([]*NetAddress, 0), - unconditionalPeerIDs: make(map[types.NodeID]struct{}), - filterTimeout: defaultFilterTimeout, - conns: NewConnSet(), - } - - // Ensure PRNG is reseeded. - tmrand.Reseed() - - sw.BaseService = *service.NewBaseService(nil, "P2P Switch", sw) - - for _, option := range options { - option(sw) - } - - return sw -} - -// SwitchFilterTimeout sets the timeout used for peer filters. 
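// Options are applied by NewSwitch; an illustrative construction, mirroring
// the tests in this package:
//
//	sw := NewSwitch(cfg, transport,
//		SwitchFilterTimeout(5*time.Millisecond),
//		SwitchPeerFilters(filters...),
//	)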
-func SwitchFilterTimeout(timeout time.Duration) SwitchOption { - return func(sw *Switch) { sw.filterTimeout = timeout } -} - -// SwitchPeerFilters sets the filters for rejection of new peers. -func SwitchPeerFilters(filters ...PeerFilterFunc) SwitchOption { - return func(sw *Switch) { sw.peerFilters = filters } -} - -// SwitchConnFilters sets the filters for rejection of connections. -func SwitchConnFilters(filters ...ConnFilterFunc) SwitchOption { - return func(sw *Switch) { sw.connFilters = filters } -} - -// WithMetrics sets the metrics. -func WithMetrics(metrics *Metrics) SwitchOption { - return func(sw *Switch) { sw.metrics = metrics } -} - -//--------------------------------------------------------------------- -// Switch setup - -// AddReactor adds the given reactor to the switch. -// NOTE: Not goroutine safe. -func (sw *Switch) AddReactor(name string, reactor Reactor) Reactor { - for _, chDesc := range reactor.GetChannels() { - chID := chDesc.ID - // No two reactors can share the same channel. - if sw.reactorsByCh[chID] != nil { - panic(fmt.Sprintf("Channel %X has multiple reactors %v & %v", chID, sw.reactorsByCh[chID], reactor)) - } - sw.chDescs = append(sw.chDescs, chDesc) - sw.reactorsByCh[chID] = reactor - } - sw.reactors[name] = reactor - reactor.SetSwitch(sw) - return reactor -} - -// RemoveReactor removes the given Reactor from the Switch. -// NOTE: Not goroutine safe. -func (sw *Switch) RemoveReactor(name string, reactor Reactor) { - for _, chDesc := range reactor.GetChannels() { - // remove channel description - for i := 0; i < len(sw.chDescs); i++ { - if chDesc.ID == sw.chDescs[i].ID { - sw.chDescs = append(sw.chDescs[:i], sw.chDescs[i+1:]...) - break - } - } - delete(sw.reactorsByCh, chDesc.ID) - } - delete(sw.reactors, name) - reactor.SetSwitch(nil) -} - -// Reactors returns a map of reactors registered on the switch. -// NOTE: Not goroutine safe. -func (sw *Switch) Reactors() map[string]Reactor { - return sw.reactors -} - -// Reactor returns the reactor with the given name. -// NOTE: Not goroutine safe. -func (sw *Switch) Reactor(name string) Reactor { - return sw.reactors[name] -} - -// SetNodeInfo sets the switch's NodeInfo for checking compatibility and handshaking with other nodes. -// NOTE: Not goroutine safe. -func (sw *Switch) SetNodeInfo(nodeInfo types.NodeInfo) { - sw.nodeInfo = nodeInfo -} - -// NodeInfo returns the switch's NodeInfo. -// NOTE: Not goroutine safe. -func (sw *Switch) NodeInfo() types.NodeInfo { - return sw.nodeInfo -} - -// SetNodeKey sets the switch's private key for authenticated encryption. -// NOTE: Not goroutine safe. -func (sw *Switch) SetNodeKey(nodeKey types.NodeKey) { - sw.nodeKey = nodeKey -} - -//--------------------------------------------------------------------- -// Service start/stop - -// OnStart implements BaseService. It starts all the reactors and peers. -func (sw *Switch) OnStart() error { - - // FIXME: Temporary hack to pass channel descriptors to MConn transport, - // since they are not available when it is constructed. This will be - // fixed when we implement the new router abstraction. - if t, ok := sw.transport.(*MConnTransport); ok { - t.channelDescs = sw.chDescs - } - - // Start reactors - for _, reactor := range sw.reactors { - err := reactor.Start() - if err != nil { - return fmt.Errorf("failed to start %v: %w", reactor, err) - } - } - - // Start accepting Peers. - go sw.acceptRoutine() - - return nil -} - -// OnStop implements BaseService. It stops all peers and reactors. 
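// Typical lifecycle, as exercised by the tests below (a sketch, not a contract):
//
//	err := sw.Start() // starts reactors, then the accept routine
//	...
//	err = sw.Stop()   // stops peers first, then reactors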
-func (sw *Switch) OnStop() { - // Stop peers - for _, p := range sw.peers.List() { - sw.stopAndRemovePeer(p, nil) - } - - // Stop reactors - sw.Logger.Debug("Switch: Stopping reactors") - for _, reactor := range sw.reactors { - if err := reactor.Stop(); err != nil { - sw.Logger.Error("error while stopping reactor", "reactor", reactor, "error", err) - } - } -} - -//--------------------------------------------------------------------- -// Peers - -// Broadcast runs a go routine for each attempted send, which will block trying -// to send for defaultSendTimeoutSeconds. Returns a channel which receives -// success values for each attempted send (false if times out). Channel will be -// closed once msg bytes are sent to all peers (or time out). -// -// NOTE: Broadcast uses goroutines, so order of broadcast may not be preserved. -func (sw *Switch) Broadcast(chID byte, msgBytes []byte) chan bool { - sw.Logger.Debug("Broadcast", "channel", chID, "msgBytes", msgBytes) - - peers := sw.peers.List() - var wg sync.WaitGroup - wg.Add(len(peers)) - successChan := make(chan bool, len(peers)) - - for _, peer := range peers { - go func(p Peer) { - defer wg.Done() - success := p.Send(chID, msgBytes) - successChan <- success - }(peer) - } - - go func() { - wg.Wait() - close(successChan) - }() - - return successChan -} - -// NumPeers returns the count of outbound/inbound and outbound-dialing peers. -// unconditional peers are not counted here. -func (sw *Switch) NumPeers() (outbound, inbound, dialing int) { - peers := sw.peers.List() - for _, peer := range peers { - if peer.IsOutbound() { - if !sw.IsPeerUnconditional(peer.ID()) { - outbound++ - } - } else { - if !sw.IsPeerUnconditional(peer.ID()) { - inbound++ - } - } - } - dialing = sw.dialing.Size() - return -} - -func (sw *Switch) IsPeerUnconditional(id types.NodeID) bool { - _, ok := sw.unconditionalPeerIDs[id] - return ok -} - -// MaxNumOutboundPeers returns a maximum number of outbound peers. -func (sw *Switch) MaxNumOutboundPeers() int { - return sw.config.MaxNumOutboundPeers -} - -// Peers returns the set of peers that are connected to the switch. -func (sw *Switch) Peers() IPeerSet { - return sw.peers -} - -// StopPeerForError disconnects from a peer due to external error. -// If the peer is persistent, it will attempt to reconnect. -// TODO: make record depending on reason. -func (sw *Switch) StopPeerForError(peer Peer, reason interface{}) { - if !peer.IsRunning() { - return - } - - sw.Logger.Error("Stopping peer for error", "peer", peer, "err", reason) - sw.stopAndRemovePeer(peer, reason) - - if peer.IsPersistent() { - var addr *NetAddress - if peer.IsOutbound() { // socket address for outbound peers - addr = peer.SocketAddr() - } else { // self-reported address for inbound peers - var err error - addr, err = peer.NodeInfo().NetAddress() - if err != nil { - sw.Logger.Error("Wanted to reconnect to inbound peer, but self-reported address is wrong", - "peer", peer, "err", err) - return - } - } - go sw.reconnectToPeer(addr) - } -} - -// StopPeerGracefully disconnects from a peer gracefully. -// TODO: handle graceful disconnects. 
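// Illustrative call site (assumed caller-side code, not from this file):
//
//	if p := sw.Peers().Get(id); p != nil {
//		sw.StopPeerGracefully(p)
//	}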
-func (sw *Switch) StopPeerGracefully(peer Peer) { - sw.Logger.Info("Stopping peer gracefully") - sw.stopAndRemovePeer(peer, nil) -} - -func (sw *Switch) stopAndRemovePeer(peer Peer, reason interface{}) { - if err := peer.Stop(); err != nil { - sw.Logger.Error("error while stopping peer", "error", err) // TODO: should return error to be handled accordingly - } - - for _, reactor := range sw.reactors { - reactor.RemovePeer(peer, reason) - } - - // Removing a peer should go last to avoid a situation where a peer - // reconnect to our node and the switch calls InitPeer before - // RemovePeer is finished. - // https://github.com/tendermint/tendermint/issues/3338 - if sw.peers.Remove(peer) { - sw.metrics.Peers.Add(float64(-1)) - } - - sw.conns.RemoveAddr(peer.RemoteAddr()) -} - -// reconnectToPeer tries to reconnect to the addr, first repeatedly -// with a fixed interval, then with exponential backoff. -// If no success after all that, it stops trying, and leaves it -// to the PEX/Addrbook to find the peer with the addr again -// NOTE: this will keep trying even if the handshake or auth fails. -// TODO: be more explicit with error types so we only retry on certain failures -// - ie. if we're getting ErrDuplicatePeer we can stop -// because the addrbook got us the peer back already -func (sw *Switch) reconnectToPeer(addr *NetAddress) { - if _, exists := sw.reconnecting.GetOrSet(string(addr.ID), addr); exists { - return - } - defer sw.reconnecting.Delete(string(addr.ID)) - - start := time.Now() - sw.Logger.Info("Reconnecting to peer", "addr", addr) - for i := 0; i < reconnectAttempts; i++ { - if !sw.IsRunning() { - return - } - - err := sw.DialPeerWithAddress(addr) - if err == nil { - return // success - } else if _, ok := err.(ErrCurrentlyDialingOrExistingAddress); ok { - return - } - - sw.Logger.Info("Error reconnecting to peer. Trying again", "tries", i, "err", err, "addr", addr) - // sleep a set amount - sw.randomSleep(reconnectInterval) - continue - } - - sw.Logger.Error("Failed to reconnect to peer. Beginning exponential backoff", - "addr", addr, "elapsed", time.Since(start)) - for i := 0; i < reconnectBackOffAttempts; i++ { - if !sw.IsRunning() { - return - } - - // sleep an exponentially increasing amount - sleepIntervalSeconds := math.Pow(reconnectBackOffBaseSeconds, float64(i)) - sw.randomSleep(time.Duration(sleepIntervalSeconds) * time.Second) - - err := sw.DialPeerWithAddress(addr) - if err == nil { - return // success - } else if _, ok := err.(ErrCurrentlyDialingOrExistingAddress); ok { - return - } - sw.Logger.Info("Error reconnecting to peer. Trying again", "tries", i, "err", err, "addr", addr) - } - sw.Logger.Error("Failed to reconnect to peer. Giving up", "addr", addr, "elapsed", time.Since(start)) -} - -// SetAddrBook allows to set address book on Switch. -func (sw *Switch) SetAddrBook(addrBook AddrBook) { - sw.addrBook = addrBook -} - -// MarkPeerAsGood marks the given peer as good when it did something useful -// like contributed to consensus. -func (sw *Switch) MarkPeerAsGood(peer Peer) { - if sw.addrBook != nil { - sw.addrBook.MarkGood(peer.ID()) - } -} - -//--------------------------------------------------------------------- -// Dialing - -type privateAddr interface { - PrivateAddr() bool -} - -func isPrivateAddr(err error) bool { - te, ok := err.(privateAddr) - return ok && te.PrivateAddr() -} - -// DialPeersAsync dials a list of peers asynchronously in random order. -// Used to dial peers from config on startup or from unsafe-RPC (trusted sources). 
-// It ignores ErrNetAddressLookup. However, if there are other errors, first -// encounter is returned. -// Nop if there are no peers. -func (sw *Switch) DialPeersAsync(peers []string) error { - netAddrs, errs := NewNetAddressStrings(peers) - // report all the errors - for _, err := range errs { - sw.Logger.Error("Error in peer's address", "err", err) - } - // return first non-ErrNetAddressLookup error - for _, err := range errs { - if _, ok := err.(types.ErrNetAddressLookup); ok { - continue - } - return err - } - sw.dialPeersAsync(netAddrs) - return nil -} - -func (sw *Switch) dialPeersAsync(netAddrs []*NetAddress) { - ourAddr := sw.NetAddress() - - // TODO: this code feels like it's in the wrong place. - // The integration tests depend on the addrBook being saved - // right away but maybe we can change that. Recall that - // the addrBook is only written to disk every 2min - if sw.addrBook != nil { - // add peers to `addrBook` - for _, netAddr := range netAddrs { - // do not add our address or ID - if !netAddr.Same(ourAddr) { - if err := sw.addrBook.AddAddress(netAddr, ourAddr); err != nil { - if isPrivateAddr(err) { - sw.Logger.Debug("Won't add peer's address to addrbook", "err", err) - } else { - sw.Logger.Error("Can't add peer's address to addrbook", "err", err) - } - } - } - } - // Persist some peers to disk right away. - // NOTE: integration tests depend on this - sw.addrBook.Save() - } - - // permute the list, dial them in random order. - perm := mrand.Perm(len(netAddrs)) - for i := 0; i < len(perm); i++ { - go func(i int) { - j := perm[i] - addr := netAddrs[j] - - if addr.Same(ourAddr) { - sw.Logger.Debug("Ignore attempt to connect to ourselves", "addr", addr, "ourAddr", ourAddr) - return - } - - sw.randomSleep(0) - - err := sw.DialPeerWithAddress(addr) - if err != nil { - switch err.(type) { - case ErrSwitchConnectToSelf, ErrSwitchDuplicatePeerID, ErrCurrentlyDialingOrExistingAddress: - sw.Logger.Debug("Error dialing peer", "err", err) - default: - sw.Logger.Error("Error dialing peer", "err", err) - } - } - }(i) - } -} - -// DialPeerWithAddress dials the given peer and runs sw.addPeer if it connects -// and authenticates successfully. -// If we're currently dialing this address or it belongs to an existing peer, -// ErrCurrentlyDialingOrExistingAddress is returned. -func (sw *Switch) DialPeerWithAddress(addr *NetAddress) error { - if sw.IsDialingOrExistingAddress(addr) { - return ErrCurrentlyDialingOrExistingAddress{addr.String()} - } - - sw.dialing.Set(string(addr.ID), addr) - defer sw.dialing.Delete(string(addr.ID)) - - return sw.addOutboundPeerWithConfig(addr, sw.config) -} - -// sleep for interval plus some random amount of ms on [0, dialRandomizerIntervalMilliseconds] -func (sw *Switch) randomSleep(interval time.Duration) { - // nolint:gosec // G404: Use of weak random number generator - r := time.Duration(mrand.Int63n(dialRandomizerIntervalMilliseconds)) * time.Millisecond - time.Sleep(r + interval) -} - -// IsDialingOrExistingAddress returns true if switch has a peer with the given -// address or dialing it at the moment. -func (sw *Switch) IsDialingOrExistingAddress(addr *NetAddress) bool { - return sw.dialing.Has(string(addr.ID)) || - sw.peers.Has(addr.ID) || - (!sw.config.AllowDuplicateIP && sw.peers.HasIP(addr.IP)) -} - -// AddPersistentPeers allows you to set persistent peers. It ignores -// ErrNetAddressLookup. However, if there are other errors, first encounter is -// returned. 
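// Example from the tests below (addresses use the id@host:port form):
//
//	err := sw.AddPersistentPeers([]string{rp.Addr().String()})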
-func (sw *Switch) AddPersistentPeers(addrs []string) error { - sw.Logger.Info("Adding persistent peers", "addrs", addrs) - netAddrs, errs := NewNetAddressStrings(addrs) - // report all the errors - for _, err := range errs { - sw.Logger.Error("Error in peer's address", "err", err) - } - // return first non-ErrNetAddressLookup error - for _, err := range errs { - if _, ok := err.(types.ErrNetAddressLookup); ok { - continue - } - return err - } - sw.persistentPeersAddrs = netAddrs - return nil -} - -func (sw *Switch) AddUnconditionalPeerIDs(ids []string) error { - sw.Logger.Info("Adding unconditional peer ids", "ids", ids) - for i, id := range ids { - err := types.NodeID(id).Validate() - if err != nil { - return fmt.Errorf("wrong ID #%d: %w", i, err) - } - sw.unconditionalPeerIDs[types.NodeID(id)] = struct{}{} - } - return nil -} - -func (sw *Switch) AddPrivatePeerIDs(ids []string) error { - validIDs := make([]string, 0, len(ids)) - for i, id := range ids { - err := types.NodeID(id).Validate() - if err != nil { - return fmt.Errorf("wrong ID #%d: %w", i, err) - } - validIDs = append(validIDs, id) - } - - sw.addrBook.AddPrivateIDs(validIDs) - - return nil -} - -func (sw *Switch) IsPeerPersistent(na *NetAddress) bool { - for _, pa := range sw.persistentPeersAddrs { - if pa.Equals(na) { - return true - } - } - return false -} - -func (sw *Switch) acceptRoutine() { - for { - var peerNodeInfo types.NodeInfo - c, err := sw.transport.Accept() - if err == nil { - // NOTE: The legacy MConn transport did handshaking in Accept(), - // which was asynchronous and avoided head-of-line-blocking. - // However, as handshakes are being migrated out from the transport, - // we just do it synchronously here for now. - peerNodeInfo, _, err = sw.handshakePeer(c, "") - } - if err == nil { - err = sw.filterConn(c.(*mConnConnection).conn) - } - if err != nil { - if c != nil { - _ = c.Close() - } - if err == io.EOF { - err = ErrTransportClosed{} - } - switch err := err.(type) { - case ErrRejected: - addr := err.Addr() - if err.IsSelf() { - // Remove the given address from the address book and add to our addresses - // to avoid dialing in the future. - sw.addrBook.RemoveAddress(&addr) - sw.addrBook.AddOurAddress(&addr) - } - if err.IsIncompatible() { - sw.addrBook.RemoveAddress(&addr) - } - - sw.Logger.Info( - "Inbound Peer rejected", - "err", err, - "numPeers", sw.peers.Size(), - ) - - continue - case ErrFilterTimeout: - sw.Logger.Error( - "Peer filter timed out", - "err", err, - ) - - continue - case ErrTransportClosed: - sw.Logger.Error( - "Stopped accept routine, as transport is closed", - "numPeers", sw.peers.Size(), - ) - default: - sw.Logger.Error( - "Accept on transport errored", - "err", err, - "numPeers", sw.peers.Size(), - ) - // We could instead have a retry loop around the acceptRoutine, - // but that would need to stop and let the node shutdown eventually. - // So might as well panic and let process managers restart the node. - // There's no point in letting the node run without the acceptRoutine, - // since it won't be able to accept new connections. - panic(fmt.Errorf("accept routine exited: %v", err)) - } - - break - } - - isPersistent := false - addr, err := peerNodeInfo.NetAddress() - if err == nil { - isPersistent = sw.IsPeerPersistent(addr) - } - - p := newPeer( - peerNodeInfo, - newPeerConn(false, isPersistent, c), - sw.reactorsByCh, - sw.StopPeerForError, - PeerMetrics(sw.metrics), - ) - - if !sw.IsPeerUnconditional(p.NodeInfo().ID()) { - // Ignore connection if we already have enough peers. 
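   // (NumPeers excludes unconditional peers, so the inbound limit below
   // applies only to ordinary peers.)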
- _, in, _ := sw.NumPeers() - if in >= sw.config.MaxNumInboundPeers { - sw.Logger.Info( - "Ignoring inbound connection: already have enough inbound peers", - "address", p.SocketAddr(), - "have", in, - "max", sw.config.MaxNumInboundPeers, - ) - _ = p.CloseConn() - continue - } - - } - - if err := sw.addPeer(p); err != nil { - _ = p.CloseConn() - if p.IsRunning() { - _ = p.Stop() - } - sw.conns.RemoveAddr(p.RemoteAddr()) - sw.Logger.Info( - "Ignoring inbound connection: error while adding peer", - "err", err, - "id", p.ID(), - ) - } - } -} - -// dial the peer; make secret connection; authenticate against the dialed ID; -// add the peer. -// if dialing fails, start the reconnect loop. If handshake fails, it's over. -// If peer is started successfully, reconnectLoop will start when -// StopPeerForError is called. -func (sw *Switch) addOutboundPeerWithConfig( - addr *NetAddress, - cfg *config.P2PConfig, -) error { - sw.Logger.Info("Dialing peer", "address", addr) - - // XXX(xla): Remove the leakage of test concerns in implementation. - if cfg.TestDialFail { - go sw.reconnectToPeer(addr) - return fmt.Errorf("dial err (peerConfig.DialFail == true)") - } - - // Hardcoded timeout moved from MConn transport during refactoring. - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - var peerNodeInfo types.NodeInfo - c, err := sw.transport.Dial(ctx, Endpoint{ - Protocol: MConnProtocol, - IP: addr.IP, - Port: addr.Port, - }) - if err == nil { - peerNodeInfo, _, err = sw.handshakePeer(c, addr.ID) - } - if err == nil { - err = sw.filterConn(c.(*mConnConnection).conn) - } - if err != nil { - if c != nil { - _ = c.Close() - } - if e, ok := err.(ErrRejected); ok { - if e.IsSelf() { - // Remove the given address from the address book and add to our addresses - // to avoid dialing in the future. - sw.addrBook.RemoveAddress(addr) - sw.addrBook.AddOurAddress(addr) - } - if e.IsIncompatible() { - sw.addrBook.RemoveAddress(addr) - } - - return err - } - - // retry persistent peers after - // any dial error besides IsSelf() - if sw.IsPeerPersistent(addr) { - go sw.reconnectToPeer(addr) - } - - return err - } - - p := newPeer( - peerNodeInfo, - newPeerConn(true, sw.IsPeerPersistent(addr), c), - sw.reactorsByCh, - sw.StopPeerForError, - PeerMetrics(sw.metrics), - ) - - if err := sw.addPeer(p); err != nil { - _ = p.CloseConn() - if p.IsRunning() { - _ = p.Stop() - } - sw.conns.RemoveAddr(p.RemoteAddr()) - return err - } - - return nil -} - -func (sw *Switch) handshakePeer( - c Connection, - expectPeerID types.NodeID, -) (types.NodeInfo, crypto.PubKey, error) { - // Moved from transport and hardcoded until legacy P2P stack removal. - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - peerInfo, peerKey, err := c.Handshake(ctx, sw.nodeInfo, sw.nodeKey.PrivKey) - if err != nil { - return peerInfo, peerKey, ErrRejected{ - conn: c.(*mConnConnection).conn, - err: fmt.Errorf("handshake failed: %v", err), - isAuthFailure: true, - } - } - - if err = peerInfo.Validate(); err != nil { - return peerInfo, peerKey, ErrRejected{ - conn: c.(*mConnConnection).conn, - err: err, - isNodeInfoInvalid: true, - } - } - - // For outgoing conns, ensure connection key matches dialed key. 
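  // A mismatch is surfaced as an authentication failure via ErrRejected
  // (isAuthFailure is set below).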
- if expectPeerID != "" { - peerID := types.NodeIDFromPubKey(peerKey) - if expectPeerID != peerID { - return peerInfo, peerKey, ErrRejected{ - conn: c.(*mConnConnection).conn, - id: peerID, - err: fmt.Errorf( - "conn.ID (%v) dialed ID (%v) mismatch", - peerID, - expectPeerID, - ), - isAuthFailure: true, - } - } - } - - if sw.nodeInfo.ID() == peerInfo.ID() { - return peerInfo, peerKey, ErrRejected{ - addr: *types.NewNetAddress(peerInfo.ID(), c.(*mConnConnection).conn.RemoteAddr()), - conn: c.(*mConnConnection).conn, - id: peerInfo.ID(), - isSelf: true, - } - } - - if err = sw.nodeInfo.CompatibleWith(peerInfo); err != nil { - return peerInfo, peerKey, ErrRejected{ - conn: c.(*mConnConnection).conn, - err: err, - id: peerInfo.ID(), - isIncompatible: true, - } - } - - return peerInfo, peerKey, nil -} - -func (sw *Switch) filterPeer(p Peer) error { - // Avoid duplicate - if sw.peers.Has(p.ID()) { - return ErrRejected{id: p.ID(), isDuplicate: true} - } - - errc := make(chan error, len(sw.peerFilters)) - - for _, f := range sw.peerFilters { - go func(f PeerFilterFunc, p Peer, errc chan<- error) { - errc <- f(sw.peers, p) - }(f, p, errc) - } - - for i := 0; i < cap(errc); i++ { - select { - case err := <-errc: - if err != nil { - return ErrRejected{id: p.ID(), err: err, isFiltered: true} - } - case <-time.After(sw.filterTimeout): - return ErrFilterTimeout{} - } - } - - return nil -} - -// filterConn filters a connection, rejecting it if this function errors. -// -// FIXME: This is only here for compatibility with the current Switch code. In -// the new P2P stack, peer/connection filtering should be moved into the Router -// or PeerManager and removed from here. -func (sw *Switch) filterConn(conn net.Conn) error { - if sw.conns.Has(conn) { - return ErrRejected{conn: conn, isDuplicate: true} - } - - host, _, err := net.SplitHostPort(conn.RemoteAddr().String()) - if err != nil { - return err - } - ip := net.ParseIP(host) - if ip == nil { - return fmt.Errorf("connection address has invalid IP address %q", host) - } - - // Apply filter callbacks. - chErr := make(chan error, len(sw.connFilters)) - for _, connFilter := range sw.connFilters { - go func(connFilter ConnFilterFunc) { - chErr <- connFilter(sw.conns, conn, []net.IP{ip}) - }(connFilter) - } - - for i := 0; i < cap(chErr); i++ { - select { - case err := <-chErr: - if err != nil { - return ErrRejected{conn: conn, err: err, isFiltered: true} - } - case <-time.After(sw.filterTimeout): - return ErrFilterTimeout{} - } - - } - - // FIXME: Doesn't really make sense to set this here, but we preserve the - // behavior from the previous P2P transport implementation. - sw.conns.Set(conn, []net.IP{ip}) - return nil -} - -// addPeer starts up the Peer and adds it to the Switch. Error is returned if -// the peer is filtered out or failed to start or can't be added. -func (sw *Switch) addPeer(p Peer) error { - if err := sw.filterPeer(p); err != nil { - return err - } - - p.SetLogger(sw.Logger.With("peer", p.SocketAddr())) - - // Handle the shut down case where the switch has stopped but we're - // concurrently trying to add a peer. - if !sw.IsRunning() { - // XXX should this return an error or just log and terminate? - sw.Logger.Error("Won't start a peer - switch is not running", "peer", p) - return nil - } - - // Add some data to the peer, which is required by reactors. - for _, reactor := range sw.reactors { - p = reactor.InitPeer(p) - } - - // Start the peer's send/recv routines. 
- // Must start it before adding it to the peer set - // to prevent Start and Stop from being called concurrently. - err := p.Start() - if err != nil { - // Should never happen - sw.Logger.Error("Error starting peer", "err", err, "peer", p) - return err - } - - // Add the peer to PeerSet. Do this before starting the reactors - // so that if Receive errors, we will find the peer and remove it. - // Add should not err since we already checked peers.Has(). - if err := sw.peers.Add(p); err != nil { - return err - } - sw.metrics.Peers.Add(float64(1)) - - // Start all the reactor protocols on the peer. - for _, reactor := range sw.reactors { - reactor.AddPeer(p) - } - - sw.Logger.Info("Added peer", "peer", p) - - return nil -} - -// NewNetAddressStrings returns an array of NetAddress'es build using -// the provided strings. -func NewNetAddressStrings(addrs []string) ([]*NetAddress, []error) { - netAddrs := make([]*NetAddress, 0) - errs := make([]error, 0) - for _, addr := range addrs { - netAddr, err := types.NewNetAddressString(addr) - if err != nil { - errs = append(errs, err) - } else { - netAddrs = append(netAddrs, netAddr) - } - } - return netAddrs, errs -} diff --git a/internal/p2p/switch_test.go b/internal/p2p/switch_test.go deleted file mode 100644 index e4be67324c..0000000000 --- a/internal/p2p/switch_test.go +++ /dev/null @@ -1,937 +0,0 @@ -package p2p - -import ( - "bytes" - "context" - "errors" - "fmt" - "io/ioutil" - "net" - "net/http" - "net/http/httptest" - "regexp" - "strconv" - "sync/atomic" - "testing" - "time" - - "github.com/prometheus/client_golang/prometheus/promhttp" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/crypto/ed25519" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/p2p/conn" - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/types" -) - -var ( - cfg *config.P2PConfig - ctx = context.Background() -) - -func init() { - cfg = config.DefaultP2PConfig() - cfg.PexReactor = true - cfg.AllowDuplicateIP = true -} - -type PeerMessage struct { - PeerID types.NodeID - Bytes []byte - Counter int -} - -type TestReactor struct { - BaseReactor - - mtx tmsync.Mutex - channels []*conn.ChannelDescriptor - logMessages bool - msgsCounter int - msgsReceived map[byte][]PeerMessage -} - -func NewTestReactor(channels []*conn.ChannelDescriptor, logMessages bool) *TestReactor { - tr := &TestReactor{ - channels: channels, - logMessages: logMessages, - msgsReceived: make(map[byte][]PeerMessage), - } - tr.BaseReactor = *NewBaseReactor("TestReactor", tr) - tr.SetLogger(log.TestingLogger()) - return tr -} - -func (tr *TestReactor) GetChannels() []*conn.ChannelDescriptor { - return tr.channels -} - -func (tr *TestReactor) AddPeer(peer Peer) {} - -func (tr *TestReactor) RemovePeer(peer Peer, reason interface{}) {} - -func (tr *TestReactor) Receive(chID byte, peer Peer, msgBytes []byte) { - if tr.logMessages { - tr.mtx.Lock() - defer tr.mtx.Unlock() - // fmt.Printf("Received: %X, %X\n", chID, msgBytes) - tr.msgsReceived[chID] = append(tr.msgsReceived[chID], PeerMessage{peer.ID(), msgBytes, tr.msgsCounter}) - tr.msgsCounter++ - } -} - -func (tr *TestReactor) getMsgs(chID byte) []PeerMessage { - tr.mtx.Lock() - defer tr.mtx.Unlock() - return tr.msgsReceived[chID] -} - -//----------------------------------------------------------------------------- - -// 
convenience method for creating two switches connected to each other. -// XXX: note this uses net.Pipe and not a proper TCP conn -func MakeSwitchPair(t testing.TB, initSwitch func(int, *Switch) *Switch) (*Switch, *Switch) { - // Create two switches that will be interconnected. - nodeProTxHashes := make([]crypto.ProTxHash, 2) - switches := MakeConnectedSwitches(cfg, nodeProTxHashes, initSwitch, Connect2Switches) - return switches[0], switches[1] -} - -func initSwitchFunc(i int, sw *Switch) *Switch { - sw.SetAddrBook(&AddrBookMock{ - Addrs: make(map[string]struct{}), - OurAddrs: make(map[string]struct{})}) - - // Make two reactors of two channels each - sw.AddReactor("foo", NewTestReactor([]*conn.ChannelDescriptor{ - {ID: byte(0x00), Priority: 10}, - {ID: byte(0x01), Priority: 10}, - }, true)) - sw.AddReactor("bar", NewTestReactor([]*conn.ChannelDescriptor{ - {ID: byte(0x02), Priority: 10}, - {ID: byte(0x03), Priority: 10}, - }, true)) - - return sw -} - -func TestSwitches(t *testing.T) { - s1, s2 := MakeSwitchPair(t, initSwitchFunc) - t.Cleanup(func() { - if err := s1.Stop(); err != nil { - t.Error(err) - } - }) - t.Cleanup(func() { - if err := s2.Stop(); err != nil { - t.Error(err) - } - }) - - if s1.Peers().Size() != 1 { - t.Errorf("expected exactly 1 peer in s1, got %v", s1.Peers().Size()) - } - if s2.Peers().Size() != 1 { - t.Errorf("expected exactly 1 peer in s2, got %v", s2.Peers().Size()) - } - - // Lets send some messages - ch0Msg := []byte("channel zero") - ch1Msg := []byte("channel foo") - ch2Msg := []byte("channel bar") - - s1.Broadcast(byte(0x00), ch0Msg) - s1.Broadcast(byte(0x01), ch1Msg) - s1.Broadcast(byte(0x02), ch2Msg) - - assertMsgReceivedWithTimeout(t, - ch0Msg, - byte(0x00), - s2.Reactor("foo").(*TestReactor), 10*time.Millisecond, 5*time.Second) - assertMsgReceivedWithTimeout(t, - ch1Msg, - byte(0x01), - s2.Reactor("foo").(*TestReactor), 10*time.Millisecond, 5*time.Second) - assertMsgReceivedWithTimeout(t, - ch2Msg, - byte(0x02), - s2.Reactor("bar").(*TestReactor), 10*time.Millisecond, 5*time.Second) -} - -func assertMsgReceivedWithTimeout( - t *testing.T, - msgBytes []byte, - channel byte, - reactor *TestReactor, - checkPeriod, - timeout time.Duration, -) { - ticker := time.NewTicker(checkPeriod) - for { - select { - case <-ticker.C: - msgs := reactor.getMsgs(channel) - if len(msgs) > 0 { - if !bytes.Equal(msgs[0].Bytes, msgBytes) { - t.Fatalf("Unexpected message bytes. 
Wanted: %X, Got: %X", msgBytes, msgs[0].Bytes) - } - return - } - - case <-time.After(timeout): - t.Fatalf("Expected to have received 1 message in channel #%v, got zero", channel) - } - } -} - -func TestSwitchFiltersOutItself(t *testing.T) { - s1 := MakeSwitch(cfg, 1, "127.0.0.1", "123.123.123", nil, initSwitchFunc, log.TestingLogger()) - - // simulate s1 having a public IP by creating a remote peer with the same ID - rp := &remotePeer{PrivKey: s1.nodeKey.PrivKey, Config: cfg} - rp.Start() - - // addr should be rejected in addPeer based on the same ID - err := s1.DialPeerWithAddress(rp.Addr()) - if assert.Error(t, err) { - if err, ok := err.(ErrRejected); ok { - if !err.IsSelf() { - t.Errorf("expected self to be rejected") - } - } else { - t.Errorf("expected ErrRejected") - } - } - - assert.True(t, s1.addrBook.OurAddress(rp.Addr())) - assert.False(t, s1.addrBook.HasAddress(rp.Addr())) - - rp.Stop() - - assertNoPeersAfterTimeout(t, s1, 100*time.Millisecond) -} - -func TestSwitchDialFailsOnIncompatiblePeer(t *testing.T) { - s1 := MakeSwitch(cfg, 1, "127.0.0.1", "123.123.123", nil, initSwitchFunc, log.TestingLogger()) - ni := s1.NodeInfo() - ni.Network = "network-a" - s1.SetNodeInfo(ni) - - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg, Network: "network-b"} - rp.Start() - defer rp.Stop() - - err := s1.DialPeerWithAddress(rp.Addr()) - require.Error(t, err) - errRejected, ok := err.(ErrRejected) - require.True(t, ok, "expected error to be of type IsRejected") - require.True(t, errRejected.IsIncompatible(), "expected error to be IsIncompatible") - - // remote peer should not have been added to the addressbook - require.False(t, s1.addrBook.HasAddress(rp.Addr())) -} - -func TestSwitchPeerFilter(t *testing.T) { - var ( - filters = []PeerFilterFunc{ - func(_ IPeerSet, _ Peer) error { return nil }, - func(_ IPeerSet, _ Peer) error { return fmt.Errorf("denied") }, - func(_ IPeerSet, _ Peer) error { return nil }, - } - sw = MakeSwitch( - cfg, - 1, - "testing", - "123.123.123", - nil, - initSwitchFunc, - log.TestingLogger(), - SwitchPeerFilters(filters...), - ) - ) - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - - // simulate remote peer - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - rp.Start() - t.Cleanup(rp.Stop) - - c, err := sw.transport.Dial(ctx, NewEndpoint(rp.Addr())) - if err != nil { - t.Fatal(err) - } - peerInfo, _, err := c.Handshake(ctx, sw.nodeInfo, sw.nodeKey.PrivKey) - if err != nil { - t.Fatal(err) - } - p := newPeer( - peerInfo, - newPeerConn(true, false, c), - sw.reactorsByCh, - sw.StopPeerForError, - ) - - err = sw.addPeer(p) - if err, ok := err.(ErrRejected); ok { - if !err.IsFiltered() { - t.Errorf("expected peer to be filtered") - } - } else { - t.Errorf("expected ErrRejected") - } -} - -func TestSwitchPeerFilterTimeout(t *testing.T) { - var ( - filters = []PeerFilterFunc{ - func(_ IPeerSet, _ Peer) error { - time.Sleep(10 * time.Millisecond) - return nil - }, - } - sw = MakeSwitch( - cfg, - 1, - "testing", - "123.123.123", - nil, - initSwitchFunc, - log.TestingLogger(), - SwitchFilterTimeout(5*time.Millisecond), - SwitchPeerFilters(filters...), - ) - ) - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Log(err) - } - }) - - // simulate remote peer - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - rp.Start() - defer rp.Stop() - - c, err := sw.transport.Dial(ctx, NewEndpoint(rp.Addr())) - if err 
!= nil { - t.Fatal(err) - } - peerInfo, _, err := c.Handshake(ctx, sw.nodeInfo, sw.nodeKey.PrivKey) - if err != nil { - t.Fatal(err) - } - p := newPeer( - peerInfo, - newPeerConn(true, false, c), - sw.reactorsByCh, - sw.StopPeerForError, - ) - - err = sw.addPeer(p) - if _, ok := err.(ErrFilterTimeout); !ok { - t.Errorf("expected ErrFilterTimeout") - } -} - -func TestSwitchPeerFilterDuplicate(t *testing.T) { - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", nil, initSwitchFunc, log.TestingLogger()) - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - - // simulate remote peer - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - rp.Start() - defer rp.Stop() - - c, err := sw.transport.Dial(ctx, NewEndpoint(rp.Addr())) - if err != nil { - t.Fatal(err) - } - peerInfo, _, err := c.Handshake(ctx, sw.nodeInfo, sw.nodeKey.PrivKey) - if err != nil { - t.Fatal(err) - } - p := newPeer( - peerInfo, - newPeerConn(true, false, c), - sw.reactorsByCh, - sw.StopPeerForError, - ) - - if err := sw.addPeer(p); err != nil { - t.Fatal(err) - } - - err = sw.addPeer(p) - if errRej, ok := err.(ErrRejected); ok { - if !errRej.IsDuplicate() { - t.Errorf("expected peer to be duplicate. got %v", errRej) - } - } else { - t.Errorf("expected ErrRejected, got %v", err) - } -} - -func assertNoPeersAfterTimeout(t *testing.T, sw *Switch, timeout time.Duration) { - time.Sleep(timeout) - if sw.Peers().Size() != 0 { - t.Fatalf("Expected %v to not connect to some peers, got %d", sw, sw.Peers().Size()) - } -} - -func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) { - assert, require := assert.New(t), require.New(t) - - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", nil, initSwitchFunc, log.TestingLogger()) - err := sw.Start() - if err != nil { - t.Error(err) - } - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - - // simulate remote peer - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - rp.Start() - defer rp.Stop() - - c, err := sw.transport.Dial(ctx, NewEndpoint(rp.Addr())) - if err != nil { - t.Fatal(err) - } - peerInfo, _, err := c.Handshake(ctx, sw.nodeInfo, sw.nodeKey.PrivKey) - if err != nil { - t.Fatal(err) - } - p := newPeer( - peerInfo, - newPeerConn(true, false, c), - sw.reactorsByCh, - sw.StopPeerForError, - ) - - err = sw.addPeer(p) - require.Nil(err) - - require.NotNil(sw.Peers().Get(rp.ID())) - - // simulate failure by closing connection - err = p.CloseConn() - require.NoError(err) - - assertNoPeersAfterTimeout(t, sw, 100*time.Millisecond) - assert.False(p.IsRunning()) -} - -func TestSwitchStopPeerForError(t *testing.T) { - s := httptest.NewServer(promhttp.Handler()) - defer s.Close() - - scrapeMetrics := func() string { - resp, err := http.Get(s.URL) - require.NoError(t, err) - defer resp.Body.Close() - buf, _ := ioutil.ReadAll(resp.Body) - return string(buf) - } - - namespace, subsystem, name := config.TestInstrumentationConfig().Namespace, MetricsSubsystem, "peers" - re := regexp.MustCompile(namespace + `_` + subsystem + `_` + name + ` ([0-9\.]+)`) - peersMetricValue := func() float64 { - matches := re.FindStringSubmatch(scrapeMetrics()) - f, _ := strconv.ParseFloat(matches[1], 64) - return f - } - - p2pMetrics := PrometheusMetrics(namespace) - - // make two connected switches - sw1, sw2 := MakeSwitchPair(t, func(i int, sw *Switch) *Switch { - // set metrics on sw1 - if i == 0 { - opt := WithMetrics(p2pMetrics) - opt(sw) - } - return initSwitchFunc(i, sw) - }) - - 
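  // Both the peer set and the Prometheus gauge should report one peer here,
  // and zero after StopPeerForError fires.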
assert.Equal(t, len(sw1.Peers().List()), 1) - assert.EqualValues(t, 1, peersMetricValue()) - - // send messages to the peer from sw1 - p := sw1.Peers().List()[0] - p.Send(0x1, []byte("here's a message to send")) - - // stop sw2. this should cause the p to fail, - // which results in calling StopPeerForError internally - t.Cleanup(func() { - if err := sw2.Stop(); err != nil { - t.Error(err) - } - }) - - // now call StopPeerForError explicitly, eg. from a reactor - sw1.StopPeerForError(p, fmt.Errorf("some err")) - - assert.Equal(t, len(sw1.Peers().List()), 0) - assert.EqualValues(t, 0, peersMetricValue()) -} - -func TestSwitchReconnectsToOutboundPersistentPeer(t *testing.T) { - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", nil, initSwitchFunc, log.TestingLogger()) - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - - // 1. simulate failure by closing connection - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - rp.Start() - defer rp.Stop() - - err = sw.AddPersistentPeers([]string{rp.Addr().String()}) - require.NoError(t, err) - - err = sw.DialPeerWithAddress(rp.Addr()) - require.Nil(t, err) - require.NotNil(t, sw.Peers().Get(rp.ID())) - - p := sw.Peers().List()[0] - err = p.(*peer).CloseConn() - require.NoError(t, err) - - waitUntilSwitchHasAtLeastNPeers(sw, 1) - assert.False(t, p.IsRunning()) // old peer instance - assert.Equal(t, 1, sw.Peers().Size()) // new peer instance - - // 2. simulate first time dial failure - rp = &remotePeer{ - PrivKey: ed25519.GenPrivKey(), - Config: cfg, - // Use different interface to prevent duplicate IP filter, this will break - // beyond two peers. - listenAddr: "127.0.0.1:0", - } - rp.Start() - defer rp.Stop() - - conf := config.DefaultP2PConfig() - conf.TestDialFail = true // will trigger a reconnect - err = sw.addOutboundPeerWithConfig(rp.Addr(), conf) - require.NotNil(t, err) - // DialPeerWithAddres - sw.peerConfig resets the dialer - waitUntilSwitchHasAtLeastNPeers(sw, 2) - assert.Equal(t, 2, sw.Peers().Size()) -} - -func TestSwitchReconnectsToInboundPersistentPeer(t *testing.T) { - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", nil, initSwitchFunc, log.TestingLogger()) - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - - // 1. 
simulate failure by closing the connection - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - rp.Start() - defer rp.Stop() - - err = sw.AddPersistentPeers([]string{rp.Addr().String()}) - require.NoError(t, err) - - conn, err := rp.Dial(sw.NetAddress()) - require.NoError(t, err) - time.Sleep(50 * time.Millisecond) - require.NotNil(t, sw.Peers().Get(rp.ID())) - - conn.Close() - - waitUntilSwitchHasAtLeastNPeers(sw, 1) - assert.Equal(t, 1, sw.Peers().Size()) -} - -func TestSwitchDialPeersAsync(t *testing.T) { - if testing.Short() { - return - } - - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", nil, initSwitchFunc, log.TestingLogger()) - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - rp.Start() - defer rp.Stop() - - err = sw.DialPeersAsync([]string{rp.Addr().String()}) - require.NoError(t, err) - time.Sleep(dialRandomizerIntervalMilliseconds * time.Millisecond) - require.NotNil(t, sw.Peers().Get(rp.ID())) -} - -func waitUntilSwitchHasAtLeastNPeers(sw *Switch, n int) { - for i := 0; i < 20; i++ { - time.Sleep(250 * time.Millisecond) - has := sw.Peers().Size() - if has >= n { - break - } - } -} - -func TestSwitchFullConnectivity(t *testing.T) { - nodeProTxHashes := make([]crypto.ProTxHash, 3) - switches := MakeConnectedSwitches(cfg, nodeProTxHashes, initSwitchFunc, Connect2Switches) - defer func() { - for _, sw := range switches { - sw := sw - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - } - }() - - for i, sw := range switches { - if sw.Peers().Size() != 2 { - t.Fatalf("Expected each switch to be connected to 2 other, but %d switch only connected to %d", sw.Peers().Size(), i) - } - } -} - -func TestSwitchAcceptRoutine(t *testing.T) { - cfg.MaxNumInboundPeers = 5 - - // Create some unconditional peers. - const unconditionalPeersNum = 2 - var ( - unconditionalPeers = make([]*remotePeer, unconditionalPeersNum) - unconditionalPeerIDs = make([]string, unconditionalPeersNum) - ) - for i := 0; i < unconditionalPeersNum; i++ { - peer := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - peer.Start() - unconditionalPeers[i] = peer - unconditionalPeerIDs[i] = string(peer.ID()) - } - - // make switch - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", nil, initSwitchFunc, log.TestingLogger()) - err := sw.AddUnconditionalPeerIDs(unconditionalPeerIDs) - require.NoError(t, err) - err = sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - err := sw.Stop() - require.NoError(t, err) - }) - - // 0. check there are no peers - assert.Equal(t, 0, sw.Peers().Size()) - - // 1. check we connect up to MaxNumInboundPeers - peers := make([]*remotePeer, 0) - for i := 0; i < cfg.MaxNumInboundPeers; i++ { - peer := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - peers = append(peers, peer) - peer.Start() - c, err := peer.Dial(sw.NetAddress()) - require.NoError(t, err) - // spawn a reading routine to prevent connection from closing - go func(c net.Conn) { - for { - one := make([]byte, 1) - _, err := c.Read(one) - if err != nil { - return - } - } - }(c) - } - time.Sleep(100 * time.Millisecond) - assert.Equal(t, cfg.MaxNumInboundPeers, sw.Peers().Size()) - - // 2. 
check we close new connections if we already have MaxNumInboundPeers peers - peer := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - peer.Start() - conn, err := peer.Dial(sw.NetAddress()) - require.NoError(t, err) - // check conn is closed - one := make([]byte, 1) - _ = conn.SetReadDeadline(time.Now().Add(10 * time.Millisecond)) - _, err = conn.Read(one) - assert.Error(t, err) - assert.Equal(t, cfg.MaxNumInboundPeers, sw.Peers().Size()) - peer.Stop() - - // 3. check we connect to unconditional peers despite the limit. - for _, peer := range unconditionalPeers { - c, err := peer.Dial(sw.NetAddress()) - require.NoError(t, err) - // spawn a reading routine to prevent connection from closing - go func(c net.Conn) { - for { - one := make([]byte, 1) - _, err := c.Read(one) - if err != nil { - return - } - } - }(c) - } - time.Sleep(10 * time.Millisecond) - assert.Equal(t, cfg.MaxNumInboundPeers+unconditionalPeersNum, sw.Peers().Size()) - - for _, peer := range peers { - peer.Stop() - } - for _, peer := range unconditionalPeers { - peer.Stop() - } -} - -func TestSwitchRejectsIncompatiblePeers(t *testing.T) { - sw := MakeSwitch(cfg, 1, "127.0.0.1", "123.123.123", nil, initSwitchFunc, log.TestingLogger()) - ni := sw.NodeInfo() - ni.Network = "network-a" - sw.SetNodeInfo(ni) - - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - err := sw.Stop() - require.NoError(t, err) - }) - - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg, Network: "network-b"} - rp.Start() - defer rp.Stop() - - assert.Equal(t, 0, sw.Peers().Size()) - - conn, err := rp.Dial(sw.NetAddress()) - assert.Nil(t, err) - - one := make([]byte, 1) - _ = conn.SetReadDeadline(time.Now().Add(10 * time.Millisecond)) - _, err = conn.Read(one) - assert.Error(t, err) - - assert.Equal(t, 0, sw.Peers().Size()) -} - -type errorTransport struct { - acceptErr error -} - -func (et errorTransport) String() string { - return "error" -} - -func (et errorTransport) Protocols() []Protocol { - return []Protocol{"error"} -} - -func (et errorTransport) Accept() (Connection, error) { - return nil, et.acceptErr -} -func (errorTransport) Dial(context.Context, Endpoint) (Connection, error) { - panic("not implemented") -} -func (errorTransport) Close() error { panic("not implemented") } -func (errorTransport) FlushClose() error { panic("not implemented") } -func (errorTransport) Endpoints() []Endpoint { panic("not implemented") } - -func TestSwitchAcceptRoutineErrorCases(t *testing.T) { - sw := NewSwitch(cfg, errorTransport{ErrFilterTimeout{}}) - assert.NotPanics(t, func() { - err := sw.Start() - require.NoError(t, err) - err = sw.Stop() - require.NoError(t, err) - }) - - sw = NewSwitch(cfg, errorTransport{ErrRejected{conn: nil, err: errors.New("filtered"), isFiltered: true}}) - assert.NotPanics(t, func() { - err := sw.Start() - require.NoError(t, err) - err = sw.Stop() - require.NoError(t, err) - }) - // TODO(melekes) check we remove our address from addrBook - - sw = NewSwitch(cfg, errorTransport{ErrTransportClosed{}}) - assert.NotPanics(t, func() { - err := sw.Start() - require.NoError(t, err) - err = sw.Stop() - require.NoError(t, err) - }) -} - -// mockReactor checks that InitPeer never called before RemovePeer. If that's -// not true, InitCalledBeforeRemoveFinished will return true. 
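// It tracks the ordering with atomic flags rather than locks, so the check
// itself cannot block the RemovePeer/InitPeer callbacks it instruments.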
-type mockReactor struct { - *BaseReactor - - // atomic - removePeerInProgress uint32 - initCalledBeforeRemoveFinished uint32 -} - -func (r *mockReactor) GetChannels() []*ChannelDescriptor { - return []*ChannelDescriptor{{ID: testCh, Priority: 10}} -} - -func (r *mockReactor) RemovePeer(peer Peer, reason interface{}) { - atomic.StoreUint32(&r.removePeerInProgress, 1) - defer atomic.StoreUint32(&r.removePeerInProgress, 0) - time.Sleep(100 * time.Millisecond) -} - -func (r *mockReactor) InitPeer(peer Peer) Peer { - if atomic.LoadUint32(&r.removePeerInProgress) == 1 { - atomic.StoreUint32(&r.initCalledBeforeRemoveFinished, 1) - } - - return peer -} - -func (r *mockReactor) InitCalledBeforeRemoveFinished() bool { - return atomic.LoadUint32(&r.initCalledBeforeRemoveFinished) == 1 -} - -// see stopAndRemovePeer -func TestSwitchInitPeerIsNotCalledBeforeRemovePeer(t *testing.T) { - // make reactor - reactor := &mockReactor{} - reactor.BaseReactor = NewBaseReactor("mockReactor", reactor) - - // make switch - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", nil, func(i int, sw *Switch) *Switch { - sw.AddReactor("mock", reactor) - return sw - }, log.TestingLogger()) - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - - // add peer - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - rp.Start() - defer rp.Stop() - _, err = rp.Dial(sw.NetAddress()) - require.NoError(t, err) - - // wait till the switch adds rp to the peer set, then stop the peer asynchronously - for { - time.Sleep(20 * time.Millisecond) - if peer := sw.Peers().Get(rp.ID()); peer != nil { - go sw.StopPeerForError(peer, "test") - break - } - } - - // simulate peer reconnecting to us - _, err = rp.Dial(sw.NetAddress()) - require.NoError(t, err) - // wait till the switch adds rp to the peer set - time.Sleep(50 * time.Millisecond) - - // make sure reactor.RemovePeer is finished before InitPeer is called - assert.False(t, reactor.InitCalledBeforeRemoveFinished()) -} - -func BenchmarkSwitchBroadcast(b *testing.B) { - s1, s2 := MakeSwitchPair(b, func(i int, sw *Switch) *Switch { - // Make bar reactors of bar channels each - sw.AddReactor("foo", NewTestReactor([]*conn.ChannelDescriptor{ - {ID: byte(0x00), Priority: 10}, - {ID: byte(0x01), Priority: 10}, - }, false)) - sw.AddReactor("bar", NewTestReactor([]*conn.ChannelDescriptor{ - {ID: byte(0x02), Priority: 10}, - {ID: byte(0x03), Priority: 10}, - }, false)) - return sw - }) - - b.Cleanup(func() { - if err := s1.Stop(); err != nil { - b.Error(err) - } - }) - - b.Cleanup(func() { - if err := s2.Stop(); err != nil { - b.Error(err) - } - }) - - // Allow time for goroutines to boot up - time.Sleep(1 * time.Second) - - b.ResetTimer() - - numSuccess, numFailure := 0, 0 - - // Send random message from foo channel to another - for i := 0; i < b.N; i++ { - chID := byte(i % 4) - successChan := s1.Broadcast(chID, []byte("test data")) - for s := range successChan { - if s { - numSuccess++ - } else { - numFailure++ - } - } - } - - b.Logf("success: %v, failure: %v", numSuccess, numFailure) -} - -func TestNewNetAddressStrings(t *testing.T) { - addrs, errs := NewNetAddressStrings([]string{ - "127.0.0.1:8080", - "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", - "deadbeefdeadbeefdeadbeefdeadbeefdeadbeed@127.0.0.2:8080"}) - assert.Len(t, errs, 1) - assert.Equal(t, 2, len(addrs)) -} diff --git a/internal/p2p/test_util.go b/internal/p2p/test_util.go deleted file mode 100644 index ba0225ec34..0000000000 --- 
a/internal/p2p/test_util.go +++ /dev/null @@ -1,296 +0,0 @@ -package p2p - -import ( - "context" - "encoding/hex" - "fmt" - mrand "math/rand" - "net" - - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/internal/p2p/conn" - "github.com/tendermint/tendermint/libs/log" - tmnet "github.com/tendermint/tendermint/libs/net" - tmrand "github.com/tendermint/tendermint/libs/rand" - "github.com/tendermint/tendermint/types" -) - -const testCh = 0x01 - -//------------------------------------------------ - -func AddPeerToSwitchPeerSet(sw *Switch, peer Peer) { - sw.peers.Add(peer) //nolint:errcheck // ignore error -} - -func CreateRandomPeer(outbound bool) Peer { - addr, netAddr := CreateRoutableAddr() - p := &peer{ - peerConn: peerConn{outbound: outbound}, - nodeInfo: types.NodeInfo{ - NodeID: netAddr.ID, - ListenAddr: netAddr.DialString(), - }, - metrics: NopMetrics(), - } - p.SetLogger(log.TestingLogger().With("peer", addr)) - return p -} - -// nolint:gosec // G404: Use of weak random number generator -func CreateRoutableAddr() (addr string, netAddr *NetAddress) { - for { - var err error - addr = fmt.Sprintf("%X@%v.%v.%v.%v:26656", - tmrand.Bytes(20), - mrand.Int()%256, - mrand.Int()%256, - mrand.Int()%256, - mrand.Int()%256) - netAddr, err = types.NewNetAddressString(addr) - if err != nil { - panic(err) - } - if netAddr.Routable() { - break - } - } - return -} - -//------------------------------------------------------------------ -// Connects switches via arbitrary net.Conn. Used for testing. - -const TestHost = "localhost" - -// MakeConnectedSwitches returns n switches, connected according to the connect func. -// If connect==Connect2Switches, the switches will be fully connected. -// initSwitch defines how the i'th switch should be initialized (ie. with what reactors). -// NOTE: panics if any switch fails to start. -func MakeConnectedSwitches(cfg *config.P2PConfig, - nodeProTxHashes []crypto.ProTxHash, - initSwitch func(int, *Switch) *Switch, - connect func([]*Switch, int, int), -) []*Switch { - n := len(nodeProTxHashes) - switches := make([]*Switch, n) - for i := 0; i < n; i++ { - switches[i] = MakeSwitch(cfg, i, TestHost, "123.123.123", nodeProTxHashes[i], initSwitch, log.TestingLogger()) - } - - if err := StartSwitches(switches); err != nil { - panic(err) - } - - for i := 0; i < n; i++ { - for j := i + 1; j < n; j++ { - connect(switches, i, j) - } - } - - return switches -} - -// Connect2Switches will connect switches i and j via net.Pipe(). -// Blocks until a connection is established. -// NOTE: caller ensures i and j are within bounds. 
-func Connect2Switches(switches []*Switch, i, j int) { - switchI := switches[i] - switchJ := switches[j] - - c1, c2 := conn.NetPipe() - - doneCh := make(chan struct{}) - go func() { - err := switchI.addPeerWithConnection(c1) - if err != nil { - panic(err) - } - doneCh <- struct{}{} - }() - go func() { - err := switchJ.addPeerWithConnection(c2) - if err != nil { - panic(err) - } - doneCh <- struct{}{} - }() - <-doneCh - <-doneCh -} - -func (sw *Switch) addPeerWithConnection(conn net.Conn) error { - pc, err := testInboundPeerConn(sw.transport.(*MConnTransport), conn) - if err != nil { - if err := conn.Close(); err != nil { - sw.Logger.Error("Error closing connection", "err", err) - } - return err - } - peerNodeInfo, _, err := pc.conn.Handshake(context.Background(), sw.nodeInfo, sw.nodeKey.PrivKey) - if err != nil { - if err := conn.Close(); err != nil { - sw.Logger.Error("Error closing connection", "err", err) - } - return err - } - - p := newPeer( - peerNodeInfo, - pc, - sw.reactorsByCh, - sw.StopPeerForError, - ) - - if err = sw.addPeer(p); err != nil { - pc.CloseConn() - return err - } - - return nil -} - -// StartSwitches calls sw.Start() for each given switch. -// It returns the first encountered error. -func StartSwitches(switches []*Switch) error { - for _, s := range switches { - err := s.Start() // start switch and reactors - if err != nil { - return err - } - } - return nil -} - -func MakeSwitch( - cfg *config.P2PConfig, - i int, - network, version string, - nodeProTxHash crypto.ProTxHash, - initSwitch func(int, *Switch) *Switch, - logger log.Logger, - opts ...SwitchOption, -) *Switch { - nodeKey := types.GenNodeKey() - nodeInfo := testNodeInfo(nodeKey.ID, fmt.Sprintf("node%d", i), nodeProTxHash) - addr, err := types.NewNetAddressString( - nodeKey.ID.AddressString(nodeInfo.ListenAddr), - ) - if err != nil { - panic(err) - } - - swLogger := logger.With("switch", i) - t := NewMConnTransport(swLogger, MConnConfig(cfg), - []*ChannelDescriptor{}, MConnTransportOptions{}) - - // TODO: let the config be passed in? - sw := initSwitch(i, NewSwitch(cfg, t, opts...)) - sw.SetLogger(swLogger) - sw.SetNodeKey(nodeKey) - - if err := t.Listen(NewEndpoint(addr)); err != nil { - panic(err) - } - - ni := nodeInfo - ni.Channels = []byte{} - for ch := range sw.reactorsByCh { - ni.Channels = append(ni.Channels, ch) - } - nodeInfo = ni - - // TODO: We need to setup reactors ahead of time so the NodeInfo is properly - // populated and we don't have to do those awkward overrides and setters. 
- sw.SetNodeInfo(nodeInfo) - - return sw -} - -func testInboundPeerConn( - transport *MConnTransport, - conn net.Conn, -) (peerConn, error) { - return testPeerConn(transport, conn, false, false) -} - -func testPeerConn( - transport *MConnTransport, - rawConn net.Conn, - outbound, persistent bool, -) (pc peerConn, err error) { - - conn := newMConnConnection(transport.logger, rawConn, transport.mConnConfig, transport.channelDescs) - - return newPeerConn(outbound, persistent, conn), nil -} - -//---------------------------------------------------------------- -// rand node info - -func testNodeInfo(id types.NodeID, name string, nodeProTxHash crypto.ProTxHash) types.NodeInfo { - return testNodeInfoWithNetwork(id, name, "testing", nodeProTxHash) -} - -func testNodeInfoWithNetwork(id types.NodeID, name, network string, nodeProTxHash crypto.ProTxHash) types.NodeInfo { - return types.NodeInfo{ - ProtocolVersion: defaultProtocolVersion, - ProTxHash: nodeProTxHash, - NodeID: id, - ListenAddr: fmt.Sprintf("127.0.0.1:%d", getFreePort()), - Network: network, - Version: "1.2.3-rc0-deadbeef", - Channels: []byte{testCh}, - Moniker: name, - Other: types.NodeInfoOther{ - TxIndex: "on", - RPCAddress: fmt.Sprintf("127.0.0.1:%d", getFreePort()), - }, - } -} - -func getFreePort() int { - port, err := tmnet.GetFreePort() - if err != nil { - panic(err) - } - return port -} - -type AddrBookMock struct { - Addrs map[string]struct{} - OurAddrs map[string]struct{} - PrivateAddrs map[string]struct{} -} - -var _ AddrBook = (*AddrBookMock)(nil) - -func (book *AddrBookMock) AddAddress(addr *NetAddress, src *NetAddress) error { - book.Addrs[addr.String()] = struct{}{} - return nil -} -func (book *AddrBookMock) AddOurAddress(addr *NetAddress) { book.OurAddrs[addr.String()] = struct{}{} } -func (book *AddrBookMock) OurAddress(addr *NetAddress) bool { - _, ok := book.OurAddrs[addr.String()] - return ok -} -func (book *AddrBookMock) MarkGood(types.NodeID) {} -func (book *AddrBookMock) HasAddress(addr *NetAddress) bool { - _, ok := book.Addrs[addr.String()] - return ok -} -func (book *AddrBookMock) RemoveAddress(addr *NetAddress) { - delete(book.Addrs, addr.String()) -} -func (book *AddrBookMock) Save() {} -func (book *AddrBookMock) AddPrivateIDs(addrs []string) { - for _, addr := range addrs { - book.PrivateAddrs[addr] = struct{}{} - } -} - -// FindIP implements AddrBook -func (book *AddrBookMock) FindIP(net.IP, uint16) types.NodeID { - return types.NodeID(hex.EncodeToString(tmrand.Bytes(types.NodeIDByteLength))) -} diff --git a/internal/p2p/transport.go b/internal/p2p/transport.go index a3245dfc8d..1b48d8b0fd 100644 --- a/internal/p2p/transport.go +++ b/internal/p2p/transport.go @@ -7,9 +7,7 @@ import ( "net" "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/internal/p2p/conn" "github.com/tendermint/tendermint/types" - "github.com/tendermint/tendermint/version" ) //go:generate ../../scripts/mockery_generate.sh Transport|Connection @@ -20,19 +18,14 @@ const ( defaultProtocol Protocol = MConnProtocol ) -// defaultProtocolVersion populates the Block and P2P versions using -// the global values, but not the App. -var defaultProtocolVersion = types.ProtocolVersion{ - P2P: version.P2PProtocol, - Block: version.BlockProtocol, - App: 0, -} - // Protocol identifies a transport protocol. type Protocol string // Transport is a connection-oriented mechanism for exchanging data with a peer. type Transport interface { + // Listen starts the transport on the specified endpoint. 
+ Listen(*Endpoint) error + // Protocols returns the protocols supported by the transport. The Router // uses this to pick a transport for an Endpoint. Protocols() []Protocol @@ -41,19 +34,23 @@ type Transport interface { // // How to listen is transport-dependent, e.g. MConnTransport uses Listen() while // MemoryTransport starts listening via MemoryNetwork.CreateTransport(). - Endpoints() []Endpoint + Endpoint() (*Endpoint, error) // Accept waits for the next inbound connection on a listening endpoint, blocking // until either a connection is available or the transport is closed. On closure, // io.EOF is returned and further Accept calls are futile. - Accept() (Connection, error) + Accept(context.Context) (Connection, error) // Dial creates an outbound connection to an endpoint. - Dial(context.Context, Endpoint) (Connection, error) + Dial(context.Context, *Endpoint) (Connection, error) // Close stops accepting new connections, but does not close active connections. Close() error + // AddChannelDescriptors is only part of this interface + // temporarily + AddChannelDescriptors([]*ChannelDescriptor) + // Stringer is used to display the transport, e.g. in logs. // // Without this, the logger may use reflection to access and display @@ -88,22 +85,10 @@ type Connection interface { // ReceiveMessage returns the next message received on the connection, // blocking until one is available. Returns io.EOF if closed. - ReceiveMessage() (ChannelID, []byte, error) + ReceiveMessage(context.Context) (ChannelID, []byte, error) // SendMessage sends a message on the connection. Returns io.EOF if closed. - // - // FIXME: For compatibility with the legacy P2P stack, it returns an - // additional boolean false if the message timed out waiting to be accepted - // into the send buffer. This should be removed. - SendMessage(ChannelID, []byte) (bool, error) - - // TrySendMessage is a non-blocking version of SendMessage that returns - // immediately if the message buffer is full. It returns true if the message - // was accepted. - // - // FIXME: This method is here for backwards-compatibility with the legacy - // P2P stack and should be removed. - TrySendMessage(ChannelID, []byte) (bool, error) + SendMessage(context.Context, ChannelID, []byte) error // LocalEndpoint returns the local endpoint for the connection. LocalEndpoint() Endpoint @@ -114,18 +99,6 @@ type Connection interface { // Close closes the connection. Close() error - // FlushClose flushes all pending sends and then closes the connection. - // - // FIXME: This only exists for backwards-compatibility with the current - // MConnection implementation. There should really be a separate Flush() - // method, but there is no easy way to synchronously flush pending data with - // the current MConnection code. - FlushClose() error - - // Status returns the current connection status. - // FIXME: Only here for compatibility with the current Peer code. - Status() conn.ConnectionStatus - // Stringer is used to display the connection, e.g. in logs. // // Without this, the logger may use reflection to access and display @@ -156,12 +129,17 @@ type Endpoint struct { } // NewEndpoint constructs an Endpoint from a types.NetAddress structure. 
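These interface changes are the heart of the diff: Accept, Dial, ReceiveMessage, and SendMessage now take a context, the Endpoints() slice collapses into a single fallible Endpoint(), and the TrySendMessage, FlushClose, and Status compatibility methods disappear. As a rough illustration, written as if inside the p2p package (echo is hypothetical, not part of the diff), per-call contexts take over the role of the removed non-blocking send; the NewEndpoint constructor follows below:

```go
import (
	"context"
	"time"
)

// echo is a hypothetical consumer of the revised Connection interface:
// timeouts and shutdown are expressed through contexts rather than the
// removed TrySendMessage/FlushClose/Status methods.
func echo(ctx context.Context, c Connection) error {
	for {
		chID, msg, err := c.ReceiveMessage(ctx) // io.EOF once closed
		if err != nil {
			return err
		}

		// Bound each send with a context deadline instead of calling
		// the removed TrySendMessage.
		sendCtx, cancel := context.WithTimeout(ctx, time.Second)
		err = c.SendMessage(sendCtx, chID, msg)
		cancel()
		if err != nil {
			return err
		}
	}
}
```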
-func NewEndpoint(na *types.NetAddress) Endpoint { - return Endpoint{ +func NewEndpoint(addr string) (*Endpoint, error) { + na, err := types.ParseAddressString(addr) + if err != nil { + return nil, err + } + + return &Endpoint{ Protocol: MConnProtocol, IP: na.IP, Port: na.Port, - } + }, nil } // NodeAddress converts the endpoint into a NodeAddress for the given node ID. diff --git a/internal/p2p/transport_mconn.go b/internal/p2p/transport_mconn.go index eca261476c..0520f04db6 100644 --- a/internal/p2p/transport_mconn.go +++ b/internal/p2p/transport_mconn.go @@ -40,14 +40,15 @@ type MConnTransportOptions struct { // MConnTransport is a Transport implementation using the current multiplexed // Tendermint protocol ("MConn"). type MConnTransport struct { + mtx sync.Mutex logger log.Logger options MConnTransportOptions mConnConfig conn.MConnConfig channelDescs []*ChannelDescriptor - closeCh chan struct{} - closeOnce sync.Once - listener net.Listener + closeOnce sync.Once + doneCh chan struct{} + listener net.Listener } // NewMConnTransport sets up a new MConnection transport. This uses the @@ -63,7 +64,7 @@ func NewMConnTransport( logger: logger, options: options, mConnConfig: mConnConfig, - closeCh: make(chan struct{}), + doneCh: make(chan struct{}), channelDescs: channelDescs, } } @@ -78,24 +79,25 @@ func (m *MConnTransport) Protocols() []Protocol { return []Protocol{MConnProtocol, TCPProtocol} } -// Endpoints implements Transport. -func (m *MConnTransport) Endpoints() []Endpoint { +// Endpoint implements Transport. +func (m *MConnTransport) Endpoint() (*Endpoint, error) { if m.listener == nil { - return []Endpoint{} + return nil, errors.New("listener not defined") } select { - case <-m.closeCh: - return []Endpoint{} + case <-m.doneCh: + return nil, errors.New("transport closed") default: } - endpoint := Endpoint{ + + endpoint := &Endpoint{ Protocol: MConnProtocol, } if addr, ok := m.listener.Addr().(*net.TCPAddr); ok { endpoint.IP = addr.IP endpoint.Port = uint16(addr.Port) } - return []Endpoint{endpoint} + return endpoint, nil } // Listen asynchronously listens for inbound connections on the given endpoint. @@ -105,7 +107,7 @@ // FIXME: Listen currently only supports listening on a single endpoint, it // might be useful to support listening on multiple addresses (e.g. IPv4 and // IPv6, or a private and public address) via multiple Listen() calls. -func (m *MConnTransport) Listen(endpoint Endpoint) error { +func (m *MConnTransport) Listen(endpoint *Endpoint) error { if m.listener != nil { return errors.New("transport is already listening") } @@ -132,26 +134,47 @@ } // Accept implements Transport.
-func (m *MConnTransport) Accept() (Connection, error) { +func (m *MConnTransport) Accept(ctx context.Context) (Connection, error) { if m.listener == nil { return nil, errors.New("transport is not listening") } - tcpConn, err := m.listener.Accept() - if err != nil { + conCh := make(chan net.Conn) + errCh := make(chan error) + go func() { + tcpConn, err := m.listener.Accept() + if err != nil { + select { + case errCh <- err: + case <-ctx.Done(): + } + } select { - case <-m.closeCh: - return nil, io.EOF - default: - return nil, err + case conCh <- tcpConn: + case <-ctx.Done(): } + }() + + select { + case <-ctx.Done(): + m.listener.Close() + return nil, io.EOF + case <-m.doneCh: + m.listener.Close() + return nil, io.EOF + case err := <-errCh: + return nil, err + case tcpConn := <-conCh: + m.mtx.Lock() + chDescs := m.channelDescs + m.mtx.Unlock() + return newMConnConnection(m.logger, tcpConn, m.mConnConfig, chDescs), nil } - return newMConnConnection(m.logger, tcpConn, m.mConnConfig, m.channelDescs), nil } // Dial implements Transport. -func (m *MConnTransport) Dial(ctx context.Context, endpoint Endpoint) (Connection, error) { +func (m *MConnTransport) Dial(ctx context.Context, endpoint *Endpoint) (Connection, error) { if err := m.validateEndpoint(endpoint); err != nil { return nil, err } @@ -178,7 +201,7 @@ func (m *MConnTransport) Dial(ctx context.Context, endpoint Endpoint) (Connectio func (m *MConnTransport) Close() error { var err error m.closeOnce.Do(func() { - close(m.closeCh) // must be closed first, to handle error in Accept() + close(m.doneCh) if m.listener != nil { err = m.listener.Close() } @@ -194,11 +217,13 @@ func (m *MConnTransport) Close() error { // connections should be agnostic to everything but the channel ID's which are // initialized in the handshake. func (m *MConnTransport) AddChannelDescriptors(channelDesc []*ChannelDescriptor) { + m.mtx.Lock() + defer m.mtx.Unlock() m.channelDescs = append(m.channelDescs, channelDesc...) } // validateEndpoint validates an endpoint. 
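The rewritten Accept above makes the blocking net.Listener.Accept cancelable by running it in a goroutine and selecting on the result channel, the error channel, ctx, and doneCh. Distilled to its essence, the pattern looks like the hypothetical acceptCtx below; one subtlety is returning immediately after reporting the error, so the goroutine never goes on to offer a nil connection:

```go
import (
	"context"
	"io"
	"net"
)

// acceptCtx runs the blocking ln.Accept in a goroutine so the caller
// can stop waiting when ctx is canceled. A distilled, hypothetical
// version of the pattern used by MConnTransport.Accept.
func acceptCtx(ctx context.Context, ln net.Listener) (net.Conn, error) {
	connCh := make(chan net.Conn)
	errCh := make(chan error)

	go func() {
		conn, err := ln.Accept()
		if err != nil {
			select {
			case errCh <- err:
			case <-ctx.Done():
			}
			return // do not fall through and offer a nil conn
		}
		select {
		case connCh <- conn:
		case <-ctx.Done():
			conn.Close() // the caller already gave up
		}
	}()

	select {
	case <-ctx.Done():
		return nil, io.EOF
	case err := <-errCh:
		return nil, err
	case conn := <-connCh:
		return conn, nil
	}
}
```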
-func (m *MConnTransport) validateEndpoint(endpoint Endpoint) error { +func (m *MConnTransport) validateEndpoint(endpoint *Endpoint) error { if err := endpoint.Validate(); err != nil { return err } @@ -222,7 +247,7 @@ type mConnConnection struct { channelDescs []*ChannelDescriptor receiveCh chan mConnMessage errorCh chan error - closeCh chan struct{} + doneCh chan struct{} closeOnce sync.Once mconn *conn.MConnection // set during Handshake() @@ -248,7 +273,7 @@ func newMConnConnection( channelDescs: channelDescs, receiveCh: make(chan mConnMessage), errorCh: make(chan error, 1), // buffered to avoid onError leak - closeCh: make(chan struct{}), + doneCh: make(chan struct{}), } } @@ -277,7 +302,12 @@ func (c *mConnConnection) Handshake( }() var err error mconn, peerInfo, peerKey, err = c.handshake(ctx, nodeInfo, privKey) - errCh <- err + + select { + case errCh <- err: + case <-ctx.Done(): + } + }() select { @@ -290,8 +320,7 @@ func (c *mConnConnection) Handshake( return types.NodeInfo{}, nil, err } c.mconn = mconn - c.logger = mconn.Logger - if err = c.mconn.Start(); err != nil { + if err = c.mconn.Start(ctx); err != nil { return types.NodeInfo{}, nil, err } return peerInfo, peerKey, nil @@ -315,49 +344,67 @@ func (c *mConnConnection) handshake( return nil, types.NodeInfo{}, nil, err } + wg := &sync.WaitGroup{} var pbPeerInfo p2pproto.NodeInfo errCh := make(chan error, 2) + wg.Add(1) go func() { + defer wg.Done() _, err := protoio.NewDelimitedWriter(secretConn).WriteMsg(nodeInfo.ToProto()) - errCh <- err + select { + case errCh <- err: + case <-ctx.Done(): + } + }() + wg.Add(1) go func() { + defer wg.Done() _, err := protoio.NewDelimitedReader(secretConn, types.MaxNodeInfoSize()).ReadMsg(&pbPeerInfo) - errCh <- err - }() - for i := 0; i < cap(errCh); i++ { - if err = <-errCh; err != nil { - return nil, types.NodeInfo{}, nil, err + select { + case errCh <- err: + case <-ctx.Done(): } + }() + + wg.Wait() + + if err, ok := <-errCh; ok && err != nil { + return nil, types.NodeInfo{}, nil, err + } + + if err := ctx.Err(); err != nil { + return nil, types.NodeInfo{}, nil, err } + peerInfo, err := types.NodeInfoFromProto(&pbPeerInfo) if err != nil { return nil, types.NodeInfo{}, nil, err } - mconn := conn.NewMConnectionWithConfig( + mconn := conn.NewMConnection( + c.logger.With("peer", c.RemoteEndpoint().NodeAddress(peerInfo.NodeID)), secretConn, c.channelDescs, c.onReceive, c.onError, c.mConnConfig, ) - mconn.SetLogger(c.logger.With("peer", c.RemoteEndpoint().NodeAddress(peerInfo.NodeID))) return mconn, peerInfo, secretConn.RemotePubKey(), nil } // onReceive is a callback for MConnection received messages. -func (c *mConnConnection) onReceive(chID byte, payload []byte) { +func (c *mConnConnection) onReceive(ctx context.Context, chID ChannelID, payload []byte) { select { - case c.receiveCh <- mConnMessage{channelID: ChannelID(chID), payload: payload}: - case <-c.closeCh: + case c.receiveCh <- mConnMessage{channelID: chID, payload: payload}: + case <-ctx.Done(): } } // onError is a callback for MConnection errors. The error is passed via errorCh // to ReceiveMessage (but not SendMessage, for legacy P2P stack behavior). 
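The reworked handshake above writes the local NodeInfo and reads the peer's concurrently, funneling failures through a channel buffered to the number of goroutines so neither send can block. The same exchange over a plain io.ReadWriter might look like the hypothetical sketch below (a fixed-size payload is assumed for brevity); closing errCh after wg.Wait guarantees the error loop terminates even when a send was skipped. The onError callback follows:

```go
import (
	"context"
	"io"
	"sync"
)

// exchange writes ours and reads the peer's payload concurrently,
// returning the first error either side reports. A hypothetical
// distillation of the concurrent handshake pattern.
func exchange(ctx context.Context, rw io.ReadWriter, ours []byte) ([]byte, error) {
	var wg sync.WaitGroup
	theirs := make([]byte, len(ours)) // assumes fixed-size payloads
	errCh := make(chan error, 2)      // buffered: sends never block

	wg.Add(2)
	go func() {
		defer wg.Done()
		_, err := rw.Write(ours)
		errCh <- err
	}()
	go func() {
		defer wg.Done()
		_, err := io.ReadFull(rw, theirs)
		errCh <- err
	}()
	wg.Wait()
	close(errCh) // lets the loop below drain and stop

	for err := range errCh {
		if err != nil {
			return nil, err
		}
	}
	return theirs, ctx.Err()
}
```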
-func (c *mConnConnection) onError(e interface{}) { +func (c *mConnConnection) onError(ctx context.Context, e interface{}) { err, ok := e.(error) if !ok { err = fmt.Errorf("%v", e) @@ -367,7 +414,7 @@ _ = c.Close() select { case c.errorCh <- err: - case <-c.closeCh: + case <-ctx.Done(): } } @@ -377,41 +424,32 @@ func (c *mConnConnection) String() string { } // SendMessage implements Connection. -func (c *mConnConnection) SendMessage(chID ChannelID, msg []byte) (bool, error) { +func (c *mConnConnection) SendMessage(ctx context.Context, chID ChannelID, msg []byte) error { if chID > math.MaxUint8 { - return false, fmt.Errorf("MConnection only supports 1-byte channel IDs (got %v)", chID) + return fmt.Errorf("MConnection only supports 1-byte channel IDs (got %v)", chID) } select { case err := <-c.errorCh: - return false, err - case <-c.closeCh: - return false, io.EOF + return err + case <-ctx.Done(): + return io.EOF default: - return c.mconn.Send(byte(chID), msg), nil - } -} + if ok := c.mconn.Send(chID, msg); !ok { + return errors.New("sending message timed out") + } -// TrySendMessage implements Connection. -func (c *mConnConnection) TrySendMessage(chID ChannelID, msg []byte) (bool, error) { - if chID > math.MaxUint8 { - return false, fmt.Errorf("MConnection only supports 1-byte channel IDs (got %v)", chID) - } - select { - case err := <-c.errorCh: - return false, err - case <-c.closeCh: - return false, io.EOF - default: - return c.mconn.TrySend(byte(chID), msg), nil + return nil } } // ReceiveMessage implements Connection. -func (c *mConnConnection) ReceiveMessage() (ChannelID, []byte, error) { +func (c *mConnConnection) ReceiveMessage(ctx context.Context) (ChannelID, []byte, error) { select { case err := <-c.errorCh: return 0, nil, err - case <-c.closeCh: + case <-c.doneCh: + return 0, nil, io.EOF + case <-ctx.Done(): return 0, nil, io.EOF case msg := <-c.receiveCh: return msg.channelID, msg.payload, nil @@ -442,38 +480,17 @@ func (c *mConnConnection) RemoteEndpoint() Endpoint { return endpoint } -// Status implements Connection. -func (c *mConnConnection) Status() conn.ConnectionStatus { - if c.mconn == nil { - return conn.ConnectionStatus{} - } - return c.mconn.Status() - } - // Close implements Connection. func (c *mConnConnection) Close() error { var err error c.closeOnce.Do(func() { - if c.mconn != nil && c.mconn.IsRunning() { - err = c.mconn.Stop() - } else { - err = c.conn.Close() - } - close(c.closeCh) - }) - return err -} + defer close(c.doneCh) -// FlushClose implements Connection.
-func (c *mConnConnection) FlushClose() error { - var err error - c.closeOnce.Do(func() { if c.mconn != nil && c.mconn.IsRunning() { - c.mconn.FlushStop() + c.mconn.Stop() } else { err = c.conn.Close() } - close(c.closeCh) }) return err } diff --git a/internal/p2p/transport_mconn_test.go b/internal/p2p/transport_mconn_test.go index f4d7198edf..c478dbe1d2 100644 --- a/internal/p2p/transport_mconn_test.go +++ b/internal/p2p/transport_mconn_test.go @@ -1,6 +1,7 @@ package p2p_test import ( + "context" "io" "net" "testing" @@ -19,21 +20,19 @@ import ( func init() { testTransports["mconn"] = func(t *testing.T) p2p.Transport { transport := p2p.NewMConnTransport( - log.TestingLogger(), + log.NewNopLogger(), conn.DefaultMConnConfig(), - []*p2p.ChannelDescriptor{{ID: byte(chID), Priority: 1}}, + []*p2p.ChannelDescriptor{{ID: chID, Priority: 1}}, p2p.MConnTransportOptions{}, ) - err := transport.Listen(p2p.Endpoint{ + err := transport.Listen(&p2p.Endpoint{ Protocol: p2p.MConnProtocol, IP: net.IPv4(127, 0, 0, 1), Port: 0, // assign a random port }) require.NoError(t, err) - t.Cleanup(func() { - require.NoError(t, transport.Close()) - }) + t.Cleanup(func() { _ = transport.Close() }) return transport } @@ -41,9 +40,9 @@ func init() { func TestMConnTransport_AcceptBeforeListen(t *testing.T) { transport := p2p.NewMConnTransport( - log.TestingLogger(), + log.NewNopLogger(), conn.DefaultMConnConfig(), - []*p2p.ChannelDescriptor{{ID: byte(chID), Priority: 1}}, + []*p2p.ChannelDescriptor{{ID: chID, Priority: 1}}, p2p.MConnTransportOptions{ MaxAcceptedConnections: 2, }, @@ -51,17 +50,22 @@ func TestMConnTransport_AcceptBeforeListen(t *testing.T) { t.Cleanup(func() { _ = transport.Close() }) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - _, err := transport.Accept() + _, err := transport.Accept(ctx) require.Error(t, err) require.NotEqual(t, io.EOF, err) // io.EOF should be returned after Close() } func TestMConnTransport_AcceptMaxAcceptedConnections(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + transport := p2p.NewMConnTransport( - log.TestingLogger(), + log.NewNopLogger(), conn.DefaultMConnConfig(), - []*p2p.ChannelDescriptor{{ID: byte(chID), Priority: 1}}, + []*p2p.ChannelDescriptor{{ID: chID, Priority: 1}}, p2p.MConnTransportOptions{ MaxAcceptedConnections: 2, }, @@ -69,19 +73,20 @@ func TestMConnTransport_AcceptMaxAcceptedConnections(t *testing.T) { t.Cleanup(func() { _ = transport.Close() }) - err := transport.Listen(p2p.Endpoint{ + err := transport.Listen(&p2p.Endpoint{ Protocol: p2p.MConnProtocol, IP: net.IPv4(127, 0, 0, 1), }) require.NoError(t, err) - require.NotEmpty(t, transport.Endpoints()) - endpoint := transport.Endpoints()[0] + endpoint, err := transport.Endpoint() + require.NoError(t, err) + require.NotNil(t, endpoint) // Start a goroutine to just accept any connections. acceptCh := make(chan p2p.Connection, 10) go func() { for { - conn, err := transport.Accept() + conn, err := transport.Accept(ctx) if err != nil { return } @@ -124,21 +129,24 @@ func TestMConnTransport_AcceptMaxAcceptedConnections(t *testing.T) { } func TestMConnTransport_Listen(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + testcases := []struct { - endpoint p2p.Endpoint + endpoint *p2p.Endpoint ok bool }{ // Valid v4 and v6 addresses, with mconn and tcp protocols. 
- {p2p.Endpoint{Protocol: p2p.MConnProtocol, IP: net.IPv4zero}, true}, - {p2p.Endpoint{Protocol: p2p.MConnProtocol, IP: net.IPv4(127, 0, 0, 1)}, true}, - {p2p.Endpoint{Protocol: p2p.MConnProtocol, IP: net.IPv6zero}, true}, - {p2p.Endpoint{Protocol: p2p.MConnProtocol, IP: net.IPv6loopback}, true}, - {p2p.Endpoint{Protocol: p2p.TCPProtocol, IP: net.IPv4zero}, true}, + {&p2p.Endpoint{Protocol: p2p.MConnProtocol, IP: net.IPv4zero}, true}, + {&p2p.Endpoint{Protocol: p2p.MConnProtocol, IP: net.IPv4(127, 0, 0, 1)}, true}, + {&p2p.Endpoint{Protocol: p2p.MConnProtocol, IP: net.IPv6zero}, true}, + {&p2p.Endpoint{Protocol: p2p.MConnProtocol, IP: net.IPv6loopback}, true}, + {&p2p.Endpoint{Protocol: p2p.TCPProtocol, IP: net.IPv4zero}, true}, // Invalid endpoints. - {p2p.Endpoint{}, false}, - {p2p.Endpoint{Protocol: p2p.MConnProtocol, Path: "foo"}, false}, - {p2p.Endpoint{Protocol: p2p.MConnProtocol, IP: net.IPv4zero, Path: "foo"}, false}, + {&p2p.Endpoint{}, false}, + {&p2p.Endpoint{Protocol: p2p.MConnProtocol, Path: "foo"}, false}, + {&p2p.Endpoint{Protocol: p2p.MConnProtocol, IP: net.IPv4zero, Path: "foo"}, false}, } for _, tc := range testcases { tc := tc @@ -146,17 +154,19 @@ func TestMConnTransport_Listen(t *testing.T) { t.Cleanup(leaktest.Check(t)) transport := p2p.NewMConnTransport( - log.TestingLogger(), + log.NewNopLogger(), conn.DefaultMConnConfig(), - []*p2p.ChannelDescriptor{{ID: byte(chID), Priority: 1}}, + []*p2p.ChannelDescriptor{{ID: chID, Priority: 1}}, p2p.MConnTransportOptions{}, ) // Transport should not listen on any endpoints yet. - require.Empty(t, transport.Endpoints()) + endpoint, err := transport.Endpoint() + require.Error(t, err) + require.Nil(t, endpoint) // Start listening, and check any expected errors. - err := transport.Listen(tc.endpoint) + err = transport.Listen(tc.endpoint) if !tc.ok { require.Error(t, err) return @@ -164,9 +174,9 @@ func TestMConnTransport_Listen(t *testing.T) { require.NoError(t, err) // Check the endpoint. - endpoints := transport.Endpoints() - require.Len(t, endpoints, 1) - endpoint := endpoints[0] + endpoint, err = transport.Endpoint() + require.NoError(t, err) + require.NotNil(t, endpoint) require.Equal(t, p2p.MConnProtocol, endpoint.Protocol) if tc.endpoint.IP.IsUnspecified() { @@ -185,12 +195,15 @@ func TestMConnTransport_Listen(t *testing.T) { go func() { // Dialing the endpoint should work. var err error + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + peerConn, err = transport.Dial(ctx, endpoint) require.NoError(t, err) close(dialedChan) }() - conn, err := transport.Accept() + conn, err := transport.Accept(ctx) require.NoError(t, err) _ = conn.Close() <-dialedChan @@ -199,7 +212,7 @@ func TestMConnTransport_Listen(t *testing.T) { require.NoError(t, peerConn.Close()) // try to read from the connection should error - _, _, err = peerConn.ReceiveMessage() + _, _, err = peerConn.ReceiveMessage(ctx) require.Error(t, err) // Trying to listen again should error. 
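A detail the tests above lean on: Listen with Port 0 asks the kernel for a free port, and the new Endpoint() accessor is how a caller recovers the address actually bound. A minimal usage sketch, assuming the revised p2p and conn packages from this diff, with error handling reduced to panics for brevity:

```go
import (
	"fmt"
	"net"

	"github.com/tendermint/tendermint/internal/p2p"
	"github.com/tendermint/tendermint/internal/p2p/conn"
	"github.com/tendermint/tendermint/libs/log"
)

func listenEphemeral() {
	transport := p2p.NewMConnTransport(
		log.NewNopLogger(),
		conn.DefaultMConnConfig(),
		[]*p2p.ChannelDescriptor{},
		p2p.MConnTransportOptions{},
	)
	err := transport.Listen(&p2p.Endpoint{
		Protocol: p2p.MConnProtocol,
		IP:       net.IPv4(127, 0, 0, 1),
		Port:     0, // let the OS pick a free port
	})
	if err != nil {
		panic(err)
	}
	defer transport.Close()

	endpoint, err := transport.Endpoint() // reports the assigned port
	if err != nil {
		panic(err)
	}
	fmt.Println("listening on port", endpoint.Port)
}
```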
diff --git a/internal/p2p/transport_memory.go b/internal/p2p/transport_memory.go index 41bc013461..f02e828d6f 100644 --- a/internal/p2p/transport_memory.go +++ b/internal/p2p/transport_memory.go @@ -9,8 +9,6 @@ import ( "sync" "github.com/tendermint/tendermint/crypto" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/p2p/conn" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/types" ) @@ -95,21 +93,24 @@ type MemoryTransport struct { nodeID types.NodeID bufferSize int - acceptCh chan *MemoryConnection - closeCh chan struct{} - closeOnce sync.Once + acceptCh chan *MemoryConnection + closeCh chan struct{} + closeFn func() } // newMemoryTransport creates a new MemoryTransport. This is for internal use by // MemoryNetwork, use MemoryNetwork.CreateTransport() instead. func newMemoryTransport(network *MemoryNetwork, nodeID types.NodeID) *MemoryTransport { + once := &sync.Once{} + closeCh := make(chan struct{}) return &MemoryTransport{ logger: network.logger.With("local", nodeID), network: network, nodeID: nodeID, bufferSize: network.bufferSize, acceptCh: make(chan *MemoryConnection), - closeCh: make(chan struct{}), + closeCh: closeCh, + closeFn: func() { once.Do(func() { close(closeCh) }) }, } } @@ -118,41 +119,46 @@ func (t *MemoryTransport) String() string { return string(MemoryProtocol) } +func (*MemoryTransport) Listen(*Endpoint) error { return nil } + +func (t *MemoryTransport) AddChannelDescriptors([]*ChannelDescriptor) {} + // Protocols implements Transport. func (t *MemoryTransport) Protocols() []Protocol { return []Protocol{MemoryProtocol} } // Endpoints implements Transport. -func (t *MemoryTransport) Endpoints() []Endpoint { - select { - case <-t.closeCh: - return []Endpoint{} - default: - return []Endpoint{{ - Protocol: MemoryProtocol, - Path: string(t.nodeID), - // An arbitrary IP and port is used in order for the pex - // reactor to be able to send addresses to one another. - IP: net.IPv4zero, - Port: 0, - }} +func (t *MemoryTransport) Endpoint() (*Endpoint, error) { + if n := t.network.GetTransport(t.nodeID); n == nil { + return nil, errors.New("node not defined") } + + return &Endpoint{ + Protocol: MemoryProtocol, + Path: string(t.nodeID), + // An arbitrary IP and port is used in order for the pex + // reactor to be able to send addresses to one another. + IP: net.IPv4zero, + Port: 0, + }, nil } // Accept implements Transport. -func (t *MemoryTransport) Accept() (Connection, error) { +func (t *MemoryTransport) Accept(ctx context.Context) (Connection, error) { select { + case <-t.closeCh: + return nil, io.EOF case conn := <-t.acceptCh: t.logger.Info("accepted connection", "remote", conn.RemoteEndpoint().Path) return conn, nil - case <-t.closeCh: + case <-ctx.Done(): return nil, io.EOF } } // Dial implements Transport. 
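The Dial implementation that follows constructs both ends of a conversation from one pair of buffered channels: the dialer's outbound channel is the acceptor's inbound channel, and vice versa. Reduced to a toy (pipeEnd and memoryPipe are hypothetical, not part of the diff):

```go
// pipeEnd is a toy reduction of MemoryConnection: one side's send
// channel is the other side's receive channel, so messages move between
// ends without any real networking.
type pipeEnd struct {
	send chan<- []byte
	recv <-chan []byte
}

// memoryPipe mirrors how Dial pairs inCh/outCh for the dialer's outConn
// and the acceptor's inConn; buffering decouples the two sides.
func memoryPipe(bufferSize int) (dialer, acceptor pipeEnd) {
	aToB := make(chan []byte, bufferSize)
	bToA := make(chan []byte, bufferSize)
	dialer = pipeEnd{send: aToB, recv: bToA}
	acceptor = pipeEnd{send: bToA, recv: aToB}
	return dialer, acceptor
}
```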
-func (t *MemoryTransport) Dial(ctx context.Context, endpoint Endpoint) (Connection, error) { +func (t *MemoryTransport) Dial(ctx context.Context, endpoint *Endpoint) (Connection, error) { if endpoint.Protocol != MemoryProtocol { return nil, fmt.Errorf("invalid protocol %q", endpoint.Protocol) } @@ -176,28 +182,30 @@ func (t *MemoryTransport) Dial(ctx context.Context, endpoint Endpoint) (Connecti inCh := make(chan memoryMessage, t.bufferSize) outCh := make(chan memoryMessage, t.bufferSize) - closer := tmsync.NewCloser() - outConn := newMemoryConnection(t.logger, t.nodeID, peer.nodeID, inCh, outCh, closer) - inConn := newMemoryConnection(peer.logger, peer.nodeID, t.nodeID, outCh, inCh, closer) + once := &sync.Once{} + closeCh := make(chan struct{}) + closeFn := func() { once.Do(func() { close(closeCh) }) } + + outConn := newMemoryConnection(t.logger, t.nodeID, peer.nodeID, inCh, outCh) + outConn.closeCh = closeCh + outConn.closeFn = closeFn + inConn := newMemoryConnection(peer.logger, peer.nodeID, t.nodeID, outCh, inCh) + inConn.closeCh = closeCh + inConn.closeFn = closeFn select { case peer.acceptCh <- inConn: return outConn, nil - case <-peer.closeCh: - return nil, io.EOF case <-ctx.Done(): - return nil, ctx.Err() + return nil, io.EOF } } // Close implements Transport. func (t *MemoryTransport) Close() error { t.network.RemoveTransport(t.nodeID) - t.closeOnce.Do(func() { - close(t.closeCh) - t.logger.Info("closed transport") - }) + t.closeFn() return nil } @@ -209,7 +217,9 @@ type MemoryConnection struct { receiveCh <-chan memoryMessage sendCh chan<- memoryMessage - closer *tmsync.Closer + + closeFn func() + closeCh <-chan struct{} } // memoryMessage is passed internally, containing either a message or handshake. @@ -229,7 +239,6 @@ func newMemoryConnection( remoteID types.NodeID, receiveCh <-chan memoryMessage, sendCh chan<- memoryMessage, - closer *tmsync.Closer, ) *MemoryConnection { return &MemoryConnection{ logger: logger.With("remote", remoteID), @@ -237,7 +246,6 @@ func newMemoryConnection( remoteID: remoteID, receiveCh: receiveCh, sendCh: sendCh, - closer: closer, } } @@ -262,11 +270,6 @@ func (c *MemoryConnection) RemoteEndpoint() Endpoint { } } -// Status implements Connection. -func (c *MemoryConnection) Status() conn.ConnectionStatus { - return conn.ConnectionStatus{} -} - // Handshake implements Connection. func (c *MemoryConnection) Handshake( ctx context.Context, @@ -276,7 +279,7 @@ func (c *MemoryConnection) Handshake( select { case c.sendCh <- memoryMessage{nodeInfo: &nodeInfo, pubKey: privKey.PubKey()}: c.logger.Debug("sent handshake", "nodeInfo", nodeInfo) - case <-c.closer.Done(): + case <-c.closeCh: return types.NodeInfo{}, nil, io.EOF case <-ctx.Done(): return types.NodeInfo{}, nil, ctx.Err() @@ -289,7 +292,7 @@ func (c *MemoryConnection) Handshake( } c.logger.Debug("received handshake", "peerInfo", msg.nodeInfo) return *msg.nodeInfo, msg.pubKey, nil - case <-c.closer.Done(): + case <-c.closeCh: return types.NodeInfo{}, nil, io.EOF case <-ctx.Done(): return types.NodeInfo{}, nil, ctx.Err() @@ -297,11 +300,13 @@ func (c *MemoryConnection) Handshake( } // ReceiveMessage implements Connection. -func (c *MemoryConnection) ReceiveMessage() (ChannelID, []byte, error) { +func (c *MemoryConnection) ReceiveMessage(ctx context.Context) (ChannelID, []byte, error) { // Check close first, since channels are buffered. Otherwise, below select // may non-deterministically return non-error even when closed. 
select { - case <-c.closer.Done(): + case <-c.closeCh: + return 0, nil, io.EOF + case <-ctx.Done(): return 0, nil, io.EOF default: } @@ -310,64 +315,35 @@ func (c *MemoryConnection) ReceiveMessage() (ChannelID, []byte, error) { case msg := <-c.receiveCh: // c.logger.Debug("received message", "chID", msg.channelID, "msg", msg.message) return msg.channelID, msg.message, nil - case <-c.closer.Done(): + case <-ctx.Done(): + return 0, nil, io.EOF + case <-c.closeCh: return 0, nil, io.EOF } } // SendMessage implements Connection. -func (c *MemoryConnection) SendMessage(chID ChannelID, msg []byte) (bool, error) { - // Check close first, since channels are buffered. Otherwise, below select - // may non-deterministically return non-error even when closed. - select { - case <-c.closer.Done(): - return false, io.EOF - default: - } - - select { - case c.sendCh <- memoryMessage{channelID: chID, message: msg}: - // c.logger.Debug("sent message", "chID", chID, "msg", msg) - return true, nil - case <-c.closer.Done(): - return false, io.EOF - } -} - -// TrySendMessage implements Connection. -func (c *MemoryConnection) TrySendMessage(chID ChannelID, msg []byte) (bool, error) { +func (c *MemoryConnection) SendMessage(ctx context.Context, chID ChannelID, msg []byte) error { // Check close first, since channels are buffered. Otherwise, below select // may non-deterministically return non-error even when closed. select { - case <-c.closer.Done(): - return false, io.EOF + case <-c.closeCh: + return io.EOF + case <-ctx.Done(): + return io.EOF default: } select { case c.sendCh <- memoryMessage{channelID: chID, message: msg}: - // c.logger.Debug("sent message", "chID", chID, "msg", msg) - return true, nil - case <-c.closer.Done(): - return false, io.EOF - default: - return false, nil - } -} - -// Close implements Connection. -func (c *MemoryConnection) Close() error { - select { - case <-c.closer.Done(): + //c.logger.Debug("sent message", "chID", chID, "msg", msg) return nil - default: - c.closer.Close() - c.logger.Info("closed connection") + case <-ctx.Done(): + return io.EOF + case <-c.closeCh: + return io.EOF } - return nil } -// FlushClose implements Connection. -func (c *MemoryConnection) FlushClose() error { - return c.Close() -} +// Close implements Connection. +func (c *MemoryConnection) Close() error { c.closeFn(); return nil } diff --git a/internal/p2p/transport_memory_test.go b/internal/p2p/transport_memory_test.go index c4eea65c30..33d96cdb84 100644 --- a/internal/p2p/transport_memory_test.go +++ b/internal/p2p/transport_memory_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/types" @@ -18,7 +19,7 @@ func init() { testTransports["memory"] = func(t *testing.T) p2p.Transport { if network == nil { - network = p2p.NewMemoryNetwork(log.TestingLogger(), 1) + network = p2p.NewMemoryNetwork(log.NewNopLogger(), 1) } i := byte(network.Size()) nodeID, err := types.NewNodeID(hex.EncodeToString(bytes.Repeat([]byte{i<<4 + i}, 20))) diff --git a/internal/p2p/transport_test.go b/internal/p2p/transport_test.go index 1b8ab77f5a..b4edf9bc95 100644 --- a/internal/p2p/transport_test.go +++ b/internal/p2p/transport_test.go @@ -25,36 +25,43 @@ var testTransports = map[string]transportFactory{} // withTransports is a test helper that runs a test against all transports // registered in testTransports. 
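Throughout the memory transport, the tmsync.Closer dependency gives way to a close channel guarded by a sync.Once closure, shared by both connection ends so that either side may close first and repeated closes stay panic-free. The idiom in isolation (onceChan is a hypothetical name):

```go
import "sync"

// onceChan is a channel that can be closed exactly once, from any
// goroutine: a hypothetical distillation of the closeFn/closeCh pair
// that replaces tmsync.Closer above.
type onceChan struct {
	once sync.Once
	ch   chan struct{}
}

func newOnceChan() *onceChan { return &onceChan{ch: make(chan struct{})} }

// close is idempotent; redundant calls are no-ops.
func (c *onceChan) close() { c.once.Do(func() { close(c.ch) }) }

// done can be selected on to observe closure.
func (c *onceChan) done() <-chan struct{} { return c.ch }
```

The test changes below thread contexts through the shared withTransports helper, whose revised signature opens the next hunk.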
-func withTransports(t *testing.T, tester func(*testing.T, transportFactory)) { +func withTransports(ctx context.Context, t *testing.T, tester func(context.Context, *testing.T, transportFactory)) { t.Helper() for name, transportFactory := range testTransports { transportFactory := transportFactory t.Run(name, func(t *testing.T) { t.Cleanup(leaktest.Check(t)) - tester(t, transportFactory) + tctx, cancel := context.WithCancel(ctx) + defer cancel() + + tester(tctx, t, transportFactory) }) } } func TestTransport_AcceptClose(t *testing.T) { // Just test accept unblock on close, happy path is tested widely elsewhere. - withTransports(t, func(t *testing.T, makeTransport transportFactory) { - a := makeTransport(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - // In-progress Accept should error on concurrent close. - errCh := make(chan error, 1) - go func() { - time.Sleep(200 * time.Millisecond) - errCh <- a.Close() - }() + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { + a := makeTransport(t) + opctx, opcancel := context.WithTimeout(ctx, 200*time.Millisecond) + defer opcancel() - _, err := a.Accept() + _, err := a.Accept(opctx) require.Error(t, err) require.Equal(t, io.EOF, err) - require.NoError(t, <-errCh) - // Closed transport should return error immediately. - _, err = a.Accept() + <-opctx.Done() + _ = a.Close() + + // Closed transport should return error immediately, + // because the transport is closed. We use the base + // context (ctx) rather than the operation context + // (opctx) because using the latter would mean this + // could fail merely because the context was canceled. + _, err = a.Accept(ctx) require.Error(t, err) require.Equal(t, io.EOF, err) }) @@ -75,16 +82,19 @@ {[]byte{1, 2, 3, 4, 5}, false}, } - withTransports(t, func(t *testing.T, makeTransport transportFactory) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) - endpoints := a.Endpoints() - require.NotEmpty(t, endpoints) - endpoint := endpoints[0] + endpoint, err := a.Endpoint() + require.NoError(t, err) + require.NotNil(t, endpoint) // Spawn a goroutine to simply accept any connections until closed. go func() { for { - conn, err := a.Accept() + conn, err := a.Accept(ctx) if err != nil { return } @@ -98,19 +108,19 @@ require.NoError(t, conn.Close()) // Dialing empty endpoint should error. - _, err = a.Dial(ctx, p2p.Endpoint{}) + _, err = a.Dial(ctx, &p2p.Endpoint{}) require.Error(t, err) // Dialing without protocol should error. - noProtocol := endpoint + noProtocol := *endpoint noProtocol.Protocol = "" - _, err = a.Dial(ctx, noProtocol) + _, err = a.Dial(ctx, &noProtocol) require.Error(t, err) // Dialing with invalid protocol should error. - fooProtocol := endpoint + fooProtocol := *endpoint fooProtocol.Protocol = "foo" - _, err = a.Dial(ctx, fooProtocol) + _, err = a.Dial(ctx, &fooProtocol) require.Error(t, err) // Tests for networked endpoints (with IP).
@@ -119,11 +129,12 @@ func TestTransport_DialEndpoints(t *testing.T) { tc := tc t.Run(tc.ip.String(), func(t *testing.T) { e := endpoint + require.NotNil(t, e) e.IP = tc.ip conn, err := a.Dial(ctx, e) if tc.ok { - require.NoError(t, conn.Close()) require.NoError(t, err) + require.NoError(t, conn.Close()) } else { require.Error(t, err, "endpoint=%s", e) } @@ -149,23 +160,27 @@ func TestTransport_DialEndpoints(t *testing.T) { } func TestTransport_Dial(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Most just tests dial failures, happy path is tested widely elsewhere. - withTransports(t, func(t *testing.T, makeTransport transportFactory) { + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) b := makeTransport(t) - require.NotEmpty(t, a.Endpoints()) - require.NotEmpty(t, b.Endpoints()) - aEndpoint := a.Endpoints()[0] - bEndpoint := b.Endpoints()[0] + aEndpoint, err := a.Endpoint() + require.NoError(t, err) + require.NotNil(t, aEndpoint) + bEndpoint, err := b.Endpoint() + require.NoError(t, err) + require.NotNil(t, bEndpoint) // Context cancellation should error. We can't test timeouts since we'd // need a non-responsive endpoint. cancelCtx, cancel := context.WithCancel(ctx) cancel() - _, err := a.Dial(cancelCtx, bEndpoint) + _, err = a.Dial(cancelCtx, bEndpoint) require.Error(t, err) - require.Equal(t, err, context.Canceled) // Unavailable endpoint should error. err = b.Close() @@ -176,7 +191,7 @@ func TestTransport_Dial(t *testing.T) { // Dialing from a closed transport should still work. errCh := make(chan error, 1) go func() { - conn, err := a.Accept() + conn, err := a.Accept(ctx) if err == nil { _ = conn.Close() } @@ -190,55 +205,71 @@ func TestTransport_Dial(t *testing.T) { } func TestTransport_Endpoints(t *testing.T) { - withTransports(t, func(t *testing.T, makeTransport transportFactory) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) b := makeTransport(t) // Both transports return valid and different endpoints. - aEndpoints := a.Endpoints() - bEndpoints := b.Endpoints() - require.NotEmpty(t, aEndpoints) - require.NotEmpty(t, bEndpoints) - require.NotEqual(t, aEndpoints, bEndpoints) - for _, endpoint := range append(aEndpoints, bEndpoints...) { + aEndpoint, err := a.Endpoint() + require.NoError(t, err) + require.NotNil(t, aEndpoint) + bEndpoint, err := b.Endpoint() + require.NoError(t, err) + require.NotNil(t, bEndpoint) + require.NotEqual(t, aEndpoint, bEndpoint) + for _, endpoint := range []*p2p.Endpoint{aEndpoint, bEndpoint} { err := endpoint.Validate() require.NoError(t, err, "invalid endpoint %q", endpoint) } // When closed, the transport should no longer return any endpoints. 
- err := a.Close() + require.NoError(t, a.Close()) + aEndpoint, err = a.Endpoint() + require.Error(t, err) + require.Nil(t, aEndpoint) + bEndpoint, err = b.Endpoint() require.NoError(t, err) - require.Empty(t, a.Endpoints()) - require.NotEmpty(t, b.Endpoints()) + require.NotNil(t, bEndpoint) }) } func TestTransport_Protocols(t *testing.T) { - withTransports(t, func(t *testing.T, makeTransport transportFactory) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) protocols := a.Protocols() - endpoints := a.Endpoints() + endpoint, err := a.Endpoint() + require.NoError(t, err) require.NotEmpty(t, protocols) - require.NotEmpty(t, endpoints) + require.NotNil(t, endpoint) - for _, endpoint := range endpoints { - require.Contains(t, protocols, endpoint.Protocol) - } + require.Contains(t, protocols, endpoint.Protocol) }) } func TestTransport_String(t *testing.T) { - withTransports(t, func(t *testing.T, makeTransport transportFactory) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) require.NotEmpty(t, a.String()) }) } func TestConnection_Handshake(t *testing.T) { - withTransports(t, func(t *testing.T, makeTransport transportFactory) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) b := makeTransport(t) - ab, ba := dialAccept(t, a, b) + ab, ba := dialAccept(ctx, t, a, b) // A handshake should pass the given keys and NodeInfo. aKey := ed25519.GenPrivKey() @@ -270,7 +301,10 @@ func TestConnection_Handshake(t *testing.T) { assert.Equal(t, aInfo, peerInfo) assert.Equal(t, aKey.PubKey(), peerKey) } - errCh <- err + select { + case errCh <- err: + case <-ctx.Done(): + } }() peerInfo, peerKey, err := ab.Handshake(ctx, aInfo, aKey) @@ -283,12 +317,15 @@ func TestConnection_Handshake(t *testing.T) { } func TestConnection_HandshakeCancel(t *testing.T) { - withTransports(t, func(t *testing.T, makeTransport transportFactory) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) b := makeTransport(t) // Handshake should error on context cancellation. - ab, ba := dialAccept(t, a, b) + ab, ba := dialAccept(ctx, t, a, b) timeoutCtx, cancel := context.WithTimeout(ctx, 1*time.Minute) cancel() _, _, err := ab.Handshake(timeoutCtx, types.NodeInfo{}, ed25519.GenPrivKey()) @@ -298,7 +335,7 @@ func TestConnection_HandshakeCancel(t *testing.T) { _ = ba.Close() // Handshake should error on context timeout. 
- ab, ba = dialAccept(t, a, b) + ab, ba = dialAccept(ctx, t, a, b) timeoutCtx, cancel = context.WithTimeout(ctx, 200*time.Millisecond) defer cancel() _, _, err = ab.Handshake(timeoutCtx, types.NodeInfo{}, ed25519.GenPrivKey()) @@ -310,35 +347,34 @@ func TestConnection_HandshakeCancel(t *testing.T) { } func TestConnection_FlushClose(t *testing.T) { - withTransports(t, func(t *testing.T, makeTransport transportFactory) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) b := makeTransport(t) - ab, _ := dialAcceptHandshake(t, a, b) + ab, _ := dialAcceptHandshake(ctx, t, a, b) - // FIXME: FlushClose should be removed (and replaced by separate Flush - // and Close calls if necessary). We can't reliably test it, so we just - // make sure it closes both ends and that it's idempotent. - err := ab.FlushClose() + err := ab.Close() require.NoError(t, err) - _, _, err = ab.ReceiveMessage() + _, _, err = ab.ReceiveMessage(ctx) require.Error(t, err) require.Equal(t, io.EOF, err) - _, err = ab.SendMessage(chID, []byte("closed")) + err = ab.SendMessage(ctx, chID, []byte("closed")) require.Error(t, err) - require.Equal(t, io.EOF, err) - - err = ab.FlushClose() - require.NoError(t, err) }) } func TestConnection_LocalRemoteEndpoint(t *testing.T) { - withTransports(t, func(t *testing.T, makeTransport transportFactory) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) b := makeTransport(t) - ab, ba := dialAcceptHandshake(t, a, b) + ab, ba := dialAcceptHandshake(ctx, t, a, b) // Local and remote connection endpoints correspond to each other. require.NotEmpty(t, ab.LocalEndpoint()) @@ -349,48 +385,40 @@ func TestConnection_LocalRemoteEndpoint(t *testing.T) { } func TestConnection_SendReceive(t *testing.T) { - withTransports(t, func(t *testing.T, makeTransport transportFactory) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) b := makeTransport(t) - ab, ba := dialAcceptHandshake(t, a, b) + ab, ba := dialAcceptHandshake(ctx, t, a, b) // Can send and receive a to b. - ok, err := ab.SendMessage(chID, []byte("foo")) + err := ab.SendMessage(ctx, chID, []byte("foo")) require.NoError(t, err) - require.True(t, ok) - ch, msg, err := ba.ReceiveMessage() + ch, msg, err := ba.ReceiveMessage(ctx) require.NoError(t, err) require.Equal(t, []byte("foo"), msg) require.Equal(t, chID, ch) // Can send and receive b to a. - _, err = ba.SendMessage(chID, []byte("bar")) + err = ba.SendMessage(ctx, chID, []byte("bar")) require.NoError(t, err) - _, msg, err = ab.ReceiveMessage() + _, msg, err = ab.ReceiveMessage(ctx) require.NoError(t, err) require.Equal(t, []byte("bar"), msg) - // TrySendMessage also works. - ok, err = ba.TrySendMessage(chID, []byte("try")) - require.NoError(t, err) - require.True(t, ok) - - ch, msg, err = ab.ReceiveMessage() - require.NoError(t, err) - require.Equal(t, []byte("try"), msg) - require.Equal(t, chID, ch) - // Connections should still be active after closing the transports. 
err = a.Close() require.NoError(t, err) err = b.Close() require.NoError(t, err) - _, err = ab.SendMessage(chID, []byte("still here")) + err = ab.SendMessage(ctx, chID, []byte("still here")) require.NoError(t, err) - ch, msg, err = ba.ReceiveMessage() + ch, msg, err = ba.ReceiveMessage(ctx) require.NoError(t, err) require.Equal(t, chID, ch) require.Equal(t, []byte("still here"), msg) @@ -400,45 +428,31 @@ func TestConnection_SendReceive(t *testing.T) { err = ba.Close() require.NoError(t, err) - _, _, err = ab.ReceiveMessage() - require.Error(t, err) - require.Equal(t, io.EOF, err) - _, err = ab.TrySendMessage(chID, []byte("closed try")) - require.Error(t, err) - require.Equal(t, io.EOF, err) - _, err = ab.SendMessage(chID, []byte("closed")) + _, _, err = ab.ReceiveMessage(ctx) require.Error(t, err) require.Equal(t, io.EOF, err) - _, _, err = ba.ReceiveMessage() + err = ab.SendMessage(ctx, chID, []byte("closed")) require.Error(t, err) require.Equal(t, io.EOF, err) - _, err = ba.TrySendMessage(chID, []byte("closed try")) - require.Error(t, err) - require.Equal(t, io.EOF, err) - _, err = ba.SendMessage(chID, []byte("closed")) + + _, _, err = ba.ReceiveMessage(ctx) require.Error(t, err) require.Equal(t, io.EOF, err) - }) -} -func TestConnection_Status(t *testing.T) { - withTransports(t, func(t *testing.T, makeTransport transportFactory) { - a := makeTransport(t) - b := makeTransport(t) - ab, _ := dialAcceptHandshake(t, a, b) - - // FIXME: This isn't implemented in all transports, so for now we just - // check that it doesn't panic, which isn't really much of a test. - ab.Status() + err = ba.SendMessage(ctx, chID, []byte("closed")) + require.Error(t, err) }) } func TestConnection_String(t *testing.T) { - withTransports(t, func(t *testing.T, makeTransport transportFactory) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + withTransports(ctx, t, func(ctx context.Context, t *testing.T, makeTransport transportFactory) { a := makeTransport(t) b := makeTransport(t) - ab, _ := dialAccept(t, a, b) + ab, _ := dialAccept(ctx, t, a, b) require.NotEmpty(t, ab.String()) }) } @@ -585,11 +599,12 @@ func TestEndpoint_Validate(t *testing.T) { // dialAccept is a helper that dials b from a and returns both sides of the // connection. -func dialAccept(t *testing.T, a, b p2p.Transport) (p2p.Connection, p2p.Connection) { +func dialAccept(ctx context.Context, t *testing.T, a, b p2p.Transport) (p2p.Connection, p2p.Connection) { t.Helper() - endpoints := b.Endpoints() - require.NotEmpty(t, endpoints, "peer not listening on any endpoints") + endpoint, err := b.Endpoint() + require.NoError(t, err) + require.NotNil(t, endpoint, "peer not listening on any endpoints") ctx, cancel := context.WithTimeout(ctx, time.Second) defer cancel() @@ -597,12 +612,12 @@ func dialAccept(t *testing.T, a, b p2p.Transport) (p2p.Connection, p2p.Connectio acceptCh := make(chan p2p.Connection, 1) errCh := make(chan error, 1) go func() { - conn, err := b.Accept() + conn, err := b.Accept(ctx) errCh <- err acceptCh <- conn }() - dialConn, err := a.Dial(ctx, endpoints[0]) + dialConn, err := a.Dial(ctx, endpoint) require.NoError(t, err) acceptConn := <-acceptCh @@ -618,13 +633,10 @@ func dialAccept(t *testing.T, a, b p2p.Transport) (p2p.Connection, p2p.Connectio // dialAcceptHandshake is a helper that dials and handshakes b from a and // returns both sides of the connection. 
-func dialAcceptHandshake(t *testing.T, a, b p2p.Transport) (p2p.Connection, p2p.Connection) { +func dialAcceptHandshake(ctx context.Context, t *testing.T, a, b p2p.Transport) (p2p.Connection, p2p.Connection) { t.Helper() - ab, ba := dialAccept(t, a, b) - - ctx, cancel := context.WithTimeout(ctx, time.Second) - defer cancel() + ab, ba := dialAccept(ctx, t, a, b) errCh := make(chan error, 1) go func() { diff --git a/internal/p2p/trust/config.go b/internal/p2p/trust/config.go deleted file mode 100644 index 0f990a991d..0000000000 --- a/internal/p2p/trust/config.go +++ /dev/null @@ -1,55 +0,0 @@ -package trust - -import "time" - -// MetricConfig - Configures the weight functions and time intervals for the metric -type MetricConfig struct { - // Determines the percentage given to current behavior - ProportionalWeight float64 - - // Determines the percentage given to prior behavior - IntegralWeight float64 - - // The window of time that the trust metric will track events across. - // This can be set to cover many days without issue - TrackingWindow time.Duration - - // Each interval should be short for adaptability. - // Less than 30 seconds is too sensitive, - // and greater than 5 minutes will make the metric numb - IntervalLength time.Duration -} - -// DefaultConfig returns a config with values that have been tested and produce desirable results -func DefaultConfig() MetricConfig { - return MetricConfig{ - ProportionalWeight: 0.4, - IntegralWeight: 0.6, - TrackingWindow: (time.Minute * 60 * 24) * 14, // 14 days. - IntervalLength: 1 * time.Minute, - } -} - -// Ensures that all configuration elements have valid values -func customConfig(tmc MetricConfig) MetricConfig { - config := DefaultConfig() - - // Check the config for set values, and set up appropriately - if tmc.ProportionalWeight > 0 { - config.ProportionalWeight = tmc.ProportionalWeight - } - - if tmc.IntegralWeight > 0 { - config.IntegralWeight = tmc.IntegralWeight - } - - if tmc.IntervalLength > time.Duration(0) { - config.IntervalLength = tmc.IntervalLength - } - - if tmc.TrackingWindow > time.Duration(0) && - tmc.TrackingWindow >= config.IntervalLength { - config.TrackingWindow = tmc.TrackingWindow - } - return config -} diff --git a/internal/p2p/trust/metric.go b/internal/p2p/trust/metric.go deleted file mode 100644 index aa0ff52986..0000000000 --- a/internal/p2p/trust/metric.go +++ /dev/null @@ -1,412 +0,0 @@ -// Copyright 2017 Tendermint. All rights reserved. -// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
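Since config.go is deleted wholesale above, its merge semantics are worth restating compactly: zero-valued fields of a caller-supplied MetricConfig fall back to the defaults, and a custom tracking window is only accepted if it covers at least one interval. A self-contained re-statement (standalone sketch, not the removed code itself):

```go
package main

import (
	"fmt"
	"time"
)

// Standalone re-statement of the removed trust.MetricConfig merge semantics.
type MetricConfig struct {
	ProportionalWeight float64
	IntegralWeight     float64
	TrackingWindow     time.Duration
	IntervalLength     time.Duration
}

func defaultConfig() MetricConfig {
	return MetricConfig{
		ProportionalWeight: 0.4,
		IntegralWeight:     0.6,
		TrackingWindow:     14 * 24 * time.Hour,
		IntervalLength:     time.Minute,
	}
}

// merge keeps defaults for zero-valued fields and only accepts a tracking
// window that spans at least one interval, as customConfig above does.
func merge(tmc MetricConfig) MetricConfig {
	cfg := defaultConfig()
	if tmc.ProportionalWeight > 0 {
		cfg.ProportionalWeight = tmc.ProportionalWeight
	}
	if tmc.IntegralWeight > 0 {
		cfg.IntegralWeight = tmc.IntegralWeight
	}
	if tmc.IntervalLength > 0 {
		cfg.IntervalLength = tmc.IntervalLength
	}
	if tmc.TrackingWindow > 0 && tmc.TrackingWindow >= cfg.IntervalLength {
		cfg.TrackingWindow = tmc.TrackingWindow
	}
	return cfg
}

func main() {
	// Only the weights are customized; window and interval keep defaults.
	cfg := merge(MetricConfig{ProportionalWeight: 0.3, IntegralWeight: 0.7})
	fmt.Println(cfg.TrackingWindow, cfg.IntervalLength) // 336h0m0s 1m0s
}
```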
- -package trust - -import ( - "math" - "time" - - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/libs/service" -) - -//--------------------------------------------------------------------------------------- - -const ( - // The weight applied to the derivative when current behavior is >= previous behavior - defaultDerivativeGamma1 = 0 - - // The weight applied to the derivative when current behavior is less than previous behavior - defaultDerivativeGamma2 = 1.0 - - // The weight applied to history data values when calculating the history value - defaultHistoryDataWeight = 0.8 -) - -// MetricHistoryJSON - history data necessary to save the trust metric -type MetricHistoryJSON struct { - NumIntervals int `json:"intervals"` - History []float64 `json:"history"` -} - -// Metric - keeps track of peer reliability -// See tendermint/docs/architecture/adr-006-trust-metric.md for details -type Metric struct { - service.BaseService - - // Mutex that protects the metric from concurrent access - mtx tmsync.Mutex - - // Determines the percentage given to current behavior - proportionalWeight float64 - - // Determines the percentage given to prior behavior - integralWeight float64 - - // Count of how many time intervals this metric has been tracking - numIntervals int - - // Size of the time interval window for this trust metric - maxIntervals int - - // The time duration for a single time interval - intervalLen time.Duration - - // Stores the trust history data for this metric - history []float64 - - // Weights applied to the history data when calculating the history value - historyWeights []float64 - - // The sum of the history weights used when calculating the history value - historyWeightSum float64 - - // The current number of history data elements - historySize int - - // The maximum number of history data elements - historyMaxSize int - - // The calculated history value for the current time interval - historyValue float64 - - // The number of recorded good and bad events for the current time interval - bad, good float64 - - // While true, history data is not modified - paused bool - - // Used during testing in order to control the passing of time intervals - testTicker MetricTicker -} - -// NewMetric returns a trust metric with the default configuration. -// Use Start to begin tracking the quality of peer behavior over time -func NewMetric() *Metric { - return NewMetricWithConfig(DefaultConfig()) -} - -// NewMetricWithConfig returns a trust metric with a custom configuration. 
-// Use Start to begin tracking the quality of peer behavior over time -func NewMetricWithConfig(tmc MetricConfig) *Metric { - tm := new(Metric) - config := customConfig(tmc) - - // Setup using the configuration values - tm.proportionalWeight = config.ProportionalWeight - tm.integralWeight = config.IntegralWeight - tm.intervalLen = config.IntervalLength - // The maximum number of time intervals is the tracking window / interval length - tm.maxIntervals = int(config.TrackingWindow / tm.intervalLen) - // The history size will be determined by the maximum number of time intervals - tm.historyMaxSize = intervalToHistoryOffset(tm.maxIntervals) + 1 - // This metric has a perfect history so far - tm.historyValue = 1.0 - - tm.BaseService = *service.NewBaseService(nil, "Metric", tm) - return tm -} - -// OnStart implements Service -func (tm *Metric) OnStart() error { - if err := tm.BaseService.OnStart(); err != nil { - return err - } - go tm.processRequests() - return nil -} - -// OnStop implements Service -// Nothing to do since the goroutine shuts down by itself via BaseService.Quit() -func (tm *Metric) OnStop() {} - -// Returns a snapshot of the trust metric history data -func (tm *Metric) HistoryJSON() MetricHistoryJSON { - tm.mtx.Lock() - defer tm.mtx.Unlock() - - return MetricHistoryJSON{ - NumIntervals: tm.numIntervals, - History: tm.history, - } -} - -// Instantiates a trust metric by loading the history data for a single peer. -// This is called only once and only right after creation, which is why the -// lock is not held while accessing the trust metric struct members -func (tm *Metric) Init(hist MetricHistoryJSON) { - // Restore the number of time intervals we have previously tracked - if hist.NumIntervals > tm.maxIntervals { - hist.NumIntervals = tm.maxIntervals - } - tm.numIntervals = hist.NumIntervals - // Restore the history and its current size - if len(hist.History) > tm.historyMaxSize { - // Keep the history no larger than historyMaxSize - last := len(hist.History) - tm.historyMaxSize - hist.History = hist.History[last:] - } - tm.history = hist.History - tm.historySize = len(tm.history) - // Create the history weight values and weight sum - for i := 1; i <= tm.numIntervals; i++ { - x := math.Pow(defaultHistoryDataWeight, float64(i)) // Optimistic weight - tm.historyWeights = append(tm.historyWeights, x) - } - - for _, v := range tm.historyWeights { - tm.historyWeightSum += v - } - // Calculate the history value based on the loaded history data - tm.historyValue = tm.calcHistoryValue() -} - -// Pause tells the metric to pause recording data over time intervals. 
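The sizing arithmetic in NewMetricWithConfig and Init above is easy to miss: a 14-day window at 1-minute intervals yields 20160 intervals, yet Faded Memories keeps only floor(log2(20160)) + 1 = 15 history slots, weighted optimistically by 0.8^i. A small sketch of that math (plain Go, mirroring the removed code):

```go
package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	window := 14 * 24 * time.Hour // default TrackingWindow
	interval := time.Minute       // default IntervalLength

	maxIntervals := int(window / interval)
	// historyMaxSize = intervalToHistoryOffset(maxIntervals) + 1
	historyMaxSize := int(math.Floor(math.Log2(float64(maxIntervals)))) + 1

	fmt.Println(maxIntervals, historyMaxSize) // 20160 15

	// Optimistic history weights for the first few tracked intervals
	// (defaultHistoryDataWeight = 0.8).
	for i := 1; i <= 4; i++ {
		fmt.Printf("w_%d = %.3f\n", i, math.Pow(0.8, float64(i)))
	}
}
```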
-// All method calls that indicate events will unpause the metric -func (tm *Metric) Pause() { - tm.mtx.Lock() - defer tm.mtx.Unlock() - - // Pause the metric for now - tm.paused = true -} - -// BadEvents indicates that one or more undesirable events took place -func (tm *Metric) BadEvents(num int) { - tm.mtx.Lock() - defer tm.mtx.Unlock() - - tm.unpause() - tm.bad += float64(num) -} - -// GoodEvents indicates that one or more desirable events took place -func (tm *Metric) GoodEvents(num int) { - tm.mtx.Lock() - defer tm.mtx.Unlock() - - tm.unpause() - tm.good += float64(num) -} - -// TrustValue gets the dependable trust value; always between 0 and 1 -func (tm *Metric) TrustValue() float64 { - tm.mtx.Lock() - defer tm.mtx.Unlock() - - return tm.calcTrustValue() -} - -// TrustScore gets a score based on the trust value, always between 0 and 100 -func (tm *Metric) TrustScore() int { - score := tm.TrustValue() * 100 - - return int(math.Floor(score)) -} - -// NextTimeInterval saves current time interval data and prepares for the following interval -func (tm *Metric) NextTimeInterval() { - tm.mtx.Lock() - defer tm.mtx.Unlock() - - if tm.paused { - // Do not prepare for the next time interval while paused - return - } - - // Add the current trust value to the history data - newHist := tm.calcTrustValue() - tm.history = append(tm.history, newHist) - - // Update history and interval counters - if tm.historySize < tm.historyMaxSize { - tm.historySize++ - } else { - // Keep the history no larger than historyMaxSize - last := len(tm.history) - tm.historyMaxSize - tm.history = tm.history[last:] - } - - if tm.numIntervals < tm.maxIntervals { - tm.numIntervals++ - // Add the optimistic weight for the new time interval - wk := math.Pow(defaultHistoryDataWeight, float64(tm.numIntervals)) - tm.historyWeights = append(tm.historyWeights, wk) - tm.historyWeightSum += wk - } - - // Update the history data using Faded Memories - tm.updateFadedMemory() - // Calculate the history value for the upcoming time interval - tm.historyValue = tm.calcHistoryValue() - tm.good = 0 - tm.bad = 0 -} - -// SetTicker allows a TestTicker to be provided that will manually control -// the passing of time from the perspective of the Metric.
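For orientation, this is the lifecycle the removed Metric exposed to callers (a hedged usage sketch against the deleted API, shown for illustration only):

```go
package trust_test

import (
	"fmt"

	"github.com/tendermint/tendermint/internal/p2p/trust" // the package removed above
)

func ExampleMetric() {
	tm := trust.NewMetric()
	if err := tm.Start(); err != nil {
		panic(err)
	}
	defer func() { _ = tm.Stop() }()

	tm.GoodEvents(3) // desirable behavior observed
	tm.BadEvents(1)  // one undesirable event

	// TrustScore is floor(TrustValue() * 100), in [0, 100].
	fmt.Println(tm.TrustScore() <= 100)

	tm.Pause() // e.g. on peer disconnect; the next recorded event unpauses
	// Output: true
}
```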
-// The ticker must be set before Start is called on the metric -func (tm *Metric) SetTicker(ticker MetricTicker) { - tm.mtx.Lock() - defer tm.mtx.Unlock() - - tm.testTicker = ticker -} - -// Copy returns a new trust metric with members containing the same values -func (tm *Metric) Copy() *Metric { - if tm == nil { - return nil - } - - tm.mtx.Lock() - defer tm.mtx.Unlock() - - return &Metric{ - proportionalWeight: tm.proportionalWeight, - integralWeight: tm.integralWeight, - numIntervals: tm.numIntervals, - maxIntervals: tm.maxIntervals, - intervalLen: tm.intervalLen, - history: tm.history, - historyWeights: tm.historyWeights, - historyWeightSum: tm.historyWeightSum, - historySize: tm.historySize, - historyMaxSize: tm.historyMaxSize, - historyValue: tm.historyValue, - good: tm.good, - bad: tm.bad, - paused: tm.paused, - } - -} - -/* Private methods */ - -// This method is for a goroutine that handles all requests on the metric -func (tm *Metric) processRequests() { - t := tm.testTicker - if t == nil { - // No test ticker was provided, so we create a normal ticker - t = NewTicker(tm.intervalLen) - } - defer t.Stop() - // Obtain the raw channel - tick := t.GetChannel() -loop: - for { - select { - case <-tick: - tm.NextTimeInterval() - case <-tm.Quit(): - // Stop all further tracking for this metric - break loop - } - } -} - -// Wakes the trust metric up if it is currently paused -// This method needs to be called with the mutex locked -func (tm *Metric) unpause() { - // Check if this is the first experience with - // what we are tracking since being paused - if tm.paused { - tm.good = 0 - tm.bad = 0 - // New events cause us to unpause the metric - tm.paused = false - } -} - -// Calculates the trust value for the request processing -func (tm *Metric) calcTrustValue() float64 { - weightedP := tm.proportionalWeight * tm.proportionalValue() - weightedI := tm.integralWeight * tm.historyValue - weightedD := tm.weightedDerivative() - - tv := weightedP + weightedI + weightedD - // Do not return a negative value. 
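calcTrustValue above is a PID-flavored blend: tv = wP*p + wI*h + gamma*d, where p is the good/total ratio for the current interval, h the weighted history value, d = p - h, and gamma is 0 while behavior improves (defaultDerivativeGamma1) but 1.0 when it degrades (defaultDerivativeGamma2), with the result clamped at zero. A worked numeric example using the default weights (standalone sketch):

```go
package main

import "fmt"

// trustValue reproduces the removed calcTrustValue arithmetic:
// tv = wP*p + wI*h + gamma*d, gamma = 1 only when d < 0, clamped at 0.
func trustValue(good, bad, history float64) float64 {
	const wP, wI = 0.4, 0.6 // default proportional/integral weights

	p := 1.0
	if good+bad > 0 {
		p = good / (good + bad)
	}

	d := p - history
	gamma := 0.0 // ignore the derivative while behavior improves
	if d < 0 {
		gamma = 1.0 // fully penalize degrading behavior
	}

	tv := wP*p + wI*history + gamma*d
	if tv < 0 {
		tv = 0
	}
	return tv
}

func main() {
	// Sharp degradation: p ≈ 0.091, h = 1.0, so the derivative term
	// drags the raw value negative and it clamps to zero.
	fmt.Printf("%.3f\n", trustValue(1, 10, 1.0)) // 0.000

	// Improvement: p = 0.9 > h = 0.5, derivative ignored.
	fmt.Printf("%.3f\n", trustValue(9, 1, 0.5)) // 0.660
}
```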
- if tv < 0 { - tv = 0 - } - return tv -} - -// Calculates the current score for good/bad experiences -func (tm *Metric) proportionalValue() float64 { - value := 1.0 - - total := tm.good + tm.bad - if total > 0 { - value = tm.good / total - } - return value -} - -// Strengthens the derivative component when the change is negative -func (tm *Metric) weightedDerivative() float64 { - var weight float64 = defaultDerivativeGamma1 - - d := tm.derivativeValue() - if d < 0 { - weight = defaultDerivativeGamma2 - } - return weight * d -} - -// Calculates the derivative component -func (tm *Metric) derivativeValue() float64 { - return tm.proportionalValue() - tm.historyValue -} - -// Calculates the integral (history) component of the trust value -func (tm *Metric) calcHistoryValue() float64 { - var hv float64 - - for i := 0; i < tm.numIntervals; i++ { - hv += tm.fadedMemoryValue(i) * tm.historyWeights[i] - } - - return hv / tm.historyWeightSum -} - -// Retrieves the actual history data value that represents the requested time interval -func (tm *Metric) fadedMemoryValue(interval int) float64 { - first := tm.historySize - 1 - - if interval == 0 { - // Base case - return tm.history[first] - } - - offset := intervalToHistoryOffset(interval) - return tm.history[first-offset] -} - -// Performs the update for our Faded Memories process, which allows the -// trust metric tracking window to be large while maintaining a small -// number of history data values -func (tm *Metric) updateFadedMemory() { - if tm.historySize < 2 { - return - } - - end := tm.historySize - 1 - // Keep the most recent history element - for count := 1; count < tm.historySize; count++ { - i := end - count - // The older the data is, the more we spread it out - x := math.Pow(2, float64(count)) - // Two history data values are merged into a single value - tm.history[i] = ((tm.history[i] * (x - 1)) + tm.history[i+1]) / x - } -} - -// Map the interval value down to an offset from the beginning of history -func intervalToHistoryOffset(interval int) int { - // The system maintains 2^m interval values in the form of m history - // data values. 
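updateFadedMemory and intervalToHistoryOffset together give the metric its logarithmic memory: each pass merges adjacent history values with weight (x-1)/x where x = 2^count, and interval i is later read back from slot floor(log2(i)). A small sketch of the slot mapping (plain Go):

```go
package main

import (
	"fmt"
	"math"
)

// Interval i (i >= 1) maps to history slot floor(log2(i)), so 2^m tracked
// intervals need only m+1 stored values, as in the removed code above.
func main() {
	for _, i := range []int{1, 2, 3, 4, 8, 100, 20160} {
		fmt.Printf("interval %5d -> slot %2d\n", i, int(math.Floor(math.Log2(float64(i)))))
	}
	// interval     1 -> slot  0
	// interval     2 -> slot  1
	// interval     4 -> slot  2
	// interval 20160 -> slot 14
}
```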
Therefore, we access the ith interval by obtaining - // the history data index = the floor of log2(i) - return int(math.Floor(math.Log2(float64(interval)))) -} diff --git a/internal/p2p/trust/metric_test.go b/internal/p2p/trust/metric_test.go deleted file mode 100644 index 65caf38a23..0000000000 --- a/internal/p2p/trust/metric_test.go +++ /dev/null @@ -1,118 +0,0 @@ -package trust - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestTrustMetricScores(t *testing.T) { - tm := NewMetric() - err := tm.Start() - require.NoError(t, err) - - // Perfect score - tm.GoodEvents(1) - score := tm.TrustScore() - assert.Equal(t, 100, score) - - // Less than perfect score - tm.BadEvents(10) - score = tm.TrustScore() - assert.NotEqual(t, 100, score) - err = tm.Stop() - require.NoError(t, err) -} - -func TestTrustMetricConfig(t *testing.T) { - // 7 days - window := time.Minute * 60 * 24 * 7 - config := MetricConfig{ - TrackingWindow: window, - IntervalLength: 2 * time.Minute, - } - - tm := NewMetricWithConfig(config) - err := tm.Start() - require.NoError(t, err) - - // The max time intervals should be the TrackingWindow / IntervalLen - assert.Equal(t, int(config.TrackingWindow/config.IntervalLength), tm.maxIntervals) - - dc := DefaultConfig() - // These weights should still be the default values - assert.Equal(t, dc.ProportionalWeight, tm.proportionalWeight) - assert.Equal(t, dc.IntegralWeight, tm.integralWeight) - err = tm.Stop() - require.NoError(t, err) - tm.Wait() - - config.ProportionalWeight = 0.3 - config.IntegralWeight = 0.7 - tm = NewMetricWithConfig(config) - err = tm.Start() - require.NoError(t, err) - - // These weights should be equal to our custom values - assert.Equal(t, config.ProportionalWeight, tm.proportionalWeight) - assert.Equal(t, config.IntegralWeight, tm.integralWeight) - err = tm.Stop() - require.NoError(t, err) - tm.Wait() -} - -func TestTrustMetricCopyNilPointer(t *testing.T) { - var tm *Metric - - ctm := tm.Copy() - - assert.Nil(t, ctm) -} - -// XXX: This test fails non-deterministically -//nolint:unused,deadcode -func _TestTrustMetricStopPause(t *testing.T) { - // The TestTicker will provide manual control over - // the passing of time within the metric - tt := NewTestTicker() - tm := NewMetric() - tm.SetTicker(tt) - err := tm.Start() - require.NoError(t, err) - // Allow some time intervals to pass and pause - tt.NextTick() - tt.NextTick() - tm.Pause() - - // could be 1 or 2 because Pause and NextTick race - first := tm.Copy().numIntervals - - // Allow more time to pass and check the intervals are unchanged - tt.NextTick() - tt.NextTick() - assert.Equal(t, first, tm.Copy().numIntervals) - - // Get the trust metric activated again - tm.GoodEvents(5) - // Allow some time intervals to pass and stop - tt.NextTick() - tt.NextTick() - err = tm.Stop() - require.NoError(t, err) - tm.Wait() - - second := tm.Copy().numIntervals - // Allow more intervals to pass while the metric is stopped - // and check that the number of intervals match - tm.NextTimeInterval() - tm.NextTimeInterval() - // XXX: fails non-deterministically: - // expected 5, got 6 - assert.Equal(t, second+2, tm.Copy().numIntervals) - - if first > second { - t.Fatalf("numIntervals should always increase or stay the same over time") - } -} diff --git a/internal/p2p/trust/store.go b/internal/p2p/trust/store.go deleted file mode 100644 index 9f200b9dd5..0000000000 --- a/internal/p2p/trust/store.go +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright 2017 
Tendermint. All rights reserved. -// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file. - -package trust - -import ( - "encoding/json" - "fmt" - "time" - - dbm "github.com/tendermint/tm-db" - - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/libs/service" -) - -const defaultStorePeriodicSaveInterval = 1 * time.Minute - -var trustMetricKey = []byte("trustMetricStore") - -// MetricStore - Manages all trust metrics for peers -type MetricStore struct { - service.BaseService - - // Maps a Peer.Key to that peer's TrustMetric - peerMetrics map[string]*Metric - - // Mutex that protects the map and history data file - mtx tmsync.Mutex - - // The db where peer trust metric history data will be stored - db dbm.DB - - // This configuration will be used when creating new TrustMetrics - config MetricConfig -} - -// NewTrustMetricStore returns a store that saves data to the DB -// and uses the config when creating new trust metrics. -// Use Start to initialize the trust metric store -func NewTrustMetricStore(db dbm.DB, tmc MetricConfig) *MetricStore { - tms := &MetricStore{ - peerMetrics: make(map[string]*Metric), - db: db, - config: tmc, - } - - tms.BaseService = *service.NewBaseService(nil, "MetricStore", tms) - return tms -} - -// OnStart implements Service -func (tms *MetricStore) OnStart() error { - if err := tms.BaseService.OnStart(); err != nil { - return err - } - - tms.mtx.Lock() - defer tms.mtx.Unlock() - - tms.loadFromDB() - go tms.saveRoutine() - return nil -} - -// OnStop implements Service -func (tms *MetricStore) OnStop() { - tms.BaseService.OnStop() - - tms.mtx.Lock() - defer tms.mtx.Unlock() - - // Stop all trust metric go-routines - for _, tm := range tms.peerMetrics { - if err := tm.Stop(); err != nil { - tms.Logger.Error("unable to stop metric", "error", err) - } - } - - // Make the final trust history data save - tms.saveToDB() -} - -// Size returns the number of entries in the trust metric store -func (tms *MetricStore) Size() int { - tms.mtx.Lock() - defer tms.mtx.Unlock() - - return tms.size() -} - -// AddPeerTrustMetric takes an existing trust metric and associates it with a peer key. -// The caller is expected to call Start on the TrustMetric being added -func (tms *MetricStore) AddPeerTrustMetric(key string, tm *Metric) { - tms.mtx.Lock() - defer tms.mtx.Unlock() - - if key == "" || tm == nil { - return - } - tms.peerMetrics[key] = tm -} - -// GetPeerTrustMetric returns a trust metric by peer key -func (tms *MetricStore) GetPeerTrustMetric(key string) *Metric { - tms.mtx.Lock() - defer tms.mtx.Unlock() - - tm, ok := tms.peerMetrics[key] - if !ok { - // If the metric is not available, we will create it - tm = NewMetricWithConfig(tms.config) - if err := tm.Start(); err != nil { - tms.Logger.Error("unable to start metric", "error", err) - } - // The metric needs to be in the map - tms.peerMetrics[key] = tm - } - return tm -} - -// PeerDisconnected pauses the trust metric associated with the peer identified by the key -func (tms *MetricStore) PeerDisconnected(key string) { - tms.mtx.Lock() - defer tms.mtx.Unlock() - - // If the Peer that disconnected has a metric, pause it - if tm, ok := tms.peerMetrics[key]; ok { - tm.Pause() - } -} - -// Saves the history data for all peers to the store DB.
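For reference, the value these save methods write under the "trustMetricStore" key is simply a peer-keyed JSON map of history snapshots. A minimal sketch of that encoding, with the removed struct re-stated locally for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Local re-statement of the removed trust.MetricHistoryJSON, to show the
// exact blob saveToDB marshals.
type MetricHistoryJSON struct {
	NumIntervals int       `json:"intervals"`
	History      []float64 `json:"history"`
}

func main() {
	peers := map[string]MetricHistoryJSON{
		"peer-1": {NumIntervals: 3, History: []float64{0.91, 0.87, 0.9}},
	}
	out, err := json.Marshal(peers)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
	// {"peer-1":{"intervals":3,"history":[0.91,0.87,0.9]}}
}
```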
-// This public method acquires the trust metric store lock -func (tms *MetricStore) SaveToDB() { - tms.mtx.Lock() - defer tms.mtx.Unlock() - - tms.saveToDB() -} - -/* Private methods */ - -// size returns the number of entries in the store without acquiring the mutex -func (tms *MetricStore) size() int { - return len(tms.peerMetrics) -} - -/* Loading & Saving */ -/* Both loadFromDB and saveToDB assume the mutex has been acquired */ - -// Loads the history data for all peers from the store DB -// Panics if the file is corrupt -func (tms *MetricStore) loadFromDB() bool { - // Obtain the history data we have so far - bytes, err := tms.db.Get(trustMetricKey) - if err != nil { - panic(err) - } - if bytes == nil { - return false - } - - peers := make(map[string]MetricHistoryJSON) - err = json.Unmarshal(bytes, &peers) - if err != nil { - panic(fmt.Sprintf("Could not unmarshal Trust Metric Store DB data: %v", err)) - } - - // If history data exists in the file, - // load it into trust metric - for key, p := range peers { - tm := NewMetricWithConfig(tms.config) - - if err := tm.Start(); err != nil { - tms.Logger.Error("unable to start metric", "error", err) - } - tm.Init(p) - // Load the peer trust metric into the store - tms.peerMetrics[key] = tm - } - return true -} - -// Saves the history data for all peers to the store DB -func (tms *MetricStore) saveToDB() { - tms.Logger.Debug("Saving TrustHistory to DB", "size", tms.size()) - - peers := make(map[string]MetricHistoryJSON) - - for key, tm := range tms.peerMetrics { - // Add an entry for the peer identified by key - peers[key] = tm.HistoryJSON() - } - - // Write all the data back to the DB - bytes, err := json.Marshal(peers) - if err != nil { - tms.Logger.Error("Failed to encode the TrustHistory", "err", err) - return - } - if err := tms.db.SetSync(trustMetricKey, bytes); err != nil { - tms.Logger.Error("failed to flush data to disk", "error", err) - } -} - -// Periodically saves the trust history data to the DB -func (tms *MetricStore) saveRoutine() { - t := time.NewTicker(defaultStorePeriodicSaveInterval) - defer t.Stop() -loop: - for { - select { - case <-t.C: - tms.SaveToDB() - case <-tms.Quit(): - break loop - } - } -} diff --git a/internal/p2p/trust/store_test.go b/internal/p2p/trust/store_test.go deleted file mode 100644 index ecf17dc4ab..0000000000 --- a/internal/p2p/trust/store_test.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2017 Tendermint. All rights reserved. -// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
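End to end, callers used the store like this (hedged sketch against the deleted API, mirroring the tests that follow):

```go
package trust_test

import (
	dbm "github.com/tendermint/tm-db"

	"github.com/tendermint/tendermint/internal/p2p/trust" // the package removed above
)

func demoStore() error {
	db, err := dbm.NewDB("", "memdb", "")
	if err != nil {
		return err
	}

	store := trust.NewTrustMetricStore(db, trust.DefaultConfig())
	if err := store.Start(); err != nil {
		return err
	}
	defer func() { _ = store.Stop() }() // the final saveToDB happens on stop

	// Lazily creates (and starts) a metric for an unknown peer key.
	tm := store.GetPeerTrustMetric("peer-1")
	tm.BadEvents(2)

	// Pauses the peer's metric so idle time is not counted against it.
	store.PeerDisconnected("peer-1")
	return nil
}
```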
- -package trust - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" - - "github.com/tendermint/tendermint/libs/log" -) - -func TestTrustMetricStoreSaveLoad(t *testing.T) { - dir := t.TempDir() - - historyDB, err := dbm.NewDB("trusthistory", "goleveldb", dir) - require.NoError(t, err) - - // 0 peers saved - store := NewTrustMetricStore(historyDB, DefaultConfig()) - store.SetLogger(log.TestingLogger()) - store.saveToDB() - // Load the data from the file - store = NewTrustMetricStore(historyDB, DefaultConfig()) - store.SetLogger(log.TestingLogger()) - err = store.Start() - require.NoError(t, err) - // Make sure we still have 0 entries - assert.Zero(t, store.Size()) - - // 100 TestTickers - var tt []*TestTicker - for i := 0; i < 100; i++ { - // The TestTicker will provide manual control over - // the passing of time within the metric - tt = append(tt, NewTestTicker()) - } - // 100 peers - for i := 0; i < 100; i++ { - key := fmt.Sprintf("peer_%d", i) - tm := NewMetric() - - tm.SetTicker(tt[i]) - err = tm.Start() - require.NoError(t, err) - store.AddPeerTrustMetric(key, tm) - - tm.BadEvents(10) - tm.GoodEvents(1) - } - // Check that we have 100 entries and save - assert.Equal(t, 100, store.Size()) - // Give the 100 metrics time to process the history data - for i := 0; i < 100; i++ { - tt[i].NextTick() - tt[i].NextTick() - } - // Stop all the trust metrics and save - err = store.Stop() - require.NoError(t, err) - - // Load the data from the DB - store = NewTrustMetricStore(historyDB, DefaultConfig()) - store.SetLogger(log.TestingLogger()) - err = store.Start() - require.NoError(t, err) - - // Check that we still have 100 peers with imperfect trust values - assert.Equal(t, 100, store.Size()) - for _, tm := range store.peerMetrics { - assert.NotEqual(t, 1.0, tm.TrustValue()) - } - - err = store.Stop() - require.NoError(t, err) -} - -func TestTrustMetricStoreConfig(t *testing.T) { - historyDB, err := dbm.NewDB("", "memdb", "") - require.NoError(t, err) - - config := MetricConfig{ - ProportionalWeight: 0.5, - IntegralWeight: 0.5, - } - - // Create a store with custom config - store := NewTrustMetricStore(historyDB, config) - store.SetLogger(log.TestingLogger()) - err = store.Start() - require.NoError(t, err) - - // Have the store make us a metric with the config - tm := store.GetPeerTrustMetric("TestKey") - - // Check that the options made it to the metric - assert.Equal(t, 0.5, tm.proportionalWeight) - assert.Equal(t, 0.5, tm.integralWeight) - err = store.Stop() - require.NoError(t, err) -} - -func TestTrustMetricStoreLookup(t *testing.T) { - historyDB, err := dbm.NewDB("", "memdb", "") - require.NoError(t, err) - - store := NewTrustMetricStore(historyDB, DefaultConfig()) - store.SetLogger(log.TestingLogger()) - err = store.Start() - require.NoError(t, err) - - // Create 100 peers in the trust metric store - for i := 0; i < 100; i++ { - key := fmt.Sprintf("peer_%d", i) - store.GetPeerTrustMetric(key) - - // Check that the trust metric was successfully entered - ktm := store.peerMetrics[key] - assert.NotNil(t, ktm, "Expected to find TrustMetric %s but wasn't there.", key) - } - - err = store.Stop() - require.NoError(t, err) -} - -func TestTrustMetricStorePeerScore(t *testing.T) { - historyDB, err := dbm.NewDB("", "memdb", "") - require.NoError(t, err) - - store := NewTrustMetricStore(historyDB, DefaultConfig()) - store.SetLogger(log.TestingLogger()) - err = store.Start() - require.NoError(t, err) - - 
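The TestTicker used throughout the tests above (defined in ticker.go, deleted below) exists so interval rollover can be driven manually instead of by wall-clock time. The pattern, sketched against the deleted API:

```go
package trust_test

import (
	"fmt"

	"github.com/tendermint/tendermint/internal/p2p/trust" // the package removed above
)

func demoTestTicker() {
	tt := trust.NewTestTicker()

	tm := trust.NewMetric()
	tm.SetTicker(tt) // must happen before Start
	if err := tm.Start(); err != nil {
		panic(err)
	}

	tm.BadEvents(5)
	tt.NextTick() // deterministically folds the current interval into history

	fmt.Println(tm.TrustScore())
	_ = tm.Stop()
}
```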
key := "TestKey" - tm := store.GetPeerTrustMetric(key) - - // This peer is innocent so far - first := tm.TrustScore() - assert.Equal(t, 100, first) - - // Add some undesirable events and disconnect - tm.BadEvents(1) - first = tm.TrustScore() - assert.NotEqual(t, 100, first) - tm.BadEvents(10) - second := tm.TrustScore() - - if second > first { - t.Errorf("a greater number of bad events should lower the trust score") - } - store.PeerDisconnected(key) - - // We will remember our experiences with this peer - tm = store.GetPeerTrustMetric(key) - assert.NotEqual(t, 100, tm.TrustScore()) - err = store.Stop() - require.NoError(t, err) -} diff --git a/internal/p2p/trust/ticker.go b/internal/p2p/trust/ticker.go deleted file mode 100644 index 3f0f309192..0000000000 --- a/internal/p2p/trust/ticker.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2017 Tendermint. All rights reserved. -// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file. - -package trust - -import ( - "time" -) - -// MetricTicker provides a single ticker interface for the trust metric -type MetricTicker interface { - // GetChannel returns the receive only channel that fires at each time interval - GetChannel() <-chan time.Time - - // Stop will halt further activity on the ticker channel - Stop() -} - -// The ticker used during testing that provides manual control over time intervals -type TestTicker struct { - C chan time.Time - stopped bool -} - -// NewTestTicker returns our ticker used within test routines -func NewTestTicker() *TestTicker { - c := make(chan time.Time) - return &TestTicker{ - C: c, - } -} - -func (t *TestTicker) GetChannel() <-chan time.Time { - return t.C -} - -func (t *TestTicker) Stop() { - t.stopped = true -} - -// NextTick manually sends Time on the ticker channel -func (t *TestTicker) NextTick() { - if t.stopped { - return - } - t.C <- time.Now() -} - -// Ticker is just a wrapper around time.Ticker that allows it -// to meet the requirements of our interface -type Ticker struct { - *time.Ticker -} - -// NewTicker returns a normal time.Ticker wrapped to meet our interface -func NewTicker(d time.Duration) *Ticker { - return &Ticker{time.NewTicker(d)} -} - -func (t *Ticker) GetChannel() <-chan time.Time { - return t.C -} diff --git a/internal/p2p/types.go b/internal/p2p/types.go index 403f43528d..bee99a4fe6 100644 --- a/internal/p2p/types.go +++ b/internal/p2p/types.go @@ -5,4 +5,4 @@ import ( ) type ChannelDescriptor = conn.ChannelDescriptor -type ConnectionStatus = conn.ConnectionStatus +type ChannelID = conn.ChannelID diff --git a/internal/p2p/upnp/probe.go b/internal/p2p/upnp/probe.go deleted file mode 100644 index ae641abbbe..0000000000 --- a/internal/p2p/upnp/probe.go +++ /dev/null @@ -1,111 +0,0 @@ -package upnp - -import ( - "fmt" - "net" - "time" - - "github.com/tendermint/tendermint/libs/log" -) - -type Capabilities struct { - PortMapping bool - Hairpin bool -} - -func makeUPNPListener(intPort int, extPort int, logger log.Logger) (NAT, net.Listener, net.IP, error) { - nat, err := Discover() - if err != nil { - return nil, nil, nil, fmt.Errorf("nat upnp could not be discovered: %v", err) - } - logger.Info(fmt.Sprintf("ourIP: %v", nat.(*upnpNAT).ourIP)) - - ext, err := nat.GetExternalAddress() - if err != nil { - return nat, nil, nil, fmt.Errorf("external address error: %v", err) - } - logger.Info(fmt.Sprintf("External address: %v", ext)) - - port, err := nat.AddPortMapping("tcp", extPort, intPort, "Tendermint UPnP Probe", 0) - if err != nil { - return nat, nil,
ext, fmt.Errorf("port mapping error: %v", err) - } - logger.Info(fmt.Sprintf("Port mapping mapped: %v", port)) - - // also run the listener, open for all remote addresses. - listener, err := net.Listen("tcp", fmt.Sprintf(":%v", intPort)) - if err != nil { - return nat, nil, ext, fmt.Errorf("error establishing listener: %v", err) - } - return nat, listener, ext, nil -} - -func testHairpin(listener net.Listener, extAddr string, logger log.Logger) (supportsHairpin bool) { - // Listener - go func() { - inConn, err := listener.Accept() - if err != nil { - logger.Info(fmt.Sprintf("Listener.Accept() error: %v", err)) - return - } - logger.Info(fmt.Sprintf("Accepted incoming connection: %v -> %v", inConn.LocalAddr(), inConn.RemoteAddr())) - buf := make([]byte, 1024) - n, err := inConn.Read(buf) - if err != nil { - logger.Info(fmt.Sprintf("Incoming connection read error: %v", err)) - return - } - logger.Info(fmt.Sprintf("Incoming connection read %v bytes: %X", n, buf)) - if string(buf) == "test data" { - supportsHairpin = true - return - } - }() - - // Establish outgoing - outConn, err := net.Dial("tcp", extAddr) - if err != nil { - logger.Info(fmt.Sprintf("Outgoing connection dial error: %v", err)) - return - } - - n, err := outConn.Write([]byte("test data")) - if err != nil { - logger.Info(fmt.Sprintf("Outgoing connection write error: %v", err)) - return - } - logger.Info(fmt.Sprintf("Outgoing connection wrote %v bytes", n)) - - // Wait for data receipt - time.Sleep(1 * time.Second) - return supportsHairpin -} - -func Probe(logger log.Logger) (caps Capabilities, err error) { - logger.Info("Probing for UPnP!") - - intPort, extPort := 8001, 8001 - - nat, listener, ext, err := makeUPNPListener(intPort, extPort, logger) - if err != nil { - return - } - caps.PortMapping = true - - // Deferred cleanup - defer func() { - if err := nat.DeletePortMapping("tcp", intPort, extPort); err != nil { - logger.Error(fmt.Sprintf("Port mapping delete error: %v", err)) - } - if err := listener.Close(); err != nil { - logger.Error(fmt.Sprintf("Listener closing error: %v", err)) - } - }() - - supportsHairpin := testHairpin(listener, fmt.Sprintf("%v:%v", ext, extPort), logger) - if supportsHairpin { - caps.Hairpin = true - } - - return -} diff --git a/internal/p2p/upnp/upnp.go b/internal/p2p/upnp/upnp.go deleted file mode 100644 index c00530acae..0000000000 --- a/internal/p2p/upnp/upnp.go +++ /dev/null @@ -1,404 +0,0 @@ -// Taken from taipei-torrent. 
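probe.go above drives the NAT interface that upnp.go (deleted next) defines. For orientation, the calling sequence it relies on, sketched against that interface:

```go
package upnp_test

import (
	"fmt"

	"github.com/tendermint/tendermint/internal/p2p/upnp" // the package removed here
)

func demoNAT() error {
	// SSDP multicast discovery of an InternetGatewayDevice on the LAN.
	nat, err := upnp.Discover()
	if err != nil {
		return err
	}

	ext, err := nat.GetExternalAddress()
	if err != nil {
		return err
	}
	fmt.Println("external IP:", ext)

	// Map external port 8001 to internal port 8001 over TCP, lease 0.
	if _, err := nat.AddPortMapping("tcp", 8001, 8001, "demo", 0); err != nil {
		return err
	}

	// Remove the mapping when done.
	return nat.DeletePortMapping("tcp", 8001, 8001)
}
```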
-// Just enough UPnP to be able to forward ports -// For more information, see: http://www.upnp-hacks.org/upnp.html -package upnp - -// TODO: use syscalls to get actual ourIP, see issue #712 - -import ( - "bytes" - "encoding/xml" - "errors" - "fmt" - "io/ioutil" - "net" - "net/http" - "strconv" - "strings" - "time" -) - -type upnpNAT struct { - serviceURL string - ourIP string - urnDomain string -} - -// protocol is either "udp" or "tcp" -type NAT interface { - GetExternalAddress() (addr net.IP, err error) - AddPortMapping( - protocol string, - externalPort, - internalPort int, - description string, - timeout int) (mappedExternalPort int, err error) - DeletePortMapping(protocol string, externalPort, internalPort int) (err error) -} - -func Discover() (nat NAT, err error) { - ssdp, err := net.ResolveUDPAddr("udp4", "239.255.255.250:1900") - if err != nil { - return - } - conn, err := net.ListenPacket("udp4", ":0") - if err != nil { - return - } - socket := conn.(*net.UDPConn) - defer socket.Close() - - if err := socket.SetDeadline(time.Now().Add(3 * time.Second)); err != nil { - return nil, err - } - - st := "InternetGatewayDevice:1" - - buf := bytes.NewBufferString( - "M-SEARCH * HTTP/1.1\r\n" + - "HOST: 239.255.255.250:1900\r\n" + - "ST: ssdp:all\r\n" + - "MAN: \"ssdp:discover\"\r\n" + - "MX: 2\r\n\r\n") - message := buf.Bytes() - answerBytes := make([]byte, 1024) - for i := 0; i < 3; i++ { - _, err = socket.WriteToUDP(message, ssdp) - if err != nil { - return - } - var n int - _, _, err = socket.ReadFromUDP(answerBytes) - if err != nil { - return - } - for { - n, _, err = socket.ReadFromUDP(answerBytes) - if err != nil { - break - } - answer := string(answerBytes[0:n]) - if !strings.Contains(answer, st) { - continue - } - // HTTP header field names are case-insensitive. 
- // http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 - locString := "\r\nlocation:" - answer = strings.ToLower(answer) - locIndex := strings.Index(answer, locString) - if locIndex < 0 { - continue - } - loc := answer[locIndex+len(locString):] - endIndex := strings.Index(loc, "\r\n") - if endIndex < 0 { - continue - } - locURL := strings.TrimSpace(loc[0:endIndex]) - var serviceURL, urnDomain string - serviceURL, urnDomain, err = getServiceURL(locURL) - if err != nil { - return - } - var ourIP net.IP - ourIP, err = localIPv4() - if err != nil { - return - } - nat = &upnpNAT{serviceURL: serviceURL, ourIP: ourIP.String(), urnDomain: urnDomain} - return - } - } - err = errors.New("upnp port discovery failed") - return nat, err -} - -type Envelope struct { - XMLName xml.Name `xml:"http://schemas.xmlsoap.org/soap/envelope/ Envelope"` - Soap *SoapBody -} -type SoapBody struct { - XMLName xml.Name `xml:"http://schemas.xmlsoap.org/soap/envelope/ Body"` - ExternalIP *ExternalIPAddressResponse -} - -type ExternalIPAddressResponse struct { - XMLName xml.Name `xml:"GetExternalIPAddressResponse"` - IPAddress string `xml:"NewExternalIPAddress"` -} - -type ExternalIPAddress struct { - XMLName xml.Name `xml:"NewExternalIPAddress"` - IP string -} - -type Service struct { - ServiceType string `xml:"serviceType"` - ControlURL string `xml:"controlURL"` -} - -type DeviceList struct { - Device []Device `xml:"device"` -} - -type ServiceList struct { - Service []Service `xml:"service"` -} - -type Device struct { - XMLName xml.Name `xml:"device"` - DeviceType string `xml:"deviceType"` - DeviceList DeviceList `xml:"deviceList"` - ServiceList ServiceList `xml:"serviceList"` -} - -type Root struct { - Device Device -} - -func getChildDevice(d *Device, deviceType string) *Device { - dl := d.DeviceList.Device - for i := 0; i < len(dl); i++ { - if strings.Contains(dl[i].DeviceType, deviceType) { - return &dl[i] - } - } - return nil -} - -func getChildService(d *Device, serviceType string) *Service { - sl := d.ServiceList.Service - for i := 0; i < len(sl); i++ { - if strings.Contains(sl[i].ServiceType, serviceType) { - return &sl[i] - } - } - return nil -} - -func localIPv4() (net.IP, error) { - tt, err := net.Interfaces() - if err != nil { - return nil, err - } - for _, t := range tt { - aa, err := t.Addrs() - if err != nil { - return nil, err - } - for _, a := range aa { - ipnet, ok := a.(*net.IPNet) - if !ok { - continue - } - v4 := ipnet.IP.To4() - if v4 == nil || v4[0] == 127 { // loopback address - continue - } - return v4, nil - } - } - return nil, errors.New("cannot find local IP address") -} - -func getServiceURL(rootURL string) (url, urnDomain string, err error) { - r, err := http.Get(rootURL) // nolint: gosec - if err != nil { - return - } - defer r.Body.Close() - - if r.StatusCode >= 400 { - err = errors.New(string(rune(r.StatusCode))) - return - } - var root Root - err = xml.NewDecoder(r.Body).Decode(&root) - if err != nil { - return - } - a := &root.Device - if !strings.Contains(a.DeviceType, "InternetGatewayDevice:1") { - err = errors.New("no InternetGatewayDevice") - return - } - b := getChildDevice(a, "WANDevice:1") - if b == nil { - err = errors.New("no WANDevice") - return - } - c := getChildDevice(b, "WANConnectionDevice:1") - if c == nil { - err = errors.New("no WANConnectionDevice") - return - } - d := getChildService(c, "WANIPConnection:1") - if d == nil { - // Some routers don't follow the UPnP spec, and put WanIPConnection under WanDevice, - // instead of under WanConnectionDevice - d 
= getChildService(b, "WANIPConnection:1") - - if d == nil { - err = errors.New("no WANIPConnection") - return - } - } - // Extract the domain name, which isn't always 'schemas-upnp-org' - urnDomain = strings.Split(d.ServiceType, ":")[1] - url = combineURL(rootURL, d.ControlURL) - return url, urnDomain, err -} - -func combineURL(rootURL, subURL string) string { - protocolEnd := "://" - protoEndIndex := strings.Index(rootURL, protocolEnd) - a := rootURL[protoEndIndex+len(protocolEnd):] - rootIndex := strings.Index(a, "/") - return rootURL[0:protoEndIndex+len(protocolEnd)+rootIndex] + subURL -} - -func soapRequest(url, function, message, domain string) (r *http.Response, err error) { - fullMessage := "<?xml version=\"1.0\" ?>" + - "<s:Envelope xmlns:s=\"http://schemas.xmlsoap.org/soap/envelope/\" s:encodingStyle=\"http://schemas.xmlsoap.org/soap/encoding/\">\r\n" + - "<s:Body>" + message + "</s:Body></s:Envelope>" - - req, err := http.NewRequest("POST", url, strings.NewReader(fullMessage)) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "text/xml ; charset=\"utf-8\"") - req.Header.Set("User-Agent", "Darwin/10.0.0, UPnP/1.0, MiniUPnPc/1.3") - // req.Header.Set("Transfer-Encoding", "chunked") - req.Header.Set("SOAPAction", "\"urn:"+domain+":service:WANIPConnection:1#"+function+"\"") - req.Header.Set("Connection", "Close") - req.Header.Set("Cache-Control", "no-cache") - req.Header.Set("Pragma", "no-cache") - - // log.Stderr("soapRequest ", req) - - r, err = http.DefaultClient.Do(req) - if err != nil { - return nil, err - } - /*if r.Body != nil { - defer r.Body.Close() - }*/ - - if r.StatusCode >= 400 { - // log.Stderr(function, r.StatusCode) - err = errors.New("error " + strconv.Itoa(r.StatusCode) + " for " + function) - r = nil - return - } - return r, err -} - -type statusInfo struct { - externalIPAddress string -} - -func (n *upnpNAT) getExternalIPAddress() (info statusInfo, err error) { - - message := "<u:GetExternalIPAddress xmlns:u=\"urn:" + n.urnDomain + ":service:WANIPConnection:1\">\r\n" + - "</u:GetExternalIPAddress>" - - var response *http.Response - response, err = soapRequest(n.serviceURL, "GetExternalIPAddress", message, n.urnDomain) - if response != nil { - defer response.Body.Close() - } - if err != nil { - return - } - var envelope Envelope - data, err := ioutil.ReadAll(response.Body) - if err != nil { - return - } - reader := bytes.NewReader(data) - err = xml.NewDecoder(reader).Decode(&envelope) - if err != nil { - return - } - - info = statusInfo{envelope.Soap.ExternalIP.IPAddress} - - if err != nil { - return - } - - return info, err -} - -// GetExternalAddress returns an external IP. If GetExternalIPAddress action -// fails or IP returned is invalid, GetExternalAddress returns an error. -func (n *upnpNAT) GetExternalAddress() (addr net.IP, err error) { - info, err := n.getExternalIPAddress() - if err != nil { - return - } - addr = net.ParseIP(info.externalIPAddress) - if addr == nil { - err = fmt.Errorf("failed to parse IP: %v", info.externalIPAddress) - } - return -} - -func (n *upnpNAT) AddPortMapping( - protocol string, - externalPort, - internalPort int, - description string, - timeout int) (mappedExternalPort int, err error) { - // A single concatenation would break ARM compilation.
- message := "<u:AddPortMapping xmlns:u=\"urn:" + n.urnDomain + ":service:WANIPConnection:1\">\r\n" + - "<NewRemoteHost></NewRemoteHost><NewExternalPort>" + strconv.Itoa(externalPort) - message += "</NewExternalPort><NewProtocol>" + protocol + "</NewProtocol>" - message += "<NewInternalPort>" + strconv.Itoa(internalPort) + "</NewInternalPort>" + - "<NewInternalClient>" + n.ourIP + "</NewInternalClient>" + - "<NewEnabled>1</NewEnabled><NewPortMappingDescription>" - message += description + - "</NewPortMappingDescription><NewLeaseDuration>" + strconv.Itoa(timeout) + - "</NewLeaseDuration></u:AddPortMapping>" - - var response *http.Response - response, err = soapRequest(n.serviceURL, "AddPortMapping", message, n.urnDomain) - if response != nil { - defer response.Body.Close() - } - if err != nil { - return - } - - // TODO: check response to see if the port was forwarded - // log.Println(message, response) - // JAE: - // body, err := ioutil.ReadAll(response.Body) - // fmt.Println(string(body), err) - mappedExternalPort = externalPort - _ = response - return mappedExternalPort, err -} - -func (n *upnpNAT) DeletePortMapping(protocol string, externalPort, internalPort int) (err error) { - - message := "<u:DeletePortMapping xmlns:u=\"urn:" + n.urnDomain + ":service:WANIPConnection:1\">\r\n" + - "<NewRemoteHost></NewRemoteHost><NewExternalPort>" + strconv.Itoa(externalPort) + - "</NewExternalPort><NewProtocol>" + protocol + "</NewProtocol>" + - "</u:DeletePortMapping>" - - var response *http.Response - response, err = soapRequest(n.serviceURL, "DeletePortMapping", message, n.urnDomain) - if response != nil { - defer response.Body.Close() - } - if err != nil { - return - } - - // TODO: check response to see if the port was deleted - // log.Println(message, response) - _ = response - return -} diff --git a/internal/p2p/wdrr_queue.go b/internal/p2p/wdrr_queue.go deleted file mode 100644 index 1b75ffce88..0000000000 --- a/internal/p2p/wdrr_queue.go +++ /dev/null @@ -1,287 +0,0 @@ -package p2p - -import ( - "fmt" - "sort" - "strconv" - - "github.com/gogo/protobuf/proto" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/libs/log" -) - -// wrappedEnvelope wraps a p2p Envelope with its precomputed size. -type wrappedEnvelope struct { - envelope Envelope - size uint -} - -// assert the WDRR scheduler implements the queue interface at compile-time -var _ queue = (*wdrrScheduler)(nil) - -// wdrrQueue implements a Weighted Deficit Round Robin (WDRR) scheduling -// algorithm via the queue interface. A WDRR queue is created per peer, where -// the queue will have N number of flows. Each flow corresponds to a p2p Channel, -// so there are n input flows and a single output source, the peer's connection. -// -// The WDRR scheduler contains a shared buffer with a fixed capacity. -// -// Each flow has the following: - quantum: The number of bytes that is added to the deficit counter of the -// flow in each round. The flow can send at most quantum bytes at a time. Each -// flow has its own unique quantum, which gives the queue its weighted nature. -// A higher quantum corresponds to a higher weight/priority. The quantum is -// computed as MaxSendBytes * Priority. -// - deficit counter: The number of bytes that the flow is allowed to transmit -// when it is its turn.
-// -// See: https://en.wikipedia.org/wiki/Deficit_round_robin -type wdrrScheduler struct { - logger log.Logger - metrics *Metrics - chDescs []ChannelDescriptor - capacity uint - size uint - chPriorities map[ChannelID]uint - buffer map[ChannelID][]wrappedEnvelope - quanta map[ChannelID]uint - deficits map[ChannelID]uint - - closer *tmsync.Closer - doneCh *tmsync.Closer - - enqueueCh chan Envelope - dequeueCh chan Envelope -} - -func newWDRRScheduler( - logger log.Logger, - m *Metrics, - chDescs []ChannelDescriptor, - enqueueBuf, dequeueBuf, capacity uint, -) *wdrrScheduler { - - // copy each ChannelDescriptor and sort them by channel priority - chDescsCopy := make([]ChannelDescriptor, len(chDescs)) - copy(chDescsCopy, chDescs) - sort.Slice(chDescsCopy, func(i, j int) bool { return chDescsCopy[i].Priority > chDescsCopy[j].Priority }) - - var ( - buffer = make(map[ChannelID][]wrappedEnvelope) - chPriorities = make(map[ChannelID]uint) - quanta = make(map[ChannelID]uint) - deficits = make(map[ChannelID]uint) - ) - - for _, chDesc := range chDescsCopy { - chID := ChannelID(chDesc.ID) - chPriorities[chID] = uint(chDesc.Priority) - buffer[chID] = make([]wrappedEnvelope, 0) - quanta[chID] = chDesc.MaxSendBytes * uint(chDesc.Priority) - } - - return &wdrrScheduler{ - logger: logger.With("queue", "wdrr"), - metrics: m, - capacity: capacity, - chPriorities: chPriorities, - chDescs: chDescsCopy, - buffer: buffer, - quanta: quanta, - deficits: deficits, - closer: tmsync.NewCloser(), - doneCh: tmsync.NewCloser(), - enqueueCh: make(chan Envelope, enqueueBuf), - dequeueCh: make(chan Envelope, dequeueBuf), - } -} - -// enqueue returns an unbuffered write-only channel which a producer can send on. -func (s *wdrrScheduler) enqueue() chan<- Envelope { - return s.enqueueCh -} - -// dequeue returns an unbuffered read-only channel which a consumer can read from. -func (s *wdrrScheduler) dequeue() <-chan Envelope { - return s.dequeueCh -} - -func (s *wdrrScheduler) closed() <-chan struct{} { - return s.closer.Done() -} - -// close closes the WDRR queue. After this call enqueue() will block, so the -// caller must select on closed() as well to avoid blocking forever. The -// enqueue() and dequeue() along with the internal channels will NOT be closed. -// Note, close() will block until all externally spawned goroutines have exited. -func (s *wdrrScheduler) close() { - s.closer.Close() - <-s.doneCh.Done() -} - -// start starts the WDRR queue process in a blocking goroutine. This must be -// called before the queue can start to process and accept Envelopes. -func (s *wdrrScheduler) start() { - go s.process() -} - -// process starts a blocking WDRR scheduler process, where we continuously -// evaluate if we need to attempt to enqueue an Envelope or schedule Envelopes -// to be dequeued and subsequently read and sent on the source connection. -// Internally, each p2p Channel maps to a flow, where each flow has a deficit -// and a quantum. -// -// For each Envelope requested to be enqueued, we evaluate if there is sufficient -// capacity in the shared buffer to add the Envelope. If so, it is added. -// Otherwise, we evaluate all flows of lower priority where we attempt to find an -// existing Envelope in the shared buffer of sufficient size that can be dropped -// in place of the incoming Envelope. If there is no such Envelope that can be -// dropped, then the incoming Envelope is dropped. -// -// When there is nothing to be enqueued, we perform the WDRR algorithm and -// determine which Envelopes can be dequeued.
For each Envelope that can be -// dequeued, it is sent on the dequeueCh. Specifically, for each flow, if it is -// non-empty, its deficit counter is incremented by its quantum value. Then, the -// value of the deficit counter is the maximal number of bytes that can be sent at -// this round. If the deficit counter is greater than the Envelope's message -// size at the head of the queue (HoQ), this envelope can be sent and the value -// of the counter is decremented by the message's size. Then, the size of the -// next Envelope's message is compared to the counter value, etc. Once the flow -// is empty or the value of the counter is insufficient, the scheduler will skip -// to the next flow. If the flow is empty, the value of the deficit counter is -// reset to 0. -// -// XXX/TODO: Evaluate the single goroutine scheduler mechanism. In other words, -// evaluate the effectiveness and performance of having a single goroutine -// handle both enqueueing and dequeueing logic. Specifically, there -// is potentially contention between reading off of enqueueCh and trying to -// enqueue while also attempting to perform the WDRR algorithm and find the next -// set of Envelope(s) to send on the dequeueCh. Alternatively, we could consider -// separate scheduling goroutines, but then that requires the use of mutexes and -// possibly degraded performance. -func (s *wdrrScheduler) process() { - defer s.doneCh.Close() - - for { - select { - case <-s.closer.Done(): - return - - case e := <-s.enqueueCh: - // attempt to enqueue the incoming Envelope - chIDStr := strconv.Itoa(int(e.channelID)) - wEnv := wrappedEnvelope{envelope: e, size: uint(proto.Size(e.Message))} - msgSize := wEnv.size - - s.metrics.PeerPendingSendBytes.With("peer_id", string(e.To)).Add(float64(msgSize)) - - // If we're at capacity, we need to either drop the incoming Envelope or - // an Envelope from a lower priority flow. Otherwise, we add the (wrapped) - // envelope to the flow's queue. - if s.size+wEnv.size > s.capacity { - chPriority := s.chPriorities[e.channelID] - - var ( - canDrop bool - dropIdx int - dropChID ChannelID - ) - - // Evaluate all lower priority flows and determine if there exists an - // Envelope that is of equal or greater size that we can drop in favor - // of the incoming Envelope. - for i := len(s.chDescs) - 1; i >= 0 && uint(s.chDescs[i].Priority) < chPriority && !canDrop; i-- { - currChID := ChannelID(s.chDescs[i].ID) - flow := s.buffer[currChID] - - for j := 0; j < len(flow) && !canDrop; j++ { - if flow[j].size >= wEnv.size { - canDrop = true - dropIdx = j - dropChID = currChID - break - } - } - } - - // If we can drop an existing Envelope, drop it and enqueue the incoming - // Envelope. - if canDrop { - chIDStr = strconv.Itoa(int(dropChID)) - chPriority = s.chPriorities[dropChID] - msgSize = s.buffer[dropChID][dropIdx].size - - // Drop Envelope for the lower priority flow and update the queue's - // buffer size - s.size -= msgSize - s.buffer[dropChID] = append(s.buffer[dropChID][:dropIdx], s.buffer[dropChID][dropIdx+1:]...) - - // add the incoming Envelope and update queue's buffer size - s.size += wEnv.size - s.buffer[e.channelID] = append(s.buffer[e.channelID], wEnv) - s.metrics.PeerQueueMsgSize.With("ch_id", chIDStr).Set(float64(wEnv.size)) - } - - // We either dropped the incoming Envelope or one from an existing - // lower priority flow.
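The dequeue half of process() described above is classic deficit round robin. Stripped of channels and metrics, the core accounting looks like this (standalone sketch, not the removed implementation):

```go
package main

import "fmt"

// flow is one WDRR flow: a FIFO of message sizes plus its quantum
// (MaxSendBytes * Priority in the removed code) and its running deficit.
type flow struct {
	name    string
	quantum uint
	deficit uint
	queue   []uint // message sizes, head of queue first
}

// round performs one WDRR pass: each non-empty flow earns its quantum and
// sends head-of-queue messages while its deficit covers them; empty flows
// have their deficit reset so they cannot bank credit.
func round(flows []*flow) {
	for _, f := range flows {
		if len(f.queue) == 0 {
			f.deficit = 0
			continue
		}
		f.deficit += f.quantum
		for len(f.queue) > 0 && f.deficit >= f.queue[0] {
			size := f.queue[0]
			f.queue = f.queue[1:]
			f.deficit -= size
			fmt.Printf("%s: sent %d bytes (deficit now %d)\n", f.name, size, f.deficit)
		}
		if len(f.queue) == 0 {
			f.deficit = 0
		}
	}
}

func main() {
	flows := []*flow{
		{name: "high", quantum: 72, queue: []uint{50, 50, 50}}, // MaxSendBytes 4 * priority 18
		{name: "low", quantum: 4, queue: []uint{50}},           // MaxSendBytes 4 * priority 1
	}
	round(flows) // high sends one 50B message; low only banks 4 bytes of credit
	round(flows) // high sends another; low keeps accumulating until 50 is covered
}
```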
- s.metrics.PeerQueueDroppedMsgs.With("ch_id", chIDStr).Add(1) - s.logger.Debug( - "dropped envelope", - "ch_id", chIDStr, - "priority", chPriority, - "capacity", s.capacity, - "msg_size", msgSize, - ) - } else { - // we have sufficient capacity to enqueue the incoming Envelope - s.metrics.PeerQueueMsgSize.With("ch_id", chIDStr).Set(float64(wEnv.size)) - s.buffer[e.channelID] = append(s.buffer[e.channelID], wEnv) - s.size += wEnv.size - } - - default: - // perform the WDRR algorithm - for _, chDesc := range s.chDescs { - chID := ChannelID(chDesc.ID) - - // only consider non-empty flows - if len(s.buffer[chID]) > 0 { - // bump flow's quantum - s.deficits[chID] += s.quanta[chID] - - // grab the flow's current deficit counter and HoQ (wrapped) Envelope - d := s.deficits[chID] - we := s.buffer[chID][0] - - // While the flow is non-empty and we can send the current Envelope - // on the dequeueCh: - // - // 1. send the Envelope - // 2. update the scheduler's shared buffer's size - // 3. update the flow's deficit - // 4. remove from the flow's queue - // 5. grab the next HoQ Envelope and flow's deficit - for len(s.buffer[chID]) > 0 && d >= we.size { - s.metrics.PeerSendBytesTotal.With( - "chID", fmt.Sprint(chID), - "peer_id", string(we.envelope.To)).Add(float64(we.size)) - s.dequeueCh <- we.envelope - s.size -= we.size - s.deficits[chID] -= we.size - s.buffer[chID] = s.buffer[chID][1:] - - if len(s.buffer[chID]) > 0 { - d = s.deficits[chID] - we = s.buffer[chID][0] - } - } - } - - // reset the flow's deficit to zero if it is empty - if len(s.buffer[chID]) == 0 { - s.deficits[chID] = 0 - } - } - } - } -} diff --git a/internal/p2p/wdrr_queue_test.go b/internal/p2p/wdrr_queue_test.go deleted file mode 100644 index d49c77e765..0000000000 --- a/internal/p2p/wdrr_queue_test.go +++ /dev/null @@ -1,208 +0,0 @@ -package p2p - -import ( - "math" - "math/rand" - "testing" - "time" - - gogotypes "github.com/gogo/protobuf/types" - "github.com/stretchr/testify/require" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/libs/log" -) - -type testMessage = gogotypes.StringValue - -func TestWDRRQueue_EqualWeights(t *testing.T) { - chDescs := []ChannelDescriptor{ - {ID: 0x01, Priority: 1, MaxSendBytes: 4}, - {ID: 0x02, Priority: 1, MaxSendBytes: 4}, - {ID: 0x03, Priority: 1, MaxSendBytes: 4}, - {ID: 0x04, Priority: 1, MaxSendBytes: 4}, - {ID: 0x05, Priority: 1, MaxSendBytes: 4}, - {ID: 0x06, Priority: 1, MaxSendBytes: 4}, - } - - peerQueue := newWDRRScheduler(log.NewNopLogger(), NopMetrics(), chDescs, 1000, 1000, 120) - peerQueue.start() - - totalMsgs := make(map[ChannelID]int) - deliveredMsgs := make(map[ChannelID]int) - successRates := make(map[ChannelID]float64) - - closer := tmsync.NewCloser() - - go func() { - timeout := 10 * time.Second - ticker := time.NewTicker(timeout) - defer ticker.Stop() - - for { - select { - case e := <-peerQueue.dequeue(): - deliveredMsgs[e.channelID]++ - ticker.Reset(timeout) - - case <-ticker.C: - closer.Close() - } - } - }() - - rng := rand.New(rand.NewSource(time.Now().UnixNano())) - maxMsgs := 5000 - minMsgs := 1000 - - for _, chDesc := range chDescs { - total := rng.Intn(maxMsgs-minMsgs) + minMsgs // total = rand[minMsgs, maxMsgs) - totalMsgs[ChannelID(chDesc.ID)] = total - - go func(cID ChannelID, n int) { - for i := 0; i < n; i++ { - peerQueue.enqueue() <- Envelope{ - channelID: cID, - Message: &testMessage{Value: "foo"}, // 5 bytes - } - } - }(ChannelID(chDesc.ID), total) - } - - // wait for dequeueing to complete
<-closer.Done() - - // close queue and wait for cleanup - peerQueue.close() - <-peerQueue.closed() - - var ( - sum float64 - stdDev float64 - ) - - for _, chDesc := range peerQueue.chDescs { - chID := ChannelID(chDesc.ID) - require.Zero(t, peerQueue.deficits[chID], "expected flow deficit to be zero") - require.Len(t, peerQueue.buffer[chID], 0, "expected flow queue to be empty") - - total := totalMsgs[chID] - delivered := deliveredMsgs[chID] - successRate := float64(delivered) / float64(total) - - sum += successRate - successRates[chID] = successRate - - // require some messages dropped - require.Less(t, delivered, total, "expected some messages to be dropped") - require.Less(t, successRate, 1.0, "expected a success rate below 100%") - } - - require.Zero(t, peerQueue.size, "expected scheduler size to be zero") - - numFlows := float64(len(peerQueue.buffer)) - mean := sum / numFlows - - for _, successRate := range successRates { - stdDev += math.Pow(successRate-mean, 2) - } - - stdDev = math.Sqrt(stdDev / numFlows) - require.Less(t, stdDev, 0.02, "expected success rate standard deviation to be less than 2%") -} - -func TestWDRRQueue_DecreasingWeights(t *testing.T) { - chDescs := []ChannelDescriptor{ - {ID: 0x01, Priority: 18, MaxSendBytes: 4}, - {ID: 0x02, Priority: 10, MaxSendBytes: 4}, - {ID: 0x03, Priority: 2, MaxSendBytes: 4}, - {ID: 0x04, Priority: 1, MaxSendBytes: 4}, - {ID: 0x05, Priority: 1, MaxSendBytes: 4}, - {ID: 0x06, Priority: 1, MaxSendBytes: 4}, - } - - peerQueue := newWDRRScheduler(log.NewNopLogger(), NopMetrics(), chDescs, 0, 0, 500) - peerQueue.start() - - totalMsgs := make(map[ChannelID]int) - deliveredMsgs := make(map[ChannelID]int) - successRates := make(map[ChannelID]float64) - - for _, chDesc := range chDescs { - total := 1000 - totalMsgs[ChannelID(chDesc.ID)] = total - - go func(cID ChannelID, n int) { - for i := 0; i < n; i++ { - peerQueue.enqueue() <- Envelope{ - channelID: cID, - Message: &testMessage{Value: "foo"}, // 5 bytes - } - } - }(ChannelID(chDesc.ID), total) - } - - closer := tmsync.NewCloser() - - go func() { - timeout := 20 * time.Second - ticker := time.NewTicker(timeout) - defer ticker.Stop() - - for { - select { - case e := <-peerQueue.dequeue(): - deliveredMsgs[e.channelID]++ - ticker.Reset(timeout) - - case <-ticker.C: - closer.Close() - } - } - }() - - // wait for dequeueing to complete - <-closer.Done() - - // close queue and wait for cleanup - peerQueue.close() - <-peerQueue.closed() - - for i, chDesc := range peerQueue.chDescs { - chID := ChannelID(chDesc.ID) - require.Zero(t, peerQueue.deficits[chID], "expected flow deficit to be zero") - require.Len(t, peerQueue.buffer[chID], 0, "expected flow queue to be empty") - - total := totalMsgs[chID] - delivered := deliveredMsgs[chID] - successRate := float64(delivered) / float64(total) - - successRates[chID] = successRate - - // Require some messages dropped. Note, the top weighted flows may not have - // any dropped if lower priority non-empty queues always exist.
- if i > 2 { - require.Less(t, delivered, total, "expected some messages to be dropped") - require.Less(t, successRate, 1.0, "expected a success rate below 100%") - } - } - - require.Zero(t, peerQueue.size, "expected scheduler size to be zero") - - // require channel 0x01 to have the highest success rate due to its weight - ch01Rate := successRates[ChannelID(chDescs[0].ID)] - for i := 1; i < len(chDescs); i++ { - require.GreaterOrEqual(t, ch01Rate, successRates[ChannelID(chDescs[i].ID)]) - } - - // require channel 0x02 to have the 2nd highest success rate due to its weight - ch02Rate := successRates[ChannelID(chDescs[1].ID)] - for i := 2; i < len(chDescs); i++ { - require.GreaterOrEqual(t, ch02Rate, successRates[ChannelID(chDescs[i].ID)]) - } - - // require channel 0x03 to have the 3rd highest success rate due to its weight - ch03Rate := successRates[ChannelID(chDescs[2].ID)] - for i := 3; i < len(chDescs); i++ { - require.GreaterOrEqual(t, ch03Rate, successRates[ChannelID(chDescs[i].ID)]) - } -} diff --git a/internal/proxy/app_conn.go b/internal/proxy/app_conn.go deleted file mode 100644 index 54ce61dac8..0000000000 --- a/internal/proxy/app_conn.go +++ /dev/null @@ -1,250 +0,0 @@ -package proxy - -import ( - "context" - "time" - - "github.com/go-kit/kit/metrics" - abciclient "github.com/tendermint/tendermint/abci/client" - "github.com/tendermint/tendermint/abci/types" -) - -//go:generate ../../scripts/mockery_generate.sh AppConnConsensus|AppConnMempool|AppConnQuery|AppConnSnapshot - -//---------------------------------------------------------------------------------------- -// Enforce which abci msgs can be sent on a connection at the type level - -type AppConnConsensus interface { - SetResponseCallback(abciclient.Callback) - Error() error - - InitChainSync(context.Context, types.RequestInitChain) (*types.ResponseInitChain, error) - - BeginBlockSync(context.Context, types.RequestBeginBlock) (*types.ResponseBeginBlock, error) - DeliverTxAsync(context.Context, types.RequestDeliverTx) (*abciclient.ReqRes, error) - EndBlockSync(context.Context, types.RequestEndBlock) (*types.ResponseEndBlock, error) - CommitSync(context.Context) (*types.ResponseCommit, error) -} - -type AppConnMempool interface { - SetResponseCallback(abciclient.Callback) - Error() error - - CheckTxAsync(context.Context, types.RequestCheckTx) (*abciclient.ReqRes, error) - CheckTxSync(context.Context, types.RequestCheckTx) (*types.ResponseCheckTx, error) - - FlushAsync(context.Context) (*abciclient.ReqRes, error) - FlushSync(context.Context) error -} - -type AppConnQuery interface { - Error() error - - EchoSync(context.Context, string) (*types.ResponseEcho, error) - InfoSync(context.Context, types.RequestInfo) (*types.ResponseInfo, error) - QuerySync(context.Context, types.RequestQuery) (*types.ResponseQuery, error) -} - -type AppConnSnapshot interface { - Error() error - - ListSnapshotsSync(context.Context, types.RequestListSnapshots) (*types.ResponseListSnapshots, error) - OfferSnapshotSync(context.Context, types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) - LoadSnapshotChunkSync(context.Context, types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) - ApplySnapshotChunkSync(context.Context, types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) -} - -//----------------------------------------------------------------------------------------- -// Implements AppConnConsensus (subset of abciclient.Client) - -type appConnConsensus struct { - metrics *Metrics - appConn 
abciclient.Client -} - -func NewAppConnConsensus(appConn abciclient.Client, metrics *Metrics) AppConnConsensus { - return &appConnConsensus{ - metrics: metrics, - appConn: appConn, - } -} - -func (app *appConnConsensus) SetResponseCallback(cb abciclient.Callback) { - app.appConn.SetResponseCallback(cb) -} - -func (app *appConnConsensus) Error() error { - return app.appConn.Error() -} - -func (app *appConnConsensus) InitChainSync( - ctx context.Context, - req types.RequestInitChain, -) (*types.ResponseInitChain, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "init_chain", "type", "sync"))() - return app.appConn.InitChainSync(ctx, req) -} - -func (app *appConnConsensus) BeginBlockSync( - ctx context.Context, - req types.RequestBeginBlock, -) (*types.ResponseBeginBlock, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "begin_block", "type", "sync"))() - return app.appConn.BeginBlockSync(ctx, req) -} - -func (app *appConnConsensus) DeliverTxAsync( - ctx context.Context, - req types.RequestDeliverTx, -) (*abciclient.ReqRes, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "deliver_tx", "type", "async"))() - return app.appConn.DeliverTxAsync(ctx, req) -} - -func (app *appConnConsensus) EndBlockSync( - ctx context.Context, - req types.RequestEndBlock, -) (*types.ResponseEndBlock, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "end_block", "type", "sync"))() - return app.appConn.EndBlockSync(ctx, req) -} - -func (app *appConnConsensus) CommitSync(ctx context.Context) (*types.ResponseCommit, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "commit", "type", "sync"))() - return app.appConn.CommitSync(ctx) -} - -//------------------------------------------------ -// Implements AppConnMempool (subset of abciclient.Client) - -type appConnMempool struct { - metrics *Metrics - appConn abciclient.Client -} - -func NewAppConnMempool(appConn abciclient.Client, metrics *Metrics) AppConnMempool { - return &appConnMempool{ - metrics: metrics, - appConn: appConn, - } -} - -func (app *appConnMempool) SetResponseCallback(cb abciclient.Callback) { - app.appConn.SetResponseCallback(cb) -} - -func (app *appConnMempool) Error() error { - return app.appConn.Error() -} - -func (app *appConnMempool) FlushAsync(ctx context.Context) (*abciclient.ReqRes, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "flush", "type", "async"))() - return app.appConn.FlushAsync(ctx) -} - -func (app *appConnMempool) FlushSync(ctx context.Context) error { - defer addTimeSample(app.metrics.MethodTiming.With("method", "flush", "type", "sync"))() - return app.appConn.FlushSync(ctx) -} - -func (app *appConnMempool) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*abciclient.ReqRes, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "check_tx", "type", "async"))() - return app.appConn.CheckTxAsync(ctx, req) -} - -func (app *appConnMempool) CheckTxSync(ctx context.Context, req types.RequestCheckTx) (*types.ResponseCheckTx, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "check_tx", "type", "sync"))() - return app.appConn.CheckTxSync(ctx, req) -} - -//------------------------------------------------ -// Implements AppConnQuery (subset of abciclient.Client) - -type appConnQuery struct { - metrics *Metrics - appConn abciclient.Client -} - -func NewAppConnQuery(appConn abciclient.Client, metrics *Metrics) AppConnQuery { - return &appConnQuery{ - metrics: 
metrics, - appConn: appConn, - } -} - -func (app *appConnQuery) Error() error { - return app.appConn.Error() -} - -func (app *appConnQuery) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "echo", "type", "sync"))() - return app.appConn.EchoSync(ctx, msg) -} - -func (app *appConnQuery) InfoSync(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "info", "type", "sync"))() - return app.appConn.InfoSync(ctx, req) -} - -func (app *appConnQuery) QuerySync(ctx context.Context, reqQuery types.RequestQuery) (*types.ResponseQuery, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "query", "type", "sync"))() - return app.appConn.QuerySync(ctx, reqQuery) -} - -//------------------------------------------------ -// Implements AppConnSnapshot (subset of abciclient.Client) - -type appConnSnapshot struct { - metrics *Metrics - appConn abciclient.Client -} - -func NewAppConnSnapshot(appConn abciclient.Client, metrics *Metrics) AppConnSnapshot { - return &appConnSnapshot{ - metrics: metrics, - appConn: appConn, - } -} - -func (app *appConnSnapshot) Error() error { - return app.appConn.Error() -} - -func (app *appConnSnapshot) ListSnapshotsSync( - ctx context.Context, - req types.RequestListSnapshots, -) (*types.ResponseListSnapshots, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "list_snapshots", "type", "sync"))() - return app.appConn.ListSnapshotsSync(ctx, req) -} - -func (app *appConnSnapshot) OfferSnapshotSync( - ctx context.Context, - req types.RequestOfferSnapshot, -) (*types.ResponseOfferSnapshot, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "offer_snapshot", "type", "sync"))() - return app.appConn.OfferSnapshotSync(ctx, req) -} - -func (app *appConnSnapshot) LoadSnapshotChunkSync( - ctx context.Context, - req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "load_snapshot_chunk", "type", "sync"))() - return app.appConn.LoadSnapshotChunkSync(ctx, req) -} - -func (app *appConnSnapshot) ApplySnapshotChunkSync( - ctx context.Context, - req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "apply_snapshot_chunk", "type", "sync"))() - return app.appConn.ApplySnapshotChunkSync(ctx, req) -} - -// addTimeSample returns a function that, when called, adds an observation to m. -// The observation added to m is the number of seconds ellapsed since addTimeSample -// was initially called. addTimeSample is meant to be called in a defer to calculate -// the amount of time a function takes to complete. 
-func addTimeSample(m metrics.Histogram) func() { - start := time.Now() - return func() { m.Observe(time.Since(start).Seconds()) } -} diff --git a/internal/proxy/app_conn_test.go b/internal/proxy/app_conn_test.go deleted file mode 100644 index f1ae7fe1a8..0000000000 --- a/internal/proxy/app_conn_test.go +++ /dev/null @@ -1,186 +0,0 @@ -package proxy - -import ( - "context" - "fmt" - "strings" - "testing" - - abciclient "github.com/tendermint/tendermint/abci/client" - "github.com/tendermint/tendermint/abci/example/kvstore" - "github.com/tendermint/tendermint/abci/server" - "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/libs/log" - tmrand "github.com/tendermint/tendermint/libs/rand" -) - -//---------------------------------------- - -type appConnTestI interface { - EchoAsync(ctx context.Context, msg string) (*abciclient.ReqRes, error) - FlushSync(context.Context) error - InfoSync(context.Context, types.RequestInfo) (*types.ResponseInfo, error) -} - -type appConnTest struct { - appConn abciclient.Client -} - -func newAppConnTest(appConn abciclient.Client) appConnTestI { - return &appConnTest{appConn} -} - -func (app *appConnTest) EchoAsync(ctx context.Context, msg string) (*abciclient.ReqRes, error) { - return app.appConn.EchoAsync(ctx, msg) -} - -func (app *appConnTest) FlushSync(ctx context.Context) error { - return app.appConn.FlushSync(ctx) -} - -func (app *appConnTest) InfoSync(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) { - return app.appConn.InfoSync(ctx, req) -} - -//---------------------------------------- - -var SOCKET = "socket" - -func TestEcho(t *testing.T) { - sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6)) - clientCreator := abciclient.NewRemoteCreator(sockPath, SOCKET, true) - - // Start server - s := server.NewSocketServer(sockPath, kvstore.NewApplication()) - s.SetLogger(log.TestingLogger().With("module", "abci-server")) - if err := s.Start(); err != nil { - t.Fatalf("Error starting socket server: %v", err.Error()) - } - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) - - // Start client - cli, err := clientCreator() - if err != nil { - t.Fatalf("Error creating ABCI client: %v", err.Error()) - } - cli.SetLogger(log.TestingLogger().With("module", "abci-client")) - if err := cli.Start(); err != nil { - t.Fatalf("Error starting ABCI client: %v", err.Error()) - } - - proxy := newAppConnTest(cli) - t.Log("Connected") - - ctx := context.Background() - for i := 0; i < 1000; i++ { - _, err = proxy.EchoAsync(ctx, fmt.Sprintf("echo-%v", i)) - if err != nil { - t.Error(err) - } - // flush sometimes - if i%128 == 0 { - if err := proxy.FlushSync(ctx); err != nil { - t.Error(err) - } - } - } - if err := proxy.FlushSync(ctx); err != nil { - t.Error(err) - } -} - -func BenchmarkEcho(b *testing.B) { - b.StopTimer() // Initialize - sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6)) - clientCreator := abciclient.NewRemoteCreator(sockPath, SOCKET, true) - - // Start server - s := server.NewSocketServer(sockPath, kvstore.NewApplication()) - s.SetLogger(log.TestingLogger().With("module", "abci-server")) - if err := s.Start(); err != nil { - b.Fatalf("Error starting socket server: %v", err.Error()) - } - b.Cleanup(func() { - if err := s.Stop(); err != nil { - b.Error(err) - } - }) - - // Start client - cli, err := clientCreator() - if err != nil { - b.Fatalf("Error creating ABCI client: %v", err.Error()) - } - cli.SetLogger(log.TestingLogger().With("module", 
"abci-client")) - if err := cli.Start(); err != nil { - b.Fatalf("Error starting ABCI client: %v", err.Error()) - } - - proxy := newAppConnTest(cli) - b.Log("Connected") - echoString := strings.Repeat(" ", 200) - b.StartTimer() // Start benchmarking tests - - ctx := context.Background() - for i := 0; i < b.N; i++ { - _, err = proxy.EchoAsync(ctx, echoString) - if err != nil { - b.Error(err) - } - // flush sometimes - if i%128 == 0 { - if err := proxy.FlushSync(ctx); err != nil { - b.Error(err) - } - } - } - if err := proxy.FlushSync(ctx); err != nil { - b.Error(err) - } - - b.StopTimer() - // info := proxy.InfoSync(types.RequestInfo{""}) - // b.Log("N: ", b.N, info) -} - -func TestInfo(t *testing.T) { - sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6)) - clientCreator := abciclient.NewRemoteCreator(sockPath, SOCKET, true) - - // Start server - s := server.NewSocketServer(sockPath, kvstore.NewApplication()) - s.SetLogger(log.TestingLogger().With("module", "abci-server")) - if err := s.Start(); err != nil { - t.Fatalf("Error starting socket server: %v", err.Error()) - } - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) - - // Start client - cli, err := clientCreator() - if err != nil { - t.Fatalf("Error creating ABCI client: %v", err.Error()) - } - cli.SetLogger(log.TestingLogger().With("module", "abci-client")) - if err := cli.Start(); err != nil { - t.Fatalf("Error starting ABCI client: %v", err.Error()) - } - - proxy := newAppConnTest(cli) - t.Log("Connected") - - resInfo, err := proxy.InfoSync(context.Background(), RequestInfo) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - if resInfo.Data != "{\"size\":0}" { - t.Error("Expected ResponseInfo with one element '{\"size\":0}' but got something else") - } -} diff --git a/internal/proxy/client.go b/internal/proxy/client.go index ddb9a928de..735c996563 100644 --- a/internal/proxy/client.go +++ b/internal/proxy/client.go @@ -1,41 +1,214 @@ package proxy import ( + "context" "io" + "os" + "syscall" + "time" + + "github.com/go-kit/kit/metrics" abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/libs/service" e2e "github.com/tendermint/tendermint/test/e2e/app" ) -// DefaultClientCreator returns a default ClientCreator, which will create a -// local client if addr is one of: 'kvstore', -// 'persistent_kvstore', 'e2e', or 'noop', otherwise - a remote client. +// ClientFactory returns a client object, which will create a local +// client if addr is one of: 'kvstore', 'persistent_kvstore', 'e2e', +// or 'noop', otherwise - a remote client. // // The Closer is a noop except for persistent_kvstore applications, // which will clean up the store. 
-func DefaultClientCreator(addr, transport, dbDir string) (abciclient.Creator, io.Closer) { +func ClientFactory(logger log.Logger, addr, transport, dbDir string) (abciclient.Client, io.Closer, error) { switch addr { case "kvstore": - return abciclient.NewLocalCreator(kvstore.NewApplication()), noopCloser{} + return abciclient.NewLocalClient(logger, kvstore.NewApplication()), noopCloser{}, nil case "persistent_kvstore": - app := kvstore.NewPersistentKVStoreApplication(dbDir) - return abciclient.NewLocalCreator(app), app + app := kvstore.NewPersistentKVStoreApplication(logger, dbDir) + return abciclient.NewLocalClient(logger, app), app, nil case "e2e": app, err := e2e.NewApplication(e2e.DefaultConfig(dbDir)) if err != nil { - panic(err) + return nil, noopCloser{}, err } - return abciclient.NewLocalCreator(app), noopCloser{} + return abciclient.NewLocalClient(logger, app), noopCloser{}, nil case "noop": - return abciclient.NewLocalCreator(types.NewBaseApplication()), noopCloser{} + return abciclient.NewLocalClient(logger, types.NewBaseApplication()), noopCloser{}, nil default: - mustConnect := false // loop retrying - return abciclient.NewRemoteCreator(addr, transport, mustConnect), noopCloser{} + const mustConnect = false // loop retrying + client, err := abciclient.NewClient(logger, addr, transport, mustConnect) + if err != nil { + return nil, noopCloser{}, err + } + + return client, noopCloser{}, nil } } type noopCloser struct{} func (noopCloser) Close() error { return nil } + +// proxyClient provides the application connection. +type proxyClient struct { + service.BaseService + logger log.Logger + + client abciclient.Client + metrics *Metrics +} + +// New creates a proxy application interface. +func New(client abciclient.Client, logger log.Logger, metrics *Metrics) abciclient.Client { + conn := &proxyClient{ + logger: logger, + metrics: metrics, + client: client, + } + conn.BaseService = *service.NewBaseService(logger, "proxyClient", conn) + return conn +} + +func (app *proxyClient) OnStop() { tryCallStop(app.client) } +func (app *proxyClient) Error() error { return app.client.Error() } + +func tryCallStop(client abciclient.Client) { + if c, ok := client.(interface{ Stop() }); ok { + c.Stop() + } +} + +func (app *proxyClient) OnStart(ctx context.Context) error { + var err error + defer func() { + if err != nil { + tryCallStop(app.client) + } + }() + + // Kill Tendermint if the ABCI application crashes. + go func() { + if !app.client.IsRunning() { + return + } + app.client.Wait() + if ctx.Err() != nil { + return + } + + if err := app.client.Error(); err != nil { + app.logger.Error("client connection terminated. Did the application crash? 
Please restart tendermint", + "err", err) + + if killErr := kill(); killErr != nil { + app.logger.Error("Failed to kill this process - please do so manually", + "err", killErr) + } + } + + }() + + return app.client.Start(ctx) +} + +func kill() error { + p, err := os.FindProcess(os.Getpid()) + if err != nil { + return err + } + + return p.Signal(syscall.SIGABRT) +} + +func (app *proxyClient) InitChain(ctx context.Context, req *types.RequestInitChain) (*types.ResponseInitChain, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "init_chain", "type", "sync"))() + return app.client.InitChain(ctx, req) +} + +func (app *proxyClient) PrepareProposal(ctx context.Context, req *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "prepare_proposal", "type", "sync"))() + return app.client.PrepareProposal(ctx, req) +} + +func (app *proxyClient) ProcessProposal(ctx context.Context, req *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "process_proposal", "type", "sync"))() + return app.client.ProcessProposal(ctx, req) +} + +func (app *proxyClient) ExtendVote(ctx context.Context, req *types.RequestExtendVote) (*types.ResponseExtendVote, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "extend_vote", "type", "sync"))() + return app.client.ExtendVote(ctx, req) +} + +func (app *proxyClient) VerifyVoteExtension(ctx context.Context, req *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "verify_vote_extension", "type", "sync"))() + return app.client.VerifyVoteExtension(ctx, req) +} + +func (app *proxyClient) FinalizeBlock(ctx context.Context, req *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "finalize_block", "type", "sync"))() + return app.client.FinalizeBlock(ctx, req) +} + +func (app *proxyClient) Commit(ctx context.Context) (*types.ResponseCommit, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "commit", "type", "sync"))() + return app.client.Commit(ctx) +} + +func (app *proxyClient) Flush(ctx context.Context) error { + defer addTimeSample(app.metrics.MethodTiming.With("method", "flush", "type", "sync"))() + return app.client.Flush(ctx) +} + +func (app *proxyClient) CheckTx(ctx context.Context, req *types.RequestCheckTx) (*types.ResponseCheckTx, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "check_tx", "type", "sync"))() + return app.client.CheckTx(ctx, req) +} + +func (app *proxyClient) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "echo", "type", "sync"))() + return app.client.Echo(ctx, msg) +} + +func (app *proxyClient) Info(ctx context.Context, req *types.RequestInfo) (*types.ResponseInfo, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "info", "type", "sync"))() + return app.client.Info(ctx, req) +} + +func (app *proxyClient) Query(ctx context.Context, req *types.RequestQuery) (*types.ResponseQuery, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "query", "type", "sync"))() + return app.client.Query(ctx, req) +} + +func (app *proxyClient) ListSnapshots(ctx context.Context, req *types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { + 
defer addTimeSample(app.metrics.MethodTiming.With("method", "list_snapshots", "type", "sync"))() + return app.client.ListSnapshots(ctx, req) +} + +func (app *proxyClient) OfferSnapshot(ctx context.Context, req *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "offer_snapshot", "type", "sync"))() + return app.client.OfferSnapshot(ctx, req) +} + +func (app *proxyClient) LoadSnapshotChunk(ctx context.Context, req *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "load_snapshot_chunk", "type", "sync"))() + return app.client.LoadSnapshotChunk(ctx, req) +} + +func (app *proxyClient) ApplySnapshotChunk(ctx context.Context, req *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "apply_snapshot_chunk", "type", "sync"))() + return app.client.ApplySnapshotChunk(ctx, req) +} + +// addTimeSample returns a function that, when called, adds an observation to m. +// The observation added to m is the number of seconds elapsed since addTimeSample +// was initially called. addTimeSample is meant to be called in a defer to calculate +// the amount of time a function takes to complete. +func addTimeSample(m metrics.Histogram) func() { + start := time.Now() + return func() { m.Observe(time.Since(start).Seconds()) } +} diff --git a/internal/proxy/client_test.go b/internal/proxy/client_test.go new file mode 100644 index 0000000000..09ac3f2c87 --- /dev/null +++ b/internal/proxy/client_test.go @@ -0,0 +1,235 @@ +package proxy + +import ( + "context" + "errors" + "fmt" + "os" + "os/signal" + "strings" + "syscall" + "testing" + "time" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "gotest.tools/assert" + + abciclient "github.com/tendermint/tendermint/abci/client" + abcimocks "github.com/tendermint/tendermint/abci/client/mocks" + "github.com/tendermint/tendermint/abci/example/kvstore" + "github.com/tendermint/tendermint/abci/server" + "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/libs/log" + tmrand "github.com/tendermint/tendermint/libs/rand" +) + +//---------------------------------------- + +type appConnTestI interface { + Echo(context.Context, string) (*types.ResponseEcho, error) + Flush(context.Context) error + Info(context.Context, *types.RequestInfo) (*types.ResponseInfo, error) +} + +type appConnTest struct { + appConn abciclient.Client +} + +func newAppConnTest(appConn abciclient.Client) appConnTestI { + return &appConnTest{appConn} +} + +func (app *appConnTest) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) { + return app.appConn.Echo(ctx, msg) +} + +func (app *appConnTest) Flush(ctx context.Context) error { + return app.appConn.Flush(ctx) +} + +func (app *appConnTest) Info(ctx context.Context, req *types.RequestInfo) (*types.ResponseInfo, error) { + return app.appConn.Info(ctx, req) +} + +//---------------------------------------- + +var SOCKET = "socket" + +func TestEcho(t *testing.T) { + sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6)) + logger := log.NewNopLogger() + client, err := abciclient.NewClient(logger, sockPath, SOCKET, true) + if err != nil { + t.Fatal(err) + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Start server + s := server.NewSocketServer(logger.With("module", "abci-server"), sockPath,
kvstore.NewApplication()) + require.NoError(t, s.Start(ctx), "error starting socket server") + t.Cleanup(func() { cancel(); s.Wait() }) + + // Start client + require.NoError(t, client.Start(ctx), "Error starting ABCI client") + + proxy := newAppConnTest(client) + t.Log("Connected") + + for i := 0; i < 1000; i++ { + _, err = proxy.Echo(ctx, fmt.Sprintf("echo-%v", i)) + if err != nil { + t.Error(err) + } + // flush sometimes + if i%128 == 0 { + if err := proxy.Flush(ctx); err != nil { + t.Error(err) + } + } + } + if err := proxy.Flush(ctx); err != nil { + t.Error(err) + } +} + +func BenchmarkEcho(b *testing.B) { + b.StopTimer() // Initialize + sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6)) + logger := log.NewNopLogger() + client, err := abciclient.NewClient(logger, sockPath, SOCKET, true) + if err != nil { + b.Fatal(err) + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Start server + s := server.NewSocketServer(logger.With("module", "abci-server"), sockPath, kvstore.NewApplication()) + require.NoError(b, s.Start(ctx), "Error starting socket server") + b.Cleanup(func() { cancel(); s.Wait() }) + + // Start client + require.NoError(b, client.Start(ctx), "Error starting ABCI client") + + proxy := newAppConnTest(client) + b.Log("Connected") + echoString := strings.Repeat(" ", 200) + b.StartTimer() // Start benchmarking tests + + for i := 0; i < b.N; i++ { + _, err = proxy.Echo(ctx, echoString) + if err != nil { + b.Error(err) + } + // flush sometimes + if i%128 == 0 { + if err := proxy.Flush(ctx); err != nil { + b.Error(err) + } + } + } + if err := proxy.Flush(ctx); err != nil { + b.Error(err) + } + + b.StopTimer() + // info := proxy.Info(types.RequestInfo{""}) + // b.Log("N: ", b.N, info) +} + +func TestInfo(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6)) + logger := log.NewNopLogger() + client, err := abciclient.NewClient(logger, sockPath, SOCKET, true) + if err != nil { + t.Fatal(err) + } + + // Start server + s := server.NewSocketServer(logger.With("module", "abci-server"), sockPath, kvstore.NewApplication()) + require.NoError(t, s.Start(ctx), "Error starting socket server") + t.Cleanup(func() { cancel(); s.Wait() }) + + // Start client + require.NoError(t, client.Start(ctx), "Error starting ABCI client") + + proxy := newAppConnTest(client) + t.Log("Connected") + + resInfo, err := proxy.Info(ctx, &RequestInfo) + require.NoError(t, err) + + if resInfo.Data != "{\"size\":0}" { + t.Error("Expected ResponseInfo with one element '{\"size\":0}' but got something else") + } +} + +type noopStoppableClientImpl struct { + abciclient.Client + count int +} + +func (c *noopStoppableClientImpl) Stop() { c.count++ } + +func TestAppConns_Start_Stop(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + clientMock := &abcimocks.Client{} + clientMock.On("Start", mock.Anything).Return(nil) + clientMock.On("Error").Return(nil) + clientMock.On("IsRunning").Return(true) + clientMock.On("Wait").Return(nil).Times(1) + cl := &noopStoppableClientImpl{Client: clientMock} + + appConns := New(cl, log.NewNopLogger(), NopMetrics()) + + err := appConns.Start(ctx) + require.NoError(t, err) + + time.Sleep(200 * time.Millisecond) + + cancel() + appConns.Wait() + + clientMock.AssertExpectations(t) + assert.Equal(t, 1, cl.count) +} + +// Upon failure, we call tmos.Kill +func TestAppConns_Failure(t *testing.T) { + c := 
make(chan os.Signal, 1) + signal.Notify(c, syscall.SIGTERM, syscall.SIGABRT) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + clientMock := &abcimocks.Client{} + clientMock.On("SetLogger", mock.Anything).Return() + clientMock.On("Start", mock.Anything).Return(nil) + clientMock.On("IsRunning").Return(true) + clientMock.On("Wait").Return(nil) + clientMock.On("Error").Return(errors.New("EOF")) + cl := &noopStoppableClientImpl{Client: clientMock} + + appConns := New(cl, log.NewNopLogger(), NopMetrics()) + + err := appConns.Start(ctx) + require.NoError(t, err) + t.Cleanup(func() { cancel(); appConns.Wait() }) + + select { + case sig := <-c: + t.Logf("signal %q successfully received", sig) + case <-ctx.Done(): + t.Fatal("expected process to receive SIGABRT signal") + } +} diff --git a/internal/proxy/mocks/app_conn_consensus.go b/internal/proxy/mocks/app_conn_consensus.go index fa93b0931e..3f5df314ad 100644 --- a/internal/proxy/mocks/app_conn_consensus.go +++ b/internal/proxy/mocks/app_conn_consensus.go @@ -5,8 +5,6 @@ package mocks import ( context "context" - abciclient "github.com/tendermint/tendermint/abci/client" - mock "github.com/stretchr/testify/mock" types "github.com/tendermint/tendermint/abci/types" @@ -17,21 +15,58 @@ type AppConnConsensus struct { mock.Mock } -// BeginBlockSync provides a mock function with given fields: _a0, _a1 -func (_m *AppConnConsensus) BeginBlockSync(_a0 context.Context, _a1 types.RequestBeginBlock) (*types.ResponseBeginBlock, error) { +// Commit provides a mock function with given fields: _a0 +func (_m *AppConnConsensus) Commit(_a0 context.Context) (*types.ResponseCommit, error) { + ret := _m.Called(_a0) + + var r0 *types.ResponseCommit + if rf, ok := ret.Get(0).(func(context.Context) *types.ResponseCommit); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseCommit) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Error provides a mock function with given fields: +func (_m *AppConnConsensus) Error() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ExtendVote provides a mock function with given fields: _a0, _a1 +func (_m *AppConnConsensus) ExtendVote(_a0 context.Context, _a1 types.RequestExtendVote) (*types.ResponseExtendVote, error) { ret := _m.Called(_a0, _a1) - var r0 *types.ResponseBeginBlock - if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *types.ResponseBeginBlock); ok { + var r0 *types.ResponseExtendVote + if rf, ok := ret.Get(0).(func(context.Context, types.RequestExtendVote) *types.ResponseExtendVote); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseBeginBlock) + r0 = ret.Get(0).(*types.ResponseExtendVote) } } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestBeginBlock) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, types.RequestExtendVote) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) @@ -40,22 +75,22 @@ func (_m *AppConnConsensus) BeginBlockSync(_a0 context.Context, _a1 types.Reques return r0, r1 } -// CommitSync provides a mock function with given fields: _a0 -func (_m *AppConnConsensus) CommitSync(_a0 context.Context) (*types.ResponseCommit, error) { - ret := _m.Called(_a0) +// FinalizeBlock provides a mock function with
given fields: _a0, _a1 +func (_m *AppConnConsensus) FinalizeBlock(_a0 context.Context, _a1 types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) { + ret := _m.Called(_a0, _a1) - var r0 *types.ResponseCommit - if rf, ok := ret.Get(0).(func(context.Context) *types.ResponseCommit); ok { - r0 = rf(_a0) + var r0 *types.ResponseFinalizeBlock + if rf, ok := ret.Get(0).(func(context.Context, types.RequestFinalizeBlock) *types.ResponseFinalizeBlock); ok { + r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseCommit) + r0 = ret.Get(0).(*types.ResponseFinalizeBlock) } } var r1 error - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(_a0) + if rf, ok := ret.Get(1).(func(context.Context, types.RequestFinalizeBlock) error); ok { + r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) } @@ -63,21 +98,21 @@ func (_m *AppConnConsensus) CommitSync(_a0 context.Context) (*types.ResponseComm return r0, r1 } -// DeliverTxAsync provides a mock function with given fields: _a0, _a1 -func (_m *AppConnConsensus) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx) (*abciclient.ReqRes, error) { +// InitChain provides a mock function with given fields: _a0, _a1 +func (_m *AppConnConsensus) InitChain(_a0 context.Context, _a1 types.RequestInitChain) (*types.ResponseInitChain, error) { ret := _m.Called(_a0, _a1) - var r0 *abciclient.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *abciclient.ReqRes); ok { + var r0 *types.ResponseInitChain + if rf, ok := ret.Get(0).(func(context.Context, types.RequestInitChain) *types.ResponseInitChain); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abciclient.ReqRes) + r0 = ret.Get(0).(*types.ResponseInitChain) } } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestDeliverTx) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, types.RequestInitChain) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) @@ -86,21 +121,21 @@ func (_m *AppConnConsensus) DeliverTxAsync(_a0 context.Context, _a1 types.Reques return r0, r1 } -// EndBlockSync provides a mock function with given fields: _a0, _a1 -func (_m *AppConnConsensus) EndBlockSync(_a0 context.Context, _a1 types.RequestEndBlock) (*types.ResponseEndBlock, error) { +// PrepareProposal provides a mock function with given fields: _a0, _a1 +func (_m *AppConnConsensus) PrepareProposal(_a0 context.Context, _a1 types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) { ret := _m.Called(_a0, _a1) - var r0 *types.ResponseEndBlock - if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *types.ResponseEndBlock); ok { + var r0 *types.ResponsePrepareProposal + if rf, ok := ret.Get(0).(func(context.Context, types.RequestPrepareProposal) *types.ResponsePrepareProposal); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseEndBlock) + r0 = ret.Get(0).(*types.ResponsePrepareProposal) } } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestEndBlock) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, types.RequestPrepareProposal) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) @@ -109,35 +144,44 @@ func (_m *AppConnConsensus) EndBlockSync(_a0 context.Context, _a1 types.RequestE return r0, r1 } -// Error provides a mock function with given fields: -func (_m *AppConnConsensus) Error() error { - ret := _m.Called() +// ProcessProposal provides a mock function with given fields: _a0, _a1 +func 
(_m *AppConnConsensus) ProcessProposal(_a0 context.Context, _a1 types.RequestProcessProposal) (*types.ResponseProcessProposal, error) { + ret := _m.Called(_a0, _a1) - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() + var r0 *types.ResponseProcessProposal + if rf, ok := ret.Get(0).(func(context.Context, types.RequestProcessProposal) *types.ResponseProcessProposal); ok { + r0 = rf(_a0, _a1) } else { - r0 = ret.Error(0) + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseProcessProposal) + } } - return r0 + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.RequestProcessProposal) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } -// InitChainSync provides a mock function with given fields: _a0, _a1 -func (_m *AppConnConsensus) InitChainSync(_a0 context.Context, _a1 types.RequestInitChain) (*types.ResponseInitChain, error) { +// VerifyVoteExtension provides a mock function with given fields: _a0, _a1 +func (_m *AppConnConsensus) VerifyVoteExtension(_a0 context.Context, _a1 types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) { ret := _m.Called(_a0, _a1) - var r0 *types.ResponseInitChain - if rf, ok := ret.Get(0).(func(context.Context, types.RequestInitChain) *types.ResponseInitChain); ok { + var r0 *types.ResponseVerifyVoteExtension + if rf, ok := ret.Get(0).(func(context.Context, types.RequestVerifyVoteExtension) *types.ResponseVerifyVoteExtension); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseInitChain) + r0 = ret.Get(0).(*types.ResponseVerifyVoteExtension) } } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestInitChain) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, types.RequestVerifyVoteExtension) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) @@ -145,8 +189,3 @@ func (_m *AppConnConsensus) InitChainSync(_a0 context.Context, _a1 types.Request return r0, r1 } - -// SetResponseCallback provides a mock function with given fields: _a0 -func (_m *AppConnConsensus) SetResponseCallback(_a0 abciclient.Callback) { - _m.Called(_a0) -} diff --git a/internal/proxy/mocks/app_conn_mempool.go b/internal/proxy/mocks/app_conn_mempool.go index 5429d8f909..fd5acef7d8 100644 --- a/internal/proxy/mocks/app_conn_mempool.go +++ b/internal/proxy/mocks/app_conn_mempool.go @@ -5,8 +5,6 @@ package mocks import ( context "context" - abciclient "github.com/tendermint/tendermint/abci/client" - mock "github.com/stretchr/testify/mock" types "github.com/tendermint/tendermint/abci/types" @@ -17,31 +15,8 @@ type AppConnMempool struct { mock.Mock } -// CheckTxAsync provides a mock function with given fields: _a0, _a1 -func (_m *AppConnMempool) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*abciclient.ReqRes, error) { - ret := _m.Called(_a0, _a1) - - var r0 *abciclient.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *abciclient.ReqRes); ok { - r0 = rf(_a0, _a1) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*abciclient.ReqRes) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestCheckTx) error); ok { - r1 = rf(_a0, _a1) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CheckTxSync provides a mock function with given fields: _a0, _a1 -func (_m *AppConnMempool) CheckTxSync(_a0 context.Context, _a1 types.RequestCheckTx) (*types.ResponseCheckTx, error) { +// CheckTx provides a mock function with given fields: _a0, _a1 +func (_m 
*AppConnMempool) CheckTx(_a0 context.Context, _a1 types.RequestCheckTx) (*types.ResponseCheckTx, error) { ret := _m.Called(_a0, _a1) var r0 *types.ResponseCheckTx @@ -77,31 +52,8 @@ func (_m *AppConnMempool) Error() error { return r0 } -// FlushAsync provides a mock function with given fields: _a0 -func (_m *AppConnMempool) FlushAsync(_a0 context.Context) (*abciclient.ReqRes, error) { - ret := _m.Called(_a0) - - var r0 *abciclient.ReqRes - if rf, ok := ret.Get(0).(func(context.Context) *abciclient.ReqRes); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*abciclient.ReqRes) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// FlushSync provides a mock function with given fields: _a0 -func (_m *AppConnMempool) FlushSync(_a0 context.Context) error { +// Flush provides a mock function with given fields: _a0 +func (_m *AppConnMempool) Flush(_a0 context.Context) error { ret := _m.Called(_a0) var r0 error @@ -113,8 +65,3 @@ func (_m *AppConnMempool) FlushSync(_a0 context.Context) error { return r0 } - -// SetResponseCallback provides a mock function with given fields: _a0 -func (_m *AppConnMempool) SetResponseCallback(_a0 abciclient.Callback) { - _m.Called(_a0) -} diff --git a/internal/proxy/mocks/app_conn_query.go b/internal/proxy/mocks/app_conn_query.go index 47ac5bef94..e515cb784e 100644 --- a/internal/proxy/mocks/app_conn_query.go +++ b/internal/proxy/mocks/app_conn_query.go @@ -15,8 +15,8 @@ type AppConnQuery struct { mock.Mock } -// EchoSync provides a mock function with given fields: _a0, _a1 -func (_m *AppConnQuery) EchoSync(_a0 context.Context, _a1 string) (*types.ResponseEcho, error) { +// Echo provides a mock function with given fields: _a0, _a1 +func (_m *AppConnQuery) Echo(_a0 context.Context, _a1 string) (*types.ResponseEcho, error) { ret := _m.Called(_a0, _a1) var r0 *types.ResponseEcho @@ -52,8 +52,8 @@ func (_m *AppConnQuery) Error() error { return r0 } -// InfoSync provides a mock function with given fields: _a0, _a1 -func (_m *AppConnQuery) InfoSync(_a0 context.Context, _a1 types.RequestInfo) (*types.ResponseInfo, error) { +// Info provides a mock function with given fields: _a0, _a1 +func (_m *AppConnQuery) Info(_a0 context.Context, _a1 types.RequestInfo) (*types.ResponseInfo, error) { ret := _m.Called(_a0, _a1) var r0 *types.ResponseInfo @@ -75,8 +75,8 @@ func (_m *AppConnQuery) InfoSync(_a0 context.Context, _a1 types.RequestInfo) (*t return r0, r1 } -// QuerySync provides a mock function with given fields: _a0, _a1 -func (_m *AppConnQuery) QuerySync(_a0 context.Context, _a1 types.RequestQuery) (*types.ResponseQuery, error) { +// Query provides a mock function with given fields: _a0, _a1 +func (_m *AppConnQuery) Query(_a0 context.Context, _a1 types.RequestQuery) (*types.ResponseQuery, error) { ret := _m.Called(_a0, _a1) var r0 *types.ResponseQuery diff --git a/internal/proxy/mocks/app_conn_snapshot.go b/internal/proxy/mocks/app_conn_snapshot.go index 0b6f10ce13..0b3f06ad70 100644 --- a/internal/proxy/mocks/app_conn_snapshot.go +++ b/internal/proxy/mocks/app_conn_snapshot.go @@ -15,8 +15,8 @@ type AppConnSnapshot struct { mock.Mock } -// ApplySnapshotChunkSync provides a mock function with given fields: _a0, _a1 -func (_m *AppConnSnapshot) ApplySnapshotChunkSync(_a0 context.Context, _a1 types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { +// ApplySnapshotChunk provides a mock function with given fields: _a0, _a1 
+func (_m *AppConnSnapshot) ApplySnapshotChunk(_a0 context.Context, _a1 types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { ret := _m.Called(_a0, _a1) var r0 *types.ResponseApplySnapshotChunk @@ -52,8 +52,8 @@ func (_m *AppConnSnapshot) Error() error { return r0 } -// ListSnapshotsSync provides a mock function with given fields: _a0, _a1 -func (_m *AppConnSnapshot) ListSnapshotsSync(_a0 context.Context, _a1 types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { +// ListSnapshots provides a mock function with given fields: _a0, _a1 +func (_m *AppConnSnapshot) ListSnapshots(_a0 context.Context, _a1 types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { ret := _m.Called(_a0, _a1) var r0 *types.ResponseListSnapshots @@ -75,8 +75,8 @@ func (_m *AppConnSnapshot) ListSnapshotsSync(_a0 context.Context, _a1 types.Requ return r0, r1 } -// LoadSnapshotChunkSync provides a mock function with given fields: _a0, _a1 -func (_m *AppConnSnapshot) LoadSnapshotChunkSync(_a0 context.Context, _a1 types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { +// LoadSnapshotChunk provides a mock function with given fields: _a0, _a1 +func (_m *AppConnSnapshot) LoadSnapshotChunk(_a0 context.Context, _a1 types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { ret := _m.Called(_a0, _a1) var r0 *types.ResponseLoadSnapshotChunk @@ -98,8 +98,8 @@ func (_m *AppConnSnapshot) LoadSnapshotChunkSync(_a0 context.Context, _a1 types. return r0, r1 } -// OfferSnapshotSync provides a mock function with given fields: _a0, _a1 -func (_m *AppConnSnapshot) OfferSnapshotSync(_a0 context.Context, _a1 types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { +// OfferSnapshot provides a mock function with given fields: _a0, _a1 +func (_m *AppConnSnapshot) OfferSnapshot(_a0 context.Context, _a1 types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { ret := _m.Called(_a0, _a1) var r0 *types.ResponseOfferSnapshot diff --git a/internal/proxy/multi_app_conn.go b/internal/proxy/multi_app_conn.go deleted file mode 100644 index 0bcc64af67..0000000000 --- a/internal/proxy/multi_app_conn.go +++ /dev/null @@ -1,202 +0,0 @@ -package proxy - -import ( - "fmt" - "os" - "syscall" - - abciclient "github.com/tendermint/tendermint/abci/client" - tmlog "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/service" -) - -const ( - connConsensus = "consensus" - connMempool = "mempool" - connQuery = "query" - connSnapshot = "snapshot" -) - -// AppConns is the Tendermint's interface to the application that consists of -// multiple connections. -type AppConns interface { - service.Service - - // Mempool connection - Mempool() AppConnMempool - // Consensus connection - Consensus() AppConnConsensus - // Query connection - Query() AppConnQuery - // Snapshot connection - Snapshot() AppConnSnapshot -} - -// NewAppConns calls NewMultiAppConn. -func NewAppConns(clientCreator abciclient.Creator, metrics *Metrics) AppConns { - return NewMultiAppConn(clientCreator, metrics) -} - -// multiAppConn implements AppConns. -// -// A multiAppConn is made of a few appConns and manages their underlying abci -// clients. 
-// TODO: on app restart, clients must reboot together -type multiAppConn struct { - service.BaseService - - metrics *Metrics - consensusConn AppConnConsensus - mempoolConn AppConnMempool - queryConn AppConnQuery - snapshotConn AppConnSnapshot - - consensusConnClient abciclient.Client - mempoolConnClient abciclient.Client - queryConnClient abciclient.Client - snapshotConnClient abciclient.Client - - clientCreator abciclient.Creator -} - -// NewMultiAppConn makes all necessary abci connections to the application. -func NewMultiAppConn(clientCreator abciclient.Creator, metrics *Metrics) AppConns { - multiAppConn := &multiAppConn{ - metrics: metrics, - clientCreator: clientCreator, - } - multiAppConn.BaseService = *service.NewBaseService(nil, "multiAppConn", multiAppConn) - return multiAppConn -} - -func (app *multiAppConn) Mempool() AppConnMempool { - return app.mempoolConn -} - -func (app *multiAppConn) Consensus() AppConnConsensus { - return app.consensusConn -} - -func (app *multiAppConn) Query() AppConnQuery { - return app.queryConn -} - -func (app *multiAppConn) Snapshot() AppConnSnapshot { - return app.snapshotConn -} - -func (app *multiAppConn) OnStart() error { - c, err := app.abciClientFor(connQuery) - if err != nil { - return err - } - app.queryConnClient = c - app.queryConn = NewAppConnQuery(c, app.metrics) - - c, err = app.abciClientFor(connSnapshot) - if err != nil { - app.stopAllClients() - return err - } - app.snapshotConnClient = c - app.snapshotConn = NewAppConnSnapshot(c, app.metrics) - - c, err = app.abciClientFor(connMempool) - if err != nil { - app.stopAllClients() - return err - } - app.mempoolConnClient = c - app.mempoolConn = NewAppConnMempool(c, app.metrics) - - c, err = app.abciClientFor(connConsensus) - if err != nil { - app.stopAllClients() - return err - } - app.consensusConnClient = c - app.consensusConn = NewAppConnConsensus(c, app.metrics) - - // Kill Tendermint if the ABCI application crashes. - go app.killTMOnClientError() - - return nil -} - -func (app *multiAppConn) OnStop() { - app.stopAllClients() -} - -func (app *multiAppConn) killTMOnClientError() { - killFn := func(conn string, err error, logger tmlog.Logger) { - logger.Error( - fmt.Sprintf("%s connection terminated. Did the application crash? 
Please restart tendermint", conn), - "err", err) - if killErr := kill(); killErr != nil { - logger.Error("Failed to kill this process - please do so manually", "err", killErr) - } - } - - select { - case <-app.consensusConnClient.Quit(): - if err := app.consensusConnClient.Error(); err != nil { - killFn(connConsensus, err, app.Logger) - } - case <-app.mempoolConnClient.Quit(): - if err := app.mempoolConnClient.Error(); err != nil { - killFn(connMempool, err, app.Logger) - } - case <-app.queryConnClient.Quit(): - if err := app.queryConnClient.Error(); err != nil { - killFn(connQuery, err, app.Logger) - } - case <-app.snapshotConnClient.Quit(): - if err := app.snapshotConnClient.Error(); err != nil { - killFn(connSnapshot, err, app.Logger) - } - } -} - -func (app *multiAppConn) stopAllClients() { - if app.consensusConnClient != nil { - if err := app.consensusConnClient.Stop(); err != nil { - app.Logger.Error("error while stopping consensus client", "error", err) - } - } - if app.mempoolConnClient != nil { - if err := app.mempoolConnClient.Stop(); err != nil { - app.Logger.Error("error while stopping mempool client", "error", err) - } - } - if app.queryConnClient != nil { - if err := app.queryConnClient.Stop(); err != nil { - app.Logger.Error("error while stopping query client", "error", err) - } - } - if app.snapshotConnClient != nil { - if err := app.snapshotConnClient.Stop(); err != nil { - app.Logger.Error("error while stopping snapshot client", "error", err) - } - } -} - -func (app *multiAppConn) abciClientFor(conn string) (abciclient.Client, error) { - c, err := app.clientCreator() - if err != nil { - return nil, fmt.Errorf("error creating ABCI client (%s connection): %w", conn, err) - } - c.SetLogger(app.Logger.With("module", "abci-client", "connection", conn)) - if err := c.Start(); err != nil { - return nil, fmt.Errorf("error starting ABCI client (%s connection): %w", conn, err) - } - return c, nil -} - -func kill() error { - p, err := os.FindProcess(os.Getpid()) - if err != nil { - return err - } - - return p.Signal(syscall.SIGTERM) -} diff --git a/internal/proxy/multi_app_conn_test.go b/internal/proxy/multi_app_conn_test.go deleted file mode 100644 index 25ed692aba..0000000000 --- a/internal/proxy/multi_app_conn_test.go +++ /dev/null @@ -1,94 +0,0 @@ -package proxy - -import ( - "errors" - "os" - "os/signal" - "syscall" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - - abciclient "github.com/tendermint/tendermint/abci/client" - abcimocks "github.com/tendermint/tendermint/abci/client/mocks" -) - -func TestAppConns_Start_Stop(t *testing.T) { - quitCh := make(<-chan struct{}) - - clientMock := &abcimocks.Client{} - clientMock.On("SetLogger", mock.Anything).Return().Times(4) - clientMock.On("Start").Return(nil).Times(4) - clientMock.On("Stop").Return(nil).Times(4) - clientMock.On("Quit").Return(quitCh).Times(4) - - creatorCallCount := 0 - creator := func() (abciclient.Client, error) { - creatorCallCount++ - return clientMock, nil - } - - appConns := NewAppConns(creator, NopMetrics()) - - err := appConns.Start() - require.NoError(t, err) - - time.Sleep(100 * time.Millisecond) - - err = appConns.Stop() - require.NoError(t, err) - - clientMock.AssertExpectations(t) - assert.Equal(t, 4, creatorCallCount) -} - -// Upon failure, we call tmos.Kill -func TestAppConns_Failure(t *testing.T) { - ok := make(chan struct{}) - c := make(chan os.Signal, 1) - signal.Notify(c, syscall.SIGTERM) - go func() { - 
for range c { - close(ok) - } - }() - - quitCh := make(chan struct{}) - var recvQuitCh <-chan struct{} // nolint:gosimple - recvQuitCh = quitCh - - clientMock := &abcimocks.Client{} - clientMock.On("SetLogger", mock.Anything).Return() - clientMock.On("Start").Return(nil) - clientMock.On("Stop").Return(nil) - - clientMock.On("Quit").Return(recvQuitCh) - clientMock.On("Error").Return(errors.New("EOF")).Once() - - creator := func() (abciclient.Client, error) { - return clientMock, nil - } - - appConns := NewAppConns(creator, NopMetrics()) - - err := appConns.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := appConns.Stop(); err != nil { - t.Error(err) - } - }) - - // simulate failure - close(quitCh) - - select { - case <-ok: - t.Log("SIGTERM successfully received") - case <-time.After(5 * time.Second): - t.Fatal("expected process to receive SIGTERM signal") - } -} diff --git a/internal/pubsub/example_test.go b/internal/pubsub/example_test.go new file mode 100644 index 0000000000..c4b5dc5c94 --- /dev/null +++ b/internal/pubsub/example_test.go @@ -0,0 +1,34 @@ +package pubsub_test + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/pubsub" + "github.com/tendermint/tendermint/internal/pubsub/query" + "github.com/tendermint/tendermint/libs/log" +) + +func TestExample(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + s := newTestServer(ctx, t, log.NewNopLogger()) + + sub := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: "example-client", + Query: query.MustCompile(`abci.account.name='John'`), + })) + + events := []abci.Event{ + { + Type: "abci.account", + Attributes: []abci.EventAttribute{{Key: "name", Value: "John"}}, + }, + } + require.NoError(t, s.PublishWithEvents(pubstring("Tombstone"), events)) + sub.mustReceive(ctx, pubstring("Tombstone")) +} diff --git a/internal/pubsub/pubsub.go b/internal/pubsub/pubsub.go new file mode 100644 index 0000000000..c0ad4ae3c7 --- /dev/null +++ b/internal/pubsub/pubsub.go @@ -0,0 +1,421 @@ +// Package pubsub implements an event dispatching server with a single publisher +// and multiple subscriber clients. Multiple goroutines can safely publish to a +// single Server instance. +// +// Clients register subscriptions with a query to select which messages they +// wish to receive. When messages are published, they are broadcast to all +// clients whose subscription query matches that message. Queries are +// constructed using the github.com/tendermint/tendermint/internal/pubsub/query +// package. +// +// Example: +// +// q, err := query.New(`account.name='John'`) +// if err != nil { +// return err +// } +// sub, err := pubsub.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ +// ClientID: "johns-transactions", +// Query: q, +// }) +// if err != nil { +// return err +// } +// +// for { +// next, err := sub.Next(ctx) +// if err == pubsub.ErrTerminated { +// return err // terminated by publisher +// } else if err != nil { +// return err // timed out, client unsubscribed, etc. 
+// } +// process(next) +// } +// +package pubsub + +import ( + "context" + "errors" + "fmt" + "sync" + + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/pubsub/query" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/types" +) + +var ( + // ErrSubscriptionNotFound is returned when a client tries to unsubscribe + // from a subscription that does not exist. + ErrSubscriptionNotFound = errors.New("subscription not found") + + // ErrAlreadySubscribed is returned when a client tries to subscribe twice or + // more using the same query. + ErrAlreadySubscribed = errors.New("already subscribed") + + // ErrServerStopped is returned when attempting to publish or subscribe to a + // server that has been stopped. + ErrServerStopped = errors.New("pubsub server is stopped") +) + +// SubscribeArgs are the parameters to create a new subscription. +type SubscribeArgs struct { + ClientID string // Client ID + Query *query.Query // filter query for events (required) + Limit int // subscription queue capacity limit (0 means 1) + Quota int // subscription queue soft quota (0 uses Limit) +} + +// UnsubscribeArgs are the parameters to remove a subscription. +// The subscriber ID must be populated, and at least one of the subscription ID +// or the registered query must be given. +type UnsubscribeArgs struct { + Subscriber string // subscriber ID chosen by the client (required) + ID string // subscription ID (assigned by the server) + Query *query.Query // the query registered with the subscription +} + +// Validate returns nil if args are valid to identify a subscription to remove. +// Otherwise, it reports an error. +func (args UnsubscribeArgs) Validate() error { + if args.Subscriber == "" { + return errors.New("must specify a subscriber") + } + return nil +} + +// Server allows clients to subscribe to and unsubscribe from messages, +// publishes messages with or without events, and manages internal state. +type Server struct { + service.BaseService + logger log.Logger + + queue chan item + done <-chan struct{} // closed when server should exit + pubs sync.RWMutex // excl: shutdown; shared: active publisher + exited chan struct{} // server exited + + // All subscriptions currently known. + // Lock exclusive to add, remove, or cancel subscriptions. + // Lock shared to look up or publish to subscriptions. + subs struct { + sync.RWMutex + index *subIndex + + // This function is called synchronously with each message published + // before it is delivered to any other subscriber. This allows an index + // to be persisted before any subscribers see the messages. + observe func(Message) error + } + + // TODO(creachadair): Rework the options so that this does not need to live + // as a field. It is not otherwise needed. + queueCap int +} + +// Option sets a parameter for the server. +type Option func(*Server) + +// NewServer returns a new server. See the commentary on the Option functions +// for a detailed description of how to configure buffering. If no options are +// provided, the resulting server's queue is unbuffered. +func NewServer(logger log.Logger, options ...Option) *Server { + s := &Server{logger: logger} + + s.BaseService = *service.NewBaseService(logger, "PubSub", s) + for _, opt := range options { + opt(s) + } + + // The queue receives items to be published. + s.queue = make(chan item, s.queueCap) + + // The index tracks subscriptions by ID and query terms.
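+ // Subscriptions are keyed by (client ID, query string) pairs;
+ // SubscribeWithArgs relies on this index to reject duplicate
+ // subscriptions for the same client and query.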
+ s.subs.index = newSubIndex() + + return s +} + +// BufferCapacity allows you to specify capacity for publisher's queue. This +// is the number of messages that can be published without blocking. If no +// buffer is specified, publishing is synchronous with delivery. This function +// will panic if cap < 0. +func BufferCapacity(cap int) Option { + if cap < 0 { + panic("negative buffer capacity") + } + return func(s *Server) { s.queueCap = cap } +} + +// BufferCapacity returns capacity of the publication queue. +func (s *Server) BufferCapacity() int { return cap(s.queue) } + +// Observe registers an observer function that will be called synchronously +// with each published message matching any of the given queries, prior to it +// being forwarded to any subscriber. If no queries are specified, all +// messages will be observed. An error is reported if an observer is already +// registered. +func (s *Server) Observe(ctx context.Context, observe func(Message) error, queries ...*query.Query) error { + s.subs.Lock() + defer s.subs.Unlock() + if observe == nil { + return errors.New("observe callback is nil") + } else if s.subs.observe != nil { + return errors.New("an observer is already registered") + } + + // Compile the message filter. + var matches func(Message) bool + if len(queries) == 0 { + matches = func(Message) bool { return true } + } else { + matches = func(msg Message) bool { + for _, q := range queries { + if q.Matches(msg.events) { + return true + } + } + return false + } + } + + s.subs.observe = func(msg Message) error { + if matches(msg) { + return observe(msg) + } + return nil // nothing to do for this message + } + return nil +} + +// SubscribeWithArgs creates a subscription for the given arguments. It is an +// error if the query is nil, a subscription already exists for the specified +// client ID and query, or if the capacity arguments are invalid. +func (s *Server) SubscribeWithArgs(ctx context.Context, args SubscribeArgs) (*Subscription, error) { + s.subs.Lock() + defer s.subs.Unlock() + + if s.subs.index == nil { + return nil, ErrServerStopped + } else if s.subs.index.contains(args.ClientID, args.Query.String()) { + return nil, ErrAlreadySubscribed + } + + if args.Limit == 0 { + args.Limit = 1 + } + sub, err := newSubscription(args.Quota, args.Limit) + if err != nil { + return nil, err + } + s.subs.index.add(&subInfo{ + clientID: args.ClientID, + query: args.Query, + subID: sub.id, + sub: sub, + }) + return sub, nil +} + +// Unsubscribe removes the subscription for the given client and/or query. It +// returns ErrSubscriptionNotFound if no such subscription exists. +func (s *Server) Unsubscribe(ctx context.Context, args UnsubscribeArgs) error { + if err := args.Validate(); err != nil { + return err + } + s.subs.Lock() + defer s.subs.Unlock() + if s.subs.index == nil { + return ErrServerStopped + } + + // TODO(creachadair): Do we need to support unsubscription for an "empty" + // query? I believe that case is not possible by the Query grammar, but we + // should make sure. + // + // Revisit this logic once we are able to remove indexing by query. 
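+	//
+	// Entries are selected for eviction by subscriber ID when one is given,
+	// optionally narrowed to a single query; otherwise by query string alone.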
+
+	var evict subInfoSet
+	if args.Subscriber != "" {
+		evict = s.subs.index.findClientID(args.Subscriber)
+		if args.Query != nil {
+			evict = evict.withQuery(args.Query.String())
+		}
+	} else {
+		evict = s.subs.index.findQuery(args.Query.String())
+	}
+
+	if len(evict) == 0 {
+		return ErrSubscriptionNotFound
+	}
+	s.removeSubs(evict, ErrUnsubscribed)
+	return nil
+}
+
+// UnsubscribeAll removes all subscriptions for the given client ID.
+// It returns ErrSubscriptionNotFound if no subscriptions exist for that client.
+func (s *Server) UnsubscribeAll(ctx context.Context, clientID string) error {
+	s.subs.Lock()
+	defer s.subs.Unlock()
+
+	if s.subs.index == nil {
+		return ErrServerStopped
+	}
+	evict := s.subs.index.findClientID(clientID)
+	if len(evict) == 0 {
+		return ErrSubscriptionNotFound
+	}
+	s.removeSubs(evict, ErrUnsubscribed)
+	return nil
+}
+
+// NumClients returns the number of clients.
+func (s *Server) NumClients() int {
+	s.subs.RLock()
+	defer s.subs.RUnlock()
+	return len(s.subs.index.byClient)
+}
+
+// NumClientSubscriptions returns the number of subscriptions the client has.
+func (s *Server) NumClientSubscriptions(clientID string) int {
+	s.subs.RLock()
+	defer s.subs.RUnlock()
+	return len(s.subs.index.findClientID(clientID))
+}
+
+// Publish publishes the given message. An error will be returned to the caller
+// if the pubsub server has shut down.
+func (s *Server) Publish(msg types.EventData) error {
+	return s.publish(msg, []abci.Event{})
+}
+
+// PublishWithEvents publishes the given message with the set of events. The
+// set is matched against the clients' queries. If there is a match, the
+// message is sent to the client.
+func (s *Server) PublishWithEvents(msg types.EventData, events []abci.Event) error {
+	return s.publish(msg, events)
+}
+
+// OnStop implements part of the Service interface. It is a no-op.
+func (s *Server) OnStop() {}
+
+// Wait implements Service.Wait by blocking until the server has exited, then
+// yielding to the base service wait.
+func (s *Server) Wait() { <-s.exited; s.BaseService.Wait() }
+
+// OnStart implements Service.OnStart by starting the server.
+func (s *Server) OnStart(ctx context.Context) error { s.run(ctx); return nil }
+
+func (s *Server) publish(data types.EventData, events []abci.Event) error {
+	s.pubs.RLock()
+	defer s.pubs.RUnlock()
+
+	select {
+	case <-s.done:
+		return ErrServerStopped
+	case s.queue <- item{
+		Data:   data,
+		Events: events,
+	}:
+		return nil
+	}
+}
+
+func (s *Server) run(ctx context.Context) {
+	// The server runs until ctx is canceled.
+	s.done = ctx.Done()
+	queue := s.queue
+
+	// Shutdown monitor: When the context ends, wait for any active publish
+	// calls to exit, then close the queue to signal the sender to exit.
+	go func() {
+		<-ctx.Done()
+		s.pubs.Lock()
+		defer s.pubs.Unlock()
+		close(s.queue)
+		s.queue = nil
+	}()
+
+	s.exited = make(chan struct{})
+	go func() {
+		defer close(s.exited)
+
+		// Sender: Service the queue and forward messages to subscribers.
+		for it := range queue {
+			if err := s.send(it.Data, it.Events); err != nil {
+				s.logger.Error("error sending event", "err", err)
+			}
+		}
+		// Terminate all subscribers before exit.
+		s.subs.Lock()
+		defer s.subs.Unlock()
+		for si := range s.subs.index.all {
+			si.sub.stop(ErrTerminated)
+		}
+		s.subs.index = nil
+	}()
+}
+
+// removeSubs cancels and removes all the subscriptions in evict with the given
+// error. The caller must hold the s.subs lock.
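+// Stopping a subscription closes its queue, so any goroutine blocked in Next
+// on that subscription observes the given reason as its error.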
+func (s *Server) removeSubs(evict subInfoSet, reason error) {
+	for si := range evict {
+		si.sub.stop(reason)
+	}
+	s.subs.index.removeAll(evict)
+}
+
+// send delivers the given message to all matching subscribers. An error in
+// query matching stops transmission and is returned.
+func (s *Server) send(data types.EventData, events []abci.Event) error {
+	// At exit, evict any subscriptions that were too slow.
+	evict := make(subInfoSet)
+	defer func() {
+		if len(evict) != 0 {
+			s.subs.Lock()
+			defer s.subs.Unlock()
+			s.removeSubs(evict, ErrTerminated)
+		}
+	}()
+
+	// N.B. Order is important here. We must acquire and defer the lock release
+	// AFTER deferring the eviction cleanup: The cleanup must happen after the
+	// reader lock has been released, or it will deadlock.
+	s.subs.RLock()
+	defer s.subs.RUnlock()
+
+	// If an observer is defined, give it control of the message before
+	// attempting to deliver it to any matching subscribers. If the observer
+	// fails, the message will not be forwarded.
+	if s.subs.observe != nil {
+		err := s.subs.observe(Message{
+			data:   data,
+			events: events,
+		})
+		if err != nil {
+			return fmt.Errorf("observer failed on message: %w", err)
+		}
+	}
+
+	for si := range s.subs.index.all {
+		if !si.query.Matches(events) {
+			continue
+		}
+
+		// Publish the events to the subscriber's queue. If this fails, e.g.,
+		// because the queue is over capacity or out of quota, evict the
+		// subscription from the index.
+		if err := si.sub.publish(Message{
+			subID:  si.sub.id,
+			data:   data,
+			events: events,
+		}); err != nil {
+			evict.add(si)
+		}
+	}
+
+	return nil
+}
diff --git a/internal/pubsub/pubsub_test.go b/internal/pubsub/pubsub_test.go
new file mode 100644
index 0000000000..e366977b54
--- /dev/null
+++ b/internal/pubsub/pubsub_test.go
@@ -0,0 +1,482 @@
+package pubsub_test
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+
+	abci "github.com/tendermint/tendermint/abci/types"
+	"github.com/tendermint/tendermint/internal/pubsub"
+	"github.com/tendermint/tendermint/internal/pubsub/query"
+	"github.com/tendermint/tendermint/libs/log"
+	"github.com/tendermint/tendermint/types"
+)
+
+const (
+	clientID = "test-client"
+)
+
+// pubstring is a trivial implementation of the EventData interface for
+// string-valued test data.
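+// Any type with a TypeTag method can stand in for types.EventData here, so a
+// bare string wrapper is sufficient for these tests.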
+type pubstring string + +func (pubstring) TypeTag() string { return "pubstring" } + +func TestSubscribeWithArgs(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + s := newTestServer(ctx, t, logger) + + t.Run("DefaultLimit", func(t *testing.T) { + sub := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: clientID, + Query: query.All, + })) + + require.Equal(t, 1, s.NumClients()) + require.Equal(t, 1, s.NumClientSubscriptions(clientID)) + + require.NoError(t, s.Publish(pubstring("Ka-Zar"))) + sub.mustReceive(ctx, pubstring("Ka-Zar")) + }) + t.Run("PositiveLimit", func(t *testing.T) { + sub := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: clientID + "-2", + Query: query.All, + Limit: 10, + })) + require.NoError(t, s.Publish(pubstring("Aggamon"))) + sub.mustReceive(ctx, pubstring("Aggamon")) + }) +} + +func TestObserver(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + logger := log.NewNopLogger() + + s := newTestServer(ctx, t, logger) + + done := make(chan struct{}) + var got interface{} + require.NoError(t, s.Observe(ctx, func(msg pubsub.Message) error { + defer close(done) + got = msg.Data() + return nil + })) + + const input = pubstring("Lions and tigers and bears, oh my!") + require.NoError(t, s.Publish(input)) + <-done + require.Equal(t, got, input) +} + +func TestObserverErrors(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + + s := newTestServer(ctx, t, logger) + + require.Error(t, s.Observe(ctx, nil, query.All)) + require.NoError(t, s.Observe(ctx, func(pubsub.Message) error { return nil })) + require.Error(t, s.Observe(ctx, func(pubsub.Message) error { return nil }, query.All)) +} + +func TestPublishDoesNotBlock(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + + s := newTestServer(ctx, t, logger) + + sub := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: clientID, + Query: query.All, + })) + published := make(chan struct{}) + go func() { + defer close(published) + + require.NoError(t, s.Publish(pubstring("Quicksilver"))) + require.NoError(t, s.Publish(pubstring("Asylum"))) + require.NoError(t, s.Publish(pubstring("Ivan"))) + }() + + select { + case <-published: + sub.mustReceive(ctx, pubstring("Quicksilver")) + sub.mustFail(ctx, pubsub.ErrTerminated) + case <-time.After(3 * time.Second): + t.Fatal("Publishing should not have blocked") + } +} + +func TestSubscribeErrors(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + s := newTestServer(ctx, t, logger) + + t.Run("NegativeLimitErr", func(t *testing.T) { + _, err := s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: clientID, + Query: query.All, + Limit: -5, + }) + require.Error(t, err) + }) +} + +func TestSlowSubscriber(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + s := newTestServer(ctx, t, logger) + + sub := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: clientID, + Query: query.All, + })) + + require.NoError(t, s.Publish(pubstring("Fat Cobra"))) + require.NoError(t, s.Publish(pubstring("Viper"))) + require.NoError(t, s.Publish(pubstring("Black Panther"))) + + // We had capacity for one 
item, so we should get that item, but after that + // the subscription should have been terminated by the publisher. + sub.mustReceive(ctx, pubstring("Fat Cobra")) + sub.mustFail(ctx, pubsub.ErrTerminated) +} + +func TestDifferentClients(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + s := newTestServer(ctx, t, logger) + + sub1 := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: "client-1", + Query: query.MustCompile(`tm.events.type='NewBlock'`), + })) + + events := []abci.Event{{ + Type: "tm.events", + Attributes: []abci.EventAttribute{{Key: "type", Value: "NewBlock"}}, + }} + + require.NoError(t, s.PublishWithEvents(pubstring("Iceman"), events)) + sub1.mustReceive(ctx, pubstring("Iceman")) + + sub2 := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: "client-2", + Query: query.MustCompile(`tm.events.type='NewBlock' AND abci.account.name='Igor'`), + })) + + events = []abci.Event{ + { + Type: "tm.events", + Attributes: []abci.EventAttribute{{Key: "type", Value: "NewBlock"}}, + }, + { + Type: "abci.account", + Attributes: []abci.EventAttribute{{Key: "name", Value: "Igor"}}, + }, + } + + require.NoError(t, s.PublishWithEvents(pubstring("Ultimo"), events)) + sub1.mustReceive(ctx, pubstring("Ultimo")) + sub2.mustReceive(ctx, pubstring("Ultimo")) + + sub3 := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: "client-3", + Query: query.MustCompile( + `tm.events.type='NewRoundStep' AND abci.account.name='Igor' AND abci.invoice.number = 10`), + })) + + events = []abci.Event{{ + Type: "tm.events", + Attributes: []abci.EventAttribute{{Key: "type", Value: "NewRoundStep"}}, + }} + + require.NoError(t, s.PublishWithEvents(pubstring("Valeria Richards"), events)) + sub3.mustTimeOut(ctx, 100*time.Millisecond) +} + +func TestSubscribeDuplicateKeys(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + s := newTestServer(ctx, t, logger) + + testCases := []struct { + query string + expected types.EventData + }{ + {`withdraw.rewards='17'`, pubstring("Iceman")}, + {`withdraw.rewards='22'`, pubstring("Iceman")}, + {`withdraw.rewards='1' AND withdraw.rewards='22'`, pubstring("Iceman")}, + {`withdraw.rewards='100'`, nil}, + } + + for i, tc := range testCases { + id := fmt.Sprintf("client-%d", i) + q := query.MustCompile(tc.query) + t.Run(id, func(t *testing.T) { + sub := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: id, + Query: q, + })) + + events := []abci.Event{ + { + Type: "transfer", + Attributes: []abci.EventAttribute{ + {Key: "sender", Value: "foo"}, + {Key: "sender", Value: "bar"}, + {Key: "sender", Value: "baz"}, + }, + }, + { + Type: "withdraw", + Attributes: []abci.EventAttribute{ + {Key: "rewards", Value: "1"}, + {Key: "rewards", Value: "17"}, + {Key: "rewards", Value: "22"}, + }, + }, + } + + require.NoError(t, s.PublishWithEvents(pubstring("Iceman"), events)) + + if tc.expected != nil { + sub.mustReceive(ctx, tc.expected) + } else { + sub.mustTimeOut(ctx, 100*time.Millisecond) + } + }) + } +} + +func TestClientSubscribesTwice(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + s := newTestServer(ctx, t, logger) + + q := query.MustCompile(`tm.events.type='NewBlock'`) + events := []abci.Event{{ + Type: "tm.events", + Attributes: []abci.EventAttribute{{Key: "type", 
Value: "NewBlock"}}, + }} + + sub1 := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: clientID, + Query: q, + })) + + require.NoError(t, s.PublishWithEvents(pubstring("Goblin Queen"), events)) + sub1.mustReceive(ctx, pubstring("Goblin Queen")) + + // Subscribing a second time with the same client ID and query fails. + { + sub2, err := s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: clientID, + Query: q, + }) + require.Error(t, err) + require.Nil(t, sub2) + } + + // The attempt to re-subscribe does not disrupt the existing sub. + require.NoError(t, s.PublishWithEvents(pubstring("Spider-Man"), events)) + sub1.mustReceive(ctx, pubstring("Spider-Man")) +} + +func TestUnsubscribe(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + s := newTestServer(ctx, t, logger) + + sub := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: clientID, + Query: query.MustCompile(`tm.events.type='NewBlock'`), + })) + + // Removing the subscription we just made should succeed. + require.NoError(t, s.Unsubscribe(ctx, pubsub.UnsubscribeArgs{ + Subscriber: clientID, + Query: query.MustCompile(`tm.events.type='NewBlock'`), + })) + + // Publishing should still work. + require.NoError(t, s.Publish(pubstring("Nick Fury"))) + + // The unsubscribed subscriber should report as such. + sub.mustFail(ctx, pubsub.ErrUnsubscribed) +} + +func TestClientUnsubscribesTwice(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + s := newTestServer(ctx, t, logger) + + newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: clientID, + Query: query.MustCompile(`tm.events.type='NewBlock'`), + })) + require.NoError(t, s.Unsubscribe(ctx, pubsub.UnsubscribeArgs{ + Subscriber: clientID, + Query: query.MustCompile(`tm.events.type='NewBlock'`), + })) + require.ErrorIs(t, s.Unsubscribe(ctx, pubsub.UnsubscribeArgs{ + Subscriber: clientID, + Query: query.MustCompile(`tm.events.type='NewBlock'`), + }), pubsub.ErrSubscriptionNotFound) + require.ErrorIs(t, s.UnsubscribeAll(ctx, clientID), pubsub.ErrSubscriptionNotFound) +} + +func TestResubscribe(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + s := newTestServer(ctx, t, logger) + + args := pubsub.SubscribeArgs{ + ClientID: clientID, + Query: query.All, + } + newTestSub(t).must(s.SubscribeWithArgs(ctx, args)) + + require.NoError(t, s.Unsubscribe(ctx, pubsub.UnsubscribeArgs{ + Subscriber: clientID, + Query: query.All, + })) + + sub := newTestSub(t).must(s.SubscribeWithArgs(ctx, args)) + + require.NoError(t, s.Publish(pubstring("Cable"))) + sub.mustReceive(ctx, pubstring("Cable")) +} + +func TestUnsubscribeAll(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + s := newTestServer(ctx, t, logger) + + sub1 := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: clientID, + Query: query.MustCompile(`tm.events.type='NewBlock'`), + })) + sub2 := newTestSub(t).must(s.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: clientID, + Query: query.MustCompile(`tm.events.type='NewBlockHeader'`), + })) + + require.NoError(t, s.UnsubscribeAll(ctx, clientID)) + require.NoError(t, s.Publish(pubstring("Nick Fury"))) + + sub1.mustFail(ctx, pubsub.ErrUnsubscribed) + sub2.mustFail(ctx, pubsub.ErrUnsubscribed) + 
+} + +func TestBufferCapacity(t *testing.T) { + logger := log.NewNopLogger() + s := pubsub.NewServer(logger, pubsub.BufferCapacity(2)) + + require.Equal(t, 2, s.BufferCapacity()) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + require.NoError(t, s.Publish(pubstring("Nighthawk"))) + require.NoError(t, s.Publish(pubstring("Sage"))) + + ctx, cancel = context.WithTimeout(ctx, 100*time.Millisecond) + defer cancel() + + sig := make(chan struct{}) + + go func() { defer close(sig); _ = s.Publish(pubstring("Ironclad")) }() + + select { + case <-sig: + t.Fatal("should not fire") + + case <-ctx.Done(): + return + } + +} + +func newTestServer(ctx context.Context, t testing.TB, logger log.Logger) *pubsub.Server { + t.Helper() + + s := pubsub.NewServer(logger) + + require.NoError(t, s.Start(ctx)) + t.Cleanup(s.Wait) + return s +} + +type testSub struct { + t testing.TB + *pubsub.Subscription +} + +func newTestSub(t testing.TB) *testSub { return &testSub{t: t} } + +func (s *testSub) must(sub *pubsub.Subscription, err error) *testSub { + s.t.Helper() + require.NoError(s.t, err) + require.NotNil(s.t, sub) + s.Subscription = sub + return s +} + +func (s *testSub) mustReceive(ctx context.Context, want types.EventData) { + s.t.Helper() + got, err := s.Next(ctx) + require.NoError(s.t, err) + require.Equal(s.t, want, got.Data()) +} + +func (s *testSub) mustTimeOut(ctx context.Context, dur time.Duration) { + s.t.Helper() + tctx, cancel := context.WithTimeout(ctx, dur) + defer cancel() + got, err := s.Next(tctx) + if !errors.Is(err, context.DeadlineExceeded) { + s.t.Errorf("Next: got (%+v, %v), want %v", got, err, context.DeadlineExceeded) + } +} + +func (s *testSub) mustFail(ctx context.Context, want error) { + s.t.Helper() + got, err := s.Next(ctx) + if err == nil && want != nil { + s.t.Fatalf("Next: got (%+v, %v), want error %v", got, err, want) + } + require.ErrorIs(s.t, err, want) +} diff --git a/libs/pubsub/query/bench_test.go b/internal/pubsub/query/bench_test.go similarity index 85% rename from libs/pubsub/query/bench_test.go rename to internal/pubsub/query/bench_test.go index 894c16628a..0916e9c8af 100644 --- a/libs/pubsub/query/bench_test.go +++ b/internal/pubsub/query/bench_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/internal/pubsub/query" ) const testQuery = `tm.events.type='NewBlock' AND abci.account.name='Igor'` @@ -48,10 +48,7 @@ func BenchmarkMatchCustom(b *testing.B) { } b.ResetTimer() for i := 0; i < b.N; i++ { - ok, err := q.Matches(testEvents) - if err != nil { - b.Fatal(err) - } else if !ok { + if !q.Matches(testEvents) { b.Error("no match") } } diff --git a/libs/pubsub/query/query.go b/internal/pubsub/query/query.go similarity index 94% rename from libs/pubsub/query/query.go rename to internal/pubsub/query/query.go index e874f037cb..23510a75d2 100644 --- a/libs/pubsub/query/query.go +++ b/internal/pubsub/query/query.go @@ -20,7 +20,7 @@ import ( "time" "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/libs/pubsub/query/syntax" + "github.com/tendermint/tendermint/internal/pubsub/query/syntax" ) // All is a query that matches all events. @@ -67,13 +67,18 @@ func Compile(ast syntax.Query) (*Query, error) { return &Query{ast: ast, conds: conds}, nil } -// Matches satisfies part of the pubsub.Query interface. This implementation -// never reports an error. A nil *Query matches all events. 
-func (q *Query) Matches(events []types.Event) (bool, error) {
+// Matches reports whether q matches the given events. A nil *Query matches
+// all events; otherwise, the events must be non-empty and satisfy all of the
+// query's conditions.
+func (q *Query) Matches(events []types.Event) bool {
 	if q == nil {
-		return true, nil
+		return true
 	}
-	return q.matchesEvents(events), nil
+	for _, cond := range q.conds {
+		if !cond.matchesAny(events) {
+			return false
+		}
+	}
+	return len(events) != 0
 }
 
 // String matches part of the pubsub.Query interface.
@@ -92,16 +97,6 @@ func (q *Query) Syntax() syntax.Query {
 	return q.ast
 }
 
-// matchesEvents reports whether all the conditions match the given events.
-func (q *Query) matchesEvents(events []types.Event) bool {
-	for _, cond := range q.conds {
-		if !cond.matchesAny(events) {
-			return false
-		}
-	}
-	return len(events) != 0
-}
-
 // A condition is a compiled match condition. A condition matches an event if
 // the event has the designated type, contains an attribute with the given
 // name, and the match function returns true for the attribute value.
diff --git a/libs/pubsub/query/query_test.go b/internal/pubsub/query/query_test.go
similarity index 94%
rename from libs/pubsub/query/query_test.go
rename to internal/pubsub/query/query_test.go
index b0d1fb7fec..fc5fd82f00 100644
--- a/libs/pubsub/query/query_test.go
+++ b/internal/pubsub/query/query_test.go
@@ -7,13 +7,10 @@ import (
 	"time"
 
 	"github.com/tendermint/tendermint/abci/types"
-	"github.com/tendermint/tendermint/libs/pubsub"
-	"github.com/tendermint/tendermint/libs/pubsub/query"
-	"github.com/tendermint/tendermint/libs/pubsub/query/syntax"
+	"github.com/tendermint/tendermint/internal/pubsub/query"
+	"github.com/tendermint/tendermint/internal/pubsub/query/syntax"
 )
 
-var _ pubsub.Query = (*query.Query)(nil)
-
 // Example events from the OpenAPI documentation:
 // https://github.com/tendermint/tendermint/blob/master/rpc/openapi/openapi.yaml
 //
@@ -210,11 +207,7 @@ func TestCompiledMatches(t *testing.T) {
 			t.Fatalf("NewCompiled %#q: unexpected error: %v", tc.s, err)
 		}
 
-		got, err := c.Matches(tc.events)
-		if err != nil {
-			t.Errorf("Query: %#q\nInput: %+v\nMatches: got error %v",
-				tc.s, tc.events, err)
-		}
+		got := c.Matches(tc.events)
 		if got != tc.matches {
 			t.Errorf("Query: %#q\nInput: %+v\nMatches: got %v, want %v",
 				tc.s, tc.events, got, tc.matches)
@@ -231,10 +224,7 @@ func TestAllMatchesAll(t *testing.T) {
 		`Rilly|Blue=`,
 	)
 	for i := 0; i < len(events); i++ {
-		match, err := query.All.Matches(events[:i])
-		if err != nil {
-			t.Errorf("Matches failed: %v", err)
-		} else if !match {
+		if !query.All.Matches(events[:i]) {
 			t.Errorf("Did not match on %+v ", events[:i])
 		}
 	}
diff --git a/libs/pubsub/query/syntax/doc.go b/internal/pubsub/query/syntax/doc.go
similarity index 100%
rename from libs/pubsub/query/syntax/doc.go
rename to internal/pubsub/query/syntax/doc.go
diff --git a/libs/pubsub/query/syntax/parser.go b/internal/pubsub/query/syntax/parser.go
similarity index 100%
rename from libs/pubsub/query/syntax/parser.go
rename to internal/pubsub/query/syntax/parser.go
diff --git a/libs/pubsub/query/syntax/scanner.go b/internal/pubsub/query/syntax/scanner.go
similarity index 100%
rename from libs/pubsub/query/syntax/scanner.go
rename to internal/pubsub/query/syntax/scanner.go
diff --git a/libs/pubsub/query/syntax/syntax_test.go b/internal/pubsub/query/syntax/syntax_test.go
similarity index 98%
rename from libs/pubsub/query/syntax/syntax_test.go
rename to internal/pubsub/query/syntax/syntax_test.go
index ac95fd8b17..ac0473beb1 100644
--- a/libs/pubsub/query/syntax/syntax_test.go
+++ b/internal/pubsub/query/syntax/syntax_test.go
@@ -6,7 +6,7 @@ import (
 	"strings"
 	"testing"
 
-	"github.com/tendermint/tendermint/libs/pubsub/query/syntax"
+	"github.com/tendermint/tendermint/internal/pubsub/query/syntax"
 )
 
 func TestScanner(t *testing.T) {
diff --git a/internal/pubsub/subindex.go b/internal/pubsub/subindex.go
new file mode 100644
index 0000000000..eadb193af2
--- /dev/null
+++ b/internal/pubsub/subindex.go
@@ -0,0 +1,117 @@
+package pubsub
+
+import (
+	abci "github.com/tendermint/tendermint/abci/types"
+	"github.com/tendermint/tendermint/internal/pubsub/query"
+	"github.com/tendermint/tendermint/types"
+)
+
+// An item to be published to subscribers.
+type item struct {
+	Data   types.EventData
+	Events []abci.Event
+}
+
+// A subInfo value records a single subscription.
+type subInfo struct {
+	clientID string        // chosen by the client
+	query    *query.Query  // chosen by the client
+	subID    string        // assigned at registration
+	sub      *Subscription // receives published events
+}
+
+// A subInfoSet is an unordered set of subscription info records.
+type subInfoSet map[*subInfo]struct{}
+
+func (s subInfoSet) contains(si *subInfo) bool { _, ok := s[si]; return ok }
+func (s subInfoSet) add(si *subInfo)           { s[si] = struct{}{} }
+func (s subInfoSet) remove(si *subInfo)        { delete(s, si) }
+
+// withQuery returns the subset of s whose query string matches qs.
+func (s subInfoSet) withQuery(qs string) subInfoSet {
+	out := make(subInfoSet)
+	for si := range s {
+		if si.query.String() == qs {
+			out.add(si)
+		}
+	}
+	return out
+}
+
+// A subIndex is an indexed collection of subscription info records.
+// The index is not safe for concurrent use without external synchronization.
+type subIndex struct {
+	all      subInfoSet            // all subscriptions
+	byClient map[string]subInfoSet // per-client subscriptions
+	byQuery  map[string]subInfoSet // per-query subscriptions
+
+	// TODO(creachadair): We allow indexing by query to support existing use by
+	// the RPC service methods for event streaming. Fix up those methods not to
+	// require this, and then remove indexing by query.
+}
+
+// newSubIndex constructs a new, empty subscription index.
+func newSubIndex() *subIndex {
+	return &subIndex{
+		all:      make(subInfoSet),
+		byClient: make(map[string]subInfoSet),
+		byQuery:  make(map[string]subInfoSet),
+	}
+}
+
+// findClientID returns the set of subscriptions for the given client ID, or nil.
+func (idx *subIndex) findClientID(id string) subInfoSet { return idx.byClient[id] }
+
+// findQuery returns the set of subscriptions on the given query string, or nil.
+func (idx *subIndex) findQuery(qs string) subInfoSet { return idx.byQuery[qs] }
+
+// contains reports whether idx contains any subscription matching the given
+// client ID and query pair.
+func (idx *subIndex) contains(clientID, query string) bool {
+	csubs, qsubs := idx.byClient[clientID], idx.byQuery[query]
+	if len(csubs) == 0 || len(qsubs) == 0 {
+		return false
+	}
+	for si := range csubs {
+		if qsubs.contains(si) {
+			return true
+		}
+	}
+	return false
+}
+
+// add adds si to the index, replacing any previous entry with the same terms.
+// It is the caller's responsibility to check for duplicates before adding.
+// See also the contains method.
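+//
+// For example, given two records si1 and si2 for client "c" with distinct
+// queries q1 and q2:
+//
+//	idx.add(si1)
+//	idx.add(si2)
+//
+// byClient["c"] then holds both records, while byQuery[q1.String()] and
+// byQuery[q2.String()] each hold one.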
+func (idx *subIndex) add(si *subInfo) {
+	idx.all.add(si)
+	if m := idx.byClient[si.clientID]; m == nil {
+		idx.byClient[si.clientID] = subInfoSet{si: struct{}{}}
+	} else {
+		m.add(si)
+	}
+	qs := si.query.String()
+	if m := idx.byQuery[qs]; m == nil {
+		idx.byQuery[qs] = subInfoSet{si: struct{}{}}
+	} else {
+		m.add(si)
+	}
+}
+
+// removeAll removes all the elements of s from the index.
+func (idx *subIndex) removeAll(s subInfoSet) {
+	for si := range s {
+		idx.all.remove(si)
+		idx.byClient[si.clientID].remove(si)
+		if len(idx.byClient[si.clientID]) == 0 {
+			delete(idx.byClient, si.clientID)
+		}
+		if si.query != nil {
+			qs := si.query.String()
+			idx.byQuery[qs].remove(si)
+			if len(idx.byQuery[qs]) == 0 {
+				delete(idx.byQuery, qs)
+			}
+		}
+	}
+}
diff --git a/internal/pubsub/subscription.go b/internal/pubsub/subscription.go
new file mode 100644
index 0000000000..fd1a239296
--- /dev/null
+++ b/internal/pubsub/subscription.go
@@ -0,0 +1,90 @@
+package pubsub
+
+import (
+	"context"
+	"errors"
+
+	"github.com/google/uuid"
+
+	abci "github.com/tendermint/tendermint/abci/types"
+	"github.com/tendermint/tendermint/internal/libs/queue"
+	"github.com/tendermint/tendermint/types"
+)
+
+var (
+	// ErrUnsubscribed is returned by Next when the client has unsubscribed.
+	ErrUnsubscribed = errors.New("subscription removed by client")
+
+	// ErrTerminated is returned by Next when the subscription was terminated by
+	// the publisher.
+	ErrTerminated = errors.New("subscription terminated by publisher")
+)
+
+// A Subscription represents a client subscription for a particular query.
+type Subscription struct {
+	id      string
+	queue   *queue.Queue // open until the subscription ends
+	stopErr error        // after queue is closed, the reason why
+}
+
+// newSubscription returns a new subscription with the given queue capacity.
+func newSubscription(quota, limit int) (*Subscription, error) {
+	queue, err := queue.New(queue.Options{
+		SoftQuota: quota,
+		HardLimit: limit,
+	})
+	if err != nil {
+		return nil, err
+	}
+	return &Subscription{
+		id:    uuid.NewString(),
+		queue: queue,
+	}, nil
+}
+
+// Next blocks until a message is available, ctx ends, or the subscription
+// ends. Next returns ErrUnsubscribed if s was unsubscribed, ErrTerminated if
+// s was terminated by the publisher, or a context error if ctx ended without a
+// message being available.
+func (s *Subscription) Next(ctx context.Context) (Message, error) {
+	next, err := s.queue.Wait(ctx)
+	if errors.Is(err, queue.ErrQueueClosed) {
+		return Message{}, s.stopErr
+	} else if err != nil {
+		return Message{}, err
+	}
+	return next.(Message), nil
+}
+
+// ID returns the unique subscription identifier for s.
+func (s *Subscription) ID() string { return s.id }
+
+// publish transmits msg to the subscriber. It reports a queue error if the
+// queue cannot accept any further messages.
+func (s *Subscription) publish(msg Message) error { return s.queue.Add(msg) }
+
+// stop terminates the subscription with the given error reason.
+func (s *Subscription) stop(err error) {
+	if err == nil {
+		panic("nil stop error")
+	}
+	s.stopErr = err
+	s.queue.Close()
+}
+
+// Message glues data and events together.
+type Message struct {
+	subID  string
+	data   types.EventData
+	events []abci.Event
+}
+
+// SubscriptionID returns the unique identifier for the subscription
+// that produced this message.
+func (msg Message) SubscriptionID() string { return msg.subID }
+
+// Data returns the data that was originally published.
+func (msg Message) Data() types.EventData { return msg.data }
+
+// Events returns the events that matched the client's query.
+func (msg Message) Events() []abci.Event { return msg.events }
diff --git a/internal/rpc/core/abci.go b/internal/rpc/core/abci.go
index 06c0330509..fa45c6b456 100644
--- a/internal/rpc/core/abci.go
+++ b/internal/rpc/core/abci.go
@@ -1,27 +1,21 @@
 package core
 
 import (
+	"context"
+
 	abci "github.com/tendermint/tendermint/abci/types"
 	"github.com/tendermint/tendermint/internal/proxy"
-	"github.com/tendermint/tendermint/libs/bytes"
 	"github.com/tendermint/tendermint/rpc/coretypes"
-	rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
 )
 
 // ABCIQuery queries the application for some information.
 // More: https://docs.tendermint.com/master/rpc/#/ABCI/abci_query
-func (env *Environment) ABCIQuery(
-	ctx *rpctypes.Context,
-	path string,
-	data bytes.HexBytes,
-	height int64,
-	prove bool,
-) (*coretypes.ResultABCIQuery, error) {
-	resQuery, err := env.ProxyAppQuery.QuerySync(ctx.Context(), abci.RequestQuery{
-		Path:   path,
-		Data:   data,
-		Height: height,
-		Prove:  prove,
+func (env *Environment) ABCIQuery(ctx context.Context, req *coretypes.RequestABCIQuery) (*coretypes.ResultABCIQuery, error) {
+	resQuery, err := env.ProxyApp.Query(ctx, &abci.RequestQuery{
+		Path:   req.Path,
+		Data:   req.Data,
+		Height: int64(req.Height),
+		Prove:  req.Prove,
 	})
 	if err != nil {
 		return nil, err
@@ -32,8 +26,8 @@ func (env *Environment) ABCIQuery(
 
 // ABCIInfo gets some info about the application.
 // More: https://docs.tendermint.com/master/rpc/#/ABCI/abci_info
-func (env *Environment) ABCIInfo(ctx *rpctypes.Context) (*coretypes.ResultABCIInfo, error) {
-	resInfo, err := env.ProxyAppQuery.InfoSync(ctx.Context(), proxy.RequestInfo)
+func (env *Environment) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) {
+	resInfo, err := env.ProxyApp.Info(ctx, &proxy.RequestInfo)
 	if err != nil {
 		return nil, err
 	}
diff --git a/internal/rpc/core/blocks.go b/internal/rpc/core/blocks.go
index a32248440f..1b3c212167 100644
--- a/internal/rpc/core/blocks.go
+++ b/internal/rpc/core/blocks.go
@@ -1,16 +1,15 @@
 package core
 
 import (
+	"context"
 	"fmt"
 	"sort"
 
+	tmquery "github.com/tendermint/tendermint/internal/pubsub/query"
 	"github.com/tendermint/tendermint/internal/state/indexer"
-	"github.com/tendermint/tendermint/libs/bytes"
 	tmmath "github.com/tendermint/tendermint/libs/math"
-	tmquery "github.com/tendermint/tendermint/libs/pubsub/query"
 	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
 	"github.com/tendermint/tendermint/rpc/coretypes"
-	rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
 	"github.com/tendermint/tendermint/types"
 )
 
@@ -24,19 +23,15 @@ import (
 // order (highest first).
// // More: https://docs.tendermint.com/master/rpc/#/Info/blockchain -func (env *Environment) BlockchainInfo( - ctx *rpctypes.Context, - minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { - - const limit int64 = 20 - - var err error - minHeight, maxHeight, err = filterMinMax( +func (env *Environment) BlockchainInfo(ctx context.Context, req *coretypes.RequestBlockchainInfo) (*coretypes.ResultBlockchainInfo, error) { + const limit = 20 + minHeight, maxHeight, err := filterMinMax( env.BlockStore.Base(), env.BlockStore.Height(), - minHeight, - maxHeight, - limit) + int64(req.MinHeight), + int64(req.MaxHeight), + limit, + ) if err != nil { return nil, err } @@ -93,8 +88,8 @@ func filterMinMax(base, height, min, max, limit int64) (int64, int64, error) { // Block gets block at a given height. // If no height is provided, it will fetch the latest block. // More: https://docs.tendermint.com/master/rpc/#/Info/block -func (env *Environment) Block(ctx *rpctypes.Context, heightPtr *int64) (*coretypes.ResultBlock, error) { - height, err := env.getHeight(env.BlockStore.Height(), heightPtr) +func (env *Environment) Block(ctx context.Context, req *coretypes.RequestBlockInfo) (*coretypes.ResultBlock, error) { + height, err := env.getHeight(env.BlockStore.Height(), (*int64)(req.Height)) if err != nil { return nil, err } @@ -110,12 +105,8 @@ func (env *Environment) Block(ctx *rpctypes.Context, heightPtr *int64) (*coretyp // BlockByHash gets block by hash. // More: https://docs.tendermint.com/master/rpc/#/Info/block_by_hash -func (env *Environment) BlockByHash(ctx *rpctypes.Context, hash bytes.HexBytes) (*coretypes.ResultBlock, error) { - // N.B. The hash parameter is HexBytes so that the reflective parameter - // decoding logic in the HTTP service will correctly translate from JSON. - // See https://github.com/tendermint/tendermint/issues/6802 for context. - - block := env.BlockStore.LoadBlockByHash(hash) +func (env *Environment) BlockByHash(ctx context.Context, req *coretypes.RequestBlockByHash) (*coretypes.ResultBlock, error) { + block := env.BlockStore.LoadBlockByHash(req.Hash) if block == nil { return &coretypes.ResultBlock{BlockID: types.BlockID{}, Block: nil}, nil } @@ -127,8 +118,8 @@ func (env *Environment) BlockByHash(ctx *rpctypes.Context, hash bytes.HexBytes) // Header gets block header at a given height. // If no height is provided, it will fetch the latest header. // More: https://docs.tendermint.com/master/rpc/#/Info/header -func (env *Environment) Header(ctx *rpctypes.Context, heightPtr *int64) (*coretypes.ResultHeader, error) { - height, err := env.getHeight(env.BlockStore.Height(), heightPtr) +func (env *Environment) Header(ctx context.Context, req *coretypes.RequestBlockInfo) (*coretypes.ResultHeader, error) { + height, err := env.getHeight(env.BlockStore.Height(), (*int64)(req.Height)) if err != nil { return nil, err } @@ -143,12 +134,8 @@ func (env *Environment) Header(ctx *rpctypes.Context, heightPtr *int64) (*corety // HeaderByHash gets header by hash. // More: https://docs.tendermint.com/master/rpc/#/Info/header_by_hash -func (env *Environment) HeaderByHash(ctx *rpctypes.Context, hash bytes.HexBytes) (*coretypes.ResultHeader, error) { - // N.B. The hash parameter is HexBytes so that the reflective parameter - // decoding logic in the HTTP service will correctly translate from JSON. - // See https://github.com/tendermint/tendermint/issues/6802 for context. 
- - blockMeta := env.BlockStore.LoadBlockMetaByHash(hash) +func (env *Environment) HeaderByHash(ctx context.Context, req *coretypes.RequestBlockByHash) (*coretypes.ResultHeader, error) { + blockMeta := env.BlockStore.LoadBlockMetaByHash(req.Hash) if blockMeta == nil { return &coretypes.ResultHeader{}, nil } @@ -159,8 +146,8 @@ func (env *Environment) HeaderByHash(ctx *rpctypes.Context, hash bytes.HexBytes) // Commit gets block commit at a given height. // If no height is provided, it will fetch the commit for the latest block. // More: https://docs.tendermint.com/master/rpc/#/Info/commit -func (env *Environment) Commit(ctx *rpctypes.Context, heightPtr *int64) (*coretypes.ResultCommit, error) { - height, err := env.getHeight(env.BlockStore.Height(), heightPtr) +func (env *Environment) Commit(ctx context.Context, req *coretypes.RequestBlockInfo) (*coretypes.ResultCommit, error) { + height, err := env.getHeight(env.BlockStore.Height(), (*int64)(req.Height)) if err != nil { return nil, err } @@ -194,11 +181,9 @@ func (env *Environment) Commit(ctx *rpctypes.Context, heightPtr *int64) (*corety // If no height is provided, it will fetch results for the latest block. // // Results are for the height of the block containing the txs. -// Thus response.results.deliver_tx[5] is the results of executing -// getBlock(h).Txs[5] // More: https://docs.tendermint.com/master/rpc/#/Info/block_results -func (env *Environment) BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*coretypes.ResultBlockResults, error) { - height, err := env.getHeight(env.BlockStore.Height(), heightPtr) +func (env *Environment) BlockResults(ctx context.Context, req *coretypes.RequestBlockInfo) (*coretypes.ResultBlockResults, error) { + height, err := env.getHeight(env.BlockStore.Height(), (*int64)(req.Height)) if err != nil { return nil, err } @@ -209,35 +194,27 @@ func (env *Environment) BlockResults(ctx *rpctypes.Context, heightPtr *int64) (* } var totalGasUsed int64 - for _, tx := range results.GetDeliverTxs() { - totalGasUsed += tx.GetGasUsed() + for _, res := range results.FinalizeBlock.GetTxResults() { + totalGasUsed += res.GetGasUsed() } return &coretypes.ResultBlockResults{ Height: height, - TxsResults: results.DeliverTxs, + TxsResults: results.FinalizeBlock.TxResults, TotalGasUsed: totalGasUsed, - BeginBlockEvents: results.BeginBlock.Events, - EndBlockEvents: results.EndBlock.Events, - ValidatorSetUpdate: results.EndBlock.ValidatorSetUpdate, - ConsensusParamUpdates: consensusParamsPtrFromProtoPtr(results.EndBlock.ConsensusParamUpdates), + FinalizeBlockEvents: results.FinalizeBlock.Events, + ValidatorSetUpdate: results.FinalizeBlock.ValidatorSetUpdate, + ConsensusParamUpdates: consensusParamsPtrFromProtoPtr(results.FinalizeBlock.ConsensusParamUpdates), }, nil } -// BlockSearch searches for a paginated set of blocks matching BeginBlock and -// EndBlock event search criteria. -func (env *Environment) BlockSearch( - ctx *rpctypes.Context, - query string, - pagePtr, perPagePtr *int, - orderBy string, -) (*coretypes.ResultBlockSearch, error) { - +// BlockSearch searches for a paginated set of blocks matching the provided query. 
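+// Matching heights are sorted (descending unless "asc" is requested) and then
+// paginated with the same page/per_page rules as the other list endpoints.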
+func (env *Environment) BlockSearch(ctx context.Context, req *coretypes.RequestBlockSearch) (*coretypes.ResultBlockSearch, error) { if !indexer.KVSinkEnabled(env.EventSinks) { return nil, fmt.Errorf("block searching is disabled due to no kvEventSink") } - q, err := tmquery.New(query) + q, err := tmquery.New(req.Query) if err != nil { return nil, err } @@ -249,13 +226,13 @@ func (env *Environment) BlockSearch( } } - results, err := kvsink.SearchBlockEvents(ctx.Context(), q) + results, err := kvsink.SearchBlockEvents(ctx, q) if err != nil { return nil, err } // sort results (must be done before pagination) - switch orderBy { + switch req.OrderBy { case "desc", "": sort.Slice(results, func(i, j int) bool { return results[i] > results[j] }) @@ -268,9 +245,9 @@ func (env *Environment) BlockSearch( // paginate results totalCount := len(results) - perPage := env.validatePerPage(perPagePtr) + perPage := env.validatePerPage(req.PerPage.IntPtr()) - page, err := validatePage(pagePtr, perPage, totalCount) + page, err := validatePage(req.Page.IntPtr(), perPage, totalCount) if err != nil { return nil, err } diff --git a/internal/rpc/core/blocks_test.go b/internal/rpc/core/blocks_test.go index ac0fe32db9..478ae08a66 100644 --- a/internal/rpc/core/blocks_test.go +++ b/internal/rpc/core/blocks_test.go @@ -1,6 +1,7 @@ package core import ( + "context" "fmt" "testing" @@ -14,7 +15,6 @@ import ( "github.com/tendermint/tendermint/internal/state/mocks" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" "github.com/tendermint/tendermint/rpc/coretypes" - rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) func TestBlockchainInfo(t *testing.T) { @@ -71,13 +71,13 @@ func TestBlockchainInfo(t *testing.T) { func TestBlockResults(t *testing.T) { results := &tmstate.ABCIResponses{ - DeliverTxs: []*abci.ResponseDeliverTx{ - {Code: 0, Data: []byte{0x01}, Log: "ok", GasUsed: 10}, - {Code: 0, Data: []byte{0x02}, Log: "ok", GasUsed: 5}, - {Code: 1, Log: "not ok", GasUsed: 0}, + FinalizeBlock: &abci.ResponseFinalizeBlock{ + TxResults: []*abci.ExecTxResult{ + {Code: 0, Data: []byte{0x01}, Log: "ok", GasUsed: 10}, + {Code: 0, Data: []byte{0x02}, Log: "ok", GasUsed: 5}, + {Code: 1, Log: "not ok", GasUsed: 0}, + }, }, - EndBlock: &abci.ResponseEndBlock{}, - BeginBlock: &abci.ResponseBeginBlock{}, } env := &Environment{} @@ -99,17 +99,19 @@ func TestBlockResults(t *testing.T) { {101, true, nil}, {100, false, &coretypes.ResultBlockResults{ Height: 100, - TxsResults: results.DeliverTxs, + TxsResults: results.FinalizeBlock.TxResults, TotalGasUsed: 15, - BeginBlockEvents: results.BeginBlock.Events, - EndBlockEvents: results.EndBlock.Events, - ValidatorSetUpdate: results.EndBlock.ValidatorSetUpdate, - ConsensusParamUpdates: consensusParamsPtrFromProtoPtr(results.EndBlock.ConsensusParamUpdates), + FinalizeBlockEvents: results.FinalizeBlock.Events, + ValidatorSetUpdate: results.FinalizeBlock.ValidatorSetUpdate, + ConsensusParamUpdates: consensusParamsPtrFromProtoPtr(results.FinalizeBlock.ConsensusParamUpdates), }}, } + ctx := context.Background() for _, tc := range testCases { - res, err := env.BlockResults(&rpctypes.Context{}, &tc.height) + res, err := env.BlockResults(ctx, &coretypes.RequestBlockInfo{ + Height: (*coretypes.Int64)(&tc.height), + }) if tc.wantErr { assert.Error(t, err) } else { diff --git a/internal/rpc/core/consensus.go b/internal/rpc/core/consensus.go index c2d93ff1b5..8de2612f7d 100644 --- a/internal/rpc/core/consensus.go +++ b/internal/rpc/core/consensus.go @@ -1,14 +1,11 @@ package core 
import ( - "errors" + "context" - "github.com/tendermint/tendermint/internal/consensus" "github.com/tendermint/tendermint/libs" tmmath "github.com/tendermint/tendermint/libs/math" "github.com/tendermint/tendermint/rpc/coretypes" - rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" - "github.com/tendermint/tendermint/types" ) // Validators gets the validator set at the given block height. @@ -18,13 +15,9 @@ import ( // for the validators in the set as used in computing their Merkle root. // // More: https://docs.tendermint.com/master/rpc/#/Info/validators -func (env *Environment) Validators( - ctx *rpctypes.Context, - heightPtr *int64, - pagePtr, perPagePtr *int, requestQuorumInfo *bool) (*coretypes.ResultValidators, error) { - +func (env *Environment) Validators(ctx context.Context, req *coretypes.RequestValidators) (*coretypes.ResultValidators, error) { // The latest validator that we know is the NextValidator of the last block. - height, err := env.getHeight(env.latestUncommittedHeight(), heightPtr) + height, err := env.getHeight(env.latestUncommittedHeight(), (*int64)(req.Height)) if err != nil { return nil, err } @@ -35,8 +28,8 @@ func (env *Environment) Validators( } totalCount := len(validators.Validators) - perPage := env.validatePerPage(perPagePtr) - page, err := validatePage(pagePtr, perPage, totalCount) + perPage := env.validatePerPage(req.PerPage.IntPtr()) + page, err := validatePage(req.Page.IntPtr(), perPage, totalCount) if err != nil { return nil, err } @@ -49,8 +42,9 @@ func (env *Environment) Validators( BlockHeight: height, Validators: v, Count: len(v), - Total: totalCount} - if requestQuorumInfo != nil && libs.BoolValue(requestQuorumInfo) { + Total: totalCount, + } + if libs.BoolValue(req.RequestQuorumInfo) { result.QuorumHash = &validators.QuorumHash result.QuorumType = validators.QuorumType result.ThresholdPublicKey = &validators.ThresholdPublicKey @@ -61,56 +55,32 @@ func (env *Environment) Validators( // DumpConsensusState dumps consensus state. // UNSTABLE // More: https://docs.tendermint.com/master/rpc/#/Info/dump_consensus_state -func (env *Environment) DumpConsensusState(ctx *rpctypes.Context) (*coretypes.ResultDumpConsensusState, error) { +func (env *Environment) DumpConsensusState(ctx context.Context) (*coretypes.ResultDumpConsensusState, error) { // Get Peer consensus states. var peerStates []coretypes.PeerStateInfo - switch { - case env.P2PPeers != nil: - peers := env.P2PPeers.Peers().List() - peerStates = make([]coretypes.PeerStateInfo, 0, len(peers)) - for _, peer := range peers { - peerState, ok := peer.Get(types.PeerStateKey).(*consensus.PeerState) - if !ok { // peer does not have a state yet - continue - } - peerStateJSON, err := peerState.ToJSON() - if err != nil { - return nil, err - } + peers := env.PeerManager.Peers() + peerStates = make([]coretypes.PeerStateInfo, 0, len(peers)) + for _, pid := range peers { + peerState, ok := env.ConsensusReactor.GetPeerState(pid) + if !ok { + continue + } + + peerStateJSON, err := peerState.ToJSON() + if err != nil { + return nil, err + } + + addr := env.PeerManager.Addresses(pid) + if len(addr) != 0 { peerStates = append(peerStates, coretypes.PeerStateInfo{ // Peer basic info. - NodeAddress: peer.SocketAddr().String(), + NodeAddress: addr[0].String(), // Peer consensus state. 
PeerState: peerStateJSON, }) } - case env.PeerManager != nil: - peers := env.PeerManager.Peers() - peerStates = make([]coretypes.PeerStateInfo, 0, len(peers)) - for _, pid := range peers { - peerState, ok := env.ConsensusReactor.GetPeerState(pid) - if !ok { - continue - } - - peerStateJSON, err := peerState.ToJSON() - if err != nil { - return nil, err - } - - addr := env.PeerManager.Addresses(pid) - if len(addr) >= 1 { - peerStates = append(peerStates, coretypes.PeerStateInfo{ - // Peer basic info. - NodeAddress: addr[0].String(), - // Peer consensus state. - PeerState: peerStateJSON, - }) - } - } - default: - return nil, errors.New("no peer system configured") } // Get self round state. @@ -120,13 +90,14 @@ func (env *Environment) DumpConsensusState(ctx *rpctypes.Context) (*coretypes.Re } return &coretypes.ResultDumpConsensusState{ RoundState: roundState, - Peers: peerStates}, nil + Peers: peerStates, + }, nil } // ConsensusState returns a concise summary of the consensus state. // UNSTABLE // More: https://docs.tendermint.com/master/rpc/#/Info/consensus_state -func (env *Environment) GetConsensusState(ctx *rpctypes.Context) (*coretypes.ResultConsensusState, error) { +func (env *Environment) GetConsensusState(ctx context.Context) (*coretypes.ResultConsensusState, error) { // Get self round state. bz, err := env.ConsensusState.GetRoundStateSimpleJSON() return &coretypes.ResultConsensusState{RoundState: bz}, err @@ -135,13 +106,10 @@ func (env *Environment) GetConsensusState(ctx *rpctypes.Context) (*coretypes.Res // ConsensusParams gets the consensus parameters at the given block height. // If no height is provided, it will fetch the latest consensus params. // More: https://docs.tendermint.com/master/rpc/#/Info/consensus_params -func (env *Environment) ConsensusParams( - ctx *rpctypes.Context, - heightPtr *int64) (*coretypes.ResultConsensusParams, error) { - - // The latest consensus params that we know is the consensus params after the - // last block. - height, err := env.getHeight(env.latestUncommittedHeight(), heightPtr) +func (env *Environment) ConsensusParams(ctx context.Context, req *coretypes.RequestConsensusParams) (*coretypes.ResultConsensusParams, error) { + // The latest consensus params that we know is the consensus params after + // the last block. + height, err := env.getHeight(env.latestUncommittedHeight(), (*int64)(req.Height)) if err != nil { return nil, err } @@ -151,6 +119,9 @@ func (env *Environment) ConsensusParams( return nil, err } + consensusParams.Synchrony = consensusParams.Synchrony.SynchronyParamsOrDefaults() + consensusParams.Timeout = consensusParams.Timeout.TimeoutParamsOrDefaults() + return &coretypes.ResultConsensusParams{ BlockHeight: height, ConsensusParams: consensusParams}, nil diff --git a/internal/rpc/core/dev.go b/internal/rpc/core/dev.go index 21c5154ff5..702413ab89 100644 --- a/internal/rpc/core/dev.go +++ b/internal/rpc/core/dev.go @@ -1,12 +1,13 @@ package core import ( + "context" + "github.com/tendermint/tendermint/rpc/coretypes" - rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) // UnsafeFlushMempool removes all transactions from the mempool. 
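+// It is exposed only when the RPC server is configured with the unsafe routes
+// enabled (see the RouteOptions passed to NewRoutesMap in StartService).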
-func (env *Environment) UnsafeFlushMempool(ctx *rpctypes.Context) (*coretypes.ResultUnsafeFlushMempool, error) { +func (env *Environment) UnsafeFlushMempool(ctx context.Context) (*coretypes.ResultUnsafeFlushMempool, error) { env.Mempool.Flush() return &coretypes.ResultUnsafeFlushMempool{}, nil } diff --git a/internal/rpc/core/env.go b/internal/rpc/core/env.go index 61c2466f95..7b7dc3087b 100644 --- a/internal/rpc/core/env.go +++ b/internal/rpc/core/env.go @@ -1,22 +1,34 @@ package core import ( + "context" "encoding/base64" + "encoding/json" "fmt" + "net" + "net/http" "time" + "github.com/rs/cors" + + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/internal/blocksync" "github.com/tendermint/tendermint/internal/consensus" + "github.com/tendermint/tendermint/internal/eventbus" + "github.com/tendermint/tendermint/internal/eventlog" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/proxy" + tmpubsub "github.com/tendermint/tendermint/internal/pubsub" + "github.com/tendermint/tendermint/internal/pubsub/query" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/internal/statesync" - tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/libs/strings" "github.com/tendermint/tendermint/rpc/coretypes" + rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server" "github.com/tendermint/tendermint/types" ) @@ -45,25 +57,6 @@ type consensusState interface { GetRoundStateSimpleJSON() ([]byte, error) } -type transport interface { - Listeners() []string - IsListening() bool - NodeInfo() types.NodeInfo -} - -type peers interface { - AddPersistentPeers([]string) error - AddUnconditionalPeerIDs([]string) error - AddPrivatePeerIDs([]string) error - DialPeersAsync([]string) error - Peers() p2p.IPeerSet -} - -type consensusReactor interface { - WaitSync() bool - GetPeerState(peerID types.NodeID) (*consensus.PeerState, bool) -} - type peerManager interface { Peers() []types.NodeID Addresses(types.NodeID) []p2p.NodeAddress @@ -74,19 +67,19 @@ type peerManager interface { // to be setup once during startup. 
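+// Everything stored here is shared by the RPC request handlers, so any
+// mutable state must be safe for concurrent use.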
type Environment struct { // external, thread safe interfaces - ProxyAppQuery proxy.AppConnQuery - ProxyAppMempool proxy.AppConnMempool + ProxyApp abciclient.Client // interfaces defined in types and above StateStore sm.Store BlockStore sm.BlockStore EvidencePool sm.EvidencePool ConsensusState consensusState - ConsensusReactor consensusReactor - P2PPeers peers + ConsensusReactor *consensus.Reactor + BlockSyncReactor *blocksync.Reactor - // Legacy p2p stack - P2PTransport transport + IsListening bool + Listeners []string + NodeInfo types.NodeInfo // interfaces for new p2p interfaces PeerManager peerManager @@ -95,9 +88,9 @@ type Environment struct { ProTxHash crypto.ProTxHash GenDoc *types.GenesisDoc // cache the genesis structure EventSinks []indexer.EventSink - EventBus *types.EventBus // thread safe + EventBus *eventbus.EventBus // thread safe + EventLog *eventlog.Log Mempool mempool.Mempool - BlockSyncReactor consensus.BlockSyncReactor StateSyncMetricer statesync.Metricer Logger log.Logger @@ -159,7 +152,7 @@ func (env *Environment) InitGenesisChunks() error { return nil } - data, err := tmjson.Marshal(env.GenDoc) + data, err := json.Marshal(env.GenDoc) if err != nil { return err } @@ -207,9 +200,154 @@ func (env *Environment) getHeight(latestHeight int64, heightPtr *int64) (int64, } func (env *Environment) latestUncommittedHeight() int64 { - nodeIsSyncing := env.ConsensusReactor.WaitSync() - if nodeIsSyncing { - return env.BlockStore.Height() + if env.ConsensusReactor != nil { + // consensus reactor can be nil in inspect mode. + + nodeIsSyncing := env.ConsensusReactor.WaitSync() + if nodeIsSyncing { + return env.BlockStore.Height() + } } return env.BlockStore.Height() + 1 } + +// StartService constructs and starts listeners for the RPC service +// according to the config object, returning an error if the service +// cannot be constructed or started. The listeners, which provide +// access to the service, run until the context is canceled. +func (env *Environment) StartService(ctx context.Context, conf *config.Config) ([]net.Listener, error) { + if err := env.InitGenesisChunks(); err != nil { + return nil, err + } + + env.Listeners = []string{ + fmt.Sprintf("Listener(@%v)", conf.P2P.ExternalAddress), + } + + listenAddrs := strings.SplitAndTrimEmpty(conf.RPC.ListenAddress, ",", " ") + routes := NewRoutesMap(env, &RouteOptions{ + Unsafe: conf.RPC.Unsafe, + }) + + cfg := rpcserver.DefaultConfig() + cfg.MaxBodyBytes = conf.RPC.MaxBodyBytes + cfg.MaxHeaderBytes = conf.RPC.MaxHeaderBytes + cfg.MaxOpenConnections = conf.RPC.MaxOpenConnections + // If necessary adjust global WriteTimeout to ensure it's greater than + // TimeoutBroadcastTxCommit. + // See https://github.com/tendermint/tendermint/issues/3435 + if cfg.WriteTimeout <= conf.RPC.TimeoutBroadcastTxCommit { + cfg.WriteTimeout = conf.RPC.TimeoutBroadcastTxCommit + 1*time.Second + } + + // If the event log is enabled, subscribe to all events published to the + // event bus, and forward them to the event log. + if lg := env.EventLog; lg != nil { + // TODO(creachadair): This is kind of a hack, ideally we'd share the + // observer with the indexer, but it's tricky to plumb them together. + // For now, use a "normal" subscription with a big buffer allowance. + // The event log should always be able to keep up. 
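+		// If the log falls behind by more than that allowance, the publisher
+		// evicts the subscription and the goroutine below logs the error and
+		// exits.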
+ const subscriberID = "event-log-subscriber" + sub, err := env.EventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{ + ClientID: subscriberID, + Query: query.All, + Limit: 1 << 16, // essentially "no limit" + }) + if err != nil { + return nil, fmt.Errorf("event log subscribe: %w", err) + } + go func() { + // N.B. Use background for unsubscribe, ctx is already terminated. + defer env.EventBus.UnsubscribeAll(context.Background(), subscriberID) // nolint:errcheck + for { + msg, err := sub.Next(ctx) + if err != nil { + env.Logger.Error("Subscription terminated", "err", err) + return + } + etype, ok := eventlog.FindType(msg.Events()) + if ok { + _ = lg.Add(etype, msg.Data()) + } + } + }() + + env.Logger.Info("Event log subscription enabled") + } + + // We may expose the RPC over both TCP and a Unix-domain socket. + listeners := make([]net.Listener, len(listenAddrs)) + for i, listenAddr := range listenAddrs { + mux := http.NewServeMux() + rpcLogger := env.Logger.With("module", "rpc-server") + rpcserver.RegisterRPCFuncs(mux, routes, rpcLogger) + + if conf.RPC.ExperimentalDisableWebsocket { + rpcLogger.Info("Disabling websocket endpoints (experimental-disable-websocket=true)") + } else { + rpcLogger.Info("WARNING: Websocket RPC access is deprecated and will be removed " + + "in Tendermint v0.37. See https://tinyurl.com/adr075 for more information.") + wmLogger := rpcLogger.With("protocol", "websocket") + wm := rpcserver.NewWebsocketManager(wmLogger, routes, + rpcserver.OnDisconnect(func(remoteAddr string) { + err := env.EventBus.UnsubscribeAll(context.Background(), remoteAddr) + if err != nil && err != tmpubsub.ErrSubscriptionNotFound { + wmLogger.Error("Failed to unsubscribe addr from events", "addr", remoteAddr, "err", err) + } + }), + rpcserver.ReadLimit(cfg.MaxBodyBytes), + ) + mux.HandleFunc("/websocket", wm.WebsocketHandler) + } + + listener, err := rpcserver.Listen( + listenAddr, + cfg.MaxOpenConnections, + ) + if err != nil { + return nil, err + } + + var rootHandler http.Handler = mux + if conf.RPC.IsCorsEnabled() { + corsMiddleware := cors.New(cors.Options{ + AllowedOrigins: conf.RPC.CORSAllowedOrigins, + AllowedMethods: conf.RPC.CORSAllowedMethods, + AllowedHeaders: conf.RPC.CORSAllowedHeaders, + }) + rootHandler = corsMiddleware.Handler(mux) + } + if conf.RPC.IsTLSEnabled() { + go func() { + if err := rpcserver.ServeTLS( + ctx, + listener, + rootHandler, + conf.RPC.CertFile(), + conf.RPC.KeyFile(), + rpcLogger, + cfg, + ); err != nil { + env.Logger.Error("error serving server with TLS", "err", err) + } + }() + } else { + go func() { + if err := rpcserver.Serve( + ctx, + listener, + rootHandler, + rpcLogger, + cfg, + ); err != nil { + env.Logger.Error("error serving server", "err", err) + } + }() + } + + listeners[i] = listener + } + + return listeners, nil + +} diff --git a/internal/rpc/core/events.go b/internal/rpc/core/events.go index a65e0146db..3f289bfa70 100644 --- a/internal/rpc/core/events.go +++ b/internal/rpc/core/events.go @@ -6,13 +6,19 @@ import ( "fmt" "time" - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" - tmquery "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/internal/eventlog" + "github.com/tendermint/tendermint/internal/eventlog/cursor" + "github.com/tendermint/tendermint/internal/jsontypes" + tmpubsub "github.com/tendermint/tendermint/internal/pubsub" + tmquery "github.com/tendermint/tendermint/internal/pubsub/query" "github.com/tendermint/tendermint/rpc/coretypes" rpctypes 
"github.com/tendermint/tendermint/rpc/jsonrpc/types" ) const ( + // Buffer on the Tendermint (server) side to allow some slowness in clients. + subBufferSize = 100 + // maxQueryLength is the maximum length of a query string that will be // accepted. This is just a safety check to avoid outlandish queries. maxQueryLength = 512 @@ -20,81 +26,74 @@ const ( // Subscribe for events via WebSocket. // More: https://docs.tendermint.com/master/rpc/#/Websocket/subscribe -func (env *Environment) Subscribe(ctx *rpctypes.Context, query string) (*coretypes.ResultSubscribe, error) { - addr := ctx.RemoteAddr() +func (env *Environment) Subscribe(ctx context.Context, req *coretypes.RequestSubscribe) (*coretypes.ResultSubscribe, error) { + callInfo := rpctypes.GetCallInfo(ctx) + addr := callInfo.RemoteAddr() if env.EventBus.NumClients() >= env.Config.MaxSubscriptionClients { return nil, fmt.Errorf("max_subscription_clients %d reached", env.Config.MaxSubscriptionClients) } else if env.EventBus.NumClientSubscriptions(addr) >= env.Config.MaxSubscriptionsPerClient { return nil, fmt.Errorf("max_subscriptions_per_client %d reached", env.Config.MaxSubscriptionsPerClient) - } else if len(query) > maxQueryLength { + } else if len(req.Query) > maxQueryLength { return nil, errors.New("maximum query length exceeded") } - env.Logger.Info("Subscribe to query", "remote", addr, "query", query) + env.Logger.Info("WARNING: Websocket subscriptions are deprecated and will be removed " + + "in Tendermint v0.37. See https://tinyurl.com/adr075 for more information.") + env.Logger.Info("Subscribe to query", "remote", addr, "query", req.Query) - q, err := tmquery.New(query) + q, err := tmquery.New(req.Query) if err != nil { return nil, fmt.Errorf("failed to parse query: %w", err) } - subCtx, cancel := context.WithTimeout(ctx.Context(), SubscribeTimeout) + subCtx, cancel := context.WithTimeout(ctx, SubscribeTimeout) defer cancel() - sub, err := env.EventBus.Subscribe(subCtx, addr, q, env.Config.SubscriptionBufferSize) + sub, err := env.EventBus.SubscribeWithArgs(subCtx, tmpubsub.SubscribeArgs{ + ClientID: addr, + Query: q, + Limit: subBufferSize, + }) if err != nil { return nil, err } - closeIfSlow := env.Config.CloseOnSlowClient - // Capture the current ID, since it can change in the future. - subscriptionID := ctx.JSONReq.ID + subscriptionID := callInfo.RPCRequest.ID go func() { + opctx, opcancel := context.WithCancel(context.TODO()) + defer opcancel() + for { - select { - case msg := <-sub.Out(): - var ( - resultEvent = &coretypes.ResultEvent{Query: query, Data: msg.Data(), Events: msg.Events()} - resp = rpctypes.NewRPCSuccessResponse(subscriptionID, resultEvent) - ) - writeCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - if err := ctx.WSConn.WriteRPCResponse(writeCtx, resp); err != nil { - env.Logger.Info("Can't write response (slow client)", + msg, err := sub.Next(opctx) + if errors.Is(err, tmpubsub.ErrUnsubscribed) { + // The subscription was removed by the client. + return + } else if errors.Is(err, tmpubsub.ErrTerminated) { + // The subscription was terminated by the publisher. 
+ resp := callInfo.RPCRequest.MakeError(err) + ok := callInfo.WSConn.TryWriteRPCResponse(opctx, resp) + if !ok { + env.Logger.Info("Unable to write response (slow client)", "to", addr, "subscriptionID", subscriptionID, "err", err) - - if closeIfSlow { - var ( - err = errors.New("subscription was canceled (reason: slow client)") - resp = rpctypes.RPCServerError(subscriptionID, err) - ) - if !ctx.WSConn.TryWriteRPCResponse(resp) { - env.Logger.Info("Can't write response (slow client)", - "to", addr, "subscriptionID", subscriptionID, "err", err) - } - return - } - } - case <-sub.Canceled(): - if sub.Err() != tmpubsub.ErrUnsubscribed { - var reason string - if sub.Err() == nil { - reason = "Tendermint exited" - } else { - reason = sub.Err().Error() - } - var ( - err = fmt.Errorf("subscription was canceled (reason: %s)", reason) - resp = rpctypes.RPCServerError(subscriptionID, err) - ) - if ok := ctx.WSConn.TryWriteRPCResponse(resp); !ok { - env.Logger.Info("Can't write response (slow client)", - "to", addr, "subscriptionID", subscriptionID, "err", err) - } } return } + + // We have a message to deliver to the client. + resp := callInfo.RPCRequest.MakeResponse(&coretypes.ResultEvent{ + Query: req.Query, + Data: msg.Data(), + Events: msg.Events(), + }) + wctx, cancel := context.WithTimeout(opctx, 10*time.Second) + err = callInfo.WSConn.WriteRPCResponse(wctx, resp) + cancel() + if err != nil { + env.Logger.Info("Unable to write response (slow client)", + "to", addr, "subscriptionID", subscriptionID, "err", err) + } } }() @@ -103,18 +102,18 @@ func (env *Environment) Subscribe(ctx *rpctypes.Context, query string) (*coretyp // Unsubscribe from events via WebSocket. // More: https://docs.tendermint.com/master/rpc/#/Websocket/unsubscribe -func (env *Environment) Unsubscribe(ctx *rpctypes.Context, query string) (*coretypes.ResultUnsubscribe, error) { - args := tmpubsub.UnsubscribeArgs{Subscriber: ctx.RemoteAddr()} - env.Logger.Info("Unsubscribe from query", "remote", args.Subscriber, "subscription", query) +func (env *Environment) Unsubscribe(ctx context.Context, req *coretypes.RequestUnsubscribe) (*coretypes.ResultUnsubscribe, error) { + args := tmpubsub.UnsubscribeArgs{Subscriber: rpctypes.GetCallInfo(ctx).RemoteAddr()} + env.Logger.Info("Unsubscribe from query", "remote", args.Subscriber, "subscription", req.Query) var err error - args.Query, err = tmquery.New(query) + args.Query, err = tmquery.New(req.Query) if err != nil { - args.ID = query + args.ID = req.Query } - err = env.EventBus.Unsubscribe(ctx.Context(), args) + err = env.EventBus.Unsubscribe(ctx, args) if err != nil { return nil, err } @@ -123,12 +122,149 @@ func (env *Environment) Unsubscribe(ctx *rpctypes.Context, query string) (*coret // UnsubscribeAll from all events via WebSocket. // More: https://docs.tendermint.com/master/rpc/#/Websocket/unsubscribe_all -func (env *Environment) UnsubscribeAll(ctx *rpctypes.Context) (*coretypes.ResultUnsubscribe, error) { - addr := ctx.RemoteAddr() +func (env *Environment) UnsubscribeAll(ctx context.Context) (*coretypes.ResultUnsubscribe, error) { + addr := rpctypes.GetCallInfo(ctx).RemoteAddr() env.Logger.Info("Unsubscribe from all", "remote", addr) - err := env.EventBus.UnsubscribeAll(ctx.Context(), addr) + err := env.EventBus.UnsubscribeAll(ctx, addr) if err != nil { return nil, err } return &coretypes.ResultUnsubscribe{}, nil } + +// Events applies a query to the event log. If an event log is not enabled, +// Events reports an error. 
Otherwise, it filters the current contents of the +// log to return matching events. +// +// Events returns up to maxItems of the newest eligible event items. An item is +// eligible if it is older than before (or before is zero), it is newer than +// after (or after is zero), and its data matches the filter. A nil filter +// matches all event data. +// +// If before is zero and no eligible event items are available, Events waits +// for up to waitTime for a matching item to become available. The wait is +// terminated early if ctx ends. +// +// If maxItems ≤ 0, a default positive number of events is chosen. The values +// of maxItems and waitTime may be capped to sensible internal maxima without +// reporting an error to the caller. +func (env *Environment) Events(ctx context.Context, req *coretypes.RequestEvents) (*coretypes.ResultEvents, error) { + if env.EventLog == nil { + return nil, errors.New("the event log is not enabled") + } + + // Parse and validate parameters. + maxItems := req.MaxItems + if maxItems <= 0 { + maxItems = 10 + } else if maxItems > 100 { + maxItems = 100 + } + + const minWaitTime = 1 * time.Second + const maxWaitTime = 30 * time.Second + + waitTime := req.WaitTime + if waitTime < minWaitTime { + waitTime = minWaitTime + } else if waitTime > maxWaitTime { + waitTime = maxWaitTime + } + + query := tmquery.All + if req.Filter != nil && req.Filter.Query != "" { + q, err := tmquery.New(req.Filter.Query) + if err != nil { + return nil, fmt.Errorf("invalid filter query: %w", err) + } + query = q + } + + var before, after cursor.Cursor + if err := before.UnmarshalText([]byte(req.Before)); err != nil { + return nil, fmt.Errorf("invalid cursor %q: %w", req.Before, err) + } + if err := after.UnmarshalText([]byte(req.After)); err != nil { + return nil, fmt.Errorf("invalid cursor %q: %w", req.After, err) + } + + var info eventlog.Info + var items []*eventlog.Item + var err error + accept := func(itm *eventlog.Item) error { + // N.B. We accept up to one item more than requested, so we can tell how + // to set the "more" flag in the response. + if len(items) > maxItems || itm.Cursor.Before(after) { + return eventlog.ErrStopScan + } + if cursorInRange(itm.Cursor, before, after) && query.Matches(itm.Events) { + items = append(items, itm) + } + return nil + } + + if before.IsZero() { + ctx, cancel := context.WithTimeout(ctx, waitTime) + defer cancel() + + // Long poll. The loop here is because new items may not match the query, + // and we want to keep waiting until we have relevant results (or time out). + cur := after + for len(items) == 0 { + info, err = env.EventLog.WaitScan(ctx, cur, accept) + if err != nil { + // Don't report a timeout as a request failure. + if errors.Is(err, context.DeadlineExceeded) { + err = nil + } + break + } + cur = info.Newest + } + } else { + // Quick poll, return only what is already available. 
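(For the client's side of this long poll: the sketch below repeatedly calls the new events method over JSON-RPC/HTTP, resuming from the newest cursor it has seen. The JSON parameter and field names here, filter, maxItems, after, items, newest, are assumptions read off the Go request/result structs, not verified wire names; the port is the conventional local RPC default. Treat this as a sketch of the cursor protocol, not a reference client.)

    package main

    import (
    	"bytes"
    	"encoding/json"
    	"fmt"
    	"net/http"
    )

    // eventsPage models only the fields this sketch reads; the JSON names
    // are assumptions based on the ResultEvents struct, not verified tags.
    type eventsPage struct {
    	Result struct {
    		Items  []json.RawMessage `json:"items"`
    		Newest string            `json:"newest"`
    	} `json:"result"`
    }

    func main() {
    	cursor := "" // zero cursor: long-poll for new items
    	for {
    		req, _ := json.Marshal(map[string]interface{}{
    			"jsonrpc": "2.0",
    			"id":      1,
    			"method":  "events",
    			"params": map[string]interface{}{
    				"filter":   map[string]string{"query": "tm.event = 'NewBlock'"},
    				"maxItems": 10,
    				"after":    cursor,
    			},
    		})
    		resp, err := http.Post("http://localhost:26657", "application/json", bytes.NewReader(req))
    		if err != nil {
    			panic(err)
    		}
    		var page eventsPage
    		if err := json.NewDecoder(resp.Body).Decode(&page); err != nil {
    			panic(err)
    		}
    		resp.Body.Close()
    		for _, item := range page.Result.Items {
    			fmt.Println(string(item))
    		}
    		if page.Result.Newest != "" {
    			cursor = page.Result.Newest // resume after everything seen so far
    		}
    	}
    }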
+ info, err = env.EventLog.Scan(accept) + } + if err != nil { + return nil, err + } + + more := len(items) > maxItems + if more { + items = items[:len(items)-1] + } + enc, err := marshalItems(items) + if err != nil { + return nil, err + } + return &coretypes.ResultEvents{ + Items: enc, + More: more, + Oldest: cursorString(info.Oldest), + Newest: cursorString(info.Newest), + }, nil +} + +func cursorString(c cursor.Cursor) string { + if c.IsZero() { + return "" + } + return c.String() +} + +func cursorInRange(c, before, after cursor.Cursor) bool { + return (before.IsZero() || c.Before(before)) && (after.IsZero() || after.Before(c)) +} + +func marshalItems(items []*eventlog.Item) ([]*coretypes.EventItem, error) { + out := make([]*coretypes.EventItem, len(items)) + for i, itm := range items { + v, err := jsontypes.Marshal(itm.Data) + if err != nil { + return nil, fmt.Errorf("encoding event data: %w", err) + } + out[i] = &coretypes.EventItem{Cursor: itm.Cursor.String(), Event: itm.Type} + out[i].Data = v + } + return out, nil +} diff --git a/internal/rpc/core/evidence.go b/internal/rpc/core/evidence.go index a7641b99d4..5de93d2c2b 100644 --- a/internal/rpc/core/evidence.go +++ b/internal/rpc/core/evidence.go @@ -1,29 +1,23 @@ package core import ( + "context" "fmt" "github.com/tendermint/tendermint/rpc/coretypes" - rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" - "github.com/tendermint/tendermint/types" ) // BroadcastEvidence broadcasts evidence of the misbehavior. // More: https://docs.tendermint.com/master/rpc/#/Evidence/broadcast_evidence -func (env *Environment) BroadcastEvidence( - ctx *rpctypes.Context, - ev types.Evidence) (*coretypes.ResultBroadcastEvidence, error) { - - if ev == nil { +func (env *Environment) BroadcastEvidence(ctx context.Context, req *coretypes.RequestBroadcastEvidence) (*coretypes.ResultBroadcastEvidence, error) { + if req.Evidence == nil { return nil, fmt.Errorf("%w: no evidence was provided", coretypes.ErrInvalidRequest) } - - if err := ev.ValidateBasic(); err != nil { + if err := req.Evidence.ValidateBasic(); err != nil { return nil, fmt.Errorf("evidence.ValidateBasic failed: %w", err) } - - if err := env.EvidencePool.AddEvidence(ev); err != nil { + if err := env.EvidencePool.AddEvidence(ctx, req.Evidence); err != nil { return nil, fmt.Errorf("failed to add evidence: %w", err) } - return &coretypes.ResultBroadcastEvidence{Hash: ev.Hash()}, nil + return &coretypes.ResultBroadcastEvidence{Hash: req.Evidence.Hash()}, nil } diff --git a/internal/rpc/core/health.go b/internal/rpc/core/health.go index fc355c7e70..c55aa58dca 100644 --- a/internal/rpc/core/health.go +++ b/internal/rpc/core/health.go @@ -1,13 +1,14 @@ package core import ( + "context" + "github.com/tendermint/tendermint/rpc/coretypes" - rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) // Health gets node health. Returns empty result (200 OK) on success, no // response - in case of an error. 
// More: https://docs.tendermint.com/master/rpc/#/Info/health -func (env *Environment) Health(ctx *rpctypes.Context) (*coretypes.ResultHealth, error) { +func (env *Environment) Health(ctx context.Context) (*coretypes.ResultHealth, error) { return &coretypes.ResultHealth{}, nil } diff --git a/internal/rpc/core/mempool.go b/internal/rpc/core/mempool.go index 899d516359..5fc2b9fcf7 100644 --- a/internal/rpc/core/mempool.go +++ b/internal/rpc/core/mempool.go @@ -1,6 +1,7 @@ package core import ( + "context" "errors" "fmt" "math/rand" @@ -9,9 +10,8 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/state/indexer" + tmmath "github.com/tendermint/tendermint/libs/math" "github.com/tendermint/tendermint/rpc/coretypes" - rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" - "github.com/tendermint/tendermint/types" ) //----------------------------------------------------------------------------- @@ -20,127 +20,154 @@ import ( // BroadcastTxAsync returns right away, with no response. Does not wait for // CheckTx nor DeliverTx results. // More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_async -func (env *Environment) BroadcastTxAsync(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { - err := env.Mempool.CheckTx(ctx.Context(), tx, nil, mempool.TxInfo{}) +func (env *Environment) BroadcastTxAsync(ctx context.Context, req *coretypes.RequestBroadcastTx) (*coretypes.ResultBroadcastTx, error) { + err := env.Mempool.CheckTx(ctx, req.Tx, nil, mempool.TxInfo{}) if err != nil { return nil, err } - return &coretypes.ResultBroadcastTx{Hash: tx.Hash()}, nil + return &coretypes.ResultBroadcastTx{Hash: req.Tx.Hash()}, nil } // BroadcastTxSync returns with the response from CheckTx. Does not wait for // DeliverTx result. // More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_sync -func (env *Environment) BroadcastTxSync(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { - resCh := make(chan *abci.Response, 1) +func (env *Environment) BroadcastTxSync(ctx context.Context, req *coretypes.RequestBroadcastTx) (*coretypes.ResultBroadcastTx, error) { + resCh := make(chan *abci.ResponseCheckTx, 1) err := env.Mempool.CheckTx( - ctx.Context(), - tx, - func(res *abci.Response) { resCh <- res }, + ctx, + req.Tx, + func(res *abci.ResponseCheckTx) { + select { + case <-ctx.Done(): + case resCh <- res: + } + }, mempool.TxInfo{}, ) if err != nil { return nil, err } - res := <-resCh - r := res.GetCheckTx() - - return &coretypes.ResultBroadcastTx{ - Code: r.Code, - Data: r.Data, - Log: r.Log, - Codespace: r.Codespace, - MempoolError: r.MempoolError, - Hash: tx.Hash(), - }, nil + select { + case <-ctx.Done(): + return nil, fmt.Errorf("broadcast confirmation not received: %w", ctx.Err()) + case r := <-resCh: + return &coretypes.ResultBroadcastTx{ + Code: r.Code, + Data: r.Data, + Log: r.Log, + Codespace: r.Codespace, + MempoolError: r.MempoolError, + Hash: req.Tx.Hash(), + }, nil + } } // BroadcastTxCommit returns with the responses from CheckTx and DeliverTx. 
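(One detail of the broadcast rewrites above is worth calling out: the CheckTx callback now wraps its channel send in a select on ctx.Done(). A bare resCh <- res could block forever if the caller has already given up, leaking the goroutine running the callback; the select lets the send be abandoned once the request context ends. A self-contained illustration of the pattern, with generic names and no Tendermint types:)

    package main

    import (
    	"context"
    	"fmt"
    	"time"
    )

    // deliver hands result to out, but abandons the send if ctx ends first,
    // so the sending goroutine can never block forever on a departed receiver.
    func deliver(ctx context.Context, out chan<- int, result int) {
    	select {
    	case <-ctx.Done():
    	case out <- result:
    	}
    }

    func main() {
    	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
    	defer cancel()

    	out := make(chan int, 1)
    	go func() {
    		time.Sleep(100 * time.Millisecond) // simulate a slow CheckTx
    		deliver(ctx, out, 42)
    	}()

    	select {
    	case <-ctx.Done():
    		fmt.Println("gave up waiting:", ctx.Err())
    	case r := <-out:
    		fmt.Println("result:", r)
    	}
    }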
// More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_commit -func (env *Environment) BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) { - resCh := make(chan *abci.Response, 1) +func (env *Environment) BroadcastTxCommit(ctx context.Context, req *coretypes.RequestBroadcastTx) (*coretypes.ResultBroadcastTxCommit, error) { + resCh := make(chan *abci.ResponseCheckTx, 1) err := env.Mempool.CheckTx( - ctx.Context(), - tx, - func(res *abci.Response) { resCh <- res }, + ctx, + req.Tx, + func(res *abci.ResponseCheckTx) { + select { + case <-ctx.Done(): + case resCh <- res: + } + }, mempool.TxInfo{}, ) if err != nil { return nil, err } - r := (<-resCh).GetCheckTx() - if r.Code != abci.CodeTypeOK { - return &coretypes.ResultBroadcastTxCommit{ - CheckTx: *r, - Hash: tx.Hash(), - }, fmt.Errorf("transaction encountered error (%s)", r.MempoolError) - } - - if !indexer.KVSinkEnabled(env.EventSinks) { - return &coretypes.ResultBroadcastTxCommit{ + select { + case <-ctx.Done(): + return nil, fmt.Errorf("broadcast confirmation not received: %w", ctx.Err()) + case r := <-resCh: + if r.Code != abci.CodeTypeOK { + return &coretypes.ResultBroadcastTxCommit{ CheckTx: *r, - Hash: tx.Hash(), - }, - errors.New("cannot wait for commit because kvEventSync is not enabled") - } + Hash: req.Tx.Hash(), + }, fmt.Errorf("transaction encountered error (%s)", r.MempoolError) + } - startAt := time.Now() - timer := time.NewTimer(0) - defer timer.Stop() - - count := 0 - for { - count++ - select { - case <-ctx.Context().Done(): - env.Logger.Error("Error on broadcastTxCommit", - "duration", time.Since(startAt), - "err", err) + if !indexer.KVSinkEnabled(env.EventSinks) { return &coretypes.ResultBroadcastTxCommit{ CheckTx: *r, - Hash: tx.Hash(), - }, fmt.Errorf("timeout waiting for commit of tx %s (%s)", - tx.Hash(), time.Since(startAt)) - case <-timer.C: - txres, err := env.Tx(ctx, tx.Hash(), false) - if err != nil { - jitter := 100*time.Millisecond + time.Duration(rand.Int63n(int64(time.Second))) // nolint: gosec - backoff := 100 * time.Duration(count) * time.Millisecond - timer.Reset(jitter + backoff) - continue - } + Hash: req.Tx.Hash(), + }, + errors.New("cannot confirm transaction because kvEventSink is not enabled") + } - return &coretypes.ResultBroadcastTxCommit{ - CheckTx: *r, - DeliverTx: txres.TxResult, - Hash: tx.Hash(), - Height: txres.Height, - }, nil + startAt := time.Now() + timer := time.NewTimer(0) + defer timer.Stop() + + count := 0 + for { + count++ + select { + case <-ctx.Done(): + env.Logger.Error("error on broadcastTxCommit", + "duration", time.Since(startAt), + "err", err) + return &coretypes.ResultBroadcastTxCommit{ + CheckTx: *r, + Hash: req.Tx.Hash(), + }, fmt.Errorf("timeout waiting for commit of tx %s (%s)", + req.Tx.Hash(), time.Since(startAt)) + case <-timer.C: + txres, err := env.Tx(ctx, &coretypes.RequestTx{ + Hash: req.Tx.Hash(), + Prove: false, + }) + if err != nil { + jitter := 100*time.Millisecond + time.Duration(rand.Int63n(int64(time.Second))) // nolint: gosec + backoff := 100 * time.Duration(count) * time.Millisecond + timer.Reset(jitter + backoff) + continue + } + + return &coretypes.ResultBroadcastTxCommit{ + CheckTx: *r, + TxResult: txres.TxResult, + Hash: req.Tx.Hash(), + Height: txres.Height, + }, nil + } } } } -// UnconfirmedTxs gets unconfirmed transactions (maximum ?limit entries) -// including their number. 
+// UnconfirmedTxs gets unconfirmed transactions from the mempool in order of priority // More: https://docs.tendermint.com/master/rpc/#/Info/unconfirmed_txs -func (env *Environment) UnconfirmedTxs(ctx *rpctypes.Context, limitPtr *int) (*coretypes.ResultUnconfirmedTxs, error) { - // reuse per_page validator - limit := env.validatePerPage(limitPtr) +func (env *Environment) UnconfirmedTxs(ctx context.Context, req *coretypes.RequestUnconfirmedTxs) (*coretypes.ResultUnconfirmedTxs, error) { + totalCount := env.Mempool.Size() + perPage := env.validatePerPage(req.PerPage.IntPtr()) + page, err := validatePage(req.Page.IntPtr(), perPage, totalCount) + if err != nil { + return nil, err + } + + skipCount := validateSkipCount(page, perPage) + + txs := env.Mempool.ReapMaxTxs(skipCount + tmmath.MinInt(perPage, totalCount-skipCount)) + result := txs[skipCount:] - txs := env.Mempool.ReapMaxTxs(limit) return &coretypes.ResultUnconfirmedTxs{ - Count: len(txs), - Total: env.Mempool.Size(), + Count: len(result), + Total: totalCount, TotalBytes: env.Mempool.SizeBytes(), - Txs: txs}, nil + Txs: result, + }, nil } // NumUnconfirmedTxs gets number of unconfirmed transactions. // More: https://docs.tendermint.com/master/rpc/#/Info/num_unconfirmed_txs -func (env *Environment) NumUnconfirmedTxs(ctx *rpctypes.Context) (*coretypes.ResultUnconfirmedTxs, error) { +func (env *Environment) NumUnconfirmedTxs(ctx context.Context) (*coretypes.ResultUnconfirmedTxs, error) { return &coretypes.ResultUnconfirmedTxs{ Count: env.Mempool.Size(), Total: env.Mempool.Size(), @@ -150,14 +177,14 @@ func (env *Environment) NumUnconfirmedTxs(ctx *rpctypes.Context) (*coretypes.Res // CheckTx checks the transaction without executing it. The transaction won't // be added to the mempool either. // More: https://docs.tendermint.com/master/rpc/#/Tx/check_tx -func (env *Environment) CheckTx(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultCheckTx, error) { - res, err := env.ProxyAppMempool.CheckTxSync(ctx.Context(), abci.RequestCheckTx{Tx: tx}) +func (env *Environment) CheckTx(ctx context.Context, req *coretypes.RequestCheckTx) (*coretypes.ResultCheckTx, error) { + res, err := env.ProxyApp.CheckTx(ctx, &abci.RequestCheckTx{Tx: req.Tx}) if err != nil { return nil, err } return &coretypes.ResultCheckTx{ResponseCheckTx: *res}, nil } -func (env *Environment) RemoveTx(ctx *rpctypes.Context, txkey types.TxKey) error { - return env.Mempool.RemoveTxByKey(txkey) +func (env *Environment) RemoveTx(ctx context.Context, req *coretypes.RequestRemoveTx) error { + return env.Mempool.RemoveTxByKey(req.TxKey) } diff --git a/internal/rpc/core/net.go b/internal/rpc/core/net.go index 8bcc04dd0c..b18f1e2fc5 100644 --- a/internal/rpc/core/net.go +++ b/internal/rpc/core/net.go @@ -1,122 +1,42 @@ package core import ( + "context" "errors" "fmt" - "strings" - "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/rpc/coretypes" - rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) // NetInfo returns network info. 
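(The pagination in the new UnconfirmedTxs above reaps the mempool through the end of the requested page, then slices off the earlier pages. A toy rendition of the arithmetic, with the page and per-page validation reduced to plain numbers; the real validatePage and validateSkipCount behave similarly but with error handling:)

    package main

    import "fmt"

    func min(a, b int) int {
    	if a < b {
    		return a
    	}
    	return b
    }

    func main() {
    	total := 23   // total txs in the mempool
    	perPage := 10 // validated page size
    	page := 3     // validated page number (1-based)

    	skip := (page - 1) * perPage            // txs belonging to earlier pages
    	reap := skip + min(perPage, total-skip) // reap through the end of this page

    	txs := make([]int, reap) // stand-in for Mempool.ReapMaxTxs(reap)
    	result := txs[skip:]     // keep only the requested page

    	fmt.Printf("reaped %d, returned %d (txs %d..%d of %d)\n",
    		reap, len(result), skip, reap-1, total)
    	// Output: reaped 23, returned 3 (txs 20..22 of 23)
    }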
// More: https://docs.tendermint.com/master/rpc/#/Info/net_info -func (env *Environment) NetInfo(ctx *rpctypes.Context) (*coretypes.ResultNetInfo, error) { - var peers []coretypes.Peer - - switch { - case env.P2PPeers != nil: - peersList := env.P2PPeers.Peers().List() - peers = make([]coretypes.Peer, 0, len(peersList)) - for _, peer := range peersList { - peers = append(peers, coretypes.Peer{ - ID: peer.ID(), - URL: peer.SocketAddr().String(), - }) +func (env *Environment) NetInfo(ctx context.Context) (*coretypes.ResultNetInfo, error) { + peerList := env.PeerManager.Peers() + + peers := make([]coretypes.Peer, 0, len(peerList)) + for _, peer := range peerList { + addrs := env.PeerManager.Addresses(peer) + if len(addrs) == 0 { + continue } - case env.PeerManager != nil: - peerList := env.PeerManager.Peers() - for _, peer := range peerList { - addrs := env.PeerManager.Addresses(peer) - if len(addrs) == 0 { - continue - } - peers = append(peers, coretypes.Peer{ - ID: peer, - URL: addrs[0].String(), - }) - } - default: - return nil, errors.New("peer management system does not support NetInfo responses") + peers = append(peers, coretypes.Peer{ + ID: peer, + URL: addrs[0].String(), + }) } return &coretypes.ResultNetInfo{ - Listening: env.P2PTransport.IsListening(), - Listeners: env.P2PTransport.Listeners(), + Listening: env.IsListening, + Listeners: env.Listeners, NPeers: len(peers), Peers: peers, }, nil } -// UnsafeDialSeeds dials the given seeds (comma-separated id@IP:PORT). -func (env *Environment) UnsafeDialSeeds(ctx *rpctypes.Context, seeds []string) (*coretypes.ResultDialSeeds, error) { - if env.P2PPeers == nil { - return nil, errors.New("peer management system does not support this operation") - } - - if len(seeds) == 0 { - return &coretypes.ResultDialSeeds{}, fmt.Errorf("%w: no seeds provided", coretypes.ErrInvalidRequest) - } - env.Logger.Info("DialSeeds", "seeds", seeds) - if err := env.P2PPeers.DialPeersAsync(seeds); err != nil { - return &coretypes.ResultDialSeeds{}, err - } - return &coretypes.ResultDialSeeds{Log: "Dialing seeds in progress. See /net_info for details"}, nil -} - -// UnsafeDialPeers dials the given peers (comma-separated id@IP:PORT), -// optionally making them persistent. -func (env *Environment) UnsafeDialPeers( - ctx *rpctypes.Context, - peers []string, - persistent, unconditional, private bool) (*coretypes.ResultDialPeers, error) { - - if env.P2PPeers == nil { - return nil, errors.New("peer management system does not support this operation") - } - - if len(peers) == 0 { - return &coretypes.ResultDialPeers{}, fmt.Errorf("%w: no peers provided", coretypes.ErrInvalidRequest) - } - - ids, err := getIDs(peers) - if err != nil { - return &coretypes.ResultDialPeers{}, err - } - - env.Logger.Info("DialPeers", "peers", peers, "persistent", - persistent, "unconditional", unconditional, "private", private) - - if persistent { - if err := env.P2PPeers.AddPersistentPeers(peers); err != nil { - return &coretypes.ResultDialPeers{}, err - } - } - - if private { - if err := env.P2PPeers.AddPrivatePeerIDs(ids); err != nil { - return &coretypes.ResultDialPeers{}, err - } - } - - if unconditional { - if err := env.P2PPeers.AddUnconditionalPeerIDs(ids); err != nil { - return &coretypes.ResultDialPeers{}, err - } - } - - if err := env.P2PPeers.DialPeersAsync(peers); err != nil { - return &coretypes.ResultDialPeers{}, err - } - - return &coretypes.ResultDialPeers{Log: "Dialing peers in progress. See /net_info for details"}, nil -} - // Genesis returns genesis file. 
// More: https://docs.tendermint.com/master/rpc/#/Info/genesis -func (env *Environment) Genesis(ctx *rpctypes.Context) (*coretypes.ResultGenesis, error) { +func (env *Environment) Genesis(ctx context.Context) (*coretypes.ResultGenesis, error) { if len(env.genChunks) > 1 { return nil, errors.New("genesis response is large, please use the genesis_chunked API instead") } @@ -124,7 +44,7 @@ func (env *Environment) Genesis(ctx *rpctypes.Context) (*coretypes.ResultGenesis return &coretypes.ResultGenesis{Genesis: env.GenDoc}, nil } -func (env *Environment) GenesisChunked(ctx *rpctypes.Context, chunk uint) (*coretypes.ResultGenesisChunk, error) { +func (env *Environment) GenesisChunked(ctx context.Context, req *coretypes.RequestGenesisChunked) (*coretypes.ResultGenesisChunk, error) { if env.genChunks == nil { return nil, fmt.Errorf("service configuration error, genesis chunks are not initialized") } @@ -133,7 +53,7 @@ func (env *Environment) GenesisChunked(ctx *rpctypes.Context, chunk uint) (*core return nil, fmt.Errorf("service configuration error, there are no chunks") } - id := int(chunk) + id := int(req.Chunk) if id > len(env.genChunks)-1 { return nil, fmt.Errorf("there are %d chunks, %d is invalid", len(env.genChunks)-1, id) @@ -145,18 +65,3 @@ func (env *Environment) GenesisChunked(ctx *rpctypes.Context, chunk uint) (*core Data: env.genChunks[id], }, nil } - -func getIDs(peers []string) ([]string, error) { - ids := make([]string, 0, len(peers)) - - for _, peer := range peers { - - spl := strings.Split(peer, "@") - if len(spl) != 2 { - return nil, p2p.ErrNetAddressNoID{Addr: peer} - } - ids = append(ids, spl[0]) - - } - return ids, nil -} diff --git a/internal/rpc/core/net_test.go b/internal/rpc/core/net_test.go deleted file mode 100644 index 3894988a93..0000000000 --- a/internal/rpc/core/net_test.go +++ /dev/null @@ -1,89 +0,0 @@ -package core - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/libs/log" - rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" -) - -func TestUnsafeDialSeeds(t *testing.T) { - sw := p2p.MakeSwitch(config.DefaultP2PConfig(), 1, "testing", "123.123.123", nil, - func(n int, sw *p2p.Switch) *p2p.Switch { return sw }, log.TestingLogger()) - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - - env := &Environment{} - env.Logger = log.TestingLogger() - env.P2PPeers = sw - - testCases := []struct { - seeds []string - isErr bool - }{ - {[]string{}, true}, - {[]string{"d51fb70907db1c6c2d5237e78379b25cf1a37ab4@127.0.0.1:41198"}, false}, - {[]string{"127.0.0.1:41198"}, true}, - } - - for _, tc := range testCases { - res, err := env.UnsafeDialSeeds(&rpctypes.Context{}, tc.seeds) - if tc.isErr { - assert.Error(t, err) - } else { - assert.NoError(t, err) - assert.NotNil(t, res) - } - } -} - -func TestUnsafeDialPeers(t *testing.T) { - sw := p2p.MakeSwitch(config.DefaultP2PConfig(), 1, "testing", "123.123.123", nil, - func(n int, sw *p2p.Switch) *p2p.Switch { return sw }, log.TestingLogger()) - sw.SetAddrBook(&p2p.AddrBookMock{ - Addrs: make(map[string]struct{}), - OurAddrs: make(map[string]struct{}), - PrivateAddrs: make(map[string]struct{}), - }) - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - - env := &Environment{} - env.Logger = 
log.TestingLogger() - env.P2PPeers = sw - - testCases := []struct { - peers []string - persistence, unconditional, private bool - isErr bool - }{ - {[]string{}, false, false, false, true}, - {[]string{"d51fb70907db1c6c2d5237e78379b25cf1a37ab4@127.0.0.1:41198"}, true, true, true, false}, - {[]string{"127.0.0.1:41198"}, true, true, false, true}, - } - - for _, tc := range testCases { - res, err := env.UnsafeDialPeers(&rpctypes.Context{}, tc.peers, tc.persistence, tc.unconditional, tc.private) - if tc.isErr { - assert.Error(t, err) - } else { - assert.NoError(t, err) - assert.NotNil(t, res) - } - } -} diff --git a/internal/rpc/core/routes.go b/internal/rpc/core/routes.go index 7399490823..4bc1ca4140 100644 --- a/internal/rpc/core/routes.go +++ b/internal/rpc/core/routes.go @@ -1,6 +1,9 @@ package core import ( + "context" + + "github.com/tendermint/tendermint/rpc/coretypes" rpc "github.com/tendermint/tendermint/rpc/jsonrpc/server" ) @@ -8,57 +11,111 @@ import ( type RoutesMap map[string]*rpc.RPCFunc -// Routes is a map of available routes. -func (env *Environment) GetRoutes() RoutesMap { - return RoutesMap{ - // subscribe/unsubscribe are reserved for websocket events. - "subscribe": rpc.NewWSRPCFunc(env.Subscribe, "query"), - "unsubscribe": rpc.NewWSRPCFunc(env.Unsubscribe, "query"), - "unsubscribe_all": rpc.NewWSRPCFunc(env.UnsubscribeAll, ""), +// RouteOptions provide optional settings to NewRoutesMap. A nil *RouteOptions +// is ready for use and provides defaults as specified. +type RouteOptions struct { + Unsafe bool // include "unsafe" methods (default false) +} + +// NewRoutesMap constructs an RPC routing map for the given service +// implementation. If svc implements RPCUnsafe and opts.Unsafe is true, the +// "unsafe" methods will also be added to the map. The caller may also edit the +// map after construction; each call to NewRoutesMap returns a fresh map. +func NewRoutesMap(svc RPCService, opts *RouteOptions) RoutesMap { + if opts == nil { + opts = new(RouteOptions) + } + out := RoutesMap{ + // Event subscription. Note that subscribe, unsubscribe, and + // unsubscribe_all are only available via the websocket endpoint. 
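(Usage-wise, the table built below is plain data owned by the caller. A hedged sketch of the intended call pattern; buildRoutes and the status_alias route are hypothetical, while NewRoutesMap, RouteOptions, and the rpc import alias are the ones defined in this file:)

    // buildRoutes is a hypothetical helper, not part of this diff.
    func buildRoutes(env *core.Environment, unsafe bool) core.RoutesMap {
    	routes := core.NewRoutesMap(env, &core.RouteOptions{Unsafe: unsafe})

    	// Each call returns a fresh map, so editing it afterwards is safe,
    	// for example aliasing an existing handler (illustrative only):
    	routes["status_alias"] = rpc.NewRPCFunc(env.Status)
    	return routes
    }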
+ "events": rpc.NewRPCFunc(svc.Events), + "subscribe": rpc.NewWSRPCFunc(svc.Subscribe), + "unsubscribe": rpc.NewWSRPCFunc(svc.Unsubscribe), + "unsubscribe_all": rpc.NewWSRPCFunc(svc.UnsubscribeAll), // info API - "health": rpc.NewRPCFunc(env.Health, "", false), - "status": rpc.NewRPCFunc(env.Status, "", false), - "net_info": rpc.NewRPCFunc(env.NetInfo, "", false), - "blockchain": rpc.NewRPCFunc(env.BlockchainInfo, "minHeight,maxHeight", true), - "genesis": rpc.NewRPCFunc(env.Genesis, "", true), - "genesis_chunked": rpc.NewRPCFunc(env.GenesisChunked, "chunk", true), - "header": rpc.NewRPCFunc(env.Header, "height", true), - "header_by_hash": rpc.NewRPCFunc(env.HeaderByHash, "hash", true), - "block": rpc.NewRPCFunc(env.Block, "height", true), - "block_by_hash": rpc.NewRPCFunc(env.BlockByHash, "hash", true), - "block_results": rpc.NewRPCFunc(env.BlockResults, "height", true), - "commit": rpc.NewRPCFunc(env.Commit, "height", true), - "check_tx": rpc.NewRPCFunc(env.CheckTx, "tx", true), - "remove_tx": rpc.NewRPCFunc(env.RemoveTx, "txkey", false), - "tx": rpc.NewRPCFunc(env.Tx, "hash,prove", true), - "tx_search": rpc.NewRPCFunc(env.TxSearch, "query,prove,page,per_page,order_by", false), - "block_search": rpc.NewRPCFunc(env.BlockSearch, "query,page,per_page,order_by", false), - "validators": rpc.NewRPCFunc(env.Validators, "height,page,per_page,request_quorum_info", true), - "dump_consensus_state": rpc.NewRPCFunc(env.DumpConsensusState, "", false), - "consensus_state": rpc.NewRPCFunc(env.GetConsensusState, "", false), - "consensus_params": rpc.NewRPCFunc(env.ConsensusParams, "height", true), - "unconfirmed_txs": rpc.NewRPCFunc(env.UnconfirmedTxs, "limit", false), - "num_unconfirmed_txs": rpc.NewRPCFunc(env.NumUnconfirmedTxs, "", false), + "health": rpc.NewRPCFunc(svc.Health), + "status": rpc.NewRPCFunc(svc.Status), + "net_info": rpc.NewRPCFunc(svc.NetInfo), + "blockchain": rpc.NewRPCFunc(svc.BlockchainInfo), + "genesis": rpc.NewRPCFunc(svc.Genesis), + "genesis_chunked": rpc.NewRPCFunc(svc.GenesisChunked), + "header": rpc.NewRPCFunc(svc.Header), + "header_by_hash": rpc.NewRPCFunc(svc.HeaderByHash), + "block": rpc.NewRPCFunc(svc.Block), + "block_by_hash": rpc.NewRPCFunc(svc.BlockByHash), + "block_results": rpc.NewRPCFunc(svc.BlockResults), + "commit": rpc.NewRPCFunc(svc.Commit), + "check_tx": rpc.NewRPCFunc(svc.CheckTx), + "remove_tx": rpc.NewRPCFunc(svc.RemoveTx), + "tx": rpc.NewRPCFunc(svc.Tx), + "tx_search": rpc.NewRPCFunc(svc.TxSearch), + "block_search": rpc.NewRPCFunc(svc.BlockSearch), + "validators": rpc.NewRPCFunc(svc.Validators), + "dump_consensus_state": rpc.NewRPCFunc(svc.DumpConsensusState), + "consensus_state": rpc.NewRPCFunc(svc.GetConsensusState), + "consensus_params": rpc.NewRPCFunc(svc.ConsensusParams), + "unconfirmed_txs": rpc.NewRPCFunc(svc.UnconfirmedTxs), + "num_unconfirmed_txs": rpc.NewRPCFunc(svc.NumUnconfirmedTxs), // tx broadcast API - "broadcast_tx_commit": rpc.NewRPCFunc(env.BroadcastTxCommit, "tx", false), - "broadcast_tx_sync": rpc.NewRPCFunc(env.BroadcastTxSync, "tx", false), - "broadcast_tx_async": rpc.NewRPCFunc(env.BroadcastTxAsync, "tx", false), + "broadcast_tx_commit": rpc.NewRPCFunc(svc.BroadcastTxCommit), + "broadcast_tx_sync": rpc.NewRPCFunc(svc.BroadcastTxSync), + "broadcast_tx_async": rpc.NewRPCFunc(svc.BroadcastTxAsync), // abci API - "abci_query": rpc.NewRPCFunc(env.ABCIQuery, "path,data,height,prove", false), - "abci_info": rpc.NewRPCFunc(env.ABCIInfo, "", true), + "abci_query": rpc.NewRPCFunc(svc.ABCIQuery), + "abci_info": rpc.NewRPCFunc(svc.ABCIInfo), // 
evidence API - "broadcast_evidence": rpc.NewRPCFunc(env.BroadcastEvidence, "evidence", false), + "broadcast_evidence": rpc.NewRPCFunc(svc.BroadcastEvidence), } + if u, ok := svc.(RPCUnsafe); ok && opts.Unsafe { + out["unsafe_flush_mempool"] = rpc.NewRPCFunc(u.UnsafeFlushMempool) + } + return out +} + +// RPCService defines the set of methods exported by the RPC service +// implementation, for use in constructing a routing table. +type RPCService interface { + ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) + ABCIQuery(ctx context.Context, req *coretypes.RequestABCIQuery) (*coretypes.ResultABCIQuery, error) + Block(ctx context.Context, req *coretypes.RequestBlockInfo) (*coretypes.ResultBlock, error) + BlockByHash(ctx context.Context, req *coretypes.RequestBlockByHash) (*coretypes.ResultBlock, error) + BlockResults(ctx context.Context, req *coretypes.RequestBlockInfo) (*coretypes.ResultBlockResults, error) + BlockSearch(ctx context.Context, req *coretypes.RequestBlockSearch) (*coretypes.ResultBlockSearch, error) + BlockchainInfo(ctx context.Context, req *coretypes.RequestBlockchainInfo) (*coretypes.ResultBlockchainInfo, error) + BroadcastEvidence(ctx context.Context, req *coretypes.RequestBroadcastEvidence) (*coretypes.ResultBroadcastEvidence, error) + BroadcastTxAsync(ctx context.Context, req *coretypes.RequestBroadcastTx) (*coretypes.ResultBroadcastTx, error) + BroadcastTxCommit(ctx context.Context, req *coretypes.RequestBroadcastTx) (*coretypes.ResultBroadcastTxCommit, error) + BroadcastTxSync(ctx context.Context, req *coretypes.RequestBroadcastTx) (*coretypes.ResultBroadcastTx, error) + CheckTx(ctx context.Context, req *coretypes.RequestCheckTx) (*coretypes.ResultCheckTx, error) + Commit(ctx context.Context, req *coretypes.RequestBlockInfo) (*coretypes.ResultCommit, error) + ConsensusParams(ctx context.Context, req *coretypes.RequestConsensusParams) (*coretypes.ResultConsensusParams, error) + DumpConsensusState(ctx context.Context) (*coretypes.ResultDumpConsensusState, error) + Events(ctx context.Context, req *coretypes.RequestEvents) (*coretypes.ResultEvents, error) + Genesis(ctx context.Context) (*coretypes.ResultGenesis, error) + GenesisChunked(ctx context.Context, req *coretypes.RequestGenesisChunked) (*coretypes.ResultGenesisChunk, error) + GetConsensusState(ctx context.Context) (*coretypes.ResultConsensusState, error) + Header(ctx context.Context, req *coretypes.RequestBlockInfo) (*coretypes.ResultHeader, error) + HeaderByHash(ctx context.Context, req *coretypes.RequestBlockByHash) (*coretypes.ResultHeader, error) + Health(ctx context.Context) (*coretypes.ResultHealth, error) + NetInfo(ctx context.Context) (*coretypes.ResultNetInfo, error) + NumUnconfirmedTxs(ctx context.Context) (*coretypes.ResultUnconfirmedTxs, error) + RemoveTx(ctx context.Context, req *coretypes.RequestRemoveTx) error + Status(ctx context.Context) (*coretypes.ResultStatus, error) + Subscribe(ctx context.Context, req *coretypes.RequestSubscribe) (*coretypes.ResultSubscribe, error) + Tx(ctx context.Context, req *coretypes.RequestTx) (*coretypes.ResultTx, error) + TxSearch(ctx context.Context, req *coretypes.RequestTxSearch) (*coretypes.ResultTxSearch, error) + UnconfirmedTxs(ctx context.Context, req *coretypes.RequestUnconfirmedTxs) (*coretypes.ResultUnconfirmedTxs, error) + Unsubscribe(ctx context.Context, req *coretypes.RequestUnsubscribe) (*coretypes.ResultUnsubscribe, error) + UnsubscribeAll(ctx context.Context) (*coretypes.ResultUnsubscribe, error) + Validators(ctx context.Context, req 
*coretypes.RequestValidators) (*coretypes.ResultValidators, error) } -// AddUnsafeRoutes adds unsafe routes. -func (env *Environment) AddUnsafe(routes RoutesMap) { - // control API - routes["dial_seeds"] = rpc.NewRPCFunc(env.UnsafeDialSeeds, "seeds", false) - routes["dial_peers"] = rpc.NewRPCFunc(env.UnsafeDialPeers, "peers,persistent,unconditional,private", false) - routes["unsafe_flush_mempool"] = rpc.NewRPCFunc(env.UnsafeFlushMempool, "", false) +// RPCUnsafe defines the set of "unsafe" methods that may optionally be +// exported by the RPC service. +type RPCUnsafe interface { + UnsafeFlushMempool(ctx context.Context) (*coretypes.ResultUnsafeFlushMempool, error) } diff --git a/internal/rpc/core/status.go b/internal/rpc/core/status.go index 678a930175..85a788094d 100644 --- a/internal/rpc/core/status.go +++ b/internal/rpc/core/status.go @@ -1,18 +1,19 @@ package core import ( + "context" + "fmt" "time" tmbytes "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/rpc/coretypes" - rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" ) // Status returns Tendermint status including node info, pubkey, latest block // hash, app hash, block height, current max peer block height, and time. // More: https://docs.tendermint.com/master/rpc/#/Info/status -func (env *Environment) Status(ctx *rpctypes.Context) (*coretypes.ResultStatus, error) { +func (env *Environment) Status(ctx context.Context) (*coretypes.ResultStatus, error) { var ( earliestBlockHeight int64 earliestBlockHash tmbytes.HexBytes @@ -51,8 +52,14 @@ func (env *Environment) Status(ctx *rpctypes.Context) (*coretypes.ResultStatus, validatorInfo.ProTxHash = env.ProTxHash } + var applicationInfo coretypes.ApplicationInfo + if abciInfo, err := env.ABCIInfo(ctx); err == nil { + applicationInfo.Version = fmt.Sprint(abciInfo.Response.AppVersion) + } + result := &coretypes.ResultStatus{ - NodeInfo: env.P2PTransport.NodeInfo(), + NodeInfo: env.NodeInfo, + ApplicationInfo: applicationInfo, SyncInfo: coretypes.SyncInfo{ LatestBlockHash: latestBlockHash, LatestAppHash: latestAppHash, @@ -62,14 +69,25 @@ func (env *Environment) Status(ctx *rpctypes.Context) (*coretypes.ResultStatus, EarliestAppHash: earliestAppHash, EarliestBlockHeight: earliestBlockHeight, EarliestBlockTime: time.Unix(0, earliestBlockTimeNano), - MaxPeerBlockHeight: env.BlockSyncReactor.GetMaxPeerBlockHeight(), - CatchingUp: env.ConsensusReactor.WaitSync(), - TotalSyncedTime: env.BlockSyncReactor.GetTotalSyncedTime(), - RemainingTime: env.BlockSyncReactor.GetRemainingSyncTime(), + // CatchingUp starts out true: if consensus has not started + // yet, the node is by definition catching up. It is + // overwritten below with the reactor's actual sync state + // once the consensus reactor is available. 
+ CatchingUp: true, }, ValidatorInfo: validatorInfo, } + if env.ConsensusReactor != nil { + result.SyncInfo.CatchingUp = env.ConsensusReactor.WaitSync() + } + + if env.BlockSyncReactor != nil { + result.SyncInfo.MaxPeerBlockHeight = env.BlockSyncReactor.GetMaxPeerBlockHeight() + result.SyncInfo.TotalSyncedTime = env.BlockSyncReactor.GetTotalSyncedTime() + result.SyncInfo.RemainingTime = env.BlockSyncReactor.GetRemainingSyncTime() + } + if env.StateSyncMetricer != nil { result.SyncInfo.TotalSnapshots = env.StateSyncMetricer.TotalSnapshots() result.SyncInfo.ChunkProcessAvgTime = env.StateSyncMetricer.ChunkProcessAvgTime() diff --git a/internal/rpc/core/tx.go b/internal/rpc/core/tx.go index 7ba2bf90c0..cd643b8441 100644 --- a/internal/rpc/core/tx.go +++ b/internal/rpc/core/tx.go @@ -1,16 +1,15 @@ package core import ( + "context" "errors" "fmt" "sort" + tmquery "github.com/tendermint/tendermint/internal/pubsub/query" "github.com/tendermint/tendermint/internal/state/indexer" - "github.com/tendermint/tendermint/libs/bytes" tmmath "github.com/tendermint/tendermint/libs/math" - tmquery "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/rpc/coretypes" - rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" ) @@ -18,37 +17,29 @@ import ( // transaction is in the mempool, invalidated, or was not sent in the first // place. // More: https://docs.tendermint.com/master/rpc/#/Info/tx -func (env *Environment) Tx(ctx *rpctypes.Context, hash bytes.HexBytes, prove bool) (*coretypes.ResultTx, error) { +func (env *Environment) Tx(ctx context.Context, req *coretypes.RequestTx) (*coretypes.ResultTx, error) { // if index is disabled, return error - - // N.B. The hash parameter is HexBytes so that the reflective parameter - // decoding logic in the HTTP service will correctly translate from JSON. - // See https://github.com/tendermint/tendermint/issues/6802 for context. - if !indexer.KVSinkEnabled(env.EventSinks) { return nil, errors.New("transaction querying is disabled due to no kvEventSink") } for _, sink := range env.EventSinks { if sink.Type() == indexer.KV { - r, err := sink.GetTxByHash(hash) + r, err := sink.GetTxByHash(req.Hash) if r == nil { - return nil, fmt.Errorf("tx (%X) not found, err: %w", hash, err) + return nil, fmt.Errorf("tx (%X) not found, err: %w", req.Hash, err) } - height := r.Height - index := r.Index - var proof types.TxProof - if prove { - block := env.BlockStore.LoadBlock(height) - proof = block.Data.Txs.Proof(int(index)) // XXX: overflow on 32-bit machines + if req.Prove { + block := env.BlockStore.LoadBlock(r.Height) + proof = block.Data.Txs.Proof(int(r.Index)) } return &coretypes.ResultTx{ - Hash: hash, - Height: height, - Index: index, + Hash: req.Hash, + Height: r.Height, + Index: r.Index, TxResult: r.Result, Tx: r.Tx, Proof: proof, @@ -62,34 +53,27 @@ func (env *Environment) Tx(ctx *rpctypes.Context, hash bytes.HexBytes, prove boo // TxSearch allows you to query for multiple transactions results. It returns a // list of transactions (maximum ?per_page entries) and the total count. 
// More: https://docs.tendermint.com/master/rpc/#/Info/tx_search -func (env *Environment) TxSearch( - ctx *rpctypes.Context, - query string, - prove bool, - pagePtr, perPagePtr *int, - orderBy string, -) (*coretypes.ResultTxSearch, error) { - +func (env *Environment) TxSearch(ctx context.Context, req *coretypes.RequestTxSearch) (*coretypes.ResultTxSearch, error) { if !indexer.KVSinkEnabled(env.EventSinks) { return nil, fmt.Errorf("transaction searching is disabled due to no kvEventSink") - } else if len(query) > maxQueryLength { + } else if len(req.Query) > maxQueryLength { return nil, errors.New("maximum query length exceeded") } - q, err := tmquery.New(query) + q, err := tmquery.New(req.Query) if err != nil { return nil, err } for _, sink := range env.EventSinks { if sink.Type() == indexer.KV { - results, err := sink.SearchTxEvents(ctx.Context(), q) + results, err := sink.SearchTxEvents(ctx, q) if err != nil { return nil, err } // sort results (must be done before pagination) - switch orderBy { + switch req.OrderBy { case "desc", "": sort.Slice(results, func(i, j int) bool { if results[i].Height == results[j].Height { @@ -110,9 +94,9 @@ func (env *Environment) TxSearch( // paginate results totalCount := len(results) - perPage := env.validatePerPage(perPagePtr) + perPage := env.validatePerPage(req.PerPage.IntPtr()) - page, err := validatePage(pagePtr, perPage, totalCount) + page, err := validatePage(req.Page.IntPtr(), perPage, totalCount) if err != nil { return nil, err } @@ -125,9 +109,9 @@ func (env *Environment) TxSearch( r := results[i] var proof types.TxProof - if prove { + if req.Prove { block := env.BlockStore.LoadBlock(r.Height) - proof = block.Data.Txs.Proof(int(r.Index)) // XXX: overflow on 32-bit machines + proof = block.Data.Txs.Proof(int(r.Index)) } apiResults = append(apiResults, &coretypes.ResultTx{ diff --git a/internal/state/errors.go b/internal/state/errors.go index 6e0cdfa479..e8ad776f46 100644 --- a/internal/state/errors.go +++ b/internal/state/errors.go @@ -39,6 +39,7 @@ type ( ErrNoValSetForHeight struct { Height int64 + Err error } ErrNoConsensusParamsForHeight struct { @@ -89,9 +90,14 @@ func (e ErrStateMismatch) Error() string { } func (e ErrNoValSetForHeight) Error() string { - return fmt.Sprintf("could not find validator set for height #%d", e.Height) + if e.Err == nil { + return fmt.Sprintf("could not find validator set for height #%d", e.Height) + } + return fmt.Sprintf("could not find validator set for height #%d: %s", e.Height, e.Err.Error()) } +func (e ErrNoValSetForHeight) Unwrap() error { return e.Err } + func (e ErrNoConsensusParamsForHeight) Error() string { return fmt.Sprintf("could not find consensus params for height #%d", e.Height) } diff --git a/internal/state/execution.go b/internal/state/execution.go index 0139c4563c..30a1e27137 100644 --- a/internal/state/execution.go +++ b/internal/state/execution.go @@ -7,15 +7,17 @@ import ( "fmt" "time" + abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto" cryptoenc "github.com/tendermint/tendermint/crypto/encoding" - "github.com/tendermint/tendermint/internal/libs/fail" + "github.com/tendermint/tendermint/crypto/merkle" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/mempool" - "github.com/tendermint/tendermint/internal/proxy" tmbytes "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/libs/log" tmstate 
"github.com/tendermint/tendermint/proto/tendermint/state" + tmtypes "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" ) @@ -33,8 +35,7 @@ type BlockExecutor struct { blockStore BlockStore // execute the app against this - proxyApp proxy.AppConnConsensus - queryApp proxy.AppConnQuery + appClient abciclient.Client // events eventBus types.BlockEventPublisher @@ -43,89 +44,63 @@ type BlockExecutor struct { // and update both with block results after commit. mempool mempool.Mempool evpool EvidencePool - // the next core chain lock that we can propose - NextCoreChainLock *types.CoreChainLock logger log.Logger metrics *Metrics appHashSize int + // cache the verification results over a single height cache map[string]struct{} -} - -type BlockExecutorOption func(executor *BlockExecutor) - -func BlockExecutorWithMetrics(metrics *Metrics) BlockExecutorOption { - return func(blockExec *BlockExecutor) { - blockExec.metrics = metrics - } -} -// BlockExecutorWithAppHashSize is used to specify app-hash-size -func BlockExecutorWithAppHashSize(size int) BlockExecutorOption { - return func(blockExec *BlockExecutor) { - blockExec.appHashSize = size - } + // the next core chain lock that we can propose + NextCoreChainLock *types.CoreChainLock } -// NewBlockExecutor returns a new BlockExecutor with a NopEventBus. -// Call SetEventBus to provide one. +// NewBlockExecutor returns a new BlockExecutor with the passed-in EventBus. func NewBlockExecutor( stateStore Store, logger log.Logger, - proxyApp proxy.AppConnConsensus, - queryApp proxy.AppConnQuery, - mempool mempool.Mempool, + appClient abciclient.Client, + pool mempool.Mempool, evpool EvidencePool, blockStore BlockStore, - nextCoreChainLock *types.CoreChainLock, - options ...BlockExecutorOption, + eventBus *eventbus.EventBus, + metrics *Metrics, ) *BlockExecutor { - res := &BlockExecutor{ - store: stateStore, - proxyApp: proxyApp, - queryApp: queryApp, - eventBus: types.NopEventBus{}, - mempool: mempool, - evpool: evpool, - NextCoreChainLock: nextCoreChainLock, - logger: logger, - metrics: NopMetrics(), - // TODO: appHashSize should be read from config - appHashSize: crypto.DefaultAppHashSize, + return &BlockExecutor{ + eventBus: eventBus, + store: stateStore, + appClient: appClient, + mempool: pool, + evpool: evpool, + logger: logger, + metrics: metrics, cache: make(map[string]struct{}), blockStore: blockStore, + appHashSize: 32, // TODO change on constant } - - for _, option := range options { - option(res) - } - - return res } func (blockExec *BlockExecutor) Store() Store { return blockExec.store } -// SetEventBus - sets the event bus for publishing block related events. -// If not called, it defaults to types.NopEventBus. -func (blockExec *BlockExecutor) SetEventBus(eventBus types.BlockEventPublisher) { - blockExec.eventBus = eventBus -} - // CreateProposalBlock calls state.MakeBlock with evidence from the evpool // and txs from the mempool. The max bytes must be big enough to fit the commit. // Up to 1/10th of the block space is allocated for maximum sized evidence. // The rest is given to txs, up to the max gas. +// +// Contract: application will not return more bytes than are sent over the wire. 
func (blockExec *BlockExecutor) CreateProposalBlock( + ctx context.Context, height int64, state State, commit *types.Commit, proposerProTxHash []byte, proposedAppVersion uint64, -) (*types.Block, *types.PartSet) { + votes []*types.Vote, +) (*types.Block, error) { maxBytes := state.ConsensusParams.Block.MaxBytes maxGas := state.ConsensusParams.Block.MaxGas @@ -135,10 +110,7 @@ func (blockExec *BlockExecutor) CreateProposalBlock( // Fetch a limited amount of valid txs maxDataBytes := types.MaxDataBytes(maxBytes, crypto.BLS12381, evSize, state.Validators.Size()) - txs := blockExec.mempool.ReapMaxBytesMaxGas(maxDataBytes, maxGas) - nextCoreChainLock := blockExec.NextCoreChainLock - if nextCoreChainLock != nil && nextCoreChainLock.CoreBlockHeight <= state.LastCoreChainLockedBlockHeight { nextCoreChainLock = nil @@ -149,22 +121,88 @@ func (blockExec *BlockExecutor) CreateProposalBlock( proposedAppVersion = 0 } + txs := blockExec.mempool.ReapMaxBytesMaxGas(maxDataBytes, maxGas) + block := state.MakeBlock(height, nextCoreChainLock, txs, commit, evidence, proposerProTxHash, proposedAppVersion) + + localLastCommit := buildLastCommitInfo(block, blockExec.store, state.InitialHeight) + rpp, err := blockExec.appClient.PrepareProposal( + ctx, + &abci.RequestPrepareProposal{ + MaxTxBytes: maxDataBytes, + Txs: block.Txs.ToSliceOfBytes(), + LocalLastCommit: extendedCommitInfo(localLastCommit, votes), + ByzantineValidators: block.Evidence.ToABCI(), + Height: block.Height, + Time: block.Time, + NextValidatorsHash: block.NextValidatorsHash, + ProposerProTxHash: block.ProposerProTxHash, + }, + ) + if err != nil { + // The App MUST ensure that only valid (and hence 'processable') transactions + // enter the mempool. Hence, at this point, we can't have any non-processable + // transaction causing an error. + // + // Also, the App can simply skip any transaction that could cause any kind of trouble. + // Either way, we cannot recover in a meaningful way, unless we skip proposing + // this block, repair what caused the error and try again. Hence, we return an + // error for now (the production code calling this function is expected to panic). 
+ return nil, err + } + txrSet := types.NewTxRecordSet(rpp.TxRecords) + + if err := txrSet.Validate(maxDataBytes, block.Txs); err != nil { + return nil, err + } + + for _, rtx := range txrSet.RemovedTxs() { + if err := blockExec.mempool.RemoveTxByKey(rtx.Key()); err != nil { + blockExec.logger.Debug("error removing transaction from the mempool", "error", err, "tx hash", rtx.Hash()) + } + } + itxs := txrSet.IncludedTxs() + return state.MakeBlock( height, nextCoreChainLock, - txs, + itxs, commit, evidence, proposerProTxHash, proposedAppVersion, - ) + ), nil +} + +func (blockExec *BlockExecutor) ProcessProposal( + ctx context.Context, + block *types.Block, + state State, +) (bool, error) { + resp, err := blockExec.appClient.ProcessProposal(ctx, &abci.RequestProcessProposal{ + Hash: block.Header.Hash(), + Height: block.Header.Height, + Time: block.Header.Time, + Txs: block.Data.Txs.ToSliceOfBytes(), + ProposedLastCommit: buildLastCommitInfo(block, blockExec.store, state.InitialHeight), + ByzantineValidators: block.Evidence.ToABCI(), + ProposerProTxHash: block.ProposerProTxHash, + NextValidatorsHash: block.NextValidatorsHash, + }) + if err != nil { + return false, ErrInvalidBlock(err) + } + if resp.IsStatusUnknown() { + panic(fmt.Sprintf("ProcessProposal responded with status %s", resp.Status.String())) + } + + return resp.IsAccepted(), nil } // ValidateBlock validates the given block against the given state. // If the block is invalid, it returns an error. // Validation does not mutate state, but does require historical information from the stateDB, // ie. to verify evidence from a validator at an old height. -func (blockExec *BlockExecutor) ValidateBlock(state State, block *types.Block) error { +func (blockExec *BlockExecutor) ValidateBlock(ctx context.Context, state State, block *types.Block) error { hash := block.Hash() if _, ok := blockExec.cache[hash.String()]; ok { return nil @@ -175,7 +213,7 @@ func (blockExec *BlockExecutor) ValidateBlock(state State, block *types.Block) e return err } - err = blockExec.evpool.CheckEvidence(block.Evidence.Evidence) + err = blockExec.evpool.CheckEvidence(ctx, block.Evidence) if err != nil { return err } @@ -188,8 +226,8 @@ func (blockExec *BlockExecutor) ValidateBlock(state State, block *types.Block) e // If the block is invalid, it returns an error. // Validation does not mutate state, but does require historical information from the stateDB, // ie. to verify evidence from a validator at an old height. -func (blockExec *BlockExecutor) ValidateBlockChainLock(state State, block *types.Block) error { - return validateBlockChainLock(blockExec.queryApp, state, block) +func (blockExec *BlockExecutor) ValidateBlockChainLock(ctx context.Context, state State, block *types.Block) error { + return validateBlockChainLock(ctx, blockExec.appClient, state, block) } // ValidateBlockTime validates the given block time against the given state. @@ -211,26 +249,30 @@ func (blockExec *BlockExecutor) ValidateBlockTime( // from outside this package to process and commit an entire block. // It takes a blockID to avoid recomputing the parts hash. 
func (blockExec *BlockExecutor) ApplyBlock( - state State, proTxHash crypto.ProTxHash, blockID types.BlockID, block *types.Block, -) (State, error) { - return blockExec.ApplyBlockWithLogger(state, proTxHash, blockID, block, blockExec.logger) -} - -// ApplyBlockWithLogger calls ApplyBlock with a specified logger making things easier for debugging -func (blockExec *BlockExecutor) ApplyBlockWithLogger( + ctx context.Context, state State, proTxHash crypto.ProTxHash, - blockID types.BlockID, - block *types.Block, - logger log.Logger, + blockID types.BlockID, block *types.Block, ) (State, error) { - if err := blockExec.ValidateBlock(state, block); err != nil { + + // validate the block if we haven't already + if err := blockExec.ValidateBlock(ctx, state, block); err != nil { return state, ErrInvalidBlock(err) } startTime := time.Now().UnixNano() - abciResponses, err := execBlockOnProxyApp( - logger, blockExec.proxyApp, block, blockExec.store, state.InitialHeight, + txs := block.Txs.ToSliceOfBytes() + finalizeBlockResponse, err := blockExec.appClient.FinalizeBlock( + ctx, + &abci.RequestFinalizeBlock{ + Hash: block.Hash(), + Height: block.Header.Height, + Time: block.Header.Time, + Txs: txs, + DecidedLastCommit: buildLastCommitInfo(block, blockExec.store, state.InitialHeight), + ByzantineValidators: block.Evidence.ToABCI(), + ProposerProTxHash: block.ProposerProTxHash, + NextValidatorsHash: block.NextValidatorsHash, + }, ) endTime := time.Now().UnixNano() blockExec.metrics.BlockProcessingTime.Observe(float64(endTime-startTime) / 1000000) @@ -238,34 +280,32 @@ func (blockExec *BlockExecutor) ApplyBlockWithLogger( return state, ErrProxyAppConn(err) } - fail.Fail() // XXX + abciResponses := &tmstate.ABCIResponses{ + FinalizeBlock: finalizeBlockResponse, + } // Save the results before we commit. if err := blockExec.store.SaveABCIResponses(block.Height, abciResponses); err != nil { return state, err } - fail.Fail() // XXX - // validate the validator updates and convert to tendermint types - abciValidatorSetUpdates := abciResponses.EndBlock.ValidatorSetUpdate - err = validateValidatorSetUpdate(abciValidatorSetUpdates, state.ConsensusParams.Validator) + err = validateValidatorSetUpdate(finalizeBlockResponse.ValidatorSetUpdate, state.ConsensusParams.Validator) if err != nil { - return state, fmt.Errorf("error in validator updates: %v", err) + return state, fmt.Errorf("error in validator updates: %w", err) } nextCoreChainLock, err := types.CoreChainLockFromProto( - abciResponses.EndBlock.NextCoreChainLockUpdate, + finalizeBlockResponse.NextCoreChainLockUpdate, ) if err != nil { return state, fmt.Errorf("error in chain lock from proto: %v", err) } // The quorum type should not even matter here - validators, thresholdPublicKey, quorumHash, err := - types.PB2TM.ValidatorUpdatesFromValidatorSet(abciValidatorSetUpdates) + validators, thresholdPublicKey, quorumHash, err := types.PB2TM.ValidatorUpdatesFromValidatorSet(finalizeBlockResponse.ValidatorSetUpdate) if err != nil { return state, fmt.Errorf("error when converting abci validator updates: %w", err) } if len(validators) > 0 { @@ -278,42 +318,35 @@ func (blockExec *BlockExecutor) ApplyBlockWithLogger( ) } - /* - _, err = blockExec.store.Load() - if err != nil { - return state, 0, fmt.Errorf("unable to load store when applying block: %v", err) - }*/ // Update the state with the block and responses.
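+	// The marshaled FinalizeBlock tx results are merkleized into the hash
+	// that becomes state.LastResultsHash; the next block header commits to
+	// it, which is what lets light clients verify this block's execution
+	// results.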
- state, err = updateState( - state, proTxHash, blockID, &block.Header, - abciResponses, validators, thresholdPublicKey, quorumHash, - ) + rs, err := abci.MarshalTxResults(finalizeBlockResponse.TxResults) if err != nil { - return state, fmt.Errorf("commit failed for application: %v", err) + return state, fmt.Errorf("marshaling TxResults: %w", err) + } + h := merkle.HashFromByteSlices(rs) + state, err = state.Update(proTxHash, blockID, &block.Header, h, finalizeBlockResponse.ConsensusParamUpdates, validators, thresholdPublicKey, quorumHash) + if err != nil { + return state, fmt.Errorf("commit failed for application: %w", err) } // Lock mempool, commit app state, update mempool. - appHash, retainHeight, err := blockExec.Commit(state, block, abciResponses.DeliverTxs) + appHash, retainHeight, err := blockExec.Commit(ctx, state, block, finalizeBlockResponse.TxResults) if err != nil { - return state, fmt.Errorf("commit failed for application: %v", err) + return state, fmt.Errorf("commit failed for application: %w", err) } // Update evpool with the latest state. - blockExec.evpool.Update(state, block.Evidence.Evidence) + blockExec.evpool.Update(ctx, state, block.Evidence) // Update the next core chain lock that we can propose blockExec.NextCoreChainLock = nextCoreChainLock - fail.Fail() // XXX - // Update the app hash and save the state. state.AppHash = appHash if err := blockExec.store.Save(state); err != nil { return state, err } - fail.Fail() // XXX - // Prune old heights, if requested by ABCI app. if retainHeight > 0 { pruned, err := blockExec.pruneBlocks(retainHeight) @@ -329,11 +362,40 @@ func (blockExec *BlockExecutor) ApplyBlockWithLogger( // Events are fired after everything else. // NOTE: if we crash between Commit and Save, events won't be fired during replay - fireEvents(logger, blockExec.eventBus, block, blockID, abciResponses, state.NextValidators) + fireEvents(blockExec.logger, blockExec.eventBus, block, blockID, finalizeBlockResponse, state.NextValidators) return state, nil } +func (blockExec *BlockExecutor) ExtendVote(ctx context.Context, vote *types.Vote) ([]byte, error) { + resp, err := blockExec.appClient.ExtendVote(ctx, &abci.RequestExtendVote{ + Hash: vote.BlockID.Hash, + Height: vote.Height, + }) + if err != nil { + panic(fmt.Errorf("ExtendVote call failed: %w", err)) + } + return resp.VoteExtension, nil +} + +func (blockExec *BlockExecutor) VerifyVoteExtension(ctx context.Context, vote *types.Vote) error { + resp, err := blockExec.appClient.VerifyVoteExtension(ctx, &abci.RequestVerifyVoteExtension{ + Hash: vote.BlockID.Hash, + Height: vote.Height, + ValidatorProTxHash: vote.ValidatorProTxHash, + VoteExtension: vote.Extension, + }) + if err != nil { + panic(fmt.Errorf("VerifyVoteExtension call failed: %w", err)) + } + + if !resp.IsOK() { + return types.ErrVoteInvalidExtension + } + + return nil +} + // Commit locks the mempool, runs the ABCI Commit message, and updates the // mempool. // It returns the result of calling abci.Commit (the AppHash) and the height to retain (if any). @@ -341,25 +403,26 @@ func (blockExec *BlockExecutor) ApplyBlockWithLogger( // typically reset on Commit and old txs must be replayed against committed // state before new txs are run in the mempool, lest they be invalid.
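+// A non-zero RetainHeight in the Commit response asks Tendermint to prune
+// blocks below that height, as ApplyBlock does above. A sketch of the caller
+// side (resp here is the ResponseFinalizeBlock obtained earlier):
+//
+//	appHash, retainHeight, err := blockExec.Commit(ctx, state, block, resp.TxResults)
+//	if err == nil && retainHeight > 0 {
+//		// prune stored blocks below retainHeight
+//	}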
func (blockExec *BlockExecutor) Commit( + ctx context.Context, state State, block *types.Block, - deliverTxResponses []*abci.ResponseDeliverTx, + txResults []*abci.ExecTxResult, ) ([]byte, int64, error) { blockExec.mempool.Lock() defer blockExec.mempool.Unlock() // while mempool is Locked, flush to ensure all async requests have completed // in the ABCI app before Commit. - err := blockExec.mempool.FlushAppConn() + err := blockExec.mempool.FlushAppConn(ctx) if err != nil { blockExec.logger.Error("client error during mempool.FlushAppConn", "err", err) return nil, 0, err } // Commit block, get hash back - res, err := blockExec.proxyApp.CommitSync(context.Background()) + res, err := blockExec.appClient.Commit(ctx) if err != nil { - blockExec.logger.Error("client error during proxyAppConn.CommitSync", "err", err) + blockExec.logger.Error("client error during proxyAppConn.Commit", "err", err) return nil, 0, err } @@ -382,113 +445,82 @@ func (blockExec *BlockExecutor) Commit( // Update mempool. err = blockExec.mempool.Update( + ctx, block.Height, block.Txs, - deliverTxResponses, - TxPreCheck(state), - TxPostCheck(state), + txResults, + TxPreCheckForState(state), + TxPostCheckForState(state), ) return res.Data, res.RetainHeight, err } -//--------------------------------------------------------- -// Helper functions for executing blocks and updating state - -// Executes block's transactions on proxyAppConn. -// Returns a list of transaction results and updates to the validator set -func execBlockOnProxyApp( - logger log.Logger, - proxyAppConn proxy.AppConnConsensus, - block *types.Block, - store Store, - initialHeight int64, -) (*tmstate.ABCIResponses, error) { - var validTxs, invalidTxs = 0, 0 - - txIndex := 0 - abciResponses := new(tmstate.ABCIResponses) - dtxs := make([]*abci.ResponseDeliverTx, len(block.Txs)) - abciResponses.DeliverTxs = dtxs - - // Execute transactions and get hash. - proxyCb := func(req *abci.Request, res *abci.Response) { - if r, ok := res.Value.(*abci.Response_DeliverTx); ok { - // TODO: make use of res.Log - // TODO: make use of this info - // Blocks may include invalid txs. - txRes := r.DeliverTx - if txRes.Code == abci.CodeTypeOK { - validTxs++ - } else { - logger.Debug("invalid tx", "code", txRes.Code, "log", txRes.Log) - invalidTxs++ - } - - abciResponses.DeliverTxs[txIndex] = txRes - txIndex++ - } +func buildLastCommitInfo(block *types.Block, store Store, initialHeight int64) abci.CommitInfo { + if block.Height == initialHeight { + // there is no last commit for the initial height. + // return an empty value. + return abci.CommitInfo{} } - proxyAppConn.SetResponseCallback(proxyCb) - - commitInfo := abci.LastCommitInfo{ + return abci.CommitInfo{ Round: block.LastCommit.Round, QuorumHash: block.LastCommit.QuorumHash, BlockSignature: block.LastCommit.ThresholdBlockSignature, StateSignature: block.LastCommit.ThresholdStateSignature, } +} - byzVals := make([]abci.Evidence, 0) - for _, evidence := range block.Evidence.Evidence { - byzVals = append(byzVals, evidence.ABCI()...)
- } - - ctx := context.Background() - - // Begin block - var err error - pbh := block.Header.ToProto() - if pbh == nil { - return nil, errors.New("nil header") - } - - abciResponses.BeginBlock, err = proxyAppConn.BeginBlockSync( - ctx, - abci.RequestBeginBlock{ - Hash: block.Hash(), - Header: *pbh, - LastCommitInfo: commitInfo, - ByzantineValidators: byzVals, - }) - if err != nil { - logger.Error("error in proxyAppConn.BeginBlock", "err", err) - return nil, err - } - - // run txs of block - for _, tx := range block.Txs { - _, err = proxyAppConn.DeliverTxAsync(ctx, abci.RequestDeliverTx{Tx: tx}) - if err != nil { - return nil, err +// extendedCommitInfo expects a CommitInfo struct along with all of the +// original votes relating to that commit, including their vote extensions. The +// order of votes does not matter. +func extendedCommitInfo(c abci.CommitInfo, votes []*types.Vote) abci.ExtendedCommitInfo { + // TODO: this function must be adapted to use the dash approach with BLS signatures + + //if len(c.Votes) != len(votes) { + // panic(fmt.Sprintf("extendedCommitInfo: number of votes from commit differ from the number of votes supplied (%d != %d)", len(c.Votes), len(votes))) + //} + //votesByVal := make(map[string]*types.Vote) + //for _, vote := range votes { + // if vote != nil { + // valProTxHash := vote.ValidatorProTxHash + // if _, ok := votesByVal[valProTxHash.String()]; ok { + // panic(fmt.Sprintf("extendedCommitInfo: found duplicate vote for validator with address %s", valProTxHash.ShortString())) + // } + // votesByVal[valProTxHash.String()] = vote + // } + //} + //vs := make([]abci.ExtendedVoteInfo, len(c.Votes)) + //for i := range vs { + // var ext []byte + // // votes[i] will be nil if c.Votes[i].SignedLastBlock is false + // if c.Votes[i].SignedLastBlock { + // valAddr := crypto.Address(c.Votes[i].Validator.Address).String() + // vote, ok := votesByVal[valAddr] + // if !ok || vote == nil { + // panic(fmt.Sprintf("extendedCommitInfo: validator with address %s signed last block, but could not find vote for it", valAddr)) + // } + // ext = vote.Extension + // } + // vs[i] = abci.ExtendedVoteInfo{ + // Validator: c.Votes[i].Validator, + // SignedLastBlock: c.Votes[i].SignedLastBlock, + // VoteExtension: ext, + // } + //} + //return abci.ExtendedCommitInfo{ + // Round: c.Round, + // Votes: vs, + //} + vs := make([]abci.ExtendedVoteInfo, len(votes)) + for i, vote := range votes { + if vote != nil { + vs[i].VoteExtension = vote.Extension } }
- abciResponses.EndBlock, err = proxyAppConn.EndBlockSync( - ctx, - abci.RequestEndBlock{Height: block.Height}, - ) - if err != nil { - logger.Error("error in proxyAppConn.EndBlock", "err", err) - return nil, err + return abci.ExtendedCommitInfo{ + Round: c.Round, + Votes: vs, } - - logger.Info( - "executed block", "height", block.Height, "coreHeight", - block.CoreChainLockedHeight, "num_valid_txs", validTxs, - "num_invalid_txs", invalidTxs, - ) - return abciResponses, nil } func validateValidatorSetUpdate( @@ -506,8 +538,7 @@ return validateValidatorUpdates(abciValidatorSetUpdate.ValidatorUpdates, params) } -func validateValidatorUpdates(abciUpdates []abci.ValidatorUpdate, - params types.ValidatorParams) error { +func validateValidatorUpdates(abciUpdates []abci.ValidatorUpdate, params types.ValidatorParams) error { for _, valUpdate := range abciUpdates { if valUpdate.GetPower() < 0 { return fmt.Errorf("voting power can't be negative %v", valUpdate) @@ -560,19 +591,19 @@ func validateValidatorUpdates(abciUpdates []abci.ValidatorUpdate, return nil } -// updateState returns a new State updated according to the header and responses. -func updateState( - state State, +// Update returns a copy of state with the fields set using the arguments passed in. +func (state State) Update( nodeProTxHash crypto.ProTxHash, blockID types.BlockID, header *types.Header, - abciResponses *tmstate.ABCIResponses, + resultsHash []byte, + consensusParamUpdates *tmtypes.ConsensusParams, validatorUpdates []*types.Validator, newThresholdPublicKey crypto.PubKey, quorumHash crypto.QuorumHash, ) (State, error) { - // Copy the valset so we can apply changes from EndBlock + // Copy the valset so we can apply changes from FinalizeBlock // and update s.LastValidators and s.Validators. nValSet := state.NextValidators.Copy() @@ -585,7 +616,7 @@ func updateState( if bytes.Equal(nValSet.QuorumHash, quorumHash) { err := nValSet.UpdateWithChangeSet(validatorUpdates, newThresholdPublicKey, quorumHash) if err != nil { - return state, fmt.Errorf("error changing validator set: %v", err) + return state, fmt.Errorf("error changing validator set: %w", err) } // Change results from this height but only applies to the next next height. lastHeightValsChanged = header.Height + 1 + 1 @@ -603,12 +634,12 @@ func updateState( // Update the params with the latest abciResponses. nextParams := state.ConsensusParams lastHeightParamsChanged := state.LastHeightConsensusParamsChanged - if abciResponses.EndBlock.ConsensusParamUpdates != nil { - // NOTE: must not mutate s.ConsensusParams - nextParams = state.ConsensusParams.UpdateConsensusParams(abciResponses.EndBlock.ConsensusParamUpdates) + if consensusParamUpdates != nil { + // NOTE: must not mutate state.ConsensusParams + nextParams = state.ConsensusParams.UpdateConsensusParams(consensusParamUpdates) err := nextParams.ValidateConsensusParams() if err != nil { - return state, fmt.Errorf("error updating consensus params: %v", err) + return state, fmt.Errorf("error updating consensus params: %w", err) } state.Version.Consensus.App = nextParams.Version.AppVersion @@ -619,7 +650,7 @@ func updateState( nextVersion := state.Version - // NOTE: the AppHash has not been populated. + // NOTE: the AppHash and the VoteExtension have not been populated. - // It will be filled on state.Save. + // They will be filled on state.Save.
return State{ Version: nextVersion, @@ -636,11 +667,21 @@ func updateState( LastHeightValidatorsChanged: lastHeightValsChanged, ConsensusParams: nextParams, LastHeightConsensusParamsChanged: lastHeightParamsChanged, - LastResultsHash: ABCIResponsesResultsHash(abciResponses), + LastResultsHash: resultsHash, AppHash: nil, }, nil } +// SetNextCoreChainLock sets the next core chain lock to be proposed. +func (blockExec *BlockExecutor) SetNextCoreChainLock(coreChainLock *types.CoreChainLock) { + blockExec.NextCoreChainLock = coreChainLock +} + +// SetAppHashSize sets the expected app hash size in bytes. +func (blockExec *BlockExecutor) SetAppHashSize(size int) { + blockExec.appHashSize = size +} + // Fire NewBlock, NewBlockHeader. // Fire TxEvent for every tx. // NOTE: if Tendermint crashes before commit, some or all of these events may be published again. @@ -649,29 +690,27 @@ func fireEvents( eventBus types.BlockEventPublisher, block *types.Block, blockID types.BlockID, - abciResponses *tmstate.ABCIResponses, + finalizeBlockResponse *abci.ResponseFinalizeBlock, validatorSetUpdate *types.ValidatorSet, ) { if err := eventBus.PublishEventNewBlock(types.EventDataNewBlock{ - Block: block, - BlockID: blockID, - ResultBeginBlock: *abciResponses.BeginBlock, - ResultEndBlock: *abciResponses.EndBlock, + Block: block, + BlockID: blockID, + ResultFinalizeBlock: *finalizeBlockResponse, }); err != nil { logger.Error("failed publishing new block", "err", err) } if err := eventBus.PublishEventNewBlockHeader(types.EventDataNewBlockHeader{ - Header: block.Header, - NumTxs: int64(len(block.Txs)), - ResultBeginBlock: *abciResponses.BeginBlock, - ResultEndBlock: *abciResponses.EndBlock, + Header: block.Header, + NumTxs: int64(len(block.Txs)), + ResultFinalizeBlock: *finalizeBlockResponse, }); err != nil { logger.Error("failed publishing new block header", "err", err) } - if len(block.Evidence.Evidence) != 0 { - for _, ev := range block.Evidence.Evidence { + if len(block.Evidence) != 0 { + for _, ev := range block.Evidence { if err := eventBus.PublishEventNewEvidence(types.EventDataNewEvidence{ Evidence: ev, Height: block.Height, @@ -681,13 +720,21 @@ func fireEvents( } } + // sanity check + if len(finalizeBlockResponse.TxResults) != len(block.Data.Txs) { + panic(fmt.Sprintf("number of TXs (%d) and ABCI TX responses (%d) do not match", + len(block.Data.Txs), len(finalizeBlockResponse.TxResults))) + } + for i, tx := range block.Data.Txs { - if err := eventBus.PublishEventTx(types.EventDataTx{TxResult: abci.TxResult{ - Height: block.Height, - Index: uint32(i), - Tx: tx, - Result: *(abciResponses.DeliverTxs[i]), - }}); err != nil { + if err := eventBus.PublishEventTx(types.EventDataTx{ + TxResult: abci.TxResult{ + Height: block.Height, + Index: uint32(i), + Tx: tx, + Result: *(finalizeBlockResponse.TxResults[i]), + }, + }); err != nil { logger.Error("failed publishing event TX", "err", err) } } @@ -710,32 +757,45 @@ // ExecCommitBlock executes and commits a block on the proxyApp without validating or mutating the state. // It returns the application root hash (result of abci.Commit).
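+// ExecCommitBlock is primarily used when replaying blocks during handshake; a
+// sketch of a call site (passing a nil *BlockExecutor skips the validator-set
+// checks and event firing below):
+//
+//	appHash, err := ExecCommitBlock(ctx, nil, appConn, block, logger, store, initialHeight, state)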
func ExecCommitBlock( + ctx context.Context, be *BlockExecutor, - appConnConsensus proxy.AppConnConsensus, + appConn abciclient.Client, block *types.Block, logger log.Logger, store Store, initialHeight int64, s State, ) ([]byte, error) { - abciResponses, err := execBlockOnProxyApp(logger, appConnConsensus, block, store, initialHeight) + finalizeBlockResponse, err := appConn.FinalizeBlock( + ctx, + &abci.RequestFinalizeBlock{ + Hash: block.Hash(), + Height: block.Height, + Time: block.Time, + Txs: block.Txs.ToSliceOfBytes(), + DecidedLastCommit: buildLastCommitInfo(block, store, initialHeight), + ByzantineValidators: block.Evidence.ToABCI(), + }, + ) + if err != nil { - logger.Error("failed executing block on proxy app", "height", block.Height, "err", err) + logger.Error("executing block", "err", err) return nil, err } + logger.Info("executed block", "height", block.Height) // the BlockExecutor condition is used for the final block replay process. if be != nil { - abciValSetUpdate := abciResponses.EndBlock.ValidatorSetUpdate - err = validateValidatorSetUpdate(abciValSetUpdate, s.ConsensusParams.Validator) + err = validateValidatorSetUpdate(finalizeBlockResponse.ValidatorSetUpdate, s.ConsensusParams.Validator) if err != nil { - logger.Error("err", err) + logger.Error("validating validator updates", "err", err) return nil, err } validatorUpdates, thresholdPublicKeyUpdate, quorumHash, err := - types.PB2TM.ValidatorUpdatesFromValidatorSet(abciValSetUpdate) + types.PB2TM.ValidatorUpdatesFromValidatorSet(finalizeBlockResponse.ValidatorSetUpdate) if err != nil { + logger.Error("converting validator updates to native types", "err", err) return nil, err } @@ -745,14 +805,19 @@ func ExecCommitBlock( return nil, err } - blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(types.BlockPartSizeBytes).Header()} - fireEvents(be.logger, be.eventBus, block, blockID, abciResponses, validatorSetUpdate) + bps, err := block.MakePartSet(types.BlockPartSizeBytes) + if err != nil { + return nil, err + } + + blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} + fireEvents(be.logger, be.eventBus, block, blockID, finalizeBlockResponse, validatorSetUpdate) } // Commit block, get hash back - res, err := appConnConsensus.CommitSync(context.Background()) + res, err := appConn.Commit(ctx) if err != nil { - logger.Error("client error during proxyAppConn.CommitSync", "err", res) + logger.Error("client error during proxyAppConn.Commit", "err", err) return nil, err } diff --git a/internal/state/execution_test.go b/internal/state/execution_test.go index 8ab49051c6..34c3233012 100644 --- a/internal/state/execution_test.go +++ b/internal/state/execution_test.go @@ -3,6 +3,7 @@ package state_test import ( "bytes" "context" + "errors" "fmt" "testing" "time" @@ -13,73 +14,101 @@ import ( dbm "github.com/tendermint/tm-db" abciclient "github.com/tendermint/tendermint/abci/client" + abciclientmocks "github.com/tendermint/tendermint/abci/client/mocks" abci "github.com/tendermint/tendermint/abci/types" + abcimocks "github.com/tendermint/tendermint/abci/types/mocks" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/bls12381" "github.com/tendermint/tendermint/crypto/encoding" - mmock "github.com/tendermint/tendermint/internal/mempool/mock" + "github.com/tendermint/tendermint/internal/eventbus" + mpmocks "github.com/tendermint/tendermint/internal/mempool/mocks" "github.com/tendermint/tendermint/internal/proxy" +
"github.com/tendermint/tendermint/internal/pubsub" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/state/mocks" sf "github.com/tendermint/tendermint/internal/state/test/factory" "github.com/tendermint/tendermint/internal/store" + "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/types" ) var ( - chainID = "execution_chain" + chainID = "execution_chain" + testPartSize uint32 = 65536 ) func TestApplyBlock(t *testing.T) { app := &testApp{} - cc := abciclient.NewLocalCreator(app) - proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) - err := proxyApp.Start() - require.Nil(t, err) - defer proxyApp.Stop() //nolint:errcheck // ignore for tests + logger := log.NewNopLogger() + cc := abciclient.NewLocalClient(logger, app) + proxyApp := proxy.New(cc, logger, proxy.NopMetrics()) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + require.NoError(t, proxyApp.Start(ctx)) + + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) - state, stateDB, _ := makeState(1, 1) + state, stateDB, _ := makeState(t, 1, 1) + stateStore := sm.NewStore(stateDB) // The state is local, so we just take the first proTxHash nodeProTxHash := state.Validators.Validators[0].ProTxHash - stateStore := sm.NewStore(stateDB) app.ValidatorSetUpdate = state.Validators.ABCIEquivalentValidatorUpdates() blockStore := store.NewBlockStore(dbm.NewMemDB()) + mp := &mpmocks.Mempool{} + mp.On("Lock").Return() + mp.On("Unlock").Return() + mp.On("FlushAppConn", mock.Anything).Return(nil) + mp.On("Update", + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything).Return(nil) blockExec := sm.NewBlockExecutor( stateStore, - log.TestingLogger(), - proxyApp.Consensus(), - proxyApp.Query(), - mmock.Mempool{}, + logger, + proxyApp, + mp, sm.EmptyEvidencePool{}, blockStore, - nil, + eventBus, + sm.NopMetrics(), ) block, err := sf.MakeBlock(state, 1, new(types.Commit), nil, 0) require.NoError(t, err) - blockID := block.BlockID() + bps, err := block.MakePartSet(testPartSize) + require.NoError(t, err) + blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} - state, err = blockExec.ApplyBlock(state, nodeProTxHash, blockID, block) + state, err = blockExec.ApplyBlock(ctx, state, nodeProTxHash, blockID, block) require.NoError(t, err) // TODO check state and mempool assert.EqualValues(t, 1, state.Version.Consensus.App, "App version wasn't updated") } -// TestBeginBlockByzantineValidators ensures we send byzantine validators list. -func TestBeginBlockByzantineValidators(t *testing.T) { +// TestFinalizeBlockByzantineValidators ensures we send byzantine validators list. 
+func TestFinalizeBlockByzantineValidators(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + app := &testApp{} - cc := abciclient.NewLocalCreator(app) - proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) - err := proxyApp.Start() - require.Nil(t, err) - defer proxyApp.Stop() //nolint:errcheck // ignore for tests + logger := log.NewNopLogger() + cc := abciclient.NewLocalClient(logger, app) + proxyApp := proxy.New(cc, logger, proxy.NopMetrics()) + err := proxyApp.Start(ctx) + require.NoError(t, err) - state, stateDB, privVals := makeState(1, 1) - nodeProTxHash := state.Validators.Validators[0].ProTxHash + state, stateDB, privVals := makeState(t, 1, 1) stateStore := sm.NewStore(stateDB) + nodeProTxHash := state.Validators.Validators[0].ProTxHash app.ValidatorSetUpdate = state.Validators.ABCIEquivalentValidatorUpdates() defaultEvidenceTime := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC) @@ -87,6 +116,7 @@ func TestBeginBlockByzantineValidators(t *testing.T) { // we don't need to worry about validating the evidence as long as they pass validate basic dve, err := types.NewMockDuplicateVoteEvidenceWithValidator( + ctx, 3, defaultEvidenceTime, privVal, @@ -99,9 +129,9 @@ func TestBeginBlockByzantineValidators(t *testing.T) { ev := []types.Evidence{dve} - abciEv := []abci.Evidence{ + abciMb := []abci.Misbehavior{ { - Type: abci.EvidenceType_DUPLICATE_VOTE, + Type: abci.MisbehaviorType_DUPLICATE_VOTE, Height: 3, Time: defaultEvidenceTime, Validator: types.TM2PB.Validator(state.Validators.Validators[0]), @@ -111,34 +141,119 @@ func TestBeginBlockByzantineValidators(t *testing.T) { evpool := &mocks.EvidencePool{} evpool.On("PendingEvidence", mock.AnythingOfType("int64")).Return(ev, int64(100)) - evpool.On("Update", mock.AnythingOfType("state.State"), mock.AnythingOfType("types.EvidenceList")). 
- Return() - evpool.On("CheckEvidence", mock.AnythingOfType("types.EvidenceList")).Return(nil) + evpool.On("Update", ctx, mock.AnythingOfType("state.State"), mock.AnythingOfType("types.EvidenceList")).Return() + evpool.On("CheckEvidence", ctx, mock.AnythingOfType("types.EvidenceList")).Return(nil) + mp := &mpmocks.Mempool{} + mp.On("Lock").Return() + mp.On("Unlock").Return() + mp.On("FlushAppConn", mock.Anything).Return(nil) + mp.On("Update", + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything).Return(nil) + + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) + + blockStore := store.NewBlockStore(dbm.NewMemDB()) + + blockExec := sm.NewBlockExecutor(stateStore, log.NewNopLogger(), proxyApp, mp, evpool, blockStore, eventBus, sm.NopMetrics()) + + block, err := sf.MakeBlock(state, 1, new(types.Commit), nil, 1) + require.NoError(t, err) + block.Evidence = ev + block.Header.EvidenceHash = block.Evidence.Hash() + bps, err := block.MakePartSet(testPartSize) + require.NoError(t, err) + + blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} + + _, err = blockExec.ApplyBlock(ctx, state, nodeProTxHash, blockID, block) + require.NoError(t, err) + + // TODO check state and mempool + assert.Equal(t, abciMb, app.ByzantineValidators) +} +func TestProcessProposal(t *testing.T) { + const height = 2 + txs := factory.MakeNTxs(height, 10) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + app := abcimocks.NewApplication(t) + logger := log.NewNopLogger() + cc := abciclient.NewLocalClient(logger, app) + proxyApp := proxy.New(cc, logger, proxy.NopMetrics()) + err := proxyApp.Start(ctx) + require.NoError(t, err) + + state, stateDB, _ := makeState(t, 1, height) + stateStore := sm.NewStore(stateDB) blockStore := store.NewBlockStore(dbm.NewMemDB()) + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) + blockExec := sm.NewBlockExecutor( stateStore, - log.TestingLogger(), - proxyApp.Consensus(), - proxyApp.Query(), - mmock.Mempool{}, - evpool, + logger, + proxyApp, + new(mpmocks.Mempool), + sm.EmptyEvidencePool{}, blockStore, - nil, + eventBus, + sm.NopMetrics(), ) - block, err := sf.MakeBlock(state, 1, new(types.Commit), nil, 0) + //block0 := sf.MakeBlock(t, state, height-1, new(types.Commit), nil, 0) + //partSet, err := block0.MakePartSet(types.BlockPartSizeBytes) + //require.NoError(t, err) + //blockID := types.BlockID{Hash: block0.Hash(), PartSetHeader: partSet.Header()} + //stateID := types.RandStateID().WithHeight(height - 1) + + //quorumHash := state.Validators.QuorumHash + //thBlockSign := + //thStateSign + //voteInfos := []abci.VoteInfo{} + //for _, privVal := range privVals { + // vote, err := factory.MakeVote(ctx, privVal, state.Validators, block0.Header.ChainID, 0, 0, 0, 2, blockID, stateID) + // require.NoError(t, err) + // proTxHash, err := privVal.GetProTxHash(ctx) + // require.NoError(t, err) + // + //} + + lastCommit := types.NewCommit(height-1, 0, types.BlockID{}, types.StateID{}, nil, nil, nil) + block1, err := sf.MakeBlock(state, height, lastCommit, nil, 0) require.NoError(t, err) - block.Evidence = types.EvidenceData{Evidence: ev} - block.Header.EvidenceHash = block.Evidence.Hash() - blockID := block.BlockID() - - _, err = blockExec.ApplyBlock(state, nodeProTxHash, blockID, block) - require.Nil(t, err) + block1.Txs = txs + + expectedRpp := &abci.RequestProcessProposal{ + Txs: block1.Txs.ToSliceOfBytes(), + Hash: block1.Hash(), + Height: 
block1.Header.Height, + Time: block1.Header.Time, + ByzantineValidators: block1.Evidence.ToABCI(), + ProposedLastCommit: abci.CommitInfo{ + Round: 0, + //QuorumHash: + //BlockSignature: + //StateSignature: + }, + NextValidatorsHash: block1.NextValidatorsHash, + ProposerProTxHash: block1.ProposerProTxHash, + } - // TODO check state and mempool - assert.Equal(t, abciEv, app.ByzantineValidators) + app.On("ProcessProposal", mock.Anything, mock.Anything).Return(&abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_ACCEPT}, nil) + acceptBlock, err := blockExec.ProcessProposal(ctx, block1, state) + require.NoError(t, err) + require.True(t, acceptBlock) + app.AssertExpectations(t) + app.AssertCalled(t, "ProcessProposal", ctx, expectedRpp) } func TestValidateValidatorUpdates(t *testing.T) { @@ -310,49 +425,61 @@ func TestUpdateValidators(t *testing.T) { } } -// TestEndBlockValidatorUpdates ensures we update validator set and send an event. -func TestEndBlockValidatorUpdates(t *testing.T) { +// TestFinalizeBlockValidatorUpdates ensures we update validator set and send an event. +func TestFinalizeBlockValidatorUpdates(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + app := &testApp{} - cc := abciclient.NewLocalCreator(app) - proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) - err := proxyApp.Start() - require.Nil(t, err) - defer proxyApp.Stop() //nolint:errcheck // ignore for tests + logger := log.NewNopLogger() + cc := abciclient.NewLocalClient(logger, app) + proxyApp := proxy.New(cc, logger, proxy.NopMetrics()) + err := proxyApp.Start(ctx) + require.NoError(t, err) - state, stateDB, _ := makeState(1, 1) - nodeProTxHash := state.Validators.Validators[0].ProTxHash + state, stateDB, _ := makeState(t, 1, 1) stateStore := sm.NewStore(stateDB) blockStore := store.NewBlockStore(dbm.NewMemDB()) + nodeProTxHash := state.Validators.Validators[0].ProTxHash + + mp := &mpmocks.Mempool{} + mp.On("Lock").Return() + mp.On("Unlock").Return() + mp.On("FlushAppConn", mock.Anything).Return(nil) + mp.On("Update", + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything).Return(nil) + mp.On("ReapMaxBytesMaxGas", mock.Anything, mock.Anything).Return(types.Txs{}) + + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) blockExec := sm.NewBlockExecutor( stateStore, - log.TestingLogger(), - proxyApp.Consensus(), - proxyApp.Query(), - mmock.Mempool{}, + logger, + proxyApp, + mp, sm.EmptyEvidencePool{}, blockStore, - nil, + eventBus, + sm.NopMetrics(), ) - eventBus := types.NewEventBus() - err = eventBus.Start() - require.NoError(t, err) - defer eventBus.Stop() //nolint:errcheck // ignore for tests - - blockExec.SetEventBus(eventBus) - - updatesSub, err := eventBus.Subscribe( - context.Background(), - "TestEndBlockValidatorUpdates", - types.EventQueryValidatorSetUpdates, - ) + updatesSub, err := eventBus.SubscribeWithArgs(ctx, pubsub.SubscribeArgs{ + ClientID: "TestFinalizeBlockValidatorUpdates", + Query: types.EventQueryValidatorSetUpdates, + }) require.NoError(t, err) block, err := sf.MakeBlock(state, 1, new(types.Commit), nil, 0) require.NoError(t, err) - - blockID := block.BlockID() + bps, err := block.MakePartSet(testPartSize) + require.NoError(t, err) + blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} vals := state.Validators proTxHashes := vals.GetProTxHashes() @@ -379,7 +506,7 @@ func TestEndBlockValidatorUpdates(t *testing.T) { app.ValidatorSetUpdate = 
newVals.ABCIEquivalentValidatorUpdates() - state, err = blockExec.ApplyBlock(state, nodeProTxHash, blockID, block) + state, err = blockExec.ApplyBlock(ctx, state, nodeProTxHash, blockID, block) require.Nil(t, err) // test new validator was added to NextValidators if assert.Equal(t, state.Validators.Size()+1, state.NextValidators.Size()) { @@ -390,60 +517,60 @@ func TestEndBlockValidatorUpdates(t *testing.T) { } // test we threw an event - select { - case msg := <-updatesSub.Out(): - event, ok := msg.Data().(types.EventDataValidatorSetUpdate) - require.True( + ctx, cancel = context.WithTimeout(ctx, 1*time.Second) + defer cancel() + msg, err := updatesSub.Next(ctx) + require.NoError(t, err) + event, ok := msg.Data().(types.EventDataValidatorSetUpdate) + require.True(t, ok, "Expected event of type EventDataValidatorSetUpdate, got %T", msg.Data()) + assert.Len(t, event.QuorumHash, crypto.QuorumHashSize) + if assert.NotEmpty(t, event.ValidatorSetUpdates) { + assert.Equal(t, addProTxHash, event.ValidatorSetUpdates[pos].ProTxHash) + assert.EqualValues( t, - ok, - "Expected event of type EventDataValidatorSetUpdate, got %T", - msg.Data(), + types.DefaultDashVotingPower, + event.ValidatorSetUpdates[pos].VotingPower, ) - assert.Len(t, event.QuorumHash, crypto.QuorumHashSize) - if assert.NotEmpty(t, event.ValidatorSetUpdates) { - assert.Equal(t, addProTxHash, event.ValidatorSetUpdates[pos].ProTxHash) - assert.EqualValues( - t, - types.DefaultDashVotingPower, - event.ValidatorSetUpdates[pos].VotingPower, - ) - } - case <-updatesSub.Canceled(): - t.Fatalf("updatesSub was canceled (reason: %v)", updatesSub.Err()) - case <-time.After(1 * time.Second): - t.Fatal("Did not receive EventValidatorSetUpdates within 1 sec.") } } -// TestEndBlockValidatorUpdatesResultingInEmptySet checks that processing validator updates that +// TestFinalizeBlockValidatorUpdatesResultingInEmptySet checks that processing validator updates that // would result in empty set causes no panic, an error is raised and NextValidators is not updated -func TestEndBlockValidatorUpdatesResultingInEmptySet(t *testing.T) { +func TestFinalizeBlockValidatorUpdatesResultingInEmptySet(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + app := &testApp{} - cc := abciclient.NewLocalCreator(app) - proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) - err := proxyApp.Start() - require.Nil(t, err) - defer proxyApp.Stop() //nolint:errcheck // ignore for tests + logger := log.NewNopLogger() + cc := abciclient.NewLocalClient(logger, app) + proxyApp := proxy.New(cc, logger, proxy.NopMetrics()) + err := proxyApp.Start(ctx) + require.NoError(t, err) + + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) - state, stateDB, _ := makeState(1, 1) + state, stateDB, _ := makeState(t, 1, 1) nodeProTxHash := state.Validators.Validators[0].ProTxHash stateStore := sm.NewStore(stateDB) proTxHash := state.Validators.Validators[0].ProTxHash blockStore := store.NewBlockStore(dbm.NewMemDB()) blockExec := sm.NewBlockExecutor( stateStore, - log.TestingLogger(), - proxyApp.Consensus(), - proxyApp.Query(), - mmock.Mempool{}, + log.NewNopLogger(), + proxyApp, + new(mpmocks.Mempool), sm.EmptyEvidencePool{}, blockStore, - nil, + eventBus, + sm.NopMetrics(), ) block, err := sf.MakeBlock(state, 1, new(types.Commit), nil, 0) require.NoError(t, err) - blockID := block.BlockID() + bps, err := block.MakePartSet(testPartSize) + require.NoError(t, err) + blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: 
bps.Header()} publicKey, err := encoding.PubKeyToProto(bls12381.GenPrivKey().PubKey()) require.NoError(t, err) @@ -459,10 +586,408 @@ func TestEndBlockValidatorUpdatesResultingInEmptySet(t *testing.T) { QuorumHash: state.Validators.QuorumHash, } - assert.NotPanics( - t, - func() { state, err = blockExec.ApplyBlock(state, nodeProTxHash, blockID, block) }, - ) + assert.NotPanics(t, func() { state, err = blockExec.ApplyBlock(ctx, state, nodeProTxHash, blockID, block) }) assert.NotNil(t, err) assert.NotEmpty(t, state.NextValidators.Validators) } + +func TestEmptyPrepareProposal(t *testing.T) { + const height = 2 + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) + + app := abcimocks.NewApplication(t) + app.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{}, nil) + cc := abciclient.NewLocalClient(logger, app) + proxyApp := proxy.New(cc, logger, proxy.NopMetrics()) + err := proxyApp.Start(ctx) + require.NoError(t, err) + + state, stateDB, privVals := makeState(t, 1, height) + stateStore := sm.NewStore(stateDB) + mp := &mpmocks.Mempool{} + mp.On("Lock").Return() + mp.On("Unlock").Return() + mp.On("FlushAppConn", mock.Anything).Return(nil) + mp.On("Update", + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything).Return(nil) + mp.On("ReapMaxBytesMaxGas", mock.Anything, mock.Anything).Return(types.Txs{}) + + blockExec := sm.NewBlockExecutor( + stateStore, + logger, + proxyApp, + mp, + sm.EmptyEvidencePool{}, + nil, + eventBus, + sm.NopMetrics(), + ) + proposerProTxHash, _ := state.Validators.GetByIndex(0) + commit, _ := makeValidCommit(ctx, t, height, types.BlockID{}, types.StateID{}, state.Validators, privVals) + _, err = blockExec.CreateProposalBlock(ctx, height, state, commit, proposerProTxHash, 0, nil) + require.NoError(t, err) +} + +// TestPrepareProposalErrorOnNonExistingRemoved tests that the block creation logic returns +// an error if the ResponsePrepareProposal returned from the application marks +// a transaction as REMOVED that was not present in the original proposal. 
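+// For context, PrepareProposal communicates per-transaction decisions back to
+// Tendermint through TxRecord actions; the variants exercised by this and the
+// following tests are (a sketch of the enum usage, with the types introduced
+// in this change):
+//
+//	abci.TxRecord{Action: abci.TxRecord_UNMODIFIED, Tx: tx} // keep tx as reaped from the mempool
+//	abci.TxRecord{Action: abci.TxRecord_ADDED, Tx: tx}      // tx injected by the application
+//	abci.TxRecord{Action: abci.TxRecord_REMOVED, Tx: tx}    // drop tx and evict it from the mempool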
+func TestPrepareProposalErrorOnNonExistingRemoved(t *testing.T) { + const height = 2 + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) + + state, stateDB, privVals := makeState(t, 1, height) + stateStore := sm.NewStore(stateDB) + + evpool := &mocks.EvidencePool{} + evpool.On("PendingEvidence", mock.Anything).Return([]types.Evidence{}, int64(0)) + + mp := &mpmocks.Mempool{} + mp.On("ReapMaxBytesMaxGas", mock.Anything, mock.Anything).Return(types.Txs{}) + + app := abcimocks.NewApplication(t) + + // create an invalid ResponsePrepareProposal + rpp := &abci.ResponsePrepareProposal{ + TxRecords: []*abci.TxRecord{ + { + Action: abci.TxRecord_REMOVED, + Tx: []byte("new tx"), + }, + }, + } + app.On("PrepareProposal", mock.Anything, mock.Anything).Return(rpp, nil) + + cc := abciclient.NewLocalClient(logger, app) + proxyApp := proxy.New(cc, logger, proxy.NopMetrics()) + err := proxyApp.Start(ctx) + require.NoError(t, err) + + blockExec := sm.NewBlockExecutor( + stateStore, + logger, + proxyApp, + mp, + evpool, + nil, + eventBus, + sm.NopMetrics(), + ) + proposerProTxHash, _ := state.Validators.GetByIndex(0) + commit, _ := makeValidCommit(ctx, t, height, types.BlockID{}, types.StateID{}, state.Validators, privVals) + block, err := blockExec.CreateProposalBlock(ctx, height, state, commit, proposerProTxHash, 0, nil) + require.ErrorContains(t, err, "new transaction incorrectly marked as removed") + require.Nil(t, block) + + mp.AssertExpectations(t) +} + +// TestPrepareProposalRemoveTxs tests that any transactions marked as REMOVED +// are not included in the block produced by CreateProposalBlock. The test also +// ensures that any transactions removed are also removed from the mempool. 
+func TestPrepareProposalRemoveTxs(t *testing.T) { + const height = 2 + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) + + state, stateDB, privVals := makeState(t, 1, height) + stateStore := sm.NewStore(stateDB) + + evpool := &mocks.EvidencePool{} + evpool.On("PendingEvidence", mock.Anything).Return([]types.Evidence{}, int64(0)) + + txs := factory.MakeNTxs(height, 10) + mp := &mpmocks.Mempool{} + mp.On("ReapMaxBytesMaxGas", mock.Anything, mock.Anything).Return(types.Txs(txs)) + + trs := txsToTxRecords(types.Txs(txs)) + trs[0].Action = abci.TxRecord_REMOVED + trs[1].Action = abci.TxRecord_REMOVED + mp.On("RemoveTxByKey", mock.Anything).Return(nil).Twice() + + app := abcimocks.NewApplication(t) + app.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{ + TxRecords: trs, + }, nil) + + cc := abciclient.NewLocalClient(logger, app) + proxyApp := proxy.New(cc, logger, proxy.NopMetrics()) + err := proxyApp.Start(ctx) + require.NoError(t, err) + + blockExec := sm.NewBlockExecutor( + stateStore, + logger, + proxyApp, + mp, + evpool, + nil, + eventBus, + sm.NopMetrics(), + ) + proposerProTxHash, _ := state.Validators.GetByIndex(0) + commit, _ := makeValidCommit(ctx, t, height, types.BlockID{}, types.StateID{}, state.Validators, privVals) + block, err := blockExec.CreateProposalBlock(ctx, height, state, commit, proposerProTxHash, 0, nil) + require.NoError(t, err) + require.Len(t, block.Data.Txs.ToSliceOfBytes(), len(trs)-2) + + require.Equal(t, -1, block.Data.Txs.Index(types.Tx(trs[0].Tx))) + require.Equal(t, -1, block.Data.Txs.Index(types.Tx(trs[1].Tx))) + + mp.AssertCalled(t, "RemoveTxByKey", types.Tx(trs[0].Tx).Key()) + mp.AssertCalled(t, "RemoveTxByKey", types.Tx(trs[1].Tx).Key()) + mp.AssertExpectations(t) +} + +// TestPrepareProposalAddedTxsIncluded tests that any transactions marked as ADDED +// in the prepare proposal response are included in the block. 
+func TestPrepareProposalAddedTxsIncluded(t *testing.T) { + const height = 2 + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) + + state, stateDB, privVals := makeState(t, 1, height) + stateStore := sm.NewStore(stateDB) + + evpool := &mocks.EvidencePool{} + evpool.On("PendingEvidence", mock.Anything).Return([]types.Evidence{}, int64(0)) + + txs := factory.MakeNTxs(height, 10) + mp := &mpmocks.Mempool{} + mp.On("ReapMaxBytesMaxGas", mock.Anything, mock.Anything).Return(types.Txs(txs[2:])) + + trs := txsToTxRecords(types.Txs(txs)) + trs[0].Action = abci.TxRecord_ADDED + trs[1].Action = abci.TxRecord_ADDED + + app := abcimocks.NewApplication(t) + app.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{ + TxRecords: trs, + }, nil) + + cc := abciclient.NewLocalClient(logger, app) + proxyApp := proxy.New(cc, logger, proxy.NopMetrics()) + err := proxyApp.Start(ctx) + require.NoError(t, err) + + blockExec := sm.NewBlockExecutor( + stateStore, + logger, + proxyApp, + mp, + evpool, + nil, + eventBus, + sm.NopMetrics(), + ) + proposerProTxHash, _ := state.Validators.GetByIndex(0) + commit, _ := makeValidCommit(ctx, t, height, types.BlockID{}, types.StateID{}, state.Validators, privVals) + block, err := blockExec.CreateProposalBlock(ctx, height, state, commit, proposerProTxHash, 0, nil) + require.NoError(t, err) + + require.Equal(t, txs[0], block.Data.Txs[0]) + require.Equal(t, txs[1], block.Data.Txs[1]) + + mp.AssertExpectations(t) +} + +// TestPrepareProposalReorderTxs tests that CreateProposalBlock produces a block with transactions +// in the order matching the order they are returned from PrepareProposal. +func TestPrepareProposalReorderTxs(t *testing.T) { + const height = 2 + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) + + state, stateDB, privVals := makeState(t, 1, height) + stateStore := sm.NewStore(stateDB) + + evpool := &mocks.EvidencePool{} + evpool.On("PendingEvidence", mock.Anything).Return([]types.Evidence{}, int64(0)) + + txs := factory.MakeNTxs(height, 10) + mp := &mpmocks.Mempool{} + mp.On("ReapMaxBytesMaxGas", mock.Anything, mock.Anything).Return(types.Txs(txs)) + + trs := txsToTxRecords(types.Txs(txs)) + trs = trs[2:] + trs = append(trs[len(trs)/2:], trs[:len(trs)/2]...)
+ + app := abcimocks.NewApplication(t) + app.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{ + TxRecords: trs, + }, nil) + + cc := abciclient.NewLocalClient(logger, app) + proxyApp := proxy.New(cc, logger, proxy.NopMetrics()) + err := proxyApp.Start(ctx) + require.NoError(t, err) + + blockExec := sm.NewBlockExecutor( + stateStore, + logger, + proxyApp, + mp, + evpool, + nil, + eventBus, + sm.NopMetrics(), + ) + proposerProTxHash, _ := state.Validators.GetByIndex(0) + commit, _ := makeValidCommit(ctx, t, height, types.BlockID{}, types.StateID{}, state.Validators, privVals) + block, err := blockExec.CreateProposalBlock(ctx, height, state, commit, proposerProTxHash, 0, nil) + require.NoError(t, err) + for i, tx := range block.Data.Txs { + require.Equal(t, types.Tx(trs[i].Tx), tx) + } + + mp.AssertExpectations(t) +} + +// TestPrepareProposalErrorOnTooManyTxs tests that the block creation logic returns +// an error if the ResponsePrepareProposal returned from the application is invalid. +func TestPrepareProposalErrorOnTooManyTxs(t *testing.T) { + const height = 2 + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) + + state, stateDB, privVals := makeState(t, 1, height) + // limit max block size + state.ConsensusParams.Block.MaxBytes = 60 * 1024 + stateStore := sm.NewStore(stateDB) + + evpool := &mocks.EvidencePool{} + evpool.On("PendingEvidence", mock.Anything).Return([]types.Evidence{}, int64(0)) + + const nValidators = 1 + var bytesPerTx int64 = 3 + maxDataBytes := types.MaxDataBytes(state.ConsensusParams.Block.MaxBytes, crypto.BLS12381, 0, nValidators) + txs := factory.MakeNTxs(height, maxDataBytes/bytesPerTx+2) // +2 so that the txs don't fit + mp := &mpmocks.Mempool{} + mp.On("ReapMaxBytesMaxGas", mock.Anything, mock.Anything).Return(types.Txs(txs)) + + trs := txsToTxRecords(types.Txs(txs)) + + app := abcimocks.NewApplication(t) + app.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{ + TxRecords: trs, + }, nil) + + cc := abciclient.NewLocalClient(logger, app) + proxyApp := proxy.New(cc, logger, proxy.NopMetrics()) + err := proxyApp.Start(ctx) + require.NoError(t, err) + + blockExec := sm.NewBlockExecutor( + stateStore, + logger, + proxyApp, + mp, + evpool, + nil, + eventBus, + sm.NopMetrics(), + ) + proposerProTxHash, _ := state.Validators.GetByIndex(0) + commit, _ := makeValidCommit(ctx, t, height, types.BlockID{}, types.StateID{}, state.Validators, privVals) + + block, err := blockExec.CreateProposalBlock(ctx, height, state, commit, proposerProTxHash, 0, nil) + require.ErrorContains(t, err, "transaction data size exceeds maximum") + require.Nil(t, block) + + mp.AssertExpectations(t) +} + +// TestPrepareProposalErrorOnPrepareProposalError tests the case where the ABCI client +// returns an error when PrepareProposal is called on it.
+func TestPrepareProposalErrorOnPrepareProposalError(t *testing.T) { + const height = 2 + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) + + state, stateDB, privVals := makeState(t, 1, height) + stateStore := sm.NewStore(stateDB) + + evpool := &mocks.EvidencePool{} + evpool.On("PendingEvidence", mock.Anything).Return([]types.Evidence{}, int64(0)) + + txs := factory.MakeNTxs(height, 10) + mp := &mpmocks.Mempool{} + mp.On("ReapMaxBytesMaxGas", mock.Anything, mock.Anything).Return(types.Txs(txs)) + + cm := &abciclientmocks.Client{} + cm.On("IsRunning").Return(true) + cm.On("Error").Return(nil) + cm.On("Start", mock.Anything).Return(nil).Once() + cm.On("Wait").Return(nil).Once() + cm.On("PrepareProposal", mock.Anything, mock.Anything).Return(nil, errors.New("an injected error")).Once() + + proxyApp := proxy.New(cm, logger, proxy.NopMetrics()) + err := proxyApp.Start(ctx) + require.NoError(t, err) + + blockExec := sm.NewBlockExecutor( + stateStore, + logger, + proxyApp, + mp, + evpool, + nil, + eventBus, + sm.NopMetrics(), + ) + proTxHash, _ := state.Validators.GetByIndex(0) + commit, _ := makeValidCommit(ctx, t, height, types.BlockID{}, types.StateID{}, state.Validators, privVals) + + block, err := blockExec.CreateProposalBlock(ctx, height, state, commit, proTxHash, 0, nil) + require.Nil(t, block) + require.ErrorContains(t, err, "an injected error") + + mp.AssertExpectations(t) +} + +func txsToTxRecords(txs []types.Tx) []*abci.TxRecord { + trs := make([]*abci.TxRecord, len(txs)) + for i, tx := range txs { + trs[i] = &abci.TxRecord{ + Action: abci.TxRecord_UNMODIFIED, + Tx: tx, + } + } + return trs +} diff --git a/internal/state/export_test.go b/internal/state/export_test.go index 8fee0fb84a..5f41108653 100644 --- a/internal/state/export_test.go +++ b/internal/state/export_test.go @@ -1,41 +1,10 @@ package state import ( - "github.com/tendermint/tendermint/crypto" - abci "github.com/tendermint/tendermint/abci/types" - tmstate "github.com/tendermint/tendermint/proto/tendermint/state" "github.com/tendermint/tendermint/types" ) -// -// TODO: Remove dependence on all entities exported from this file. -// -// Every entity exported here is dependent on a private entity from the `state` -// package. Currently, these functions are only made available to tests in the -// `state_test` package, but we should not be relying on them for our testing. -// Instead, we should be exclusively relying on exported entities for our -// testing, and should be refactoring exported entities to make them more -// easily testable from outside of the package. -// - -// UpdateState is an alias for updateState exported from execution.go, -// exclusively and explicitly for testing. -func UpdateState( - state State, - nodeProTxHash crypto.ProTxHash, - blockID types.BlockID, - header *types.Header, - abciResponses *tmstate.ABCIResponses, - validatorUpdates []*types.Validator, - newThresholdPublicKey crypto.PubKey, - quorumHash crypto.QuorumHash, -) (State, error) { - return updateState( - state, nodeProTxHash, blockID, header, abciResponses, validatorUpdates, newThresholdPublicKey, quorumHash, - ) -} - // ValidateValidatorUpdates is an alias for validateValidatorUpdates exported // from execution.go, exclusively and explicitly for testing. 
func ValidateValidatorUpdates(abciUpdates []abci.ValidatorUpdate, params types.ValidatorParams) error { diff --git a/internal/state/helpers_test.go b/internal/state/helpers_test.go index 828d5d6e3e..0f04ebc53c 100644 --- a/internal/state/helpers_test.go +++ b/internal/state/helpers_test.go @@ -2,19 +2,20 @@ package state_test import ( + "context" "fmt" + "testing" + "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" - abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/bls12381" - "github.com/tendermint/tendermint/internal/proxy" + "github.com/tendermint/tendermint/crypto/encoding" sm "github.com/tendermint/tendermint/internal/state" sf "github.com/tendermint/tendermint/internal/state/test/factory" "github.com/tendermint/tendermint/internal/test/factory" - tmrand "github.com/tendermint/tendermint/libs/rand" tmtime "github.com/tendermint/tendermint/libs/time" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" @@ -26,13 +27,9 @@ type paramsChangeTestCase struct { params types.ConsensusParams } -func newTestApp() proxy.AppConns { - app := &testApp{} - cc := abciclient.NewLocalCreator(app) - return proxy.NewAppConns(cc, proxy.NopMetrics()) -} - func makeAndCommitGoodBlock( + ctx context.Context, + t *testing.T, state sm.State, nodeProTxHash crypto.ProTxHash, height int64, @@ -40,67 +37,77 @@ func makeAndCommitGoodBlock( proposerProTxHash crypto.ProTxHash, blockExec *sm.BlockExecutor, privVals map[string]types.PrivValidator, - evidence []types.Evidence, proposedAppVersion uint64, -) (sm.State, types.BlockID, *types.Commit, error) { + evidence []types.Evidence, + proposedAppVersion uint64, +) (sm.State, types.BlockID, *types.Commit) { + t.Helper() // A good block passes - state, blockID, err := makeAndApplyGoodBlock(state, nodeProTxHash, height, lastCommit, proposerProTxHash, blockExec, evidence, proposedAppVersion) - if err != nil { - return state, types.BlockID{}, nil, err - } + state, blockID := makeAndApplyGoodBlock(ctx, t, state, nodeProTxHash, height, lastCommit, proposerProTxHash, blockExec, evidence, proposedAppVersion) // Simulate a lastCommit for this block from all validators for the next height - commit, err := makeValidCommit(height, blockID, state.LastStateID, state.Validators, privVals) - if err != nil { - return state, types.BlockID{}, nil, err - } - return state, blockID, commit, nil + commit, _ := makeValidCommit(ctx, t, height, blockID, state.LastStateID, state.Validators, privVals) + + return state, blockID, commit } -func makeAndApplyGoodBlock(state sm.State, nodeProTxHash crypto.ProTxHash, height int64, lastCommit *types.Commit, proposerProTxHash []byte, - blockExec *sm.BlockExecutor, evidence []types.Evidence, proposedAppVersion uint64) (sm.State, types.BlockID, error) { - block, _ := state.MakeBlock(height, nil, factory.MakeTenTxs(height), lastCommit, evidence, proposerProTxHash, proposedAppVersion) - if err := blockExec.ValidateBlock(state, block); err != nil { - return state, types.BlockID{}, err - } +func makeAndApplyGoodBlock( + ctx context.Context, + t *testing.T, + state sm.State, + nodeProTxHash crypto.ProTxHash, + height int64, + lastCommit *types.Commit, + proposerProTxHash []byte, + blockExec *sm.BlockExecutor, + evidence []types.Evidence, + proposedAppVersion uint64, +) (sm.State, types.BlockID) { + t.Helper() + block := 
state.MakeBlock(height, nil, factory.MakeNTxs(height, 10), lastCommit, evidence, proposerProTxHash, proposedAppVersion) + partSet, err := block.MakePartSet(types.BlockPartSizeBytes) + require.NoError(t, err) + + require.NoError(t, blockExec.ValidateBlock(ctx, state, block)) blockID := types.BlockID{Hash: block.Hash(), - PartSetHeader: types.PartSetHeader{Total: 3, Hash: tmrand.Bytes(32)}} - state, err := blockExec.ApplyBlock(state, nodeProTxHash, blockID, block) - if err != nil { - return state, types.BlockID{}, err - } + PartSetHeader: partSet.Header()} + state, err = blockExec.ApplyBlock(ctx, state, nodeProTxHash, blockID, block) + require.NoError(t, err) - return state, blockID, nil + return state, blockID } func makeValidCommit( + ctx context.Context, + t *testing.T, height int64, blockID types.BlockID, stateID types.StateID, vals *types.ValidatorSet, privVals map[string]types.PrivValidator, -) (*types.Commit, error) { - var blockSigs [][]byte - var stateSigs [][]byte - var blsIDs [][]byte +) (*types.Commit, []*types.Vote) { + t.Helper() + var ( + blockSigs [][]byte + stateSigs [][]byte + blsIDs [][]byte + ) + votes := make([]*types.Vote, vals.Size()) for i := 0; i < vals.Size(); i++ { _, val := vals.GetByIndex(int32(i)) - vote, err := factory.MakeVote(privVals[val.ProTxHash.String()], vals, chainID, int32(i), height, 0, 2, blockID, stateID) - if err != nil { - return nil, err - } + vote, err := factory.MakeVote(ctx, privVals[val.ProTxHash.String()], vals, chainID, int32(i), height, 0, 2, blockID, stateID) + require.NoError(t, err) blockSigs = append(blockSigs, vote.BlockSignature) stateSigs = append(stateSigs, vote.StateSignature) blsIDs = append(blsIDs, vote.ValidatorProTxHash) + votes[i] = vote } - thresholdBlockSig, _ := bls12381.RecoverThresholdSignatureFromShares(blockSigs, blsIDs) thresholdStateSig, _ := bls12381.RecoverThresholdSignatureFromShares(stateSigs, blsIDs) - - return types.NewCommit(height, 0, blockID, stateID, vals.QuorumHash, thresholdBlockSig, thresholdStateSig), nil + return types.NewCommit(height, 0, blockID, stateID, vals.QuorumHash, thresholdBlockSig, thresholdStateSig), votes } -func makeState(nVals int, height int64) (sm.State, dbm.DB, map[string]types.PrivValidator) { +func makeState(t *testing.T, nVals, height int) (sm.State, dbm.DB, map[string]types.PrivValidator) { privValsByProTxHash := make(map[string]types.PrivValidator, nVals) vals, privVals := types.RandValidatorSet(nVals) genVals := types.MakeGenesisValsFromValidatorSet(vals) @@ -119,53 +126,85 @@ func makeState(nVals int, height int64) (sm.State, dbm.DB, map[string]types.Priv stateDB := dbm.NewMemDB() stateStore := sm.NewStore(stateDB) - if err := stateStore.Save(s); err != nil { - panic(err) - } + require.NoError(t, stateStore.Save(s)) - for i := int64(1); i < height; i++ { + for i := 1; i < height; i++ { s.LastBlockHeight++ s.LastValidators = s.Validators.Copy() - if err := stateStore.Save(s); err != nil { - panic(err) - } + + require.NoError(t, stateStore.Save(s)) } return s, stateDB, privValsByProTxHash } -func makeHeaderPartsResponsesValKeysRegenerate(state sm.State, regenerate bool) (types.Header, *types.CoreChainLock, types.BlockID, *tmstate.ABCIResponses, error) { +func makeHeaderPartsResponsesValPowerChange( + t *testing.T, + state sm.State, + power int64, +) (types.Header, *types.CoreChainLock, types.BlockID, *tmstate.ABCIResponses) { + t.Helper() + + block, err := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit), nil, 0) + require.NoError(t, err) + + abciResponses := 
&tmstate.ABCIResponses{} + + abciResponses.FinalizeBlock = &abci.ResponseFinalizeBlock{} + // If the pubkey is new, remove the old and add the new. + _, val := state.NextValidators.GetByIndex(0) + if val.VotingPower != power { + vPbPk, err := encoding.PubKeyToProto(val.PubKey) + require.NoError(t, err) + thresholdPubKey, err := encoding.PubKeyToProto(state.NextValidators.ThresholdPublicKey) + require.NoError(t, err) + + abciResponses.FinalizeBlock = &abci.ResponseFinalizeBlock{ + ValidatorSetUpdate: &abci.ValidatorSetUpdate{ + ValidatorUpdates: []abci.ValidatorUpdate{ + {PubKey: &vPbPk, Power: power}, + }, + ThresholdPublicKey: thresholdPubKey, + QuorumHash: state.NextValidators.QuorumHash, + }, + } + } + + return block.Header, block.CoreChainLock, types.BlockID{Hash: block.Hash(), PartSetHeader: types.PartSetHeader{}}, abciResponses +} + +func makeHeaderPartsResponsesValKeysRegenerate(t *testing.T, state sm.State, regenerate bool) (types.Header, *types.CoreChainLock, types.BlockID, *tmstate.ABCIResponses) { block, err := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit), nil, 0) if err != nil { - return types.Header{}, nil, types.BlockID{}, nil, err + t.Error(err) } abciResponses := &tmstate.ABCIResponses{ - BeginBlock: &abci.ResponseBeginBlock{}, - EndBlock: &abci.ResponseEndBlock{ValidatorSetUpdate: nil}, + FinalizeBlock: &abci.ResponseFinalizeBlock{ValidatorSetUpdate: nil}, } if regenerate == true { proTxHashes := state.Validators.GetProTxHashes() valUpdates := types.ValidatorUpdatesRegenerateOnProTxHashes(proTxHashes) - abciResponses.EndBlock = &abci.ResponseEndBlock{ + abciResponses.FinalizeBlock = &abci.ResponseFinalizeBlock{ ValidatorSetUpdate: &valUpdates, } } - - return block.Header, block.CoreChainLock, types.BlockID{Hash: block.Hash(), PartSetHeader: types.PartSetHeader{}}, abciResponses, nil + return block.Header, block.CoreChainLock, types.BlockID{Hash: block.Hash(), PartSetHeader: types.PartSetHeader{}}, abciResponses } func makeHeaderPartsResponsesParams( + t *testing.T, state sm.State, - params types.ConsensusParams, -) (types.Header, *types.CoreChainLock, types.BlockID, *tmstate.ABCIResponses, error) { + params *types.ConsensusParams, +) (types.Header, *types.CoreChainLock, types.BlockID, *tmstate.ABCIResponses) { + t.Helper() block, err := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit), nil, 0) + require.NoError(t, err) pbParams := params.ToProto() abciResponses := &tmstate.ABCIResponses{ - BeginBlock: &abci.ResponseBeginBlock{}, - EndBlock: &abci.ResponseEndBlock{ConsensusParamUpdates: &pbParams}, + FinalizeBlock: &abci.ResponseFinalizeBlock{ConsensusParamUpdates: &pbParams}, } - return block.Header, block.CoreChainLock, types.BlockID{Hash: block.Hash(), PartSetHeader: types.PartSetHeader{}}, abciResponses, err + return block.Header, block.CoreChainLock, types.BlockID{Hash: block.Hash(), PartSetHeader: types.PartSetHeader{}}, abciResponses } func randomGenesisDoc() *types.GenesisDoc { @@ -203,9 +242,14 @@ func makeRandomStateFromValidatorSet( InitialHeight: 1, } } - -func makeRandomStateFromConsensusParams(consensusParams *types.ConsensusParams, - height, lastHeightConsensusParamsChanged int64) sm.State { +func makeRandomStateFromConsensusParams( + ctx context.Context, + t *testing.T, + consensusParams *types.ConsensusParams, + height, + lastHeightConsensusParamsChanged int64, +) sm.State { + t.Helper() valSet, _ := types.RandValidatorSet(1) return sm.State{ LastBlockHeight: height - 1, @@ -224,41 +268,57 @@ func 
makeRandomStateFromConsensusParams(consensusParams *types.ConsensusParams, type testApp struct { abci.BaseApplication - ByzantineValidators []abci.Evidence + ByzantineValidators []abci.Misbehavior ValidatorSetUpdate *abci.ValidatorSetUpdate } var _ abci.Application = (*testApp)(nil) -func (app *testApp) Info(req abci.RequestInfo) (resInfo abci.ResponseInfo) { - return abci.ResponseInfo{} +func (app *testApp) Info(_ context.Context, req *abci.RequestInfo) (*abci.ResponseInfo, error) { + return &abci.ResponseInfo{}, nil } -func (app *testApp) BeginBlock(req abci.RequestBeginBlock) abci.ResponseBeginBlock { +func (app *testApp) FinalizeBlock(_ context.Context, req *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) { app.ByzantineValidators = req.ByzantineValidators - return abci.ResponseBeginBlock{} -} -func (app *testApp) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock { - return abci.ResponseEndBlock{ + resTxs := make([]*abci.ExecTxResult, len(req.Txs)) + for i, tx := range req.Txs { + if len(tx) > 0 { + resTxs[i] = &abci.ExecTxResult{Code: abci.CodeTypeOK} + } else { + resTxs[i] = &abci.ExecTxResult{Code: abci.CodeTypeOK + 10} // error + } + } + + return &abci.ResponseFinalizeBlock{ ValidatorSetUpdate: app.ValidatorSetUpdate, ConsensusParamUpdates: &tmproto.ConsensusParams{ Version: &tmproto.VersionParams{ - AppVersion: 1}}} + AppVersion: 1, + }, + }, + Events: []abci.Event{}, + TxResults: resTxs, + }, nil } -func (app *testApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx { - return abci.ResponseDeliverTx{Events: []abci.Event{}} +func (app *testApp) CheckTx(_ context.Context, req *abci.RequestCheckTx) (*abci.ResponseCheckTx, error) { + return &abci.ResponseCheckTx{}, nil } -func (app *testApp) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx { - return abci.ResponseCheckTx{} +func (app *testApp) Commit(context.Context) (*abci.ResponseCommit, error) { + return &abci.ResponseCommit{RetainHeight: 1}, nil } -func (app *testApp) Commit() abci.ResponseCommit { - return abci.ResponseCommit{RetainHeight: 1} +func (app *testApp) Query(_ context.Context, req *abci.RequestQuery) (*abci.ResponseQuery, error) { + return &abci.ResponseQuery{}, nil } -func (app *testApp) Query(reqQuery abci.RequestQuery) (resQuery abci.ResponseQuery) { - return +func (app *testApp) ProcessProposal(_ context.Context, req *abci.RequestProcessProposal) (*abci.ResponseProcessProposal, error) { + for _, tx := range req.Txs { + if len(tx) == 0 { + return &abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_REJECT}, nil + } + } + return &abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_ACCEPT}, nil } diff --git a/internal/state/indexer/block/kv/kv.go b/internal/state/indexer/block/kv/kv.go index 26fdcf1fc9..5356b4c07b 100644 --- a/internal/state/indexer/block/kv/kv.go +++ b/internal/state/indexer/block/kv/kv.go @@ -12,15 +12,15 @@ import ( dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/pubsub/query" + "github.com/tendermint/tendermint/internal/pubsub/query/syntax" "github.com/tendermint/tendermint/internal/state/indexer" - "github.com/tendermint/tendermint/libs/pubsub/query" - "github.com/tendermint/tendermint/libs/pubsub/query/syntax" "github.com/tendermint/tendermint/types" ) var _ indexer.BlockIndexer = (*BlockerIndexer)(nil) -// BlockerIndexer implements a block indexer, indexing BeginBlock and EndBlock +// BlockerIndexer implements a block indexer, indexing 
FinalizeBlock // events with an underlying KV store. Block events are indexed by their height, // such that matching search criteria returns the respective block height(s). type BlockerIndexer struct { @@ -44,12 +44,11 @@ func (idx *BlockerIndexer) Has(height int64) (bool, error) { return idx.store.Has(key) } -// Index indexes BeginBlock and EndBlock events for a given block by its height. +// Index indexes FinalizeBlock events for a given block by its height. // The following is indexed: // // primary key: encode(block.height | height) => encode(height) -// BeginBlock events: encode(eventType.eventAttr|eventValue|height|begin_block) => encode(height) -// EndBlock events: encode(eventType.eventAttr|eventValue|height|end_block) => encode(height) +// FinalizeBlock events: encode(eventType.eventAttr|eventValue|height|finalize_block) => encode(height) func (idx *BlockerIndexer) Index(bh types.EventDataNewBlockHeader) error { batch := idx.store.NewBatch() defer batch.Close() @@ -65,24 +64,19 @@ func (idx *BlockerIndexer) Index(bh types.EventDataNewBlockHeader) error { return err } - // 2. index BeginBlock events - if err := idx.indexEvents(batch, bh.ResultBeginBlock.Events, "begin_block", height); err != nil { - return fmt.Errorf("failed to index BeginBlock events: %w", err) - } - - // 3. index EndBlock events - if err := idx.indexEvents(batch, bh.ResultEndBlock.Events, "end_block", height); err != nil { - return fmt.Errorf("failed to index EndBlock events: %w", err) + // 2. index FinalizeBlock events + if err := idx.indexEvents(batch, bh.ResultFinalizeBlock.Events, types.EventTypeFinalizeBlock, height); err != nil { + return fmt.Errorf("failed to index FinalizeBlock events: %w", err) } return batch.WriteSync() } -// Search performs a query for block heights that match a given BeginBlock -// and Endblock event search criteria. The given query can match against zero, -// one or more block heights. In the case of height queries, i.e. block.height=H, -// if the height is indexed, that height alone will be returned. An error and -// nil slice is returned. Otherwise, a non-nil slice and nil error is returned. +// Search performs a query for block heights that match a given FinalizeBlock +// The given query can match against zero or more block heights. In the case +// of height queries, i.e. block.height=H, if the height is indexed, that height +// alone will be returned. An error and nil slice is returned. Otherwise, a +// non-nil slice and nil error is returned. 
func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, error) { results := make([]int64, 0) select { diff --git a/internal/state/indexer/block/kv/kv_test.go b/internal/state/indexer/block/kv/kv_test.go index 650723dbf9..0bca43848b 100644 --- a/internal/state/indexer/block/kv/kv_test.go +++ b/internal/state/indexer/block/kv/kv_test.go @@ -9,8 +9,8 @@ import ( dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/pubsub/query" blockidxkv "github.com/tendermint/tendermint/internal/state/indexer/block/kv" - "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/types" ) @@ -20,10 +20,10 @@ func TestBlockIndexer(t *testing.T) { require.NoError(t, indexer.Index(types.EventDataNewBlockHeader{ Header: types.Header{Height: 1}, - ResultBeginBlock: abci.ResponseBeginBlock{ + ResultFinalizeBlock: abci.ResponseFinalizeBlock{ Events: []abci.Event{ { - Type: "begin_event", + Type: "finalize_event1", Attributes: []abci.EventAttribute{ { Key: "proposer", @@ -32,12 +32,8 @@ func TestBlockIndexer(t *testing.T) { }, }, }, - }, - }, - ResultEndBlock: abci.ResponseEndBlock{ - Events: []abci.Event{ { - Type: "end_event", + Type: "finalize_event2", Attributes: []abci.EventAttribute{ { Key: "foo", @@ -55,13 +51,12 @@ func TestBlockIndexer(t *testing.T) { if i%2 == 0 { index = true } - require.NoError(t, indexer.Index(types.EventDataNewBlockHeader{ Header: types.Header{Height: int64(i)}, - ResultBeginBlock: abci.ResponseBeginBlock{ + ResultFinalizeBlock: abci.ResponseFinalizeBlock{ Events: []abci.Event{ { - Type: "begin_event", + Type: "finalize_event1", Attributes: []abci.EventAttribute{ { Key: "proposer", @@ -70,12 +65,8 @@ func TestBlockIndexer(t *testing.T) { }, }, }, - }, - }, - ResultEndBlock: abci.ResponseEndBlock{ - Events: []abci.Event{ { - Type: "end_event", + Type: "finalize_event2", Attributes: []abci.EventAttribute{ { Key: "foo", @@ -101,32 +92,32 @@ func TestBlockIndexer(t *testing.T) { q: query.MustCompile(`block.height = 5`), results: []int64{5}, }, - "begin_event.key1 = 'value1'": { - q: query.MustCompile(`begin_event.key1 = 'value1'`), + "finalize_event.key1 = 'value1'": { + q: query.MustCompile(`finalize_event1.key1 = 'value1'`), results: []int64{}, }, - "begin_event.proposer = 'FCAA001'": { - q: query.MustCompile(`begin_event.proposer = 'FCAA001'`), + "finalize_event.proposer = 'FCAA001'": { + q: query.MustCompile(`finalize_event1.proposer = 'FCAA001'`), results: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, }, - "end_event.foo <= 5": { - q: query.MustCompile(`end_event.foo <= 5`), + "finalize_event.foo <= 5": { + q: query.MustCompile(`finalize_event2.foo <= 5`), results: []int64{2, 4}, }, - "end_event.foo >= 100": { - q: query.MustCompile(`end_event.foo >= 100`), + "finalize_event.foo >= 100": { + q: query.MustCompile(`finalize_event2.foo >= 100`), results: []int64{1}, }, - "block.height > 2 AND end_event.foo <= 8": { - q: query.MustCompile(`block.height > 2 AND end_event.foo <= 8`), + "block.height > 2 AND finalize_event2.foo <= 8": { + q: query.MustCompile(`block.height > 2 AND finalize_event2.foo <= 8`), results: []int64{4, 6, 8}, }, - "begin_event.proposer CONTAINS 'FFFFFFF'": { - q: query.MustCompile(`begin_event.proposer CONTAINS 'FFFFFFF'`), + "finalize_event.proposer CONTAINS 'FFFFFFF'": { + q: query.MustCompile(`finalize_event1.proposer CONTAINS 'FFFFFFF'`), results: []int64{}, }, - "begin_event.proposer CONTAINS 'FCAA001'": { - q: 
query.MustCompile(`begin_event.proposer CONTAINS 'FCAA001'`), + "finalize_event.proposer CONTAINS 'FCAA001'": { + q: query.MustCompile(`finalize_event1.proposer CONTAINS 'FCAA001'`), results: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, }, } @@ -134,7 +125,10 @@ func TestBlockIndexer(t *testing.T) { for name, tc := range testCases { tc := tc t.Run(name, func(t *testing.T) { - results, err := indexer.Search(context.Background(), tc.q) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + results, err := indexer.Search(ctx, tc.q) require.NoError(t, err) require.Equal(t, tc.results, results) }) diff --git a/internal/state/indexer/block/kv/util.go b/internal/state/indexer/block/kv/util.go index fff88046cf..fd68462739 100644 --- a/internal/state/indexer/block/kv/util.go +++ b/internal/state/indexer/block/kv/util.go @@ -6,7 +6,8 @@ import ( "strconv" "github.com/google/orderedcode" - "github.com/tendermint/tendermint/libs/pubsub/query/syntax" + + "github.com/tendermint/tendermint/internal/pubsub/query/syntax" "github.com/tendermint/tendermint/types" ) diff --git a/internal/state/indexer/block/null/null.go b/internal/state/indexer/block/null/null.go index 9b28d93bba..7d5453848e 100644 --- a/internal/state/indexer/block/null/null.go +++ b/internal/state/indexer/block/null/null.go @@ -4,8 +4,8 @@ import ( "context" "errors" + "github.com/tendermint/tendermint/internal/pubsub/query" "github.com/tendermint/tendermint/internal/state/indexer" - "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/types" ) diff --git a/internal/state/indexer/eventsink.go b/internal/state/indexer/eventsink.go index dba50b6af7..9b4d6f5614 100644 --- a/internal/state/indexer/eventsink.go +++ b/internal/state/indexer/eventsink.go @@ -4,7 +4,7 @@ import ( "context" abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/internal/pubsub/query" "github.com/tendermint/tendermint/types" ) diff --git a/internal/state/indexer/indexer.go b/internal/state/indexer/indexer.go index 24dc62d70e..7ff6733db3 100644 --- a/internal/state/indexer/indexer.go +++ b/internal/state/indexer/indexer.go @@ -5,7 +5,7 @@ import ( "errors" abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/internal/pubsub/query" "github.com/tendermint/tendermint/types" ) @@ -30,37 +30,37 @@ type BlockIndexer interface { // upon database query failure. Has(height int64) (bool, error) - // Index indexes BeginBlock and EndBlock events for a given block by its height. + // Index indexes FinalizeBlock events for a given block by its height. Index(types.EventDataNewBlockHeader) error - // Search performs a query for block heights that match a given BeginBlock - // and Endblock event search criteria. + // Search performs a query for block heights that match a given FinalizeBlock + // event search criteria. Search(ctx context.Context, q *query.Query) ([]int64, error) } // Batch groups together multiple Index operations to be performed at the same time. // NOTE: Batch is NOT thread-safe and must not be modified after starting its execution. type Batch struct { - Ops []*abci.TxResult + Ops []*abci.TxResult + Pending int64 } // NewBatch creates a new Batch. 
func NewBatch(n int64) *Batch { - return &Batch{ - Ops: make([]*abci.TxResult, n), - } + return &Batch{Ops: make([]*abci.TxResult, n), Pending: n} } // Add or update an entry for the given result.Index. func (b *Batch) Add(result *abci.TxResult) error { - b.Ops[result.Index] = result + if b.Ops[result.Index] == nil { + b.Pending-- + b.Ops[result.Index] = result + } return nil } // Size returns the total number of operations inside the batch. -func (b *Batch) Size() int { - return len(b.Ops) -} +func (b *Batch) Size() int { return len(b.Ops) } // ErrorEmptyHash indicates empty hash var ErrorEmptyHash = errors.New("transaction hash cannot be empty") diff --git a/internal/state/indexer/indexer_service.go b/internal/state/indexer/indexer_service.go index c00b1e54bf..e73e4a3ba2 100644 --- a/internal/state/indexer/indexer_service.go +++ b/internal/state/indexer/indexer_service.go @@ -4,30 +4,34 @@ import ( "context" "time" + "github.com/tendermint/tendermint/internal/eventbus" + "github.com/tendermint/tendermint/internal/pubsub" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" "github.com/tendermint/tendermint/types" ) -// XXX/TODO: These types should be moved to the indexer package. - -const ( - subscriber = "IndexerService" -) - // Service connects event bus, transaction and block indexers together in // order to index transactions and blocks coming from the event bus. type Service struct { service.BaseService + logger log.Logger eventSinks []EventSink - eventBus *types.EventBus + eventBus *eventbus.EventBus metrics *Metrics + + currentBlock struct { + header types.EventDataNewBlockHeader + height int64 + batch *Batch + } } // NewService constructs a new indexer service from the given arguments. func NewService(args ServiceArgs) *Service { is := &Service{ + logger: args.Logger, eventSinks: args.Sinks, eventBus: args.EventBus, metrics: args.Metrics, @@ -39,101 +43,99 @@ func NewService(args ServiceArgs) *Service { return is } -// NewIndexerService returns a new service instance. -// Deprecated: Use NewService instead. -func NewIndexerService(es []EventSink, eventBus *types.EventBus) *Service { - return NewService(ServiceArgs{ - Sinks: es, - EventBus: eventBus, - }) -} - -// OnStart implements service.Service by subscribing for all transactions -// and indexing them by events. -func (is *Service) OnStart() error { - // Use SubscribeUnbuffered here to ensure both subscriptions does not get - // canceled due to not pulling messages fast enough. Cause this might - // sometimes happen when there are no other subscribers. - blockHeadersSub, err := is.eventBus.SubscribeUnbuffered( - context.Background(), - subscriber, - types.EventQueryNewBlockHeader) - if err != nil { - return err +// publish publishes a pubsub message to the service. The service blocks until +// the message has been fully processed. +func (is *Service) publish(msg pubsub.Message) error { + // Indexing has three states. Initially, no block is in progress (WAIT) and + // we expect a block header. Upon seeing a header, we are waiting for zero + // or more transactions (GATHER). Once all the expected transactions have + // been delivered (in some order), we are ready to index. After indexing a + // block, we revert to the WAIT state for the next block. + + if is.currentBlock.batch == nil { + // WAIT: Start a new block. 
+ hdr := msg.Data().(types.EventDataNewBlockHeader) + is.currentBlock.header = hdr + is.currentBlock.height = hdr.Header.Height + is.currentBlock.batch = NewBatch(hdr.NumTxs) + + if hdr.NumTxs != 0 { + return nil + } + // If the block does not expect any transactions, fall through and index + // it immediately. This shouldn't happen, but this check ensures we do + // not get stuck if it does. } - txsSub, err := is.eventBus.SubscribeUnbuffered(context.Background(), subscriber, types.EventQueryTx) - if err != nil { - return err - } + curr := is.currentBlock.batch + if curr.Pending != 0 { + // GATHER: Accumulate a transaction into the current block's batch. + txResult := msg.Data().(types.EventDataTx).TxResult + if err := curr.Add(&txResult); err != nil { + is.logger.Error("failed to add tx to batch", + "height", is.currentBlock.height, "index", txResult.Index, "err", err) + } - go func() { - for { - select { - case <-blockHeadersSub.Canceled(): - return - case msg := <-blockHeadersSub.Out(): - - eventDataHeader := msg.Data().(types.EventDataNewBlockHeader) - height := eventDataHeader.Header.Height - batch := NewBatch(eventDataHeader.NumTxs) - - for i := int64(0); i < eventDataHeader.NumTxs; i++ { - msg2 := <-txsSub.Out() - txResult := msg2.Data().(types.EventDataTx).TxResult - - if err = batch.Add(&txResult); err != nil { - is.Logger.Error( - "failed to add tx to batch", - "height", height, - "index", txResult.Index, - "err", err, - ) - } - } + // This may have been the last transaction in the batch, so fall through + // to check whether it is time to index. + } - if !IndexingEnabled(is.eventSinks) { - continue - } + if curr.Pending == 0 { + // INDEX: We have all the transactions we expect for the current block. + for _, sink := range is.eventSinks { + start := time.Now() + if err := sink.IndexBlockEvents(is.currentBlock.header); err != nil { + is.logger.Error("failed to index block header", + "height", is.currentBlock.height, "err", err) + } else { + is.metrics.BlockEventsSeconds.Observe(time.Since(start).Seconds()) + is.metrics.BlocksIndexed.Add(1) + is.logger.Debug("indexed block", + "height", is.currentBlock.height, "sink", sink.Type()) + } - for _, sink := range is.eventSinks { - start := time.Now() - if err := sink.IndexBlockEvents(eventDataHeader); err != nil { - is.Logger.Error("failed to index block", "height", height, "err", err) - } else { - is.metrics.BlockEventsSeconds.Observe(time.Since(start).Seconds()) - is.metrics.BlocksIndexed.Add(1) - is.Logger.Debug("indexed block", "height", height, "sink", sink.Type()) - } - - if len(batch.Ops) > 0 { - start := time.Now() - err := sink.IndexTxEvents(batch.Ops) - if err != nil { - is.Logger.Error("failed to index block txs", "height", height, "err", err) - } else { - is.metrics.TxEventsSeconds.Observe(time.Since(start).Seconds()) - is.metrics.TransactionsIndexed.Add(float64(len(batch.Ops))) - is.Logger.Debug("indexed txs", "height", height, "sink", sink.Type()) - } - } + if curr.Size() != 0 { + start := time.Now() + err := sink.IndexTxEvents(curr.Ops) + if err != nil { + is.logger.Error("failed to index block txs", + "height", is.currentBlock.height, "err", err) + } else { + is.metrics.TxEventsSeconds.Observe(time.Since(start).Seconds()) + is.metrics.TransactionsIndexed.Add(float64(curr.Size())) + is.logger.Debug("indexed txs", + "height", is.currentBlock.height, "sink", sink.Type()) } } } - }() + is.currentBlock.batch = nil // return to the WAIT state for the next block + } + return nil } -// OnStop implements service.Service by 
unsubscribing from all transactions and -// close the eventsink. -func (is *Service) OnStop() { - if is.eventBus.IsRunning() { - _ = is.eventBus.UnsubscribeAll(context.Background(), subscriber) +// OnStart implements part of service.Service. It registers an observer for the +// indexer if the underlying event sinks support indexing. +// +// TODO(creachadair): Can we get rid of the "enabled" check? +func (is *Service) OnStart(ctx context.Context) error { + // If the event sinks support indexing, register an observer to capture + // block header data for the indexer. + if IndexingEnabled(is.eventSinks) { + err := is.eventBus.Observe(ctx, is.publish, + types.EventQueryNewBlockHeader, types.EventQueryTx) + if err != nil { + return err + } } + return nil +} +// OnStop implements service.Service by closing the event sinks. +func (is *Service) OnStop() { for _, sink := range is.eventSinks { if err := sink.Stop(); err != nil { - is.Logger.Error("failed to close eventsink", "eventsink", sink.Type(), "err", err) + is.logger.Error("failed to close eventsink", "eventsink", sink.Type(), "err", err) } } } @@ -141,7 +143,7 @@ func (is *Service) OnStop() { // ServiceArgs are arguments for constructing a new indexer service. type ServiceArgs struct { Sinks []EventSink - EventBus *types.EventBus + EventBus *eventbus.EventBus Metrics *Metrics Logger log.Logger } diff --git a/internal/state/indexer/indexer_service_test.go b/internal/state/indexer/indexer_service_test.go index d9f29b6773..6126ae2595 100644 --- a/internal/state/indexer/indexer_service_test.go +++ b/internal/state/indexer/indexer_service_test.go @@ -1,24 +1,25 @@ package indexer_test import ( + "context" "database/sql" "fmt" - "io/ioutil" "os" "testing" "time" "github.com/adlio/schema" - dockertest "github.com/ory/dockertest" + "github.com/ory/dockertest" "github.com/ory/dockertest/docker" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" - indexer "github.com/tendermint/tendermint/internal/state/indexer" - kv "github.com/tendermint/tendermint/internal/state/indexer/sink/kv" - psql "github.com/tendermint/tendermint/internal/state/indexer/sink/psql" + "github.com/tendermint/tendermint/internal/eventbus" + "github.com/tendermint/tendermint/internal/state/indexer" + "github.com/tendermint/tendermint/internal/state/indexer/sink/kv" + "github.com/tendermint/tendermint/internal/state/indexer/sink/psql" tmlog "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/types" @@ -39,38 +40,34 @@ var ( ) func TestIndexerServiceIndexesBlocks(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := tmlog.NewNopLogger() // event bus - eventBus := types.NewEventBus() - eventBus.SetLogger(tmlog.TestingLogger()) - err := eventBus.Start() + eventBus := eventbus.NewDefault(logger) + err := eventBus.Start(ctx) require.NoError(t, err) - t.Cleanup(func() { - if err := eventBus.Stop(); err != nil { - t.Error(err) - } - }) + t.Cleanup(eventBus.Wait) assert.False(t, indexer.KVSinkEnabled([]indexer.EventSink{})) assert.False(t, indexer.IndexingEnabled([]indexer.EventSink{})) // event sink setup - pool, err := setupDB(t) - assert.Nil(t, err) + pool := setupDB(t) store := dbm.NewMemDB() eventSinks := []indexer.EventSink{kv.NewEventSink(store), pSink} assert.True(t, indexer.KVSinkEnabled(eventSinks)) assert.True(t, indexer.IndexingEnabled(eventSinks)) - service := 
indexer.NewIndexerService(eventSinks, eventBus) - service.SetLogger(tmlog.TestingLogger()) - err = service.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := service.Stop(); err != nil { - t.Error(err) - } + service := indexer.NewService(indexer.ServiceArgs{ + Logger: logger, + Sinks: eventSinks, + EventBus: eventBus, }) + require.NoError(t, service.Start(ctx)) + t.Cleanup(service.Wait) // publish block with txs err = eventBus.PublishEventNewBlockHeader(types.EventDataNewBlockHeader{ @@ -82,7 +79,7 @@ func TestIndexerServiceIndexesBlocks(t *testing.T) { Height: 1, Index: uint32(0), Tx: types.Tx("foo"), - Result: abci.ResponseDeliverTx{Code: 0}, + Result: abci.ExecTxResult{Code: 0}, } err = eventBus.PublishEventTx(types.EventDataTx{TxResult: *txResult1}) require.NoError(t, err) @@ -90,7 +87,7 @@ func TestIndexerServiceIndexesBlocks(t *testing.T) { Height: 1, Index: uint32(1), Tx: types.Tx("bar"), - Result: abci.ResponseDeliverTx{Code: 0}, + Result: abci.ExecTxResult{Code: 0}, } err = eventBus.PublishEventTx(types.EventDataTx{TxResult: *txResult2}) require.NoError(t, err) @@ -114,7 +111,7 @@ func TestIndexerServiceIndexesBlocks(t *testing.T) { func readSchema() ([]*schema.Migration, error) { filename := "./sink/psql/schema.sql" - contents, err := ioutil.ReadFile(filename) + contents, err := os.ReadFile(filename) if err != nil { return nil, fmt.Errorf("failed to read sql file from '%s': %w", filename, err) } @@ -128,17 +125,20 @@ func readSchema() ([]*schema.Migration, error) { func resetDB(t *testing.T) { q := "DROP TABLE IF EXISTS block_events,tx_events,tx_results" _, err := psqldb.Exec(q) - assert.Nil(t, err) + assert.NoError(t, err) q = "DROP TYPE IF EXISTS block_event_type" _, err = psqldb.Exec(q) - assert.Nil(t, err) + assert.NoError(t, err) } -func setupDB(t *testing.T) (*dockertest.Pool, error) { +func setupDB(t *testing.T) *dockertest.Pool { t.Helper() pool, err := dockertest.NewPool(os.Getenv("DOCKER_URL")) - assert.Nil(t, err) + assert.NoError(t, err) + if _, err := pool.Client.Info(); err != nil { + t.Skipf("WARNING: Docker is not available: %v [skipping this test]", err) + } resource, err = pool.RunWithOptions(&dockertest.RunOptions{ Repository: "postgres", @@ -158,7 +158,7 @@ func setupDB(t *testing.T) (*dockertest.Pool, error) { } }) - assert.Nil(t, err) + assert.NoError(t, err) // Set the container to expire in a minute to avoid orphaned containers // hanging around @@ -180,12 +180,13 @@ func setupDB(t *testing.T) (*dockertest.Pool, error) { resetDB(t) sm, err := readSchema() - assert.Nil(t, err) + assert.NoError(t, err) - err = schema.NewMigrator().Apply(psqldb, sm) - assert.Nil(t, err) + migrator := schema.NewMigrator() + err = migrator.Apply(psqldb, sm) + assert.NoError(t, err) - return pool, nil + return pool } func teardown(t *testing.T, pool *dockertest.Pool) error { diff --git a/internal/state/indexer/mocks/event_sink.go b/internal/state/indexer/mocks/event_sink.go index 98b32e9350..decf551abd 100644 --- a/internal/state/indexer/mocks/event_sink.go +++ b/internal/state/indexer/mocks/event_sink.go @@ -8,10 +8,12 @@ import ( mock "github.com/stretchr/testify/mock" indexer "github.com/tendermint/tendermint/internal/state/indexer" - query "github.com/tendermint/tendermint/libs/pubsub/query" + query "github.com/tendermint/tendermint/internal/pubsub/query" tenderminttypes "github.com/tendermint/tendermint/types" + testing "testing" + types "github.com/tendermint/tendermint/abci/types" ) @@ -165,3 +167,13 @@ func (_m *EventSink) Type() indexer.EventSinkType { 
return r0 } + +// NewEventSink creates a new instance of EventSink. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. +func NewEventSink(t testing.TB) *EventSink { + mock := &EventSink{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/internal/state/indexer/query_range.go b/internal/state/indexer/query_range.go index 4c026955da..ff54cd32b8 100644 --- a/internal/state/indexer/query_range.go +++ b/internal/state/indexer/query_range.go @@ -3,7 +3,7 @@ package indexer import ( "time" - "github.com/tendermint/tendermint/libs/pubsub/query/syntax" + "github.com/tendermint/tendermint/internal/pubsub/query/syntax" ) // QueryRanges defines a mapping between a composite event key and a QueryRange. diff --git a/internal/state/indexer/sink/kv/kv.go b/internal/state/indexer/sink/kv/kv.go index 4c471b4d33..10282fd340 100644 --- a/internal/state/indexer/sink/kv/kv.go +++ b/internal/state/indexer/sink/kv/kv.go @@ -6,10 +6,10 @@ import ( dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/pubsub/query" "github.com/tendermint/tendermint/internal/state/indexer" kvb "github.com/tendermint/tendermint/internal/state/indexer/block/kv" kvt "github.com/tendermint/tendermint/internal/state/indexer/tx/kv" - "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/types" ) @@ -18,14 +18,16 @@ var _ indexer.EventSink = (*EventSink)(nil) // The EventSink is an aggregator for redirecting the call path of the tx/block kvIndexer. // For the implementation details please see the kv.go in the indexer/block and indexer/tx folder. type EventSink struct { - txi *kvt.TxIndex - bi *kvb.BlockerIndexer + txi *kvt.TxIndex + bi *kvb.BlockerIndexer + store dbm.DB } func NewEventSink(store dbm.DB) indexer.EventSink { return &EventSink{ - txi: kvt.NewTxIndex(store), - bi: kvb.New(store), + txi: kvt.NewTxIndex(store), + bi: kvb.New(store), + store: store, } } @@ -58,5 +60,5 @@ func (kves *EventSink) HasBlock(h int64) (bool, error) { } func (kves *EventSink) Stop() error { - return nil + return kves.store.Close() } diff --git a/internal/state/indexer/sink/kv/kv_test.go b/internal/state/indexer/sink/kv/kv_test.go index 47b1f53647..d4b110f4ae 100644 --- a/internal/state/indexer/sink/kv/kv_test.go +++ b/internal/state/indexer/sink/kv/kv_test.go @@ -10,10 +10,11 @@ import ( "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/pubsub/query" "github.com/tendermint/tendermint/internal/state/indexer" kvtx "github.com/tendermint/tendermint/internal/state/indexer/tx/kv" - "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/types" ) @@ -33,10 +34,10 @@ func TestBlockFuncs(t *testing.T) { require.NoError(t, indexer.IndexBlockEvents(types.EventDataNewBlockHeader{ Header: types.Header{Height: 1}, - ResultBeginBlock: abci.ResponseBeginBlock{ + ResultFinalizeBlock: abci.ResponseFinalizeBlock{ Events: []abci.Event{ { - Type: "begin_event", + Type: "finalize_eventA", Attributes: []abci.EventAttribute{ { Key: "proposer", @@ -45,12 +46,8 @@ func TestBlockFuncs(t *testing.T) { }, }, }, - }, - }, - ResultEndBlock: abci.ResponseEndBlock{ - Events: []abci.Event{ { - Type: "end_event", + Type: "finalize_eventB", Attributes: []abci.EventAttribute{ { Key: 
"foo", @@ -75,10 +72,10 @@ func TestBlockFuncs(t *testing.T) { require.NoError(t, indexer.IndexBlockEvents(types.EventDataNewBlockHeader{ Header: types.Header{Height: int64(i)}, - ResultBeginBlock: abci.ResponseBeginBlock{ + ResultFinalizeBlock: abci.ResponseFinalizeBlock{ Events: []abci.Event{ { - Type: "begin_event", + Type: "finalize_eventA", Attributes: []abci.EventAttribute{ { Key: "proposer", @@ -87,12 +84,8 @@ func TestBlockFuncs(t *testing.T) { }, }, }, - }, - }, - ResultEndBlock: abci.ResponseEndBlock{ - Events: []abci.Event{ { - Type: "end_event", + Type: "finalize_eventB", Attributes: []abci.EventAttribute{ { Key: "foo", @@ -118,32 +111,32 @@ func TestBlockFuncs(t *testing.T) { q: query.MustCompile(`block.height = 5`), results: []int64{5}, }, - "begin_event.key1 = 'value1'": { - q: query.MustCompile(`begin_event.key1 = 'value1'`), + "finalize_eventA.key1 = 'value1'": { + q: query.MustCompile(`finalize_eventA.key1 = 'value1'`), results: []int64{}, }, - "begin_event.proposer = 'FCAA001'": { - q: query.MustCompile(`begin_event.proposer = 'FCAA001'`), + "finalize_eventA.proposer = 'FCAA001'": { + q: query.MustCompile(`finalize_eventA.proposer = 'FCAA001'`), results: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, }, - "end_event.foo <= 5": { - q: query.MustCompile(`end_event.foo <= 5`), + "finalize_eventB.foo <= 5": { + q: query.MustCompile(`finalize_eventB.foo <= 5`), results: []int64{2, 4}, }, - "end_event.foo >= 100": { - q: query.MustCompile(`end_event.foo >= 100`), + "finalize_eventB.foo >= 100": { + q: query.MustCompile(`finalize_eventB.foo >= 100`), results: []int64{1}, }, - "block.height > 2 AND end_event.foo <= 8": { - q: query.MustCompile(`block.height > 2 AND end_event.foo <= 8`), + "block.height > 2 AND finalize_eventB.foo <= 8": { + q: query.MustCompile(`block.height > 2 AND finalize_eventB.foo <= 8`), results: []int64{4, 6, 8}, }, - "begin_event.proposer CONTAINS 'FFFFFFF'": { - q: query.MustCompile(`begin_event.proposer CONTAINS 'FFFFFFF'`), + "finalize_eventA.proposer CONTAINS 'FFFFFFF'": { + q: query.MustCompile(`finalize_eventA.proposer CONTAINS 'FFFFFFF'`), results: []int64{}, }, - "begin_event.proposer CONTAINS 'FCAA001'": { - q: query.MustCompile(`begin_event.proposer CONTAINS 'FCAA001'`), + "finalize_eventA.proposer CONTAINS 'FCAA001'": { + q: query.MustCompile(`finalize_eventA.proposer CONTAINS 'FCAA001'`), results: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, }, } @@ -151,7 +144,10 @@ func TestBlockFuncs(t *testing.T) { for name, tc := range testCases { tc := tc t.Run(name, func(t *testing.T) { - results, err := indexer.SearchBlockEvents(context.Background(), tc.q) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + results, err := indexer.SearchBlockEvents(ctx, tc.q) require.NoError(t, err) require.Equal(t, tc.results, results) }) @@ -342,7 +338,7 @@ func txResultWithEvents(events []abci.Event) *abci.TxResult { Height: 1, Index: 0, Tx: tx, - Result: abci.ResponseDeliverTx{ + Result: abci.ExecTxResult{ Data: []byte{0}, Code: abci.CodeTypeOK, Log: "", diff --git a/internal/state/indexer/sink/null/null.go b/internal/state/indexer/sink/null/null.go index f58142f21e..c436bdf0f1 100644 --- a/internal/state/indexer/sink/null/null.go +++ b/internal/state/indexer/sink/null/null.go @@ -4,8 +4,8 @@ import ( "context" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/pubsub/query" "github.com/tendermint/tendermint/internal/state/indexer" - "github.com/tendermint/tendermint/libs/pubsub/query" 
"github.com/tendermint/tendermint/types" ) diff --git a/internal/state/indexer/sink/null/null_test.go b/internal/state/indexer/sink/null/null_test.go index 15b77dc55b..9af66027f6 100644 --- a/internal/state/indexer/sink/null/null_test.go +++ b/internal/state/indexer/sink/null/null_test.go @@ -5,27 +5,31 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/types" ) func TestNullEventSink(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + nullIndexer := NewEventSink() assert.Nil(t, nullIndexer.IndexTxEvents(nil)) assert.Nil(t, nullIndexer.IndexBlockEvents(types.EventDataNewBlockHeader{})) - val1, err1 := nullIndexer.SearchBlockEvents(context.TODO(), nil) + val1, err1 := nullIndexer.SearchBlockEvents(ctx, nil) assert.Nil(t, val1) - assert.Nil(t, err1) - val2, err2 := nullIndexer.SearchTxEvents(context.TODO(), nil) + assert.NoError(t, err1) + val2, err2 := nullIndexer.SearchTxEvents(ctx, nil) assert.Nil(t, val2) - assert.Nil(t, err2) + assert.NoError(t, err2) val3, err3 := nullIndexer.GetTxByHash(nil) assert.Nil(t, val3) - assert.Nil(t, err3) + assert.NoError(t, err3) val4, err4 := nullIndexer.HasBlock(0) assert.False(t, val4) - assert.Nil(t, err4) + assert.NoError(t, err4) } func TestType(t *testing.T) { diff --git a/internal/state/indexer/sink/psql/psql.go b/internal/state/indexer/sink/psql/psql.go index 18e95b97d1..c063832640 100644 --- a/internal/state/indexer/sink/psql/psql.go +++ b/internal/state/indexer/sink/psql/psql.go @@ -10,9 +10,10 @@ import ( "time" "github.com/gogo/protobuf/proto" + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/pubsub/query" "github.com/tendermint/tendermint/internal/state/indexer" - "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/types" ) @@ -169,11 +170,8 @@ INSERT INTO `+tableBlocks+` (height, chain_id, created_at) return fmt.Errorf("block meta-events: %w", err) } // Insert all the block events. Order is important here, - if err := insertEvents(dbtx, blockID, 0, h.ResultBeginBlock.Events); err != nil { - return fmt.Errorf("begin-block events: %w", err) - } - if err := insertEvents(dbtx, blockID, 0, h.ResultEndBlock.Events); err != nil { - return fmt.Errorf("end-block events: %w", err) + if err := insertEvents(dbtx, blockID, 0, h.ResultFinalizeBlock.Events); err != nil { + return fmt.Errorf("finalize-block events: %w", err) } return nil }) diff --git a/internal/state/indexer/sink/psql/psql_test.go b/internal/state/indexer/sink/psql/psql_test.go index f19bbfba7f..72d14b5d89 100644 --- a/internal/state/indexer/sink/psql/psql_test.go +++ b/internal/state/indexer/sink/psql/psql_test.go @@ -5,7 +5,6 @@ import ( "database/sql" "flag" "fmt" - "io/ioutil" "log" "os" "os/signal" @@ -18,6 +17,7 @@ import ( "github.com/ory/dockertest/docker" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/types" @@ -46,19 +46,25 @@ const ( dbName = "postgres" chainID = "test-chainID" - viewBlockEvents = "block_events" - viewTxEvents = "tx_events" + viewTxEvents = "tx_events" ) func TestMain(m *testing.M) { flag.Parse() - // Set up docker and start a container running PostgreSQL. + // Set up docker. 
pool, err := dockertest.NewPool(os.Getenv("DOCKER_URL")) if err != nil { log.Fatalf("Creating docker pool: %v", err) } + // If docker is unavailable, log and exit without reporting failure. + if _, err := pool.Client.Info(); err != nil { + log.Printf("WARNING: Docker is not available: %v [skipping this test]", err) + return + } + + // Start a container running PostgreSQL. resource, err := pool.RunWithOptions(&dockertest.RunOptions{ Repository: "postgres", Tag: "13", @@ -111,7 +117,9 @@ func TestMain(m *testing.M) { sm, err := readSchema() if err != nil { log.Fatalf("Reading schema: %v", err) - } else if err := schema.NewMigrator().Apply(db, sm); err != nil { + } + migrator := schema.NewMigrator() + if err := migrator.Apply(db, sm); err != nil { log.Fatalf("Applying schema: %v", err) } @@ -144,6 +152,9 @@ func TestType(t *testing.T) { } func TestIndexing(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + t.Run("IndexBlockEvents", func(t *testing.T) { indexer := &EventSink{store: testDB(), chainID: chainID} require.NoError(t, indexer.IndexBlockEvents(newTestBlockHeader())) @@ -155,7 +166,7 @@ func TestIndexing(t *testing.T) { verifyNotImplemented(t, "hasBlock", func() (bool, error) { return indexer.HasBlock(2) }) verifyNotImplemented(t, "block search", func() (bool, error) { - v, err := indexer.SearchBlockEvents(context.Background(), nil) + v, err := indexer.SearchBlockEvents(ctx, nil) return v != nil, err }) @@ -189,7 +200,7 @@ func TestIndexing(t *testing.T) { return txr != nil, err }) verifyNotImplemented(t, "tx search", func() (bool, error) { - txr, err := indexer.SearchTxEvents(context.Background(), nil) + txr, err := indexer.SearchTxEvents(ctx, nil) return txr != nil, err }) @@ -209,15 +220,11 @@ func TestStop(t *testing.T) { func newTestBlockHeader() types.EventDataNewBlockHeader { return types.EventDataNewBlockHeader{ Header: types.Header{Height: 1}, - ResultBeginBlock: abci.ResponseBeginBlock{ + ResultFinalizeBlock: abci.ResponseFinalizeBlock{ Events: []abci.Event{ - makeIndexedEvent("begin_event.proposer", "FCAA001"), + makeIndexedEvent("finalize_event.proposer", "FCAA001"), makeIndexedEvent("thingy.whatzit", "O.O"), - }, - }, - ResultEndBlock: abci.ResponseEndBlock{ - Events: []abci.Event{ - makeIndexedEvent("end_event.foo", "100"), + makeIndexedEvent("my_event.foo", "100"), makeIndexedEvent("thingy.whatzit", "-.O"), }, }, @@ -227,7 +234,7 @@ func newTestBlockHeader() types.EventDataNewBlockHeader { // readSchema loads the indexing database schema file func readSchema() ([]*schema.Migration, error) { const filename = "schema.sql" - contents, err := ioutil.ReadFile(filename) + contents, err := os.ReadFile(filename) if err != nil { return nil, fmt.Errorf("failed to read sql file from '%s': %w", filename, err) } @@ -242,11 +249,11 @@ func readSchema() ([]*schema.Migration, error) { func resetDatabase(db *sql.DB) error { _, err := db.Exec(`DROP TABLE IF EXISTS blocks,tx_results,events,attributes CASCADE;`) if err != nil { - return fmt.Errorf("dropping tables: %v", err) + return fmt.Errorf("dropping tables: %w", err) } _, err = db.Exec(`DROP VIEW IF EXISTS event_attributes,block_events,tx_events CASCADE;`) if err != nil { - return fmt.Errorf("dropping views: %v", err) + return fmt.Errorf("dropping views: %w", err) } return nil } @@ -258,7 +265,7 @@ func txResultWithEvents(events []abci.Event) *abci.TxResult { Height: 1, Index: 0, Tx: types.Tx("HELLO WORLD"), - Result: abci.ResponseDeliverTx{ + Result: abci.ExecTxResult{ Data: []byte{0}, Code: 
abci.CodeTypeOK, Log: "", @@ -278,7 +285,7 @@ SELECT tx_result FROM `+tableTxResults+` WHERE tx_hash = $1; txr := new(abci.TxResult) if err := proto.Unmarshal(resultData, txr); err != nil { - return nil, fmt.Errorf("unmarshaling txr: %v", err) + return nil, fmt.Errorf("unmarshaling txr: %w", err) } return txr, nil @@ -301,25 +308,6 @@ SELECT height FROM `+tableBlocks+` WHERE height = $1; } else if err != nil { t.Fatalf("Database query failed: %v", err) } - - // Verify the presence of begin_block and end_block events. - if err := testDB().QueryRow(` -SELECT type, height, chain_id FROM `+viewBlockEvents+` - WHERE height = $1 AND type = $2 AND chain_id = $3; -`, height, types.EventTypeBeginBlock, chainID).Err(); err == sql.ErrNoRows { - t.Errorf("No %q event found for height=%d", types.EventTypeBeginBlock, height) - } else if err != nil { - t.Fatalf("Database query failed: %v", err) - } - - if err := testDB().QueryRow(` -SELECT type, height, chain_id FROM `+viewBlockEvents+` - WHERE height = $1 AND type = $2 AND chain_id = $3; -`, height, types.EventTypeEndBlock, chainID).Err(); err == sql.ErrNoRows { - t.Errorf("No %q event found for height=%d", types.EventTypeEndBlock, height) - } else if err != nil { - t.Fatalf("Database query failed: %v", err) - } } // verifyNotImplemented calls f and verifies that it returns both a @@ -332,7 +320,7 @@ func verifyNotImplemented(t *testing.T, label string, f func() (bool, error)) { want := label + " is not supported via the postgres event sink" ok, err := f() assert.False(t, ok) - require.NotNil(t, err) + require.Error(t, err) assert.Equal(t, want, err.Error()) } diff --git a/internal/state/indexer/tx/kv/kv.go b/internal/state/indexer/tx/kv/kv.go index 4bcff958b0..ef362425bd 100644 --- a/internal/state/indexer/tx/kv/kv.go +++ b/internal/state/indexer/tx/kv/kv.go @@ -12,9 +12,9 @@ import ( dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" - indexer "github.com/tendermint/tendermint/internal/state/indexer" - "github.com/tendermint/tendermint/libs/pubsub/query" - "github.com/tendermint/tendermint/libs/pubsub/query/syntax" + "github.com/tendermint/tendermint/internal/pubsub/query" + "github.com/tendermint/tendermint/internal/pubsub/query/syntax" + "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/types" ) @@ -53,7 +53,7 @@ func (txi *TxIndex) Get(hash []byte) (*abci.TxResult, error) { txResult := new(abci.TxResult) err = proto.Unmarshal(rawBytes, txResult) if err != nil { - return nil, fmt.Errorf("error reading TxResult: %v", err) + return nil, fmt.Errorf("error reading TxResult: %w", err) } return txResult, nil diff --git a/internal/state/indexer/tx/kv/kv_bench_test.go b/internal/state/indexer/tx/kv/kv_bench_test.go index 93724beba3..7007d5bb5b 100644 --- a/internal/state/indexer/tx/kv/kv_bench_test.go +++ b/internal/state/indexer/tx/kv/kv_bench_test.go @@ -4,21 +4,17 @@ import ( "context" "crypto/rand" "fmt" - "io/ioutil" "testing" dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/internal/pubsub/query" "github.com/tendermint/tendermint/types" ) func BenchmarkTxSearch(b *testing.B) { - dbDir, err := ioutil.TempDir("", "benchmark_tx_search_test") - if err != nil { - b.Errorf("failed to create temporary directory: %s", err) - } + dbDir := b.TempDir() db, err := dbm.NewGoLevelDB("benchmark_tx_search_test", dbDir) if err != nil { @@ -47,7 +43,7 @@ func 
BenchmarkTxSearch(b *testing.B) { Height: int64(i), Index: 0, Tx: types.Tx(string(txBz)), - Result: abci.ResponseDeliverTx{ + Result: abci.ExecTxResult{ Data: []byte{0}, Code: abci.CodeTypeOK, Log: "", diff --git a/internal/state/indexer/tx/kv/kv_test.go b/internal/state/indexer/tx/kv/kv_test.go index 9bb8bfb7bb..8004c0f27e 100644 --- a/internal/state/indexer/tx/kv/kv_test.go +++ b/internal/state/indexer/tx/kv/kv_test.go @@ -3,8 +3,6 @@ package kv import ( "context" "fmt" - "io/ioutil" - "os" "testing" "github.com/gogo/protobuf/proto" @@ -13,8 +11,8 @@ import ( dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" - indexer "github.com/tendermint/tendermint/internal/state/indexer" - "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/internal/pubsub/query" + "github.com/tendermint/tendermint/internal/state/indexer" tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/types" ) @@ -27,7 +25,7 @@ func TestTxIndex(t *testing.T) { Height: 1, Index: 0, Tx: tx, - Result: abci.ResponseDeliverTx{ + Result: abci.ExecTxResult{ Data: []byte{0}, Code: abci.CodeTypeOK, Log: "", Events: nil, }, @@ -50,7 +48,7 @@ func TestTxIndex(t *testing.T) { Height: 1, Index: 0, Tx: tx2, - Result: abci.ResponseDeliverTx{ + Result: abci.ExecTxResult{ Data: []byte{0}, Code: abci.CodeTypeOK, Log: "", Events: nil, }, @@ -324,7 +322,7 @@ func txResultWithEvents(events []abci.Event) *abci.TxResult { Height: 1, Index: 0, Tx: tx, - Result: abci.ResponseDeliverTx{ + Result: abci.ExecTxResult{ Data: []byte{0}, Code: abci.CodeTypeOK, Log: "", @@ -334,9 +332,7 @@ func txResultWithEvents(events []abci.Event) *abci.TxResult { } func benchmarkTxIndex(txsCount int64, b *testing.B) { - dir, err := ioutil.TempDir("", "tx_index_db") - require.NoError(b, err) - defer os.RemoveAll(dir) + dir := b.TempDir() store, err := dbm.NewDB("tx_index", "goleveldb", dir) require.NoError(b, err) @@ -350,7 +346,7 @@ func benchmarkTxIndex(txsCount int64, b *testing.B) { Height: 1, Index: txIndex, Tx: tx, - Result: abci.ResponseDeliverTx{ + Result: abci.ExecTxResult{ Data: []byte{0}, Code: abci.CodeTypeOK, Log: "", diff --git a/internal/state/indexer/tx/null/null.go b/internal/state/indexer/tx/null/null.go index 0da7fc6837..dea5d570f8 100644 --- a/internal/state/indexer/tx/null/null.go +++ b/internal/state/indexer/tx/null/null.go @@ -5,8 +5,8 @@ import ( "errors" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/pubsub/query" "github.com/tendermint/tendermint/internal/state/indexer" - "github.com/tendermint/tendermint/libs/pubsub/query" ) var _ indexer.TxIndexer = (*TxIndex)(nil) diff --git a/internal/state/mocks/block_store.go b/internal/state/mocks/block_store.go index f30627d174..09075f3b06 100644 --- a/internal/state/mocks/block_store.go +++ b/internal/state/mocks/block_store.go @@ -5,6 +5,8 @@ package mocks import ( mock "github.com/stretchr/testify/mock" + testing "testing" + types "github.com/tendermint/tendermint/types" ) @@ -222,3 +224,13 @@ func (_m *BlockStore) Size() int64 { return r0 } + +// NewBlockStore creates a new instance of BlockStore. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewBlockStore(t testing.TB) *BlockStore { + mock := &BlockStore{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/internal/state/mocks/event_sink.go b/internal/state/mocks/event_sink.go index b8a8fc4648..9f2d2daf3c 100644 --- a/internal/state/mocks/event_sink.go +++ b/internal/state/mocks/event_sink.go @@ -6,9 +6,10 @@ import ( context "context" mock "github.com/stretchr/testify/mock" + indexer "github.com/tendermint/tendermint/internal/state/indexer" - query "github.com/tendermint/tendermint/libs/pubsub/query" + query "github.com/tendermint/tendermint/internal/pubsub/query" tenderminttypes "github.com/tendermint/tendermint/types" diff --git a/internal/state/mocks/evidence_pool.go b/internal/state/mocks/evidence_pool.go index 8bf4a9b64b..49633269b1 100644 --- a/internal/state/mocks/evidence_pool.go +++ b/internal/state/mocks/evidence_pool.go @@ -3,8 +3,13 @@ package mocks import ( + context "context" + mock "github.com/stretchr/testify/mock" state "github.com/tendermint/tendermint/internal/state" + + testing "testing" + types "github.com/tendermint/tendermint/types" ) @@ -13,13 +18,13 @@ type EvidencePool struct { mock.Mock } -// AddEvidence provides a mock function with given fields: _a0 -func (_m *EvidencePool) AddEvidence(_a0 types.Evidence) error { - ret := _m.Called(_a0) +// AddEvidence provides a mock function with given fields: _a0, _a1 +func (_m *EvidencePool) AddEvidence(_a0 context.Context, _a1 types.Evidence) error { + ret := _m.Called(_a0, _a1) var r0 error - if rf, ok := ret.Get(0).(func(types.Evidence) error); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(context.Context, types.Evidence) error); ok { + r0 = rf(_a0, _a1) } else { r0 = ret.Error(0) } @@ -27,13 +32,13 @@ func (_m *EvidencePool) AddEvidence(_a0 types.Evidence) error { return r0 } -// CheckEvidence provides a mock function with given fields: _a0 -func (_m *EvidencePool) CheckEvidence(_a0 types.EvidenceList) error { - ret := _m.Called(_a0) +// CheckEvidence provides a mock function with given fields: _a0, _a1 +func (_m *EvidencePool) CheckEvidence(_a0 context.Context, _a1 types.EvidenceList) error { + ret := _m.Called(_a0, _a1) var r0 error - if rf, ok := ret.Get(0).(func(types.EvidenceList) error); ok { - r0 = rf(_a0) + if rf, ok := ret.Get(0).(func(context.Context, types.EvidenceList) error); ok { + r0 = rf(_a0, _a1) } else { r0 = ret.Error(0) } @@ -64,7 +69,17 @@ func (_m *EvidencePool) PendingEvidence(maxBytes int64) ([]types.Evidence, int64 return r0, r1 } -// Update provides a mock function with given fields: _a0, _a1 -func (_m *EvidencePool) Update(_a0 state.State, _a1 types.EvidenceList) { - _m.Called(_a0, _a1) +// Update provides a mock function with given fields: _a0, _a1, _a2 +func (_m *EvidencePool) Update(_a0 context.Context, _a1 state.State, _a2 types.EvidenceList) { + _m.Called(_a0, _a1, _a2) +} + +// NewEvidencePool creates a new instance of EvidencePool. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. 
+func NewEvidencePool(t testing.TB) *EvidencePool { + mock := &EvidencePool{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock } diff --git a/internal/state/mocks/store.go b/internal/state/mocks/store.go index 02c69d3e05..9b41f3c1bc 100644 --- a/internal/state/mocks/store.go +++ b/internal/state/mocks/store.go @@ -7,6 +7,8 @@ import ( state "github.com/tendermint/tendermint/internal/state" tendermintstate "github.com/tendermint/tendermint/proto/tendermint/state" + testing "testing" + types "github.com/tendermint/tendermint/types" ) @@ -186,3 +188,13 @@ func (_m *Store) SaveValidatorSets(_a0 int64, _a1 int64, _a2 *types.ValidatorSet return r0 } + +// NewStore creates a new instance of Store. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. +func NewStore(t testing.TB) *Store { + mock := &Store{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/internal/state/rollback_test.go b/internal/state/rollback_test.go index f5e5490c01..ddd51b224e 100644 --- a/internal/state/rollback_test.go +++ b/internal/state/rollback_test.go @@ -18,6 +18,7 @@ func TestRollback(t *testing.T) { height int64 = 100 nextHeight int64 = 101 ) + blockStore := &mocks.BlockStore{} stateStore := setupStateStore(t, height) initialState, err := stateStore.Load() @@ -88,6 +89,7 @@ func TestRollbackNoState(t *testing.T) { func TestRollbackNoBlocks(t *testing.T) { const height = int64(100) + stateStore := setupStateStore(t, height) blockStore := &mocks.BlockStore{} blockStore.On("Height").Return(height) diff --git a/internal/state/services.go b/internal/state/services.go index 09a5fe5189..40365f2fbf 100644 --- a/internal/state/services.go +++ b/internal/state/services.go @@ -1,6 +1,8 @@ package state import ( + "context" + "github.com/tendermint/tendermint/types" ) @@ -45,9 +47,9 @@ type BlockStore interface { // EvidencePool defines the EvidencePool interface used by State. type EvidencePool interface { PendingEvidence(maxBytes int64) (ev []types.Evidence, size int64) - AddEvidence(types.Evidence) error - Update(State, types.EvidenceList) - CheckEvidence(types.EvidenceList) error + AddEvidence(context.Context, types.Evidence) error + Update(context.Context, State, types.EvidenceList) + CheckEvidence(context.Context, types.EvidenceList) error } // EmptyEvidencePool is an empty implementation of EvidencePool, useful for testing. 
It also complies @@ -57,7 +59,9 @@ type EmptyEvidencePool struct{} func (EmptyEvidencePool) PendingEvidence(maxBytes int64) (ev []types.Evidence, size int64) { return nil, 0 } -func (EmptyEvidencePool) AddEvidence(types.Evidence) error { return nil } -func (EmptyEvidencePool) Update(State, types.EvidenceList) {} -func (EmptyEvidencePool) CheckEvidence(evList types.EvidenceList) error { return nil } +func (EmptyEvidencePool) AddEvidence(context.Context, types.Evidence) error { return nil } +func (EmptyEvidencePool) Update(context.Context, State, types.EvidenceList) {} +func (EmptyEvidencePool) CheckEvidence(ctx context.Context, evList types.EvidenceList) error { + return nil +} func (EmptyEvidencePool) ReportConflictingVotes(voteA, voteB *types.Vote) {} diff --git a/internal/state/state.go b/internal/state/state.go index aa452ff3c9..9fa7bbe553 100644 --- a/internal/state/state.go +++ b/internal/state/state.go @@ -4,7 +4,7 @@ import ( "bytes" "errors" "fmt" - "io/ioutil" + "os" "time" "github.com/gogo/protobuf/proto" @@ -97,7 +97,7 @@ type State struct { LastHeightValidatorsChanged int64 // Consensus parameters used for validating blocks. - // Changes returned by EndBlock and updated after Commit. + // Changes returned by FinalizeBlock and updated after Commit. ConsensusParams types.ConsensusParams LastHeightConsensusParamsChanged int64 @@ -139,23 +139,30 @@ func (state State) Copy() State { } // Equals returns true if the States are identical. -func (state State) Equals(state2 State) bool { - sbz, s2bz := state.Bytes(), state2.Bytes() - return bytes.Equal(sbz, s2bz) +func (state State) Equals(state2 State) (bool, error) { + sbz, err := state.Bytes() + if err != nil { + return false, err + } + s2bz, err := state2.Bytes() + if err != nil { + return false, err + } + return bytes.Equal(sbz, s2bz), nil } -// Bytes serializes the State using protobuf. -// It panics if either casting to protobuf or serialization fails. -func (state State) Bytes() []byte { +// Bytes serializes the State using protobuf, propagating marshaling +// errors +func (state State) Bytes() ([]byte, error) { sm, err := state.ToProto() if err != nil { - panic(err) + return nil, err } bz, err := proto.Marshal(sm) if err != nil { - panic(err) + return nil, err } - return bz + return bz, nil } // IsEmpty returns true if the State is equal to the empty State. @@ -301,7 +308,7 @@ func (state State) MakeBlock( evidence []types.Evidence, proposerProTxHash types.ProTxHash, proposedAppVersion uint64, -) (*types.Block, *types.PartSet) { +) *types.Block { var coreChainLockHeight uint32 if coreChainLock == nil { @@ -313,30 +320,16 @@ func (state State) MakeBlock( // Build base block with block data. block := types.MakeBlock(height, coreChainLockHeight, coreChainLock, txs, commit, evidence, proposedAppVersion) - // Set time. - var timestamp time.Time - if height == state.InitialHeight { - timestamp = state.LastBlockTime // genesis time - } else { - currentTime := tmtime.Now() - if currentTime.Before(state.LastBlockTime) { - // this is weird, propose last block time - timestamp = state.LastBlockTime - } else { - timestamp = currentTime - } - } - // Fill rest of header with state data. 
block.Header.Populate( state.Version.Consensus, state.ChainID, - timestamp, state.LastBlockID, + tmtime.Now(), state.LastBlockID, state.Validators.Hash(), state.NextValidators.Hash(), state.ConsensusParams.HashConsensusParams(), state.AppHash, state.LastResultsHash, proposerProTxHash, ) - return block, block.MakePartSet(types.BlockPartSizeBytes) + return block } func (state State) ValidatorsAtHeight(height int64) *types.ValidatorSet { @@ -367,13 +360,13 @@ func MakeGenesisStateFromFile(genDocFile string) (State, error) { // MakeGenesisDocFromFile reads and unmarshals genesis doc from the given file. func MakeGenesisDocFromFile(genDocFile string) (*types.GenesisDoc, error) { - genDocJSON, err := ioutil.ReadFile(genDocFile) + genDocJSON, err := os.ReadFile(genDocFile) if err != nil { - return nil, fmt.Errorf("couldn't read GenesisDoc file: %v", err) + return nil, fmt.Errorf("couldn't read GenesisDoc file: %w", err) } genDoc, err := types.GenesisDocFromJSON(genDocJSON) if err != nil { - return nil, fmt.Errorf("error reading GenesisDoc: %v", err) + return nil, fmt.Errorf("error reading GenesisDoc: %w", err) } return genDoc, nil } diff --git a/internal/state/state_test.go b/internal/state/state_test.go index 0707925d4a..198b551dcb 100644 --- a/internal/state/state_test.go +++ b/internal/state/state_test.go @@ -18,6 +18,7 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/bls12381" cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/crypto/merkle" "github.com/tendermint/tendermint/dash/llmq" sm "github.com/tendermint/tendermint/internal/state" statefactory "github.com/tendermint/tendermint/internal/state/test/factory" @@ -28,7 +29,7 @@ import ( // setupTestCase does setup common to all test cases. func setupTestCase(t *testing.T) (func(t *testing.T), dbm.DB, sm.State) { - cfg, err := config.ResetTestRoot("state_") + cfg, err := config.ResetTestRoot(t.TempDir(), "state_") require.NoError(t, err) dbType := dbm.BackendType(cfg.DBBackend) @@ -44,7 +45,7 @@ func setupTestCase(t *testing.T) (func(t *testing.T), dbm.DB, sm.State) { err = stateStore.Save(state) require.NoError(t, err) - tearDown := func(t *testing.T) { os.RemoveAll(cfg.RootDir) } + tearDown := func(t *testing.T) { _ = os.RemoveAll(cfg.RootDir) } return tearDown, stateDB, state } @@ -53,18 +54,21 @@ func setupTestCase(t *testing.T) (func(t *testing.T), dbm.DB, sm.State) { func TestStateCopy(t *testing.T) { tearDown, _, state := setupTestCase(t) defer tearDown(t) - assert := assert.New(t) stateCopy := state.Copy() - assert.True(state.Equals(stateCopy), - fmt.Sprintf("expected state and its copy to be identical.\ngot: %v\nexpected: %v\n", - stateCopy, state)) + seq, err := state.Equals(stateCopy) + require.NoError(t, err) + assert.True(t, seq, + "expected state and its copy to be identical.\ngot: %v\nexpected: %v", + stateCopy, state) stateCopy.LastBlockHeight++ stateCopy.LastValidators = state.Validators - assert.False(state.Equals(stateCopy), fmt.Sprintf(`expected states to be different. got same - %v`, state)) + + seq, err = state.Equals(stateCopy) + require.NoError(t, err) + assert.False(t, seq, "expected states to be different. got same %v", state) } // TestMakeGenesisStateNilValidators tests state's consistency when genesis file's validators field is nil. 
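The state.go hunks above make State.Equals and State.Bytes return errors instead of panicking on marshaling failures, and MakeBlock now returns only the *types.Block (callers derive the PartSet themselves via MakePartSet). A minimal sketch of the resulting call-site pattern, assuming the internal/state alias sm used throughout this diff; persistState is a hypothetical helper, not part of the patch:

package example

import (
	"fmt"

	dbm "github.com/tendermint/tm-db"

	sm "github.com/tendermint/tendermint/internal/state"
)

// persistState mirrors how dbStore.save handles the fallible Bytes() later
// in this patch: serialization errors reach the caller instead of crashing.
func persistState(batch dbm.Batch, key []byte, state sm.State) error {
	bz, err := state.Bytes()
	if err != nil {
		return fmt.Errorf("serialize state: %w", err)
	}
	return batch.Set(key, bz)
}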
@@ -75,7 +79,7 @@ func TestMakeGenesisStateNilValidators(t *testing.T) { } require.Nil(t, doc.ValidateAndComplete()) state, err := sm.MakeGenesisState(&doc) - require.Nil(t, err) + require.NoError(t, err) require.Equal(t, 0, len(state.Validators.Validators)) require.Equal(t, 0, len(state.NextValidators.Validators)) } @@ -85,7 +89,6 @@ func TestStateSaveLoad(t *testing.T) { tearDown, stateDB, state := setupTestCase(t) defer tearDown(t) stateStore := sm.NewStore(stateDB) - assert := assert.New(t) state.LastBlockHeight++ state.LastValidators = state.Validators @@ -94,9 +97,11 @@ func TestStateSaveLoad(t *testing.T) { loadedState, err := stateStore.Load() require.NoError(t, err) - assert.True(state.Equals(loadedState), - fmt.Sprintf("expected state and its copy to be identical.\ngot: %v\nexpected: %v\n", - loadedState, state)) + seq, err := state.Equals(loadedState) + require.NoError(t, err) + assert.True(t, seq, + "expected state and its copy to be identical.\ngot: %v\nexpected: %v", + loadedState, state) } // TestABCIResponsesSaveLoad tests saving and loading ABCIResponses. @@ -104,7 +109,6 @@ func TestABCIResponsesSaveLoad1(t *testing.T) { tearDown, stateDB, state := setupTestCase(t) defer tearDown(t) stateStore := sm.NewStore(stateDB) - assert := assert.New(t) state.LastBlockHeight++ @@ -113,57 +117,58 @@ func TestABCIResponsesSaveLoad1(t *testing.T) { require.NoError(t, err) abciResponses := new(tmstate.ABCIResponses) - dtxs := make([]*abci.ResponseDeliverTx, 2) - abciResponses.DeliverTxs = dtxs + dtxs := make([]*abci.ExecTxResult, 2) + abciResponses.FinalizeBlock = new(abci.ResponseFinalizeBlock) + abciResponses.FinalizeBlock.TxResults = dtxs - abciResponses.DeliverTxs[0] = &abci.ResponseDeliverTx{Data: []byte("foo"), Events: nil} - abciResponses.DeliverTxs[1] = &abci.ResponseDeliverTx{Data: []byte("bar"), Log: "ok", Events: nil} + abciResponses.FinalizeBlock.TxResults[0] = &abci.ExecTxResult{Data: []byte("foo"), Events: nil} + abciResponses.FinalizeBlock.TxResults[1] = &abci.ExecTxResult{Data: []byte("bar"), Log: "ok", Events: nil} pubKey := bls12381.GenPrivKey().PubKey() abciPubKey, err := cryptoenc.PubKeyToProto(pubKey) require.NoError(t, err) vu := types.TM2PB.NewValidatorUpdate(pubKey, 100, crypto.RandProTxHash(), types.RandValidatorAddress().String()) - abciResponses.EndBlock = &abci.ResponseEndBlock{ValidatorSetUpdate: &abci.ValidatorSetUpdate{ + abciResponses.FinalizeBlock.ValidatorSetUpdate = &abci.ValidatorSetUpdate{ ValidatorUpdates: []abci.ValidatorUpdate{vu}, ThresholdPublicKey: abciPubKey, - }} + } err = stateStore.SaveABCIResponses(block.Height, abciResponses) require.NoError(t, err) loadedABCIResponses, err := stateStore.LoadABCIResponses(block.Height) - assert.Nil(err) - assert.Equal(abciResponses, loadedABCIResponses, - fmt.Sprintf("ABCIResponses don't match:\ngot: %v\nexpected: %v\n", - loadedABCIResponses, abciResponses)) + require.NoError(t, err) + assert.Equal(t, abciResponses, loadedABCIResponses, + "ABCIResponses don't match:\ngot: %v\nexpected: %v\n", + loadedABCIResponses, abciResponses) } // TestResultsSaveLoad tests saving and loading ABCI results. func TestABCIResponsesSaveLoad2(t *testing.T) { tearDown, stateDB, _ := setupTestCase(t) defer tearDown(t) - assert := assert.New(t) stateStore := sm.NewStore(stateDB) cases := [...]struct { // Height is implied to equal index+2, // as block 1 is created from genesis. 
- added []*abci.ResponseDeliverTx - expected []*abci.ResponseDeliverTx + added []*abci.ExecTxResult + expected []*abci.ExecTxResult }{ 0: { nil, nil, }, 1: { - []*abci.ResponseDeliverTx{ + []*abci.ExecTxResult{ {Code: 32, Data: []byte("Hello"), Log: "Huh?"}, }, - []*abci.ResponseDeliverTx{ + []*abci.ExecTxResult{ {Code: 32, Data: []byte("Hello")}, - }}, + }, + }, 2: { - []*abci.ResponseDeliverTx{ + []*abci.ExecTxResult{ {Code: 383}, { Data: []byte("Gotcha!"), @@ -173,19 +178,20 @@ func TestABCIResponsesSaveLoad2(t *testing.T) { }, }, }, - []*abci.ResponseDeliverTx{ + []*abci.ExecTxResult{ {Code: 383, Data: nil}, {Code: 0, Data: []byte("Gotcha!"), Events: []abci.Event{ {Type: "type1", Attributes: []abci.EventAttribute{{Key: "a", Value: "1"}}}, {Type: "type2", Attributes: []abci.EventAttribute{{Key: "build", Value: "stuff"}}}, }}, - }}, + }, + }, 3: { nil, nil, }, 4: { - []*abci.ResponseDeliverTx{nil}, + []*abci.ExecTxResult{nil}, nil, }, } @@ -194,16 +200,16 @@ func TestABCIResponsesSaveLoad2(t *testing.T) { for i := range cases { h := int64(i + 1) res, err := stateStore.LoadABCIResponses(h) - assert.Error(err, "%d: %#v", i, res) + assert.Error(t, err, "%d: %#v", i, res) } // Add all cases. for i, tc := range cases { h := int64(i + 1) // last block height, one below what we save responses := &tmstate.ABCIResponses{ - BeginBlock: &abci.ResponseBeginBlock{}, - DeliverTxs: tc.added, - EndBlock: &abci.ResponseEndBlock{}, + FinalizeBlock: &abci.ResponseFinalizeBlock{ + TxResults: tc.added, + }, } err := stateStore.SaveABCIResponses(h, responses) require.NoError(t, err) @@ -213,14 +219,15 @@ func TestABCIResponsesSaveLoad2(t *testing.T) { for i, tc := range cases { h := int64(i + 1) res, err := stateStore.LoadABCIResponses(h) - if assert.NoError(err, "%d", i) { + if assert.NoError(t, err, "%d", i) { t.Log(res) - responses := &tmstate.ABCIResponses{ - BeginBlock: &abci.ResponseBeginBlock{}, - DeliverTxs: tc.expected, - EndBlock: &abci.ResponseEndBlock{}, - } - assert.Equal(sm.ABCIResponsesResultsHash(responses), sm.ABCIResponsesResultsHash(res), "%d", i) + e, err := abci.MarshalTxResults(tc.expected) + require.NoError(t, err) + he := merkle.HashFromByteSlices(e) + rs, err := abci.MarshalTxResults(res.FinalizeBlock.TxResults) + hrs := merkle.HashFromByteSlices(rs) + require.NoError(t, err) + assert.Equal(t, he, hrs, "%d", i) } } } @@ -229,23 +236,22 @@ func TestABCIResponsesSaveLoad2(t *testing.T) { func TestValidatorSimpleSaveLoad(t *testing.T) { tearDown, stateDB, state := setupTestCase(t) defer tearDown(t) - assert := assert.New(t) statestore := sm.NewStore(stateDB) // Can't load anything for height 0. _, err := statestore.LoadValidators(0) - assert.IsType(sm.ErrNoValSetForHeight{}, err, "expected err at height 0") + assert.IsType(t, sm.ErrNoValSetForHeight{}, err, "expected err at height 0") // Should be able to load for height 1. v, err := statestore.LoadValidators(1) - assert.Nil(err, "expected no err at height 1") - assert.Equal(v.Hash(), state.Validators.Hash(), "expected validator hashes to match") + require.NoError(t, err, "expected no err at height 1") + assert.Equal(t, v.Hash(), state.Validators.Hash(), "expected validator hashes to match") // Should be able to load for height 2. 
v, err = statestore.LoadValidators(2) - assert.Nil(err, "expected no err at height 2") - assert.Equal(v.Hash(), state.NextValidators.Hash(), "expected validator hashes to match") + require.NoError(t, err, "expected no err at height 2") + assert.Equal(t, v.Hash(), state.NextValidators.Hash(), "expected validator hashes to match") // Increment height, save; should be able to load for next & next next height. state.LastBlockHeight++ @@ -253,11 +259,11 @@ func TestValidatorSimpleSaveLoad(t *testing.T) { err = statestore.Save(state) require.NoError(t, err) vp0, err := statestore.LoadValidators(nextHeight + 0) - assert.Nil(err, "expected no err") + assert.NoError(t, err) vp1, err := statestore.LoadValidators(nextHeight + 1) - assert.Nil(err, "expected no err") - assert.Equal(vp0.Hash(), state.Validators.Hash(), "expected validator hashes to match") - assert.Equal(vp1.Hash(), state.NextValidators.Hash(), "expected next validator hashes to match") + assert.NoError(t, err) + assert.Equal(t, vp0.Hash(), state.Validators.Hash(), "expected validator hashes to match") + assert.Equal(t, vp1.Hash(), state.NextValidators.Hash(), "expected next validator hashes to match") } // TestValidatorChangesSaveLoad tests saving and loading a validator set with changes. @@ -274,24 +280,22 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { // with the right validator set for each height. highestHeight := changeHeights[N-1] + 5 changeIndex := 0 - var validatorUpdates []*types.Validator - var thresholdPublicKeyUpdate crypto.PubKey - var quorumHash crypto.QuorumHash + testCases := make([]crypto.PubKey, highestHeight-1) for i := int64(1); i < highestHeight; i++ { // When we get to a change height, use the next pubkey. - regenerate := false if changeIndex < len(changeHeights) && i == changeHeights[changeIndex] { changeIndex++ - regenerate = true } - header, _, blockID, responses, err := makeHeaderPartsResponsesValKeysRegenerate(state, regenerate) + header, _, blockID, responses := makeHeaderPartsResponsesValPowerChange(t, state, types.DefaultDashVotingPower) + validatorUpdates, thresholdPubKey, quorumHash, err := types.PB2TM.ValidatorUpdatesFromValidatorSet(responses.FinalizeBlock.ValidatorSetUpdate) require.NoError(t, err) - validatorUpdates, thresholdPublicKeyUpdate, quorumHash, err = types.PB2TM.ValidatorUpdatesFromValidatorSet(responses.EndBlock.ValidatorSetUpdate) + rs, err := abci.MarshalTxResults(responses.FinalizeBlock.TxResults) require.NoError(t, err) // Any node pro tx hash should do firstNodeProTxHash, _ := state.Validators.GetByIndex(0) - state, err = sm.UpdateState(state, firstNodeProTxHash, blockID, &header, responses, validatorUpdates, thresholdPublicKeyUpdate, quorumHash) + h := merkle.HashFromByteSlices(rs) + state, err = state.Update(firstNodeProTxHash, blockID, &header, h, responses.FinalizeBlock.ConsensusParamUpdates, validatorUpdates, thresholdPubKey, quorumHash) require.NoError(t, err) validator := state.Validators.Validators[0] testCases[i-1] = validator.PubKey @@ -301,7 +305,7 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { for i, pubKey := range testCases { v, err := stateStore.LoadValidators(int64(i + 1 + 1)) // +1 because vset changes delayed by 1 block. 
- assert.Nil(t, err, fmt.Sprintf("expected no err at height %d", i)) + assert.NoError(t, err, fmt.Sprintf("expected no err at height %d", i)) assert.Equal(t, v.Size(), 1, "validator set size is greater than 1: %d", v.Size()) _, val := v.GetByIndex(0) @@ -310,8 +314,10 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { } } -// ToDo maybe? -// func TestProposerFrequency(t *testing.T) { +//func TestProposerFrequency(t *testing.T) { +// ctx, cancel := context.WithCancel(context.Background()) +// defer cancel() +// // // some explicit test cases // testCases := []struct { // powers []int64 @@ -353,7 +359,7 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { // // run each case 5 times to sample different // // initial priorities // for i := 0; i < 5; i++ { -// valSet := factory.GenerateValidatorSetWithPowers(testCase.powers) +// valSet := genValSetWithPowers(testCase.powers) // testProposerFreq(t, caseNum, valSet) // } // } @@ -363,46 +369,44 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { // maxPower := 1000 // nTestCases := 5 // for i := 0; i < nTestCases; i++ { -// N := tmrand.Int()%maxVals + 1 +// N := mrand.Int()%maxVals + 1 // vals := make([]*types.Validator, N) // totalVotePower := int64(0) // for j := 0; j < N; j++ { // // make sure votePower > 0 -// votePower := int64(tmrand.Int()%maxPower) + 1 +// votePower := int64(mrand.Int()%maxPower) + 1 // totalVotePower += votePower // privVal := types.NewMockPV() -// pubKey, err := privVal.GetPubKey() -// proTxHash := tmrand.Bytes(32) +// pubKey, err := privVal.GetPubKey(ctx) // require.NoError(t, err) -// val := types.NewValidatorDefaultVotingPower(pubKey, proTxHash) -// val.ProposerPriority = tmrand.Int64() +// val := types.NewValidator(pubKey, votePower) +// val.ProposerPriority = mrand.Int63() // vals[j] = val // } // valSet := types.NewValidatorSet(vals) // valSet.RescalePriorities(totalVotePower) // testProposerFreq(t, i, valSet) // } -// } +//} // -// // new val set with given powers and random initial priorities -// func factory.GenerateValidatorSetWithPowers(powers []int64) *types.ValidatorSet { +//// new val set with given powers and random initial priorities +//func genValSetWithPowers(powers []int64) *types.ValidatorSet { // size := len(powers) // vals := make([]*types.Validator, size) // totalVotePower := int64(0) // for i := 0; i < size; i++ { // totalVotePower += powers[i] -// proTxHash := tmrand.Bytes(32) -// val := types.NewValidator(bls12381.GenPrivKey().PubKey(), powers[i], proTxHash) -// val.ProposerPriority = tmrand.Int64() +// val := types.NewValidator(ed25519.GenPrivKey().PubKey(), powers[i]) +// val.ProposerPriority = mrand.Int63() // vals[i] = val // } // valSet := types.NewValidatorSet(vals) // valSet.RescalePriorities(totalVotePower) // return valSet //} - -// // test a proposer appears as frequently as expected -// func testProposerFreq(t *testing.T, caseNum int, valSet *types.ValidatorSet) { +// +//// test a proposer appears as frequently as expected +//func testProposerFreq(t *testing.T, caseNum int, valSet *types.ValidatorSet) { // N := valSet.Size() // totalPower := valSet.TotalVotingPower() // @@ -412,7 +416,7 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { // freqs := make([]int, N) // for i := 0; i < runs; i++ { // prop := valSet.GetProposer() -// idx, _ := valSet.GetByProTxHash(prop.ProTxHash) +// idx, _ := valSet.GetByAddress(prop.Address) // freqs[idx]++ // valSet.IncrementProposerPriority(1) // } @@ -435,7 +439,7 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { // 
fmt.Sprintf("Case %d val %d (%d): got %d, expected %d", caseNum, i, N, gotFreq, expectFreq), // ) // } -// } +//} // TestProposerPriorityDoesNotGetResetToZero assert that we preserve accum when calling updateState // see https://github.com/tendermint/tendermint/issues/2718 @@ -459,17 +463,20 @@ func TestProposerPriorityDoesNotGetResetToZero(t *testing.T) { block, err := statefactory.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit), nil, 0) require.NoError(t, err) - blockID := block.BlockID() - abciResponses := &tmstate.ABCIResponses{ - BeginBlock: &abci.ResponseBeginBlock{}, - EndBlock: &abci.ResponseEndBlock{ValidatorSetUpdate: nil}, + blockID, err := block.BlockID() + require.NoError(t, err) + fb := &abci.ResponseFinalizeBlock{ + ValidatorSetUpdate: nil, } validatorUpdates, thresholdPublicKeyUpdate, _, err := - types.PB2TM.ValidatorUpdatesFromValidatorSet(abciResponses.EndBlock.ValidatorSetUpdate) + types.PB2TM.ValidatorUpdatesFromValidatorSet(nil) require.NoError(t, err) // Any node pro tx hash should do firstNodeProTxHash, _ := state.Validators.GetByIndex(0) - updatedState, err := sm.UpdateState(state, firstNodeProTxHash, blockID, &block.Header, abciResponses, validatorUpdates, thresholdPublicKeyUpdate, quorumHash) + rs, err := abci.MarshalTxResults(fb.TxResults) + require.NoError(t, err) + h := merkle.HashFromByteSlices(rs) + updatedState, err := state.Update(firstNodeProTxHash, blockID, &block.Header, h, fb.ConsensusParamUpdates, validatorUpdates, thresholdPublicKeyUpdate, quorumHash) assert.NoError(t, err) curTotal := val1VotingPower // one increment step and one validator: 0 + power - total_power == 0 @@ -485,16 +492,10 @@ func TestProposerPriorityDoesNotGetResetToZero(t *testing.T) { updateAddVal := abci.ValidatorUpdate{ProTxHash: val2ProTxHash, PubKey: &fvp, Power: val2VotingPower} validatorUpdates, err = types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{updateAddVal}) assert.NoError(t, err) - updatedState2, err := sm.UpdateState( - state, - firstNodeProTxHash, - blockID, - &block.Header, - abciResponses, - validatorUpdates, - ld.ThresholdPubKey, - quorumHash, - ) + rs, err = abci.MarshalTxResults(fb.TxResults) + require.NoError(t, err) + h = merkle.HashFromByteSlices(rs) + updatedState2, err := updatedState.Update(firstNodeProTxHash, blockID, &block.Header, h, fb.ConsensusParamUpdates, validatorUpdates, ld.ThresholdPubKey, quorumHash) assert.NoError(t, err) require.Equal(t, len(updatedState2.NextValidators.Validators), 2) @@ -533,7 +534,10 @@ func TestProposerPriorityDoesNotGetResetToZero(t *testing.T) { // this will cause the diff of priorities (77) // to be larger than threshold == 2*totalVotingPower (22): - updatedState3, err := sm.UpdateState(updatedState2, firstNodeProTxHash, blockID, &block.Header, abciResponses, validatorUpdates, thresholdPubKey, quorumHash) + rs, err = abci.MarshalTxResults(fb.TxResults) + require.NoError(t, err) + h = merkle.HashFromByteSlices(rs) + updatedState3, err := updatedState2.Update(firstNodeProTxHash, blockID, &block.Header, h, fb.ConsensusParamUpdates, validatorUpdates, thresholdPubKey, quorumHash) assert.NoError(t, err) require.Equal(t, len(updatedState3.NextValidators.Validators), 2) @@ -597,20 +601,24 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { block, err := statefactory.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit), nil, 0) require.NoError(t, err) - blockID := block.BlockID() + blockID, err := block.BlockID() + require.NoError(t, err) // no updates: - abciResponses := &tmstate.ABCIResponses{ - 
BeginBlock: &abci.ResponseBeginBlock{}, - EndBlock: &abci.ResponseEndBlock{ValidatorSetUpdate: nil}, + fb := &abci.ResponseFinalizeBlock{ + ValidatorSetUpdate: nil, } validatorUpdates, thresholdPublicKeyUpdate, _, err := - types.PB2TM.ValidatorUpdatesFromValidatorSet(abciResponses.EndBlock.ValidatorSetUpdate) + types.PB2TM.ValidatorUpdatesFromValidatorSet(nil) + require.NoError(t, err) + + rs, err := abci.MarshalTxResults(fb.TxResults) require.NoError(t, err) + h := merkle.HashFromByteSlices(rs) // Any node pro tx hash should do firstNodeProTxHash, _ := state.Validators.GetByIndex(0) - updatedState, err := sm.UpdateState(state, firstNodeProTxHash, blockID, &block.Header, abciResponses, - validatorUpdates, thresholdPublicKeyUpdate, quorumHash) + + updatedState, err := state.Update(firstNodeProTxHash, blockID, &block.Header, h, fb.ConsensusParamUpdates, validatorUpdates, thresholdPublicKeyUpdate, quorumHash) assert.NoError(t, err) // 0 + 10 (initial prio) - 10 (avg) - 10 (mostest - total) = -10 @@ -628,8 +636,10 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { validatorUpdates, err = types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{updateAddVal}) assert.NoError(t, err) - updatedState2, err := sm.UpdateState(updatedState, firstNodeProTxHash, blockID, &block.Header, abciResponses, - validatorUpdates, ld.ThresholdPubKey, quorumHash) + rs, err = abci.MarshalTxResults(fb.TxResults) + require.NoError(t, err) + h = merkle.HashFromByteSlices(rs) + updatedState2, err := updatedState.Update(firstNodeProTxHash, blockID, &block.Header, h, fb.ConsensusParamUpdates, validatorUpdates, ld.ThresholdPubKey, quorumHash) assert.NoError(t, err) require.Equal(t, len(updatedState2.NextValidators.Validators), 2) @@ -670,11 +680,13 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { ) validatorUpdates, thresholdPublicKeyUpdate, quorumHash, err = - types.PB2TM.ValidatorUpdatesFromValidatorSet(abciResponses.EndBlock.ValidatorSetUpdate) + types.PB2TM.ValidatorUpdatesFromValidatorSet(fb.ValidatorSetUpdate) require.NoError(t, err) - updatedState3, err := sm.UpdateState(updatedState2, firstNodeProTxHash, blockID, &block.Header, abciResponses, - validatorUpdates, thresholdPublicKeyUpdate, quorumHash) + rs, err = abci.MarshalTxResults(fb.TxResults) + require.NoError(t, err) + h = merkle.HashFromByteSlices(rs) + updatedState3, err := updatedState2.Update(firstNodeProTxHash, blockID, &block.Header, h, fb.ConsensusParamUpdates, validatorUpdates, thresholdPublicKeyUpdate, quorumHash) assert.NoError(t, err) assert.Equal(t, updatedState3.Validators.Proposer.ProTxHash, updatedState3.NextValidators.Proposer.ProTxHash) @@ -710,16 +722,17 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { // no changes in voting power and both validators have same voting power // -> proposers should alternate: oldState := updatedState3 - abciResponses = &tmstate.ABCIResponses{ - BeginBlock: &abci.ResponseBeginBlock{}, - EndBlock: &abci.ResponseEndBlock{ValidatorSetUpdate: nil}, + fb = &abci.ResponseFinalizeBlock{ + ValidatorSetUpdate: nil, } validatorUpdates, thresholdPublicKeyUpdate, quorumHash, err = - types.PB2TM.ValidatorUpdatesFromValidatorSet(abciResponses.EndBlock.ValidatorSetUpdate) + types.PB2TM.ValidatorUpdatesFromValidatorSet(fb.ValidatorSetUpdate) require.NoError(t, err) - oldState, err = sm.UpdateState(oldState, firstNodeProTxHash, blockID, &block.Header, abciResponses, - validatorUpdates, thresholdPublicKeyUpdate, quorumHash) + rs, err = abci.MarshalTxResults(fb.TxResults) + require.NoError(t, 
err) + h = merkle.HashFromByteSlices(rs) + oldState, err = oldState.Update(firstNodeProTxHash, blockID, &block.Header, h, fb.ConsensusParamUpdates, validatorUpdates, thresholdPublicKeyUpdate, quorumHash) assert.NoError(t, err) expectedVal1Prio2 = 13 expectedVal2Prio2 = -12 @@ -728,16 +741,17 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { for i := 0; i < 1000; i++ { // no validator updates: - abciResponses := &tmstate.ABCIResponses{ - BeginBlock: &abci.ResponseBeginBlock{}, - EndBlock: &abci.ResponseEndBlock{ValidatorSetUpdate: nil}, + fb := &abci.ResponseFinalizeBlock{ + ValidatorSetUpdate: nil, } validatorUpdates, thresholdPublicKeyUpdate, quorumHash, err = - types.PB2TM.ValidatorUpdatesFromValidatorSet(abciResponses.EndBlock.ValidatorSetUpdate) + types.PB2TM.ValidatorUpdatesFromValidatorSet(fb.ValidatorSetUpdate) require.NoError(t, err) - updatedState, err := sm.UpdateState(oldState, firstNodeProTxHash, blockID, &block.Header, abciResponses, - validatorUpdates, thresholdPublicKeyUpdate, quorumHash) + rs, err := abci.MarshalTxResults(fb.TxResults) + require.NoError(t, err) + h := merkle.HashFromByteSlices(rs) + updatedState, err := oldState.Update(firstNodeProTxHash, blockID, &block.Header, h, fb.ConsensusParamUpdates, validatorUpdates, thresholdPublicKeyUpdate, quorumHash) assert.NoError(t, err) // alternate (and cyclic priorities): assert.NotEqual( @@ -877,11 +891,11 @@ func TestStoreLoadValidatorsIncrementsProposerPriority(t *testing.T) { nextHeight := state.LastBlockHeight + 1 v0, err := stateStore.LoadValidators(nextHeight) - assert.Nil(t, err) + assert.NoError(t, err) acc0 := v0.Validators[0].ProposerPriority v1, err := stateStore.LoadValidators(nextHeight + 1) - assert.Nil(t, err) + assert.NoError(t, err) acc1 := v1.Validators[0].ProposerPriority assert.NotEqual(t, acc1, acc0, "expected ProposerPriority value to change between heights") @@ -905,17 +919,19 @@ func TestManyValidatorChangesSaveLoad(t *testing.T) { oldPubkey := val0.PubKey // Swap the first validator with a new one (validator set size stays the same). - header, _, blockID, responses, err := makeHeaderPartsResponsesValKeysRegenerate(state, true) - require.NoError(t, err) + header, _, blockID, responses := makeHeaderPartsResponsesValKeysRegenerate(t, state, true) // Save state etc. var validatorUpdates []*types.Validator validatorUpdates, thresholdPublicKeyUpdate, quorumHash, err := - types.PB2TM.ValidatorUpdatesFromValidatorSet(responses.EndBlock.ValidatorSetUpdate) + types.PB2TM.ValidatorUpdatesFromValidatorSet(responses.FinalizeBlock.ValidatorSetUpdate) + require.NoError(t, err) + rs, err := abci.MarshalTxResults(responses.FinalizeBlock.TxResults) require.NoError(t, err) - state, err = sm.UpdateState(state, firstNodeProTxHash, blockID, &header, responses, validatorUpdates, + h := merkle.HashFromByteSlices(rs) + state, err = state.Update(firstNodeProTxHash, blockID, &header, h, responses.FinalizeBlock.ConsensusParamUpdates, validatorUpdates, thresholdPublicKeyUpdate, quorumHash) - require.Nil(t, err) + require.NoError(t, err) nextHeight := state.LastBlockHeight + 1 err = stateStore.Save(state) require.NoError(t, err) @@ -929,7 +945,7 @@ func TestManyValidatorChangesSaveLoad(t *testing.T) { // Load nextheight, it should be the oldpubkey. 
v0, err := stateStore.LoadValidators(nextHeight) - assert.Nil(t, err) + assert.NoError(t, err) assert.Equal(t, valSetSize, v0.Size()) index, val := v0.GetByProTxHash(proTxHash) assert.NotNil(t, val) @@ -940,7 +956,7 @@ func TestManyValidatorChangesSaveLoad(t *testing.T) { // Load nextheight+1, it should be the new pubkey. v1, err := stateStore.LoadValidators(nextHeight + 1) - assert.Nil(t, err) + assert.NoError(t, err) assert.Equal(t, valSetSize, v1.Size()) index, val = v1.GetByProTxHash(proTxHash) assert.NotNil(t, val) @@ -992,25 +1008,30 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) { highestHeight := changeHeights[N-1] + 5 changeIndex := 0 cp := params[changeIndex] - var validatorUpdates []*types.Validator - var thresholdPublicKeyUpdate crypto.PubKey - var quorumHash crypto.QuorumHash + var ( + validatorUpdates []*types.Validator + thresholdPublicKeyUpdate crypto.PubKey + quorumHash crypto.QuorumHash + err error + ) for i := int64(1); i < highestHeight; i++ { // When we get to a change height, use the next params. if changeIndex < len(changeHeights) && i == changeHeights[changeIndex] { changeIndex++ cp = params[changeIndex] } - header, _, blockID, responses, err := makeHeaderPartsResponsesParams(state, cp) + header, _, blockID, responses := makeHeaderPartsResponsesParams(t, state, &cp) + validatorUpdates, thresholdPublicKeyUpdate, quorumHash, err = types.PB2TM.ValidatorUpdatesFromValidatorSet(responses.FinalizeBlock.ValidatorSetUpdate) require.NoError(t, err) - validatorUpdates, thresholdPublicKeyUpdate, quorumHash, err = types.PB2TM.ValidatorUpdatesFromValidatorSet(responses.EndBlock.ValidatorSetUpdate) + rs, err := abci.MarshalTxResults(responses.FinalizeBlock.TxResults) require.NoError(t, err) + h := merkle.HashFromByteSlices(rs) // Any node pro tx hash should do firstNodeProTxHash, _ := state.Validators.GetByIndex(0) - state, err = sm.UpdateState(state, firstNodeProTxHash, blockID, &header, responses, validatorUpdates, thresholdPublicKeyUpdate, quorumHash) + state, err = state.Update(firstNodeProTxHash, blockID, &header, h, responses.FinalizeBlock.ConsensusParamUpdates, validatorUpdates, thresholdPublicKeyUpdate, quorumHash) - require.Nil(t, err) + require.NoError(t, err) err = stateStore.Save(state) require.NoError(t, err) } @@ -1032,7 +1053,7 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) { for _, testCase := range testCases { p, err := stateStore.LoadConsensusParams(testCase.height) - assert.Nil(t, err, fmt.Sprintf("expected no err at height %d", testCase.height)) + assert.NoError(t, err, fmt.Sprintf("expected no err at height %d", testCase.height)) assert.EqualValues(t, testCase.params, p, fmt.Sprintf(`unexpected consensus params at height %d`, testCase.height)) } @@ -1093,15 +1114,19 @@ func blockExecutorFunc(t *testing.T, firstProTxHash crypto.ProTxHash) func(prevS return func(prevState, state sm.State, vsu *abci.ValidatorSetUpdate) sm.State { t.Helper() resp := &tmstate.ABCIResponses{ - BeginBlock: &abci.ResponseBeginBlock{}, - EndBlock: &abci.ResponseEndBlock{ValidatorSetUpdate: vsu}, + FinalizeBlock: &abci.ResponseFinalizeBlock{ValidatorSetUpdate: vsu}, } validatorUpdates, thresholdPubKey, quorumHash, err := - types.PB2TM.ValidatorUpdatesFromValidatorSet(resp.EndBlock.ValidatorSetUpdate) + types.PB2TM.ValidatorUpdatesFromValidatorSet(resp.FinalizeBlock.ValidatorSetUpdate) require.NoError(t, err) block, err := statefactory.MakeBlock(prevState, prevState.LastBlockHeight+1, new(types.Commit), nil, 0) require.NoError(t, err) - state, err = 
sm.UpdateState(state, firstProTxHash, block.BlockID(), &block.Header, resp, + blockID, err := block.BlockID() + require.NoError(t, err) + rs, err := abci.MarshalTxResults(resp.FinalizeBlock.TxResults) + require.NoError(t, err) + h := merkle.HashFromByteSlices(rs) + state, err = state.Update(firstProTxHash, blockID, &block.Header, h, resp.FinalizeBlock.ConsensusParamUpdates, validatorUpdates, thresholdPubKey, quorumHash) require.NoError(t, err) return state diff --git a/internal/state/store.go b/internal/state/store.go index f8168b7f9d..6433a25ab8 100644 --- a/internal/state/store.go +++ b/internal/state/store.go @@ -11,7 +11,6 @@ import ( abci "github.com/tendermint/tendermint/abci/types" tmmath "github.com/tendermint/tendermint/libs/math" - tmos "github.com/tendermint/tendermint/libs/os" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" @@ -128,8 +127,7 @@ func (store dbStore) loadState(key []byte) (state State, err error) { err = proto.Unmarshal(buf, sp) if err != nil { // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED - tmos.Exit(fmt.Sprintf(`LoadState: Data has been corrupted or its spec has changed: - %v\n`, err)) + panic(fmt.Sprintf("data has been corrupted or its spec has changed: %+v", err)) } sm, err := FromProto(sp) @@ -172,7 +170,12 @@ func (store dbStore) save(state State, key []byte) error { return err } - if err := batch.Set(key, state.Bytes()); err != nil { + stateBz, err := state.Bytes() + if err != nil { + return err + } + + if err := batch.Set(key, stateBz); err != nil { return err } @@ -208,7 +211,12 @@ func (store dbStore) Bootstrap(state State) error { return err } - if err := batch.Set(stateKey, state.Bytes()); err != nil { + stateBz, err := state.Bytes() + if err != nil { + return err + } + + if err := batch.Set(stateKey, stateBz); err != nil { return err } @@ -398,14 +406,6 @@ func (store dbStore) reverseBatchDelete(batch dbm.Batch, start, end []byte) ([]b //------------------------------------------------------------------------ -// ABCIResponsesResultsHash returns the root hash of a Merkle tree of -// ResponseDeliverTx responses (see ABCIResults.Hash) -// -// See merkle.SimpleHashFromByteSlices -func ABCIResponsesResultsHash(ar *tmstate.ABCIResponses) []byte { - return types.NewResults(ar.DeliverTxs).Hash() -} - // LoadABCIResponses loads the ABCIResponses for the given height from the // database. If not found, ErrNoABCIResponsesForHeight is returned. // @@ -426,8 +426,7 @@ func (store dbStore) LoadABCIResponses(height int64) (*tmstate.ABCIResponses, er err = abciResponses.Unmarshal(buf) if err != nil { // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED - tmos.Exit(fmt.Sprintf(`LoadABCIResponses: Data has been corrupted or its spec has - changed: %v\n`, err)) + panic(fmt.Sprintf("data has been corrupted or its spec has changed: %+v", err)) } // TODO: ensure that buf is completely read. 
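With ABCIResponsesResultsHash deleted above, the results hash is now computed directly from the FinalizeBlock transaction results, which is exactly what the updated state tests do. A sketch of the equivalent helper; resultsHash is a hypothetical name, while the two calls it makes are taken verbatim from the tests in this patch:

package example

import (
	abci "github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/crypto/merkle"
)

// resultsHash rebuilds the removed helper on the new API: marshal each
// ExecTxResult, then Merkle-hash the resulting byte slices.
func resultsHash(txResults []*abci.ExecTxResult) ([]byte, error) {
	rs, err := abci.MarshalTxResults(txResults)
	if err != nil {
		return nil, err
	}
	return merkle.HashFromByteSlices(rs), nil
}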
@@ -445,15 +444,15 @@ func (store dbStore) SaveABCIResponses(height int64, abciResponses *tmstate.ABCI } func (store dbStore) saveABCIResponses(height int64, abciResponses *tmstate.ABCIResponses) error { - var dtxs []*abci.ResponseDeliverTx + var dtxs []*abci.ExecTxResult // strip nil values, - for _, tx := range abciResponses.DeliverTxs { + for _, tx := range abciResponses.FinalizeBlock.TxResults { if tx != nil { dtxs = append(dtxs, tx) } } - abciResponses.DeliverTxs = dtxs + abciResponses.FinalizeBlock.TxResults = dtxs bz, err := abciResponses.Marshal() if err != nil { @@ -489,7 +488,7 @@ func (store dbStore) LoadValidators(height int64) (*types.ValidatorSet, error) { valInfo, err := loadValidatorsInfo(store.db, height) if err != nil { - return nil, ErrNoValSetForHeight{height} + return nil, ErrNoValSetForHeight{Height: height, Err: err} } if valInfo.ValidatorSet == nil { lastStoredHeight := lastStoredHeightFor(height, valInfo.LastHeightChanged) @@ -507,8 +506,12 @@ func (store dbStore) LoadValidators(height int64) (*types.ValidatorSet, error) { if err != nil { return nil, err } + h, err := tmmath.SafeConvertInt32(height - lastStoredHeight) + if err != nil { + return nil, err + } - vs.IncrementProposerPriority(tmmath.SafeConvertInt32(height - lastStoredHeight)) // mutate + vs.IncrementProposerPriority(h) // mutate vi2, err := vs.ToProto() if err != nil { return nil, err @@ -547,8 +550,7 @@ func loadValidatorsInfo(db dbm.DB, height int64) (*tmstate.ValidatorsInfo, error err = v.Unmarshal(buf) if err != nil { // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED - tmos.Exit(fmt.Sprintf(`LoadValidators: Data has been corrupted or its spec has changed: - %v\n`, err)) + panic(fmt.Sprintf("data has been corrupted or its spec has changed: %+v", err)) } // TODO: ensure that buf is completely read. @@ -635,8 +637,7 @@ func (store dbStore) loadConsensusParamsInfo(height int64) (*tmstate.ConsensusPa paramsInfo := new(tmstate.ConsensusParamsInfo) if err = paramsInfo.Unmarshal(buf); err != nil { // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED - tmos.Exit(fmt.Sprintf(`LoadConsensusParams: Data has been corrupted or its spec has changed: - %v\n`, err)) + panic(fmt.Sprintf(`data has been corrupted or its spec has changed: %+v`, err)) } // TODO: ensure that buf is completely read. 
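LoadValidators now carries the underlying cause inside ErrNoValSetForHeight via the Err field added above. A hedged sketch of a call site that separates "nothing stored at this height" from genuine store failures; loadValsOrNil is a hypothetical helper:

package example

import (
	"errors"

	sm "github.com/tendermint/tendermint/internal/state"
	"github.com/tendermint/tendermint/types"
)

// loadValsOrNil treats a missing validator set as an absent result and
// propagates every other error; the root cause stays reachable via noVals.Err.
func loadValsOrNil(store sm.Store, height int64) (*types.ValidatorSet, error) {
	vals, err := store.LoadValidators(height)
	if err != nil {
		var noVals sm.ErrNoValSetForHeight
		if errors.As(err, &noVals) {
			return nil, nil
		}
		return nil, err
	}
	return vals, nil
}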
diff --git a/internal/state/store_test.go b/internal/state/store_test.go index 4cd70e90fa..1e8682a4fb 100644 --- a/internal/state/store_test.go +++ b/internal/state/store_test.go @@ -1,11 +1,11 @@ package state_test import ( + "context" "fmt" "os" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" @@ -29,11 +29,10 @@ func TestStoreBootstrap(t *testing.T) { vals, _ := types.RandValidatorSet(3) bootstrapState := makeRandomStateFromValidatorSet(vals, 100, 100) - err := stateStore.Bootstrap(bootstrapState) - require.NoError(t, err) + require.NoError(t, stateStore.Bootstrap(bootstrapState)) // bootstrap should also save the previous validator - _, err = stateStore.LoadValidators(99) + _, err := stateStore.LoadValidators(99) require.NoError(t, err) _, err = stateStore.LoadValidators(100) @@ -54,10 +53,8 @@ func TestStoreLoadValidators(t *testing.T) { // 1) LoadValidators loads validators using a height where they were last changed // Note that only the next validators at height h + 1 are saved - err := stateStore.Save(makeRandomStateFromValidatorSet(vals, 1, 1)) - require.NoError(t, err) - err = stateStore.Save(makeRandomStateFromValidatorSet(vals.CopyIncrementProposerPriority(1), 2, 1)) - require.NoError(t, err) + require.NoError(t, stateStore.Save(makeRandomStateFromValidatorSet(vals, 1, 1))) + require.NoError(t, stateStore.Save(makeRandomStateFromValidatorSet(vals.CopyIncrementProposerPriority(1), 2, 1))) loadedVals, err := stateStore.LoadValidators(3) require.NoError(t, err) require.Equal(t, vals.CopyIncrementProposerPriority(3), loadedVals) @@ -94,7 +91,7 @@ func TestStoreLoadValidators(t *testing.T) { func BenchmarkLoadValidators(b *testing.B) { const valSetSize = 100 - cfg, err := config.ResetTestRoot("state_") + cfg, err := config.ResetTestRoot(b.TempDir(), "state_") require.NoError(b, err) defer os.RemoveAll(cfg.RootDir) @@ -134,9 +131,12 @@ func BenchmarkLoadValidators(b *testing.B) { } func TestStoreLoadConsensusParams(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + stateDB := dbm.NewMemDB() stateStore := sm.NewStore(stateDB) - err := stateStore.Save(makeRandomStateFromConsensusParams(types.DefaultConsensusParams(), 1, 1)) + err := stateStore.Save(makeRandomStateFromConsensusParams(ctx, t, types.DefaultConsensusParams(), 1, 1)) require.NoError(t, err) params, err := stateStore.LoadConsensusParams(1) require.NoError(t, err) @@ -146,7 +146,7 @@ func TestStoreLoadConsensusParams(t *testing.T) { // it should save a pointer to the params at height 1 differentParams := types.DefaultConsensusParams() differentParams.Block.MaxBytes = 20000 - err = stateStore.Save(makeRandomStateFromConsensusParams(differentParams, 10, 1)) + err = stateStore.Save(makeRandomStateFromConsensusParams(ctx, t, differentParams, 10, 1)) require.NoError(t, err) res, err := stateStore.LoadConsensusParams(10) require.NoError(t, err) @@ -223,10 +223,12 @@ func TestPruneStates(t *testing.T) { require.NoError(t, err) err = stateStore.SaveABCIResponses(h, &tmstate.ABCIResponses{ - DeliverTxs: []*abci.ResponseDeliverTx{ - {Data: []byte{1}}, - {Data: []byte{2}}, - {Data: []byte{3}}, + FinalizeBlock: &abci.ResponseFinalizeBlock{ + TxResults: []*abci.ExecTxResult{ + {Data: []byte{1}}, + {Data: []byte{2}}, + {Data: []byte{3}}, + }, }, }) require.NoError(t, err) @@ -282,25 +284,3 @@ func TestPruneStates(t *testing.T) { }) } } - -func TestABCIResponsesResultsHash(t *testing.T) { - responses := &tmstate.ABCIResponses{ 
- BeginBlock: &abci.ResponseBeginBlock{}, - DeliverTxs: []*abci.ResponseDeliverTx{ - {Code: 32, Data: []byte("Hello"), Log: "Huh?"}, - }, - EndBlock: &abci.ResponseEndBlock{}, - } - - root := sm.ABCIResponsesResultsHash(responses) - - // root should be Merkle tree root of DeliverTxs responses - results := types.NewResults(responses.DeliverTxs) - assert.Equal(t, root, results.Hash()) - - // test we can prove first DeliverTx - proof := results.ProveResult(0) - bz, err := results[0].Marshal() - require.NoError(t, err) - assert.NoError(t, proof.Verify(root, bz)) -} diff --git a/internal/state/test/factory/block.go b/internal/state/test/factory/block.go index f5be543626..2efb031363 100644 --- a/internal/state/test/factory/block.go +++ b/internal/state/test/factory/block.go @@ -1,8 +1,12 @@ package factory import ( + "context" "encoding/binary" "fmt" + "testing" + + "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/crypto" sm "github.com/tendermint/tendermint/internal/state" @@ -10,8 +14,10 @@ import ( "github.com/tendermint/tendermint/types" ) -func MakeBlocks(n int, state *sm.State, privVal types.PrivValidator) ([]*types.Block, error) { - blocks := make([]*types.Block, 0) +func MakeBlocks(ctx context.Context, t *testing.T, n int, state *sm.State, privVal types.PrivValidator) []*types.Block { + t.Helper() + + blocks := make([]*types.Block, n) var ( prevBlock *types.Block @@ -22,10 +28,7 @@ func MakeBlocks(n int, state *sm.State, privVal types.PrivValidator) ([]*types.B for i := 0; i < n; i++ { height := int64(i + 1) - block, parts, err := makeBlockAndPartSet(*state, prevBlock, prevBlockMeta, privVal, height) - if err != nil { - return nil, fmt.Errorf("error making blocks %v", err) - } + block, parts := makeBlockAndPartSet(ctx, t, *state, prevBlock, prevBlockMeta, privVal, height) blocks = append(blocks, block) prevBlock = block @@ -39,35 +42,40 @@ func MakeBlocks(n int, state *sm.State, privVal types.PrivValidator) ([]*types.B state.LastBlockHeight = height } - return blocks, nil + return blocks } func MakeBlock(state sm.State, height int64, c *types.Commit, coreChainLock *types.CoreChainLock, proposedAppVersion uint64) (*types.Block, error) { - if state.LastBlockHeight != (height - 1) { - return nil, fmt.Errorf("requested height %d should be 1 more than last block height %d", - height, state.LastBlockHeight) + return nil, fmt.Errorf("requested height %d should be 1 more than last block height %d", height, state.LastBlockHeight) } - - block, _ := state.MakeBlock( + return state.MakeBlock( height, coreChainLock, - factory.MakeTenTxs(state.LastBlockHeight), + factory.MakeNTxs(state.LastBlockHeight, 10), c, nil, state.Validators.GetProposer().ProTxHash, proposedAppVersion, - ) - return block, nil + ), nil } -func makeBlockAndPartSet(state sm.State, lastBlock *types.Block, lastBlockMeta *types.BlockMeta, - privVal types.PrivValidator, height int64) (*types.Block, *types.PartSet, error) { +func makeBlockAndPartSet( + ctx context.Context, + t *testing.T, + state sm.State, + lastBlock *types.Block, + lastBlockMeta *types.BlockMeta, + privVal types.PrivValidator, + height int64, +) (*types.Block, *types.PartSet) { + t.Helper() lastCommit := types.NewCommit(height-1, 0, types.BlockID{}, state.StateID(), state.LastValidators.QuorumHash, nil, nil) - if height > state.InitialHeight { + if height > 1 { vote, err := factory.MakeVote( + ctx, privVal, state.Validators, lastBlock.Header.ChainID, @@ -75,12 +83,14 @@ func makeBlockAndPartSet(state sm.State, lastBlock *types.Block, 
lastBlockMeta * lastBlockMeta.BlockID, state.LastStateID, ) - if err != nil { - return nil, nil, fmt.Errorf("error when creating vote at height %d: %s", height, err) - } + require.NoError(t, err) lastCommit = types.NewCommit(vote.Height, vote.Round, lastBlockMeta.BlockID, state.StateID(), state.LastValidators.QuorumHash, vote.BlockSignature, vote.StateSignature) } - block, partSet := state.MakeBlock(height, nil, []types.Tx{}, lastCommit, nil, state.Validators.GetProposer().ProTxHash, 0) - return block, partSet, nil + + block := state.MakeBlock(height, nil, []types.Tx{}, lastCommit, nil, state.Validators.GetProposer().ProTxHash, 0) + partSet, err := block.MakePartSet(types.BlockPartSizeBytes) + require.NoError(t, err) + + return block, partSet } diff --git a/internal/state/time.go b/internal/state/time.go deleted file mode 100644 index c0770b3af7..0000000000 --- a/internal/state/time.go +++ /dev/null @@ -1,46 +0,0 @@ -package state - -import ( - "sort" - "time" -) - -// weightedTime for computing a median. -type weightedTime struct { - Time time.Time - Weight int64 -} - -// newWeightedTime with time and weight. -func newWeightedTime(time time.Time, weight int64) *weightedTime { - return &weightedTime{ - Time: time, - Weight: weight, - } -} - -// weightedMedian computes weighted median time for a given array of WeightedTime and the total voting power. -func weightedMedian(weightedTimes []*weightedTime, totalVotingPower int64) (res time.Time) { - median := totalVotingPower / 2 - - sort.Slice(weightedTimes, func(i, j int) bool { - if weightedTimes[i] == nil { - return false - } - if weightedTimes[j] == nil { - return true - } - return weightedTimes[i].Time.UnixNano() < weightedTimes[j].Time.UnixNano() - }) - - for _, weightedTime := range weightedTimes { - if weightedTime != nil { - if median <= weightedTime.Weight { - res = weightedTime.Time - break - } - median -= weightedTime.Weight - } - } - return -} diff --git a/internal/state/time_test.go b/internal/state/time_test.go deleted file mode 100644 index 893ade7eaa..0000000000 --- a/internal/state/time_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package state - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" - tmtime "github.com/tendermint/tendermint/libs/time" -) - -func TestWeightedMedian(t *testing.T) { - m := make([]*weightedTime, 3) - - t1 := tmtime.Now() - t2 := t1.Add(5 * time.Second) - t3 := t1.Add(10 * time.Second) - - m[2] = newWeightedTime(t1, 33) // faulty processes - m[0] = newWeightedTime(t2, 40) // correct processes - m[1] = newWeightedTime(t3, 27) // correct processes - totalVotingPower := int64(100) - - median := weightedMedian(m, totalVotingPower) - assert.Equal(t, t2, median) - // median always returns value between values of correct processes - assert.Equal(t, true, (median.After(t1) || median.Equal(t1)) && - (median.Before(t3) || median.Equal(t3))) - - m[1] = newWeightedTime(t1, 40) // correct processes - m[2] = newWeightedTime(t2, 27) // correct processes - m[0] = newWeightedTime(t3, 33) // faulty processes - totalVotingPower = int64(100) - - median = weightedMedian(m, totalVotingPower) - assert.Equal(t, t2, median) - // median always returns value between values of correct processes - assert.Equal(t, true, (median.After(t1) || median.Equal(t1)) && - (median.Before(t2) || median.Equal(t2))) - - m = make([]*weightedTime, 8) - t4 := t1.Add(15 * time.Second) - t5 := t1.Add(60 * time.Second) - - m[3] = newWeightedTime(t1, 10) // correct processes - m[1] = newWeightedTime(t2, 10) // correct processes - 
m[5] = newWeightedTime(t2, 10) // correct processes - m[4] = newWeightedTime(t3, 23) // faulty processes - m[0] = newWeightedTime(t4, 20) // correct processes - m[7] = newWeightedTime(t5, 10) // faulty processes - totalVotingPower = int64(83) - - median = weightedMedian(m, totalVotingPower) - assert.Equal(t, t3, median) - // median always returns value between values of correct processes - assert.Equal(t, true, (median.After(t1) || median.Equal(t1)) && - (median.Before(t4) || median.Equal(t4))) -} diff --git a/internal/state/tx_filter.go b/internal/state/tx_filter.go index 1136982ea5..835ec16313 100644 --- a/internal/state/tx_filter.go +++ b/internal/state/tx_filter.go @@ -1,21 +1,84 @@ package state import ( + "sync" + "time" + + abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/types" ) -// TxPreCheck returns a function to filter transactions before processing. -// The function limits the size of a transaction to the block's maximum data size. -func TxPreCheck(state State) mempool.PreCheckFunc { - maxDataBytes := types.MaxDataBytesNoEvidence( - state.ConsensusParams.Block.MaxBytes, +func cachingStateFetcher(store Store) func() (State, error) { + const ttl = time.Second + + var ( + last time.Time + mutex = &sync.Mutex{} + cache State + err error ) - return mempool.PreCheckMaxBytes(maxDataBytes) + + return func() (State, error) { + mutex.Lock() + defer mutex.Unlock() + + if time.Since(last) < ttl && cache.ChainID != "" { + return cache, nil + } + + cache, err = store.Load() + if err != nil { + return State{}, err + } + last = time.Now() + + return cache, nil + } + } -// TxPostCheck returns a function to filter transactions after processing. +// TxPreCheckFromStore returns a function to filter transactions before processing. +// The function limits the size of a transaction to the block's maximum data size. +func TxPreCheckFromStore(store Store) mempool.PreCheckFunc { + fetch := cachingStateFetcher(store) + + return func(tx types.Tx) error { + state, err := fetch() + if err != nil { + return err + } + + return TxPreCheckForState(state)(tx) + } +} + +func TxPreCheckForState(state State) mempool.PreCheckFunc { + return func(tx types.Tx) error { + maxDataBytes := types.MaxDataBytesNoEvidence( + state.ConsensusParams.Block.MaxBytes, + ) + return mempool.PreCheckMaxBytes(maxDataBytes)(tx) + } + +} + +// TxPostCheckFromStore returns a function to filter transactions after processing. // The function limits the gas wanted by a transaction to the block's maximum total gas. 
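TxPreCheckFromStore above, and TxPostCheckFromStore continued just below, rebuild the mempool filters on top of the state Store: cachingStateFetcher re-reads state at most roughly once per second (its one-second ttl), so consensus-parameter changes reach the mempool without anyone holding a stale State copy. An illustrative wiring sketch, not part of the patch; mempoolChecks is a hypothetical helper:

package example

import (
	"github.com/tendermint/tendermint/internal/mempool"
	sm "github.com/tendermint/tendermint/internal/state"
)

// mempoolChecks derives both filters from the store rather than from a
// point-in-time State, so MaxBytes/MaxGas updates take effect within the
// fetcher's TTL instead of requiring a rewire.
func mempoolChecks(store sm.Store) (mempool.PreCheckFunc, mempool.PostCheckFunc) {
	return sm.TxPreCheckFromStore(store), sm.TxPostCheckFromStore(store)
}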
-func TxPostCheck(state State) mempool.PostCheckFunc { - return mempool.PostCheckMaxGas(state.ConsensusParams.Block.MaxGas) +func TxPostCheckFromStore(store Store) mempool.PostCheckFunc { + fetch := cachingStateFetcher(store) + + return func(tx types.Tx, resp *abci.ResponseCheckTx) error { + state, err := fetch() + if err != nil { + return err + } + return mempool.PostCheckMaxGas(state.ConsensusParams.Block.MaxGas)(tx, resp) + } +} + +func TxPostCheckForState(state State) mempool.PostCheckFunc { + return func(tx types.Tx, resp *abci.ResponseCheckTx) error { + return mempool.PostCheckMaxGas(state.ConsensusParams.Block.MaxGas)(tx, resp) + } } diff --git a/internal/state/tx_filter_test.go b/internal/state/tx_filter_test.go index 4692a10981..e664d18c4a 100644 --- a/internal/state/tx_filter_test.go +++ b/internal/state/tx_filter_test.go @@ -36,7 +36,7 @@ func TestTxFilter(t *testing.T) { state, err := sm.MakeGenesisState(genDoc) require.NoError(t, err) - f := sm.TxPreCheck(state) + f := sm.TxPreCheckForState(state) if tc.isErr { assert.NotNil(t, f(tc.tx), "#%v", i) } else { diff --git a/internal/state/validation.go b/internal/state/validation.go index 3eb928549a..6add08785f 100644 --- a/internal/state/validation.go +++ b/internal/state/validation.go @@ -9,7 +9,6 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/internal/proxy" tmtime "github.com/tendermint/tendermint/libs/time" "github.com/tendermint/tendermint/types" ) @@ -113,7 +112,7 @@ func validateBlock(state State, block *types.Block) error { // LastPrecommits.Signatures length is checked in VerifyCommit. if err := state.LastValidators.VerifyCommit( state.ChainID, state.LastBlockID, state.LastStateID, block.Height-1, block.LastCommit); err != nil { - return fmt.Errorf("error validating block: %v", err) + return fmt.Errorf("error validating block: %w", err) } } @@ -144,8 +143,8 @@ func validateBlock(state State, block *types.Block) error { case block.Height == state.InitialHeight: genesisTime := state.LastBlockTime - if !block.Time.Equal(genesisTime) { - return fmt.Errorf("block time %v is not equal to genesis time %v", + if block.Time.Before(genesisTime) { + return fmt.Errorf("block time %v is before genesis time %v", block.Time, genesisTime, ) @@ -208,7 +207,7 @@ func validateBlockTime(allowedTimeWindow time.Duration, state State, block *type return nil } -func validateBlockChainLock(proxyAppQueryConn proxy.AppConnQuery, state State, block *types.Block) error { +func validateBlockChainLock(ctx context.Context, client abci.Application, state State, block *types.Block) error { if block.CoreChainLock != nil { // If there is a new Chain Lock we need to make sure the height in the header is the same as the chain lock if block.Header.CoreChainLockedHeight != block.CoreChainLock.CoreBlockHeight { @@ -230,14 +229,14 @@ func validateBlockChainLock(proxyAppQueryConn proxy.AppConnQuery, state State, b panic(err) } - verifySignatureQueryRequest := abci.RequestQuery{ + verifySignatureQueryRequest := &abci.RequestQuery{ Data: coreChainLocksBytes, Path: "/verify-chainlock", } // We need to query our abci application to make sure the chain lock signature is valid - checkQuorumSignatureResponse, err := proxyAppQueryConn.QuerySync(context.Background(), verifySignatureQueryRequest) + checkQuorumSignatureResponse, err := client.Query(ctx, verifySignatureQueryRequest) if err != nil { return err } diff --git a/internal/state/validation_test.go 
b/internal/state/validation_test.go index affb9f6668..f4a8dc35f4 100644 --- a/internal/state/validation_test.go +++ b/internal/state/validation_test.go @@ -2,6 +2,7 @@ package state_test import ( "context" + "errors" "strings" "testing" "time" @@ -11,10 +12,12 @@ import ( "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" + abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/crypto/tmhash" - memmock "github.com/tendermint/tendermint/internal/mempool/mock" + "github.com/tendermint/tendermint/internal/eventbus" + mpmocks "github.com/tendermint/tendermint/internal/mempool/mocks" + "github.com/tendermint/tendermint/internal/proxy" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/state/mocks" statefactory "github.com/tendermint/tendermint/internal/state/test/factory" @@ -29,33 +32,51 @@ import ( const validationTestsStopHeight int64 = 10 func TestValidateBlockHeader(t *testing.T) { - proxyApp := newTestApp() - require.NoError(t, proxyApp.Start()) - defer proxyApp.Stop() //nolint:errcheck // ignore for tests + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + logger := log.NewNopLogger() + proxyApp := proxy.New(abciclient.NewLocalClient(logger, &testApp{}), logger, proxy.NopMetrics()) + require.NoError(t, proxyApp.Start(ctx)) - state, stateDB, privVals := makeState(3, 1) - nodeProTxHash := state.Validators.Validators[0].ProTxHash + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) + + state, stateDB, privVals := makeState(t, 3, 1) stateStore := sm.NewStore(stateDB) + nodeProTxHash := state.Validators.Validators[0].ProTxHash nextChainLock := &types.CoreChainLock{ CoreBlockHeight: 100, CoreBlockHash: tmrand.Bytes(32), Signature: tmrand.Bytes(96), } + mp := &mpmocks.Mempool{} + mp.On("Lock").Return() + mp.On("Unlock").Return() + mp.On("FlushAppConn", mock.Anything).Return(nil) + mp.On("Update", + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything).Return(nil) + blockStore := store.NewBlockStore(dbm.NewMemDB()) blockExec := sm.NewBlockExecutor( stateStore, - log.TestingLogger(), - proxyApp.Consensus(), - proxyApp.Query(), - memmock.Mempool{}, + logger, + proxyApp, + mp, sm.EmptyEvidencePool{}, blockStore, - nextChainLock, + eventBus, + sm.NopMetrics(), ) + blockExec.SetNextCoreChainLock(nextChainLock) lastCommit := types.NewCommit(0, 0, types.BlockID{}, types.StateID{}, nil, nil, nil) // some bad values - wrongHash := tmhash.Sum([]byte("this hash is wrong")) + wrongHash := crypto.Checksum([]byte("this hash is wrong")) wrongVersion1 := state.Version.Consensus wrongVersion1.Block += 2 wrongVersion2 := state.Version.Consensus @@ -116,16 +137,10 @@ func TestValidateBlockHeader(t *testing.T) { Invalid blocks don't pass */ for _, tc := range testCases { - block, err := statefactory.MakeBlock( - state, - height, - lastCommit, - nextChainLock, - 0, - ) - require.NoError(t, err, tc.name) + block, err := statefactory.MakeBlock(state, height, lastCommit, nextChainLock, 0) + require.NoError(t, err) tc.malleateBlock(block) - err = blockExec.ValidateBlock(state, block) + err = blockExec.ValidateBlock(ctx, state, block) t.Logf("%s: %v", tc.name, err) require.Error(t, err, tc.name) } @@ -133,64 +148,67 @@ func TestValidateBlockHeader(t *testing.T) { /* A good block passes */ - var err error - state, _, 
lastCommit, err = makeAndCommitGoodBlock( - state, - nodeProTxHash, - height, - lastCommit, - proposerProTxHash, - blockExec, - privVals, - nil, - 3) - require.NoError(t, err, "height: %d\nstate:\n%+v\n", height, state) + state, _, lastCommit = makeAndCommitGoodBlock(ctx, t, + state, nodeProTxHash, height, lastCommit, proposerProTxHash, blockExec, privVals, nil, 3) } nextHeight := validationTestsStopHeight block, err := statefactory.MakeBlock(state, nextHeight, lastCommit, nextChainLock, 0) require.NoError(t, err) state.InitialHeight = nextHeight + 1 - err = blockExec.ValidateBlock(state, block) + err = blockExec.ValidateBlock(ctx, state, block) require.Error(t, err, "expected an error when state is ahead of block") assert.Contains(t, err.Error(), "lower than initial height") } func TestValidateBlockCommit(t *testing.T) { - proxyApp := newTestApp() - require.NoError(t, proxyApp.Start()) - defer proxyApp.Stop() //nolint:errcheck // ignore for tests + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + proxyApp := proxy.New(abciclient.NewLocalClient(logger, &testApp{}), logger, proxy.NopMetrics()) + require.NoError(t, proxyApp.Start(ctx)) - state, stateDB, privVals := makeState(1, 1) + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) + + state, stateDB, privVals := makeState(t, 1, 1) nodeProTxHash := state.Validators.Validators[0].ProTxHash stateStore := sm.NewStore(stateDB) - nextChainLock := &types.CoreChainLock{ CoreBlockHeight: 100, CoreBlockHash: tmrand.Bytes(32), Signature: tmrand.Bytes(96), } + mp := &mpmocks.Mempool{} + mp.On("Lock").Return() + mp.On("Unlock").Return() + mp.On("FlushAppConn", mock.Anything).Return(nil) + mp.On("Update", + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything).Return(nil) + blockStore := store.NewBlockStore(dbm.NewMemDB()) blockExec := sm.NewBlockExecutor( stateStore, - log.TestingLogger(), - proxyApp.Consensus(), - proxyApp.Query(), - memmock.Mempool{}, + logger, + proxyApp, + mp, sm.EmptyEvidencePool{}, blockStore, - nextChainLock, + eventBus, + sm.NopMetrics(), ) - lastCommit := types.NewCommit(0, 0, types.BlockID{}, types.StateID{}, nil, - nil, nil) - wrongVoteMessageSignedCommit := types.NewCommit(1, 0, types.BlockID{}, types.StateID{}, nil, - nil, nil) - badPrivVal := types.NewMockPV() - badPrivValQuorumHash, err := badPrivVal.GetFirstQuorumHash(context.Background()) - if err != nil { - panic(err) - } + blockExec.SetNextCoreChainLock(nextChainLock) + lastCommit := types.NewCommit(0, 0, types.BlockID{}, types.StateID{}, nil, nil, nil) + wrongVoteMessageSignedCommit := types.NewCommit(1, 0, types.BlockID{}, types.StateID{}, nil, nil, nil) + badPrivValQuorumHash := crypto.RandQuorumHash() + badPrivVal := types.NewMockPVForQuorum(badPrivValQuorumHash) for height := int64(1); height < validationTestsStopHeight; height++ { stateID := state.StateID() @@ -200,7 +218,7 @@ func TestValidateBlockCommit(t *testing.T) { #2589: ensure state.LastValidators.VerifyCommit fails here */ // should be height-1 instead of height - wrongHeightVote, err := testfactory.MakeVote( + wrongHeightVote, err := testfactory.MakeVote(ctx, privVals[proTxHash.String()], state.Validators, chainID, @@ -211,7 +229,7 @@ func TestValidateBlockCommit(t *testing.T) { state.LastBlockID, stateID, ) - require.NoError(t, err, "height %d", height) + require.NoError(t, err) wrongHeightCommit := types.NewCommit( wrongHeightVote.Height, wrongHeightVote.Round, @@ 
-221,31 +239,18 @@ func TestValidateBlockCommit(t *testing.T) { wrongHeightVote.BlockSignature, wrongHeightVote.StateSignature, ) - block, err := statefactory.MakeBlock( - state, - height, - wrongHeightCommit, - nextChainLock, - 0, - ) + block, err := statefactory.MakeBlock(state, height, wrongHeightCommit, nextChainLock, 0) require.NoError(t, err) - err = blockExec.ValidateBlock(state, block) - require.True( - t, - strings.HasPrefix( - err.Error(), - "error validating block: Invalid commit -- wrong height:", - ), - "expected error on block threshold signature at height %d, but got: %v", - height, - err, - ) + err = blockExec.ValidateBlock(ctx, state, block) + var wantErr types.ErrInvalidCommitHeight + require.True(t, errors.As(err, &wantErr), "expected ErrInvalidCommitHeight at height %d but got: %v", height, err) + /* Test that the threshold block signatures are good */ - block, _ = statefactory.MakeBlock(state, height, wrongVoteMessageSignedCommit, nextChainLock, 0) - err = blockExec.ValidateBlock(state, block) - require.Error(t, err) + block, err = statefactory.MakeBlock(state, height, wrongVoteMessageSignedCommit, nextChainLock, 0) + require.NoError(t, err) + err = blockExec.ValidateBlock(ctx, state, block) require.True( t, strings.HasPrefix( @@ -261,9 +266,8 @@ func TestValidateBlockCommit(t *testing.T) { /* A good block passes */ - var err error var blockID types.BlockID - state, blockID, lastCommit, err = makeAndCommitGoodBlock( + state, blockID, lastCommit = makeAndCommitGoodBlock(ctx, t, state, nodeProTxHash, height, @@ -274,14 +278,13 @@ func TestValidateBlockCommit(t *testing.T) { nil, 0, ) - require.NoError(t, err, "height %d", height) /* wrongSigsCommit is fine except for the extra bad precommit */ proTxHashString := proTxHash.String() - goodVote, err := testfactory.MakeVote( + goodVote, err := testfactory.MakeVote(ctx, privVals[proTxHashString], state.Validators, chainID, @@ -294,7 +297,7 @@ func TestValidateBlockCommit(t *testing.T) { ) require.NoError(t, err, "height %d", height) - bpvProTxHash, err := badPrivVal.GetProTxHash(context.Background()) + bpvProTxHash, err := badPrivVal.GetProTxHash(ctx) require.NoError(t, err) badVote := &types.Vote{ @@ -309,9 +312,7 @@ func TestValidateBlockCommit(t *testing.T) { g := goodVote.ToProto() b := badVote.ToProto() - err = badPrivVal.SignVote( - context.Background(), - chainID, + err = badPrivVal.SignVote(ctx, chainID, state.Validators.QuorumType, badPrivValQuorumHash, g, @@ -319,8 +320,7 @@ func TestValidateBlockCommit(t *testing.T) { nil, ) require.NoError(t, err, "height %d", height) - err = badPrivVal.SignVote( - context.Background(), + err = badPrivVal.SignVote(ctx, chainID, state.Validators.QuorumType, badPrivValQuorumHash, @@ -340,33 +340,49 @@ func TestValidateBlockCommit(t *testing.T) { } func TestValidateBlockEvidence(t *testing.T) { - proxyApp := newTestApp() - require.NoError(t, proxyApp.Start()) - defer proxyApp.Stop() //nolint:errcheck // ignore for tests + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + proxyApp := proxy.New(abciclient.NewLocalClient(logger, &testApp{}), logger, proxy.NopMetrics()) + require.NoError(t, proxyApp.Start(ctx)) - state, stateDB, privVals := makeState(4, 1) + state, stateDB, privVals := makeState(t, 4, 1) nodeProTxHash := state.Validators.Validators[0].ProTxHash stateStore := sm.NewStore(stateDB) blockStore := store.NewBlockStore(dbm.NewMemDB()) defaultEvidenceTime := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC) evpool := 
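The assertion change just above, from strings.HasPrefix on err.Error() to errors.As, makes the test robust to error wrapping and message rewording. A self-contained illustration, with a stand-in for types.ErrInvalidCommitHeight (the field names here are hypothetical):

package main

import (
	"errors"
	"fmt"
	"strings"
)

type ErrInvalidCommitHeight struct{ Expected, Actual int64 }

func (e ErrInvalidCommitHeight) Error() string {
	return fmt.Sprintf("Invalid commit -- wrong height: %d vs %d", e.Expected, e.Actual)
}

func main() {
	// Wrapping with %w preserves the typed error for errors.As.
	err := fmt.Errorf("error validating block: %w", ErrInvalidCommitHeight{Expected: 9, Actual: 10})

	var want ErrInvalidCommitHeight
	fmt.Println(errors.As(err, &want)) // true, even through the wrapping
	fmt.Println(want.Expected)         // 9

	// The old prefix check breaks as soon as the error is wrapped:
	fmt.Println(strings.HasPrefix(err.Error(), "Invalid commit -- wrong height:")) // false
}
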
&mocks.EvidencePool{} - evpool.On("CheckEvidence", mock.AnythingOfType("types.EvidenceList")).Return(nil) - evpool.On("Update", mock.AnythingOfType("state.State"), - mock.AnythingOfType("types.EvidenceList")).Return() - evpool.On("ABCIEvidence", mock.AnythingOfType("int64"), - mock.AnythingOfType("[]types.Evidence")).Return([]abci.Evidence{}) + evpool.On("CheckEvidence", ctx, mock.AnythingOfType("types.EvidenceList")).Return(nil) + evpool.On("Update", ctx, mock.AnythingOfType("state.State"), mock.AnythingOfType("types.EvidenceList")).Return() + evpool.On("ABCIEvidence", mock.AnythingOfType("int64"), mock.AnythingOfType("[]types.Evidence")).Return( + []abci.Misbehavior{}) + + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) + mp := &mpmocks.Mempool{} + mp.On("Lock").Return() + mp.On("Unlock").Return() + mp.On("FlushAppConn", mock.Anything).Return(nil) + mp.On("Update", + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything, + mock.Anything).Return(nil) state.ConsensusParams.Evidence.MaxBytes = 1000 blockExec := sm.NewBlockExecutor( stateStore, - log.TestingLogger(), - proxyApp.Consensus(), - proxyApp.Query(), - memmock.Mempool{}, + log.NewNopLogger(), + proxyApp, + mp, evpool, blockStore, - nil, + eventBus, + sm.NopMetrics(), ) lastCommit := types.NewCommit(0, 0, types.BlockID{}, types.StateID{}, nil, nil, nil) @@ -382,23 +398,24 @@ func TestValidateBlockEvidence(t *testing.T) { var currentBytes int64 // more bytes than the maximum allowed for evidence for currentBytes <= maxBytesEvidence { - newEv, err := types.NewMockDuplicateVoteEvidenceWithValidator(height, time.Now(), + newEv, err := types.NewMockDuplicateVoteEvidenceWithValidator(ctx, height, time.Now(), privVals[proposerProTxHash.String()], chainID, state.Validators.QuorumType, state.Validators.QuorumHash) require.NoError(t, err) evidence = append(evidence, newEv) currentBytes += int64(len(newEv.Bytes())) } - block, _ := state.MakeBlock( + block := state.MakeBlock( height, nil, - testfactory.MakeTenTxs(height), + testfactory.MakeNTxs(height, 10), lastCommit, evidence, proposerProTxHash, 0, ) - err := blockExec.ValidateBlock(state, block) + + err := blockExec.ValidateBlock(ctx, state, block) if assert.Error(t, err) { _, ok := err.(*types.ErrEvidenceOverflow) require.True( @@ -418,11 +435,12 @@ func TestValidateBlockEvidence(t *testing.T) { var currentBytes int64 // precisely the amount of allowed evidence for { - proposerProTxHashString := proposerProTxHash.String() + propProTxHashStr := proposerProTxHash.String() newEv, err := types.NewMockDuplicateVoteEvidenceWithValidator( + ctx, height, defaultEvidenceTime, - privVals[proposerProTxHashString], + privVals[propProTxHashStr], chainID, state.Validators.QuorumType, state.Validators.QuorumHash, @@ -435,8 +453,9 @@ func TestValidateBlockEvidence(t *testing.T) { evidence = append(evidence, newEv) } - var err error - state, _, lastCommit, err = makeAndCommitGoodBlock( + state, _, lastCommit = makeAndCommitGoodBlock( + ctx, + t, state, nodeProTxHash, height, @@ -447,6 +466,6 @@ func TestValidateBlockEvidence(t *testing.T) { evidence, 0, ) - require.NoError(t, err, "height %d", height) + } } diff --git a/internal/statesync/block_queue.go b/internal/statesync/block_queue.go index 56ed3c3762..80b0ffbd52 100644 --- a/internal/statesync/block_queue.go +++ b/internal/statesync/block_queue.go @@ -200,7 +200,7 @@ func (q *blockQueue) retry(height int64) { // Success is called when a light block has been successfully verified and // 
processed -func (q *blockQueue) success(height int64) { +func (q *blockQueue) success() { q.mtx.Lock() defer q.mtx.Unlock() if q.terminal != nil && q.verifyHeight == q.terminal.Height { diff --git a/internal/statesync/block_queue_test.go b/internal/statesync/block_queue_test.go index 7cc47bcf79..2380036f63 100644 --- a/internal/statesync/block_queue_test.go +++ b/internal/statesync/block_queue_test.go @@ -1,6 +1,7 @@ package statesync import ( + "context" "math/rand" "sync" "testing" @@ -22,6 +23,9 @@ var ( ) func TestBlockQueueBasic(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + peerID, err := types.NewNodeID("0011223344556677889900112233445566778899") require.NoError(t, err) @@ -35,7 +39,7 @@ func TestBlockQueueBasic(t *testing.T) { for { select { case height := <-queue.nextHeight(): - queue.add(mockLBResp(t, peerID, height, endTime)) + queue.add(mockLBResp(ctx, t, peerID, height, endTime)) case <-queue.done(): wg.Done() return @@ -58,7 +62,7 @@ loop: // assert that the queue serializes the blocks require.Equal(t, resp.block.Height, trackingHeight) trackingHeight-- - queue.success(resp.block.Height) + queue.success() } } @@ -69,6 +73,9 @@ loop: // Test with spurious failures and retries func TestBlockQueueWithFailures(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + peerID, err := types.NewNodeID("0011223344556677889900112233445566778899") require.NoError(t, err) @@ -85,7 +92,7 @@ func TestBlockQueueWithFailures(t *testing.T) { if rand.Intn(failureRate) == 0 { queue.retry(height) } else { - queue.add(mockLBResp(t, peerID, height, endTime)) + queue.add(mockLBResp(ctx, t, peerID, height, endTime)) } case <-queue.done(): wg.Done() @@ -105,7 +112,7 @@ func TestBlockQueueWithFailures(t *testing.T) { queue.retry(resp.block.Height) } else { trackingHeight-- - queue.success(resp.block.Height) + queue.success() } case <-queue.done(): @@ -125,6 +132,9 @@ func TestBlockQueueBlocks(t *testing.T) { expectedHeight := startHeight retryHeight := stopHeight + 2 + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + loop: for { select { @@ -132,7 +142,7 @@ loop: require.Equal(t, height, expectedHeight) require.GreaterOrEqual(t, height, stopHeight) expectedHeight-- - queue.add(mockLBResp(t, peerID, height, endTime)) + queue.add(mockLBResp(ctx, t, peerID, height, endTime)) case <-time.After(1 * time.Second): if expectedHeight >= stopHeight { t.Fatalf("expected next height %d", expectedHeight) @@ -171,12 +181,15 @@ func TestBlockQueueAcceptsNoMoreBlocks(t *testing.T) { queue := newBlockQueue(startHeight, stopHeight, 1, stopTime, 1) defer queue.close() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + loop: for { select { case height := <-queue.nextHeight(): require.GreaterOrEqual(t, height, stopHeight) - queue.add(mockLBResp(t, peerID, height, endTime)) + queue.add(mockLBResp(ctx, t, peerID, height, endTime)) case <-time.After(1 * time.Second): break loop } @@ -184,7 +197,7 @@ loop: require.Len(t, queue.pending, int(startHeight-stopHeight)+1) - queue.add(mockLBResp(t, peerID, stopHeight-1, endTime)) + queue.add(mockLBResp(ctx, t, peerID, stopHeight-1, endTime)) require.Len(t, queue.pending, int(startHeight-stopHeight)+1) } @@ -197,6 +210,9 @@ func TestBlockQueueStopTime(t *testing.T) { queue := newBlockQueue(startHeight, stopHeight, 1, stopTime, 1) wg := &sync.WaitGroup{} + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + baseTime := 
stopTime.Add(-50 * time.Second) // asynchronously fetch blocks and add it to the queue @@ -207,7 +223,7 @@ func TestBlockQueueStopTime(t *testing.T) { select { case height := <-queue.nextHeight(): blockTime := baseTime.Add(time.Duration(height) * time.Second) - queue.add(mockLBResp(t, peerID, height, blockTime)) + queue.add(mockLBResp(ctx, t, peerID, height, blockTime)) case <-queue.done(): wg.Done() return @@ -223,7 +239,7 @@ func TestBlockQueueStopTime(t *testing.T) { // assert that the queue serializes the blocks assert.Equal(t, resp.block.Height, trackingHeight) trackingHeight-- - queue.success(resp.block.Height) + queue.success() case <-queue.done(): wg.Wait() @@ -241,6 +257,9 @@ func TestBlockQueueInitialHeight(t *testing.T) { queue := newBlockQueue(startHeight, stopHeight, initialHeight, stopTime, 1) wg := &sync.WaitGroup{} + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // asynchronously fetch blocks and add it to the queue for i := 0; i <= numWorkers; i++ { wg.Add(1) @@ -249,7 +268,7 @@ func TestBlockQueueInitialHeight(t *testing.T) { select { case height := <-queue.nextHeight(): require.GreaterOrEqual(t, height, initialHeight) - queue.add(mockLBResp(t, peerID, height, endTime)) + queue.add(mockLBResp(ctx, t, peerID, height, endTime)) case <-queue.done(): wg.Done() return @@ -268,14 +287,15 @@ loop: case resp := <-queue.verifyNext(): require.GreaterOrEqual(t, resp.block.Height, initialHeight) - queue.success(resp.block.Height) + queue.success() } } } -func mockLBResp(t *testing.T, peer types.NodeID, height int64, time time.Time) lightBlockResponse { +func mockLBResp(ctx context.Context, t *testing.T, peer types.NodeID, height int64, time time.Time) lightBlockResponse { + t.Helper() vals, pv := types.RandValidatorSet(3) - _, _, lb := mockLB(t, height, time, factory.MakeBlockID(), vals, pv) + _, _, lb := mockLB(ctx, t, height, time, factory.MakeBlockID(), vals, pv) return lightBlockResponse{ block: lb, peer: peer, diff --git a/internal/statesync/chunks.go b/internal/statesync/chunks.go index cf24cefad1..88f18eb59c 100644 --- a/internal/statesync/chunks.go +++ b/internal/statesync/chunks.go @@ -3,13 +3,12 @@ package statesync import ( "errors" "fmt" - "io/ioutil" "os" "path/filepath" "strconv" + "sync" "time" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/types" ) @@ -29,7 +28,7 @@ type chunk struct { // iterator over all chunks, but callers can request chunks to be retried, optionally after // refetching. type chunkQueue struct { - tmsync.Mutex + sync.Mutex snapshot *snapshot // if this is nil, the queue has been closed dir string // temp dir for on-disk chunk storage chunkFiles map[uint32]string // path to temporary chunk file @@ -42,7 +41,7 @@ type chunkQueue struct { // newChunkQueue creates a new chunk queue for a snapshot, using a temp dir for storage. // Callers must call Close() when done. 
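Besides taking a context, mockLBResp now calls t.Helper(). The effect of t.Helper() in isolation, shown with a hypothetical helper: failures raised inside a marked helper are attributed to the caller's line, which matters once helpers start doing require-style checks themselves.

package example

import "testing"

func mustPositiveHeight(t *testing.T, height int64) {
	t.Helper() // attribute any failure below to the calling line
	if height <= 0 {
		t.Fatalf("expected positive height, got %d", height)
	}
}

func TestHeights(t *testing.T) {
	mustPositiveHeight(t, 7) // a failure would be reported at this line
}
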
func newChunkQueue(snapshot *snapshot, tempDir string) (*chunkQueue, error) { - dir, err := ioutil.TempDir(tempDir, "tm-statesync") + dir, err := os.MkdirTemp(tempDir, "tm-statesync") if err != nil { return nil, fmt.Errorf("unable to create temp dir for state sync chunks: %w", err) } @@ -95,7 +94,7 @@ func (q *chunkQueue) Add(chunk *chunk) (bool, error) { } path := filepath.Join(q.dir, strconv.FormatUint(uint64(chunk.Index), 10)) - err := ioutil.WriteFile(path, chunk.Chunk, 0600) + err := os.WriteFile(path, chunk.Chunk, 0600) if err != nil { return false, fmt.Errorf("failed to save chunk %v to file %v: %w", chunk.Index, path, err) } @@ -237,7 +236,7 @@ func (q *chunkQueue) load(index uint32) (*chunk, error) { return nil, nil } - body, err := ioutil.ReadFile(path) + body, err := os.ReadFile(path) if err != nil { return nil, fmt.Errorf("failed to load chunk %v: %w", index, err) } diff --git a/internal/statesync/chunks_test.go b/internal/statesync/chunks_test.go index 3197869b48..85cc23a806 100644 --- a/internal/statesync/chunks_test.go +++ b/internal/statesync/chunks_test.go @@ -1,7 +1,6 @@ package statesync import ( - "io/ioutil" "os" "testing" @@ -19,7 +18,7 @@ func setupChunkQueue(t *testing.T) (*chunkQueue, func()) { Hash: []byte{7}, Metadata: nil, } - queue, err := newChunkQueue(snapshot, "") + queue, err := newChunkQueue(snapshot, t.TempDir()) require.NoError(t, err) teardown := func() { err := queue.Close() @@ -36,20 +35,18 @@ func TestNewChunkQueue_TempDir(t *testing.T) { Hash: []byte{7}, Metadata: nil, } - dir, err := ioutil.TempDir("", "newchunkqueue") - require.NoError(t, err) - defer os.RemoveAll(dir) + dir := t.TempDir() queue, err := newChunkQueue(snapshot, dir) require.NoError(t, err) - files, err := ioutil.ReadDir(dir) + files, err := os.ReadDir(dir) require.NoError(t, err) assert.Len(t, files, 1) err = queue.Close() require.NoError(t, err) - files, err = ioutil.ReadDir(dir) + files, err = os.ReadDir(dir) require.NoError(t, err) assert.Len(t, files, 0) } diff --git a/internal/statesync/dispatcher.go b/internal/statesync/dispatcher.go index 844cb5e323..9cdb349784 100644 --- a/internal/statesync/dispatcher.go +++ b/internal/statesync/dispatcher.go @@ -26,18 +26,16 @@ var ( // NOTE: It is not the responsibility of the dispatcher to verify the light blocks. type Dispatcher struct { // the channel with which to send light block requests on - requestCh chan<- p2p.Envelope - closeCh chan struct{} + requestCh *p2p.Channel mtx sync.Mutex // all pending calls that have been dispatched and are awaiting an answer calls map[types.NodeID]chan *types.LightBlock } -func NewDispatcher(requestCh chan<- p2p.Envelope) *Dispatcher { +func NewDispatcher(requestChannel *p2p.Channel) *Dispatcher { return &Dispatcher{ - requestCh: requestCh, - closeCh: make(chan struct{}), + requestCh: requestChannel, calls: make(map[types.NodeID]chan *types.LightBlock), } } @@ -47,7 +45,7 @@ func NewDispatcher(requestCh chan<- p2p.Envelope) *Dispatcher { // LightBlock response is used to signal that the peer doesn't have the requested LightBlock. 
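The chunk-queue edits above are part of the io/ioutil deprecation cleanup (Go 1.16+): each ioutil call has a direct os replacement, and tests get automatic creation and cleanup from t.TempDir(). The mapping in one small runnable file:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	dir, err := os.MkdirTemp("", "tm-statesync") // was ioutil.TempDir
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir) // in tests, t.TempDir() handles this for you

	path := filepath.Join(dir, "0")
	if err := os.WriteFile(path, []byte("chunk-0"), 0600); err != nil { // was ioutil.WriteFile
		panic(err)
	}

	body, err := os.ReadFile(path) // was ioutil.ReadFile
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))

	// os.ReadDir replaces ioutil.ReadDir, but returns []fs.DirEntry
	// instead of []fs.FileInfo.
	entries, err := os.ReadDir(dir)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(entries)) // 1
}
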
func (d *Dispatcher) LightBlock(ctx context.Context, height int64, peer types.NodeID) (*types.LightBlock, error) { // dispatch the request to the peer - callCh, err := d.dispatch(peer, height) + callCh, err := d.dispatch(ctx, peer, height) if err != nil { return nil, err } @@ -69,19 +67,16 @@ func (d *Dispatcher) LightBlock(ctx context.Context, height int64, peer types.No case <-ctx.Done(): return nil, ctx.Err() - - case <-d.closeCh: - return nil, errDisconnected } } // dispatch takes a peer and allocates it a channel so long as it's not already // busy and the receiving channel is still running. It then dispatches the message -func (d *Dispatcher) dispatch(peer types.NodeID, height int64) (chan *types.LightBlock, error) { +func (d *Dispatcher) dispatch(ctx context.Context, peer types.NodeID, height int64) (chan *types.LightBlock, error) { d.mtx.Lock() defer d.mtx.Unlock() select { - case <-d.closeCh: + case <-ctx.Done(): return nil, errDisconnected default: } @@ -96,11 +91,14 @@ func (d *Dispatcher) dispatch(peer types.NodeID, height int64) (chan *types.Ligh d.calls[peer] = ch // send request - d.requestCh <- p2p.Envelope{ + if err := d.requestCh.Send(ctx, p2p.Envelope{ To: peer, Message: &ssproto.LightBlockRequest{ Height: uint64(height), }, + }); err != nil { + close(ch) + return ch, err } return ch, nil @@ -109,7 +107,7 @@ func (d *Dispatcher) dispatch(peer types.NodeID, height int64) (chan *types.Ligh // Respond allows the underlying process which receives requests on the // requestCh to respond with the respective light block. A nil response is used to // represent that the receiver of the request does not have a light block at that height. -func (d *Dispatcher) Respond(lb *tmproto.LightBlock, peer types.NodeID) error { +func (d *Dispatcher) Respond(ctx context.Context, lb *tmproto.LightBlock, peer types.NodeID) error { d.mtx.Lock() defer d.mtx.Unlock() @@ -123,8 +121,12 @@ func (d *Dispatcher) Respond(lb *tmproto.LightBlock, peer types.NodeID) error { // If lb is nil we take that to mean that the peer didn't have the requested light // block and thus pass on the nil to the caller. if lb == nil { - answerCh <- nil - return nil + select { + case answerCh <- nil: + return nil + case <-ctx.Done(): + return ctx.Err() + } } block, err := types.LightBlockFromProto(lb) @@ -132,8 +134,12 @@ func (d *Dispatcher) Respond(lb *tmproto.LightBlock, peer types.NodeID) error { return err } - answerCh <- block - return nil + select { + case <-ctx.Done(): + return ctx.Err() + case answerCh <- block: + return nil + } } // Close shuts down the dispatcher and cancels any pending calls awaiting responses. @@ -141,17 +147,14 @@ func (d *Dispatcher) Respond(lb *tmproto.LightBlock, peer types.NodeID) error { func (d *Dispatcher) Close() { d.mtx.Lock() defer d.mtx.Unlock() - close(d.closeCh) - for peer, call := range d.calls { + for peer := range d.calls { delete(d.calls, peer) - close(call) + // don't close the channel here as it's closed in + // other handlers, and would otherwise get garbage + // collected. 
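The dispatcher changes above replace the package-level closeCh with per-call contexts: dispatch checks ctx.Done() before allocating a call, and Respond pairs every channel send with a ctx.Done() case so a vanished caller can no longer wedge the dispatcher. The send idiom in isolation:

package main

import (
	"context"
	"fmt"
	"time"
)

// send blocks until the value is delivered or the context ends, whichever
// comes first; this is the shape Respond now uses for answerCh.
func send(ctx context.Context, ch chan<- int, v int) error {
	select {
	case ch <- v:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()

	blocked := make(chan int)           // unbuffered, no receiver: the send can never complete
	fmt.Println(send(ctx, blocked, 42)) // context deadline exceeded
}
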
} } -func (d *Dispatcher) Done() <-chan struct{} { - return d.closeCh -} - //---------------------------------------------------------------- // BlockProvider is a p2p based light provider which uses a dispatcher connected @@ -192,7 +195,7 @@ func (p *BlockProvider) LightBlock(ctx context.Context, height int64) (*types.Li case errPeerAlreadyBusy: return nil, provider.ErrLightBlockNotFound default: - return nil, provider.ErrUnreliableProvider{Reason: err.Error()} + return nil, provider.ErrUnreliableProvider{Reason: err} } // check that the height requested is the same one returned @@ -221,6 +224,9 @@ func (p *BlockProvider) ReportEvidence(ctx context.Context, ev types.Evidence) e // String implements stringer interface func (p *BlockProvider) String() string { return string(p.peer) } +// Returns the ID address of the provider (NodeID of peer) +func (p *BlockProvider) ID() string { return string(p.peer) } + //---------------------------------------------------------------- // peerList is a rolling list of peers. This is used to distribute the load of diff --git a/internal/statesync/dispatcher_test.go b/internal/statesync/dispatcher_test.go index e5a6a85cd3..65c517be43 100644 --- a/internal/statesync/dispatcher_test.go +++ b/internal/statesync/dispatcher_test.go @@ -18,16 +18,32 @@ import ( "github.com/tendermint/tendermint/types" ) +type channelInternal struct { + In chan p2p.Envelope + Out chan p2p.Envelope + Error chan p2p.PeerError +} + +func testChannel(size int) (*channelInternal, *p2p.Channel) { + in := &channelInternal{ + In: make(chan p2p.Envelope, size), + Out: make(chan p2p.Envelope, size), + Error: make(chan p2p.PeerError, size), + } + return in, p2p.NewChannel(0, nil, in.In, in.Out, in.Error) +} + func TestDispatcherBasic(t *testing.T) { t.Cleanup(leaktest.Check(t)) const numPeers = 5 - ch := make(chan p2p.Envelope, 100) - closeCh := make(chan struct{}) - defer close(closeCh) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + chans, ch := testChannel(100) d := NewDispatcher(ch) - go handleRequests(t, d, ch, closeCh) + go handleRequests(ctx, t, d, chans.Out) peers := createPeerSet(numPeers) wg := sync.WaitGroup{} @@ -38,7 +54,7 @@ func TestDispatcherBasic(t *testing.T) { wg.Add(1) go func(height int64) { defer wg.Done() - lb, err := d.LightBlock(context.Background(), height, peers[height-1]) + lb, err := d.LightBlock(ctx, height, peers[height-1]) require.NoError(t, err) require.NotNil(t, lb) require.Equal(t, lb.Height, height) @@ -52,31 +68,40 @@ func TestDispatcherBasic(t *testing.T) { func TestDispatcherReturnsNoBlock(t *testing.T) { t.Cleanup(leaktest.Check(t)) - ch := make(chan p2p.Envelope, 100) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + chans, ch := testChannel(100) + d := NewDispatcher(ch) - doneCh := make(chan struct{}) - peer := factory.NodeID("a") + + peer := factory.NodeID(t, "a") go func() { - <-ch - require.NoError(t, d.Respond(nil, peer)) - close(doneCh) + <-chans.Out + require.NoError(t, d.Respond(ctx, nil, peer)) + cancel() }() - lb, err := d.LightBlock(context.Background(), 1, peer) - <-doneCh + lb, err := d.LightBlock(ctx, 1, peer) + <-ctx.Done() require.Nil(t, lb) - require.Nil(t, err) + require.NoError(t, err) } func TestDispatcherTimeOutWaitingOnLightBlock(t *testing.T) { t.Cleanup(leaktest.Check(t)) - ch := make(chan p2p.Envelope, 100) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + _, ch := testChannel(100) d := NewDispatcher(ch) - peer := factory.NodeID("a") + 
peer := factory.NodeID(t, "a") - ctx, cancelFunc := context.WithTimeout(context.Background(), 10*time.Millisecond) + ctx, cancelFunc := context.WithTimeout(ctx, 10*time.Millisecond) defer cancelFunc() lb, err := d.LightBlock(ctx, 1, peer) @@ -89,13 +114,15 @@ func TestDispatcherTimeOutWaitingOnLightBlock(t *testing.T) { func TestDispatcherProviders(t *testing.T) { t.Cleanup(leaktest.Check(t)) - ch := make(chan p2p.Envelope, 100) chainID := "test-chain" - closeCh := make(chan struct{}) - defer close(closeCh) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + chans, ch := testChannel(100) d := NewDispatcher(ch) - go handleRequests(t, d, ch, closeCh) + go handleRequests(ctx, t, d, chans.Out) peers := createPeerSet(5) providers := make([]*BlockProvider, len(peers)) @@ -106,7 +133,7 @@ func TestDispatcherProviders(t *testing.T) { for i, p := range providers { assert.Equal(t, string(peers[i]), p.String(), i) - lb, err := p.LightBlock(context.Background(), 10) + lb, err := p.LightBlock(ctx, 10) assert.NoError(t, err) assert.NotNil(t, lb) } @@ -114,6 +141,10 @@ func TestDispatcherProviders(t *testing.T) { func TestPeerListBasic(t *testing.T) { t.Cleanup(leaktest.Check(t)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + peerList := newPeerList() assert.Zero(t, peerList.Len()) numPeers := 10 @@ -176,7 +207,10 @@ func TestEmptyPeerListReturnsWhenContextCanceled(t *testing.T) { peerList := newPeerList() require.Zero(t, peerList.Len()) doneCh := make(chan struct{}) - ctx := context.Background() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + wrapped, cancel := context.WithCancel(ctx) go func() { peerList.Pop(wrapped) @@ -199,6 +233,9 @@ func TestEmptyPeerListReturnsWhenContextCanceled(t *testing.T) { func TestPeerListConcurrent(t *testing.T) { t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + peerList := newPeerList() numPeers := 10 @@ -229,7 +266,6 @@ func TestPeerListConcurrent(t *testing.T) { // we use a context with cancel and a separate go routine to wait for all // the other goroutines to close. 
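Throughout these tests the ad-hoc doneCh/closeCh plumbing gives way to one cancellable context per test: goroutines watch ctx.Done(), and a single cancel(), deferred or called early as in TestDispatcherReturnsNoBlock, is the shutdown signal. A skeletal version of the pattern:

package example

import (
	"context"
	"testing"
)

func TestWorkerStops(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel() // always reached, so the goroutine never leaks

	started := make(chan struct{})
	go func() {
		close(started)
		<-ctx.Done() // replaces `<-doneCh`
	}()

	<-started
	cancel() // replaces `close(doneCh)`
}
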
- ctx, cancel := context.WithCancel(context.Background()) go func() { wg.Wait(); cancel() }() select { @@ -264,17 +300,17 @@ func TestPeerListRemove(t *testing.T) { // handleRequests is a helper function usually run in a separate go routine to // imitate the expected responses of the reactor wired to the dispatcher -func handleRequests(t *testing.T, d *Dispatcher, ch chan p2p.Envelope, closeCh chan struct{}) { +func handleRequests(ctx context.Context, t *testing.T, d *Dispatcher, ch chan p2p.Envelope) { t.Helper() for { select { case request := <-ch: height := request.Message.(*ssproto.LightBlockRequest).Height peer := request.To - resp := mockLBResp(t, peer, int64(height), time.Now()) + resp := mockLBResp(ctx, t, peer, int64(height), time.Now()) block, _ := resp.block.ToProto() - require.NoError(t, d.Respond(block, resp.peer)) - case <-closeCh: + require.NoError(t, d.Respond(ctx, block, resp.peer)) + case <-ctx.Done(): return } } diff --git a/internal/statesync/mocks/state_provider.go b/internal/statesync/mocks/state_provider.go index b8d6816310..582ebcd9c4 100644 --- a/internal/statesync/mocks/state_provider.go +++ b/internal/statesync/mocks/state_provider.go @@ -8,6 +8,8 @@ import ( mock "github.com/stretchr/testify/mock" state "github.com/tendermint/tendermint/internal/state" + testing "testing" + types "github.com/tendermint/tendermint/types" ) @@ -82,3 +84,13 @@ func (_m *StateProvider) State(ctx context.Context, height uint64) (state.State, return r0, r1 } + +// NewStateProvider creates a new instance of StateProvider. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. +func NewStateProvider(t testing.TB) *StateProvider { + mock := &StateProvider{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/internal/statesync/reactor.go b/internal/statesync/reactor.go index 7d30df942e..f87aed4d98 100644 --- a/internal/statesync/reactor.go +++ b/internal/statesync/reactor.go @@ -7,15 +7,16 @@ import ( "fmt" "runtime/debug" "sort" + "sync" "time" + abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" - dashcore "github.com/tendermint/tendermint/dashcore/rpc" + dashcore "github.com/tendermint/tendermint/dash/core" "github.com/tendermint/tendermint/internal/consensus" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/proxy" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/libs/log" @@ -28,60 +29,6 @@ import ( var ( _ service.Service = (*Reactor)(nil) _ p2p.Wrapper = (*ssproto.Message)(nil) - - // ChannelShims contains a map of ChannelDescriptorShim objects, where each - // object wraps a reference to a legacy p2p ChannelDescriptor and the corresponding - // p2p proto.Message the new p2p Channel is responsible for handling. - // - // - // TODO: Remove once p2p refactor is complete. 
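The regenerated StateProvider mock gains a constructor that ties the mock to the test lifecycle. A hand-rolled equivalent of what the generator emits, assuming testify's mock package:

package example

import (
	"testing"

	"github.com/stretchr/testify/mock"
)

type StateProvider struct{ mock.Mock }

// NewStateProvider binds the mock to t, so mock failures are reported
// through the test rather than panicking, and registers a cleanup hook that
// fails the test if an expected call never happened.
func NewStateProvider(t testing.TB) *StateProvider {
	m := &StateProvider{}
	m.Mock.Test(t)

	t.Cleanup(func() { m.AssertExpectations(t) })
	return m
}
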
- // ref: https://github.com/tendermint/tendermint/issues/5670 - ChannelShims = map[p2p.ChannelID]*p2p.ChannelDescriptorShim{ - SnapshotChannel: { - MsgType: new(ssproto.Message), - Descriptor: &p2p.ChannelDescriptor{ - ID: byte(SnapshotChannel), - Priority: 6, - SendQueueCapacity: 10, - RecvMessageCapacity: snapshotMsgSize, - RecvBufferCapacity: 128, - MaxSendBytes: 400, - }, - }, - ChunkChannel: { - MsgType: new(ssproto.Message), - Descriptor: &p2p.ChannelDescriptor{ - ID: byte(ChunkChannel), - Priority: 3, - SendQueueCapacity: 4, - RecvMessageCapacity: chunkMsgSize, - RecvBufferCapacity: 128, - MaxSendBytes: 400, - }, - }, - LightBlockChannel: { - MsgType: new(ssproto.Message), - Descriptor: &p2p.ChannelDescriptor{ - ID: byte(LightBlockChannel), - Priority: 5, - SendQueueCapacity: 10, - RecvMessageCapacity: lightBlockMsgSize, - RecvBufferCapacity: 128, - MaxSendBytes: 400, - }, - }, - ParamsChannel: { - MsgType: new(ssproto.Message), - Descriptor: &p2p.ChannelDescriptor{ - ID: byte(ParamsChannel), - Priority: 2, - SendQueueCapacity: 10, - RecvMessageCapacity: paramMsgSize, - RecvBufferCapacity: 128, - MaxSendBytes: 400, - }, - }, - } ) const ( @@ -128,6 +75,48 @@ const ( backfillSleepTime = 1 * time.Second ) +func getChannelDescriptors() map[p2p.ChannelID]*p2p.ChannelDescriptor { + return map[p2p.ChannelID]*p2p.ChannelDescriptor{ + SnapshotChannel: { + ID: SnapshotChannel, + MessageType: new(ssproto.Message), + Priority: 6, + SendQueueCapacity: 10, + RecvMessageCapacity: snapshotMsgSize, + RecvBufferCapacity: 128, + Name: "snapshot", + }, + ChunkChannel: { + ID: ChunkChannel, + Priority: 3, + MessageType: new(ssproto.Message), + SendQueueCapacity: 4, + RecvMessageCapacity: chunkMsgSize, + RecvBufferCapacity: 128, + Name: "chunk", + }, + LightBlockChannel: { + ID: LightBlockChannel, + MessageType: new(ssproto.Message), + Priority: 5, + SendQueueCapacity: 10, + RecvMessageCapacity: lightBlockMsgSize, + RecvBufferCapacity: 128, + Name: "light-block", + }, + ParamsChannel: { + ID: ParamsChannel, + MessageType: new(ssproto.Message), + Priority: 2, + SendQueueCapacity: 10, + RecvMessageCapacity: paramMsgSize, + RecvBufferCapacity: 128, + Name: "params", + }, + } + +} + // Metricer defines an interface used for the rpc sync info query, please see statesync.metrics // for the details. type Metricer interface { @@ -144,6 +133,7 @@ type Metricer interface { // serving snapshots for other nodes. type Reactor struct { service.BaseService + logger log.Logger chainID string initialHeight int64 @@ -151,15 +141,17 @@ type Reactor struct { stateStore sm.Store blockStore *store.BlockStore - conn proxy.AppConnSnapshot - connQuery proxy.AppConnQuery - tempDir string - snapshotCh *p2p.Channel - chunkCh *p2p.Channel - blockCh *p2p.Channel - paramsCh *p2p.Channel - peerUpdates *p2p.PeerUpdates - closeCh chan struct{} + conn abciclient.Client + tempDir string + peerEvents p2p.PeerEventSubscriber + chCreator p2p.ChannelCreator + sendBlockError func(context.Context, p2p.PeerError) error + postSyncHook func(context.Context, sm.State) error + + // when true, the reactor will, during startup perform a + // statesync for this node, and otherwise just provide + // snapshots to other nodes. + needsStateSync bool // Dispatcher is used to multiplex light block requests and responses over multiple // peers used by the p2p state provider and in reverse sync. @@ -169,11 +161,15 @@ type Reactor struct { // These will only be set when a state sync is in progress. 
It is used to feed // received snapshots and chunks into the syncer and manage incoming and outgoing // providers. - mtx tmsync.RWMutex - syncer *syncer - providers map[types.NodeID]*BlockProvider - stateProvider StateProvider - + mtx sync.RWMutex + initSyncer func() *syncer + requestSnaphot func() error + syncer *syncer + providers map[types.NodeID]*BlockProvider + initStateProvider func(ctx context.Context, chainID string, initialHeight int64) error + stateProvider StateProvider + + eventBus *eventbus.EventBus metrics *Metrics backfillBlockTotal int64 backfilledBlocks int64 @@ -192,36 +188,36 @@ func NewReactor( initialHeight int64, cfg config.StateSyncConfig, logger log.Logger, - conn proxy.AppConnSnapshot, - connQuery proxy.AppConnQuery, - snapshotCh, chunkCh, blockCh, paramsCh *p2p.Channel, - peerUpdates *p2p.PeerUpdates, + conn abciclient.Client, + channelCreator p2p.ChannelCreator, + peerEvents p2p.PeerEventSubscriber, stateStore sm.Store, blockStore *store.BlockStore, tempDir string, ssMetrics *Metrics, + eventBus *eventbus.EventBus, + postSyncHook func(context.Context, sm.State) error, + needsStateSync bool, client dashcore.Client, csState *consensus.State, ) *Reactor { r := &Reactor{ + logger: logger, chainID: chainID, initialHeight: initialHeight, cfg: cfg, conn: conn, - connQuery: connQuery, - snapshotCh: snapshotCh, - chunkCh: chunkCh, - blockCh: blockCh, - paramsCh: paramsCh, - peerUpdates: peerUpdates, - closeCh: make(chan struct{}), + chCreator: channelCreator, + peerEvents: peerEvents, tempDir: tempDir, stateStore: stateStore, blockStore: blockStore, peers: newPeerList(), - dispatcher: NewDispatcher(blockCh.Out), providers: make(map[types.NodeID]*BlockProvider), metrics: ssMetrics, + eventBus: eventBus, + postSyncHook: postSyncHook, + needsStateSync: needsStateSync, dashCoreClient: client, csState: csState, } @@ -236,16 +232,103 @@ func NewReactor( // handle individual envelopes as to not have to deal with bounding workers or pools. // The caller must be sure to execute OnStop to ensure the outbound p2p Channels are // closed. No error is returned. -func (r *Reactor) OnStart() error { - go r.processSnapshotCh() +func (r *Reactor) OnStart(ctx context.Context) error { + // construct channels + chDesc := getChannelDescriptors() + snapshotCh, err := r.chCreator(ctx, chDesc[SnapshotChannel]) + if err != nil { + return err + } + chunkCh, err := r.chCreator(ctx, chDesc[ChunkChannel]) + if err != nil { + return err + } + blockCh, err := r.chCreator(ctx, chDesc[LightBlockChannel]) + if err != nil { + return err + } + paramsCh, err := r.chCreator(ctx, chDesc[ParamsChannel]) + if err != nil { + return err + } - go r.processChunkCh() + // define constructor and helper functions, that hold + // references to these channels for use later. This is not + // ideal. 
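The reactor constructor no longer receives four ready-made channels; instead OnStart builds them from getChannelDescriptors() through a ChannelCreator callback and stashes the handles in the helper closures defined just below (initSyncer, requestSnaphot, sendBlockError). A reduced sketch of that inversion, with simplified stand-ins for the internal p2p types:

package example

import "context"

// Descriptor, Channel and ChannelCreator are stand-ins; the real types live
// in internal/p2p.
type Descriptor struct {
	ID       uint16
	Name     string
	Priority int
}

type Channel struct{ Desc Descriptor }

type ChannelCreator func(context.Context, *Descriptor) (*Channel, error)

type reactor struct {
	chCreator ChannelCreator
	snapshots *Channel
	chunks    *Channel
}

// OnStart asks the creator for each channel at startup instead of being
// handed pre-wired channels in the constructor.
func (r *reactor) OnStart(ctx context.Context) error {
	var err error
	if r.snapshots, err = r.chCreator(ctx, &Descriptor{ID: 0x60, Name: "snapshot", Priority: 6}); err != nil {
		return err
	}
	if r.chunks, err = r.chCreator(ctx, &Descriptor{ID: 0x61, Name: "chunk", Priority: 3}); err != nil {
		return err
	}
	return nil
}
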
+ r.initSyncer = func() *syncer { + return &syncer{ + logger: r.logger, + stateProvider: r.stateProvider, + conn: r.conn, + snapshots: newSnapshotPool(), + snapshotCh: snapshotCh, + chunkCh: chunkCh, + tempDir: r.tempDir, + fetchers: r.cfg.Fetchers, + retryTimeout: r.cfg.ChunkRequestTimeout, + metrics: r.metrics, + } + } + r.dispatcher = NewDispatcher(blockCh) + r.requestSnaphot = func() error { + // request snapshots from all currently connected peers + return snapshotCh.Send(ctx, p2p.Envelope{ + Broadcast: true, + Message: &ssproto.SnapshotsRequest{}, + }) + } + r.sendBlockError = blockCh.SendError + + r.initStateProvider = func(ctx context.Context, chainID string, initialHeight int64) error { + + spLogger := r.logger.With("module", "stateprovider") + spLogger.Info("initializing state provider", + "trustHeight", r.cfg.TrustHeight, "useP2P", r.cfg.UseP2P) + + if r.cfg.UseP2P { + if err := r.waitForEnoughPeers(ctx, 2); err != nil { + return err + } + + peers := r.peers.All() + providers := make([]provider.Provider, len(peers)) + for idx, p := range peers { + providers[idx] = NewBlockProvider(p, chainID, r.dispatcher) + } + + stateProvider, err := NewP2PStateProvider(ctx, chainID, initialHeight, r.cfg.TrustHeight, providers, + paramsCh, r.logger.With("module", "stateprovider"), r.dashCoreClient) + if err != nil { + return fmt.Errorf("failed to initialize P2P state provider: %w", err) + } + r.stateProvider = stateProvider + return nil + } - go r.processBlockCh() + stateProvider, err := NewRPCStateProvider(ctx, chainID, initialHeight, r.cfg.RPCServers, r.cfg.TrustHeight, + spLogger, r.dashCoreClient) + if err != nil { + return fmt.Errorf("failed to initialize RPC state provider: %w", err) + } + r.stateProvider = stateProvider + return nil + } - go r.processParamsCh() + go r.processChannels(ctx, map[p2p.ChannelID]*p2p.Channel{ + SnapshotChannel: snapshotCh, + ChunkChannel: chunkCh, + LightBlockChannel: blockCh, + ParamsChannel: paramsCh, + }) + go r.processPeerUpdates(ctx, r.peerEvents(ctx)) - go r.processPeerUpdates() + if r.needsStateSync { + r.logger.Info("starting state sync") + if _, err := r.Sync(ctx); err != nil { + r.logger.Error("state sync failed; shutting down this node", "err", err) + return err + } + } return nil } @@ -255,21 +338,6 @@ func (r *Reactor) OnStart() error { func (r *Reactor) OnStop() { // tell the dispatcher to stop sending any more requests r.dispatcher.Close() - // wait for any remaining requests to complete - <-r.dispatcher.Done() - - // Close closeCh to signal to all spawned goroutines to gracefully exit. All - // p2p Channels should execute Close(). - close(r.closeCh) - - // Wait for all p2p Channels to be closed before returning. This ensures we - // can easily reason about synchronization of all p2p Channels and ensure no - // panics will occur. - <-r.peerUpdates.Done() - <-r.snapshotCh.Done() - <-r.chunkCh.Done() - <-r.blockCh.Done() - <-r.paramsCh.Done() } // Sync runs a state sync, fetching snapshots and providing chunks to the @@ -278,6 +346,15 @@ func (r *Reactor) OnStop() { // blocksync can commence. 
It will then proceed to backfill the necessary amount // of historical blocks before participating in consensus func (r *Reactor) Sync(ctx context.Context) (sm.State, error) { + if r.eventBus != nil { + if err := r.eventBus.PublishEventStateSyncStatus(types.EventDataStateSyncStatus{ + Complete: false, + Height: r.initialHeight, + }); err != nil { + return sm.State{}, err + } + } + // We need at least two peers (for cross-referencing of light blocks) before we can // begin state sync if err := r.waitForEnoughPeers(ctx, 2); err != nil { @@ -295,19 +372,9 @@ func (r *Reactor) Sync(ctx context.Context) (sm.State, error) { return sm.State{}, err } - r.syncer = newSyncer( - r.cfg, - r.Logger, - r.conn, - r.connQuery, - r.stateProvider, - r.snapshotCh.Out, - r.chunkCh.Out, - r.snapshotCh.Done(), - r.tempDir, - r.metrics, - ) + r.syncer = r.initSyncer() r.mtx.Unlock() + defer func() { r.mtx.Lock() // reset syncing objects at the close of Sync @@ -316,21 +383,7 @@ func (r *Reactor) Sync(ctx context.Context) (sm.State, error) { r.mtx.Unlock() }() - requestSnapshotsHook := func() { - // request snapshots from all currently connected peers - msg := p2p.Envelope{ - Broadcast: true, - Message: &ssproto.SnapshotsRequest{}, - } - - select { - case <-ctx.Done(): - case <-r.closeCh: - case r.snapshotCh.Out <- msg: - } - } - - state, commit, err := r.syncer.SyncAny(ctx, r.cfg.DiscoveryTime, requestSnapshotsHook) + state, commit, err := r.syncer.SyncAny(ctx, r.cfg.DiscoveryTime, r.requestSnaphot) if err != nil { return sm.State{}, err } @@ -340,19 +393,31 @@ func (r *Reactor) Sync(ctx context.Context) (sm.State, error) { return state, err } - err = r.stateStore.Bootstrap(state) - if err != nil { + if err := r.stateStore.Bootstrap(state); err != nil { return sm.State{}, fmt.Errorf("failed to bootstrap node with new state: %w", err) } - err = r.blockStore.SaveSeenCommit(state.LastBlockHeight, commit) - if err != nil { + if err := r.blockStore.SaveSeenCommit(state.LastBlockHeight, commit); err != nil { return sm.State{}, fmt.Errorf("failed to store last seen commit: %w", err) } - err = r.Backfill(ctx, state) - if err != nil { - r.Logger.Error("backfill failed. Proceeding optimistically...", "err", err) + if err := r.Backfill(ctx, state); err != nil { + r.logger.Error("backfill failed. 
Proceeding optimistically...", "err", err) + } + + if r.eventBus != nil { + if err := r.eventBus.PublishEventStateSyncStatus(types.EventDataStateSyncStatus{ + Complete: true, + Height: state.LastBlockHeight, + }); err != nil { + return sm.State{}, err + } + } + + if r.postSyncHook != nil { + if err := r.postSyncHook(ctx, state); err != nil { + return sm.State{}, err + } } return state, nil @@ -401,7 +466,7 @@ func (r *Reactor) backfill( sleepTime time.Duration, lightBlockResponseTimeout time.Duration, ) error { - r.Logger.Info("starting backfill process...", "startHeight", startHeight, + r.logger.Info("starting backfill process...", "startHeight", startHeight, "stopHeight", stopHeight, "stopTime", stopTime, "trustedBlockID", trustedBlockID) r.backfillBlockTotal = startHeight - stopHeight + 1 @@ -426,10 +491,12 @@ func (r *Reactor) backfill( go func() { for { select { + case <-ctx.Done(): + return case height := <-queue.nextHeight(): // pop the next peer of the list to send a request to peer := r.peers.Pop(ctx) - r.Logger.Debug("fetching next block", "height", height, "peer", peer) + r.logger.Debug("fetching next block", "height", height, "peer", peer) lb, err := func() (*types.LightBlock, error) { subCtx, reqCancel := context.WithTimeout(ctxWithCancel, lightBlockResponseTimeout) defer reqCancel() @@ -444,18 +511,18 @@ func (r *Reactor) backfill( if err != nil { queue.retry(height) if errors.Is(err, errNoConnectedPeers) { - r.Logger.Info("backfill: no connected peers to fetch light blocks from; sleeping...", + r.logger.Info("backfill: no connected peers to fetch light blocks from; sleeping...", "sleepTime", sleepTime) time.Sleep(sleepTime) } else { // we don't punish the peer as it might just have not responded in time - r.Logger.Info("backfill: error with fetching light block", + r.logger.Info("backfill: error with fetching light block", "height", height, "err", err) } continue } if lb == nil { - r.Logger.Info("backfill: peer didn't have block, fetching from another peer", "height", height) + r.logger.Info("backfill: peer didn't have block, fetching from another peer", "height", height) queue.retry(height) // As we are fetching blocks backwards, if this node doesn't have the block it likely doesn't // have any prior ones, thus we remove it from the peer list. @@ -467,12 +534,14 @@ func (r *Reactor) backfill( // hashes line up err = lb.ValidateBasic(chainID) if err != nil || lb.Height != height { - r.Logger.Info("backfill: fetched light block failed validate basic, removing peer...", + r.logger.Info("backfill: fetched light block failed validate basic, removing peer...", "err", err, "height", height) queue.retry(height) - r.blockCh.Error <- p2p.PeerError{ + if serr := r.sendBlockError(ctx, p2p.PeerError{ NodeID: peer, Err: fmt.Errorf("received invalid light block: %w", err), + }); serr != nil { + return } continue } @@ -482,7 +551,7 @@ func (r *Reactor) backfill( block: lb, peer: peer, }) - r.Logger.Debug("backfill: added light block to processing queue", "height", height) + r.logger.Debug("backfill: added light block to processing queue", "height", height) case <-queue.done(): return @@ -494,9 +563,6 @@ func (r *Reactor) backfill( // verify all light blocks for { select { - case <-r.closeCh: - queue.close() - return nil case <-ctx.Done(): queue.close() return nil @@ -506,27 +572,27 @@ func (r *Reactor) backfill( // we equate to. 
ValidatorsHash and CommitHash have already been // checked in the `ValidateBasic` if w, g := trustedBlockID.Hash, resp.block.Hash(); !bytes.Equal(w, g) { - r.Logger.Info("received invalid light block. header hash doesn't match trusted LastBlockID", + r.logger.Info("received invalid light block. header hash doesn't match trusted LastBlockID", "trustedHash", w, "receivedHash", g, "height", resp.block.Height) - r.blockCh.Error <- p2p.PeerError{ + if err := r.sendBlockError(ctx, p2p.PeerError{ NodeID: resp.peer, Err: fmt.Errorf("received invalid light block. Expected hash %v, got: %v", w, g), + }); err != nil { + return nil } queue.retry(resp.block.Height) continue } // save the signed headers - err := r.blockStore.SaveSignedHeader(resp.block.SignedHeader, trustedBlockID) - if err != nil { + if err := r.blockStore.SaveSignedHeader(resp.block.SignedHeader, trustedBlockID); err != nil { return err } // check if there has been a change in the validator set if lastValidatorSet != nil && !bytes.Equal(resp.block.Header.ValidatorsHash, resp.block.Header.NextValidatorsHash) { // save all the heights that the last validator set was the same - err = r.stateStore.SaveValidatorSets(resp.block.Height+1, lastChangeHeight, lastValidatorSet) - if err != nil { + if err := r.stateStore.SaveValidatorSets(resp.block.Height+1, lastChangeHeight, lastValidatorSet); err != nil { return err } @@ -535,8 +601,8 @@ func (r *Reactor) backfill( } trustedBlockID = resp.block.LastBlockID - queue.success(resp.block.Height) - r.Logger.Info("backfill: verified and stored light block", "height", resp.block.Height) + queue.success() + r.logger.Info("backfill: verified and stored light block", "height", resp.block.Height) lastValidatorSet = resp.block.ValidatorSet @@ -560,7 +626,7 @@ func (r *Reactor) backfill( return err } - r.Logger.Info("successfully completed backfill process", "endHeight", queue.terminal.Height) + r.logger.Info("successfully completed backfill process", "endHeight", queue.terminal.Height) return nil } } @@ -569,12 +635,12 @@ func (r *Reactor) backfill( // handleSnapshotMessage handles envelopes sent from peers on the // SnapshotChannel. It returns an error only if the Envelope.Message is unknown // for this channel. This should never be called outside of handleMessage. 
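In the backfill workers above, each light-block request gets its own context.WithTimeout derived from the worker's context (lightBlockResponseTimeout), so a silent peer costs one retry instead of stalling the goroutine. The shape of that idiom:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// fetch stands in for dispatcher.LightBlock: it blocks until the peer
// answers or the context expires.
func fetch(ctx context.Context) error {
	select {
	case <-time.After(50 * time.Millisecond): // simulate a slow peer
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	parent := context.Background()

	// One timeout per request, derived from the long-lived parent context.
	subCtx, cancel := context.WithTimeout(parent, 10*time.Millisecond)
	defer cancel()

	err := fetch(subCtx)
	fmt.Println(errors.Is(err, context.DeadlineExceeded)) // true
}
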
-func (r *Reactor) handleSnapshotMessage(envelope p2p.Envelope) error { - logger := r.Logger.With("peer", envelope.From) +func (r *Reactor) handleSnapshotMessage(ctx context.Context, envelope *p2p.Envelope, snapshotCh *p2p.Channel) error { + logger := r.logger.With("peer", envelope.From) switch msg := envelope.Message.(type) { case *ssproto.SnapshotsRequest: - snapshots, err := r.recentSnapshots(recentSnapshots) + snapshots, err := r.recentSnapshots(ctx, recentSnapshots) if err != nil { logger.Error("failed to fetch snapshots", "err", err) return nil @@ -587,7 +653,8 @@ func (r *Reactor) handleSnapshotMessage(envelope p2p.Envelope) error { "format", snapshot.Format, "peer", envelope.From, ) - r.snapshotCh.Out <- p2p.Envelope{ + + if err := snapshotCh.Send(ctx, p2p.Envelope{ To: envelope.From, Message: &ssproto.SnapshotsResponse{ Height: snapshot.Height, @@ -596,6 +663,8 @@ func (r *Reactor) handleSnapshotMessage(envelope p2p.Envelope) error { Hash: snapshot.Hash, Metadata: snapshot.Metadata, }, + }); err != nil { + return err } } @@ -621,8 +690,8 @@ func (r *Reactor) handleSnapshotMessage(envelope p2p.Envelope) error { "failed to add snapshot", "height", msg.Height, "format", msg.Format, + "channel", envelope.ChannelID, "err", err, - "channel", r.snapshotCh.ID, ) return nil } @@ -638,23 +707,23 @@ func (r *Reactor) handleSnapshotMessage(envelope p2p.Envelope) error { // handleChunkMessage handles envelopes sent from peers on the ChunkChannel. // It returns an error only if the Envelope.Message is unknown for this channel. // This should never be called outside of handleMessage. -func (r *Reactor) handleChunkMessage(envelope p2p.Envelope) error { +func (r *Reactor) handleChunkMessage(ctx context.Context, envelope *p2p.Envelope, chunkCh *p2p.Channel) error { switch msg := envelope.Message.(type) { case *ssproto.ChunkRequest: - r.Logger.Debug( + r.logger.Debug( "received chunk request", "height", msg.Height, "format", msg.Format, "chunk", msg.Index, "peer", envelope.From, ) - resp, err := r.conn.LoadSnapshotChunkSync(context.Background(), abci.RequestLoadSnapshotChunk{ + resp, err := r.conn.LoadSnapshotChunk(ctx, &abci.RequestLoadSnapshotChunk{ Height: msg.Height, Format: msg.Format, Chunk: msg.Index, }) if err != nil { - r.Logger.Error( + r.logger.Error( "failed to load chunk", "height", msg.Height, "format", msg.Format, @@ -665,14 +734,14 @@ func (r *Reactor) handleChunkMessage(envelope p2p.Envelope) error { return nil } - r.Logger.Debug( + r.logger.Debug( "sending chunk", "height", msg.Height, "format", msg.Format, "chunk", msg.Index, "peer", envelope.From, ) - r.chunkCh.Out <- p2p.Envelope{ + if err := chunkCh.Send(ctx, p2p.Envelope{ To: envelope.From, Message: &ssproto.ChunkResponse{ Height: msg.Height, @@ -681,6 +750,8 @@ func (r *Reactor) handleChunkMessage(envelope p2p.Envelope) error { Chunk: resp.Chunk, Missing: resp.Chunk == nil, }, + }); err != nil { + return err } case *ssproto.ChunkResponse: @@ -688,11 +759,11 @@ func (r *Reactor) handleChunkMessage(envelope p2p.Envelope) error { defer r.mtx.RUnlock() if r.syncer == nil { - r.Logger.Debug("received unexpected chunk; no state sync in progress", "peer", envelope.From) + r.logger.Debug("received unexpected chunk; no state sync in progress", "peer", envelope.From) return nil } - r.Logger.Debug( + r.logger.Debug( "received chunk; adding to sync", "height", msg.Height, "format", msg.Format, @@ -707,7 +778,7 @@ func (r *Reactor) handleChunkMessage(envelope p2p.Envelope) error { Sender: envelope.From, }) if err != nil { - 
r.Logger.Error( + r.logger.Error( "failed to add chunk", "height", msg.Height, "format", msg.Format, @@ -725,48 +796,54 @@ func (r *Reactor) handleChunkMessage(envelope p2p.Envelope) error { return nil } -func (r *Reactor) handleLightBlockMessage(envelope p2p.Envelope) error { +func (r *Reactor) handleLightBlockMessage(ctx context.Context, envelope *p2p.Envelope, blockCh *p2p.Channel) error { switch msg := envelope.Message.(type) { case *ssproto.LightBlockRequest: - r.Logger.Info("received light block request", "height", msg.Height) + r.logger.Info("received light block request", "height", msg.Height) lb, err := r.fetchLightBlock(msg.Height) if err != nil { - r.Logger.Error("failed to retrieve light block", "err", err, "height", msg.Height) + r.logger.Error("failed to retrieve light block", "err", err, "height", msg.Height) return err } if lb == nil { - r.blockCh.Out <- p2p.Envelope{ + if err := blockCh.Send(ctx, p2p.Envelope{ To: envelope.From, Message: &ssproto.LightBlockResponse{ LightBlock: nil, }, + }); err != nil { + return err } return nil } lbproto, err := lb.ToProto() if err != nil { - r.Logger.Error("marshaling light block to proto", "err", err) + r.logger.Error("marshaling light block to proto", "err", err) return nil } // NOTE: If we don't have the light block we will send a nil light block // back to the requested node, indicating that we don't have it. - r.blockCh.Out <- p2p.Envelope{ + if err := blockCh.Send(ctx, p2p.Envelope{ To: envelope.From, Message: &ssproto.LightBlockResponse{ LightBlock: lbproto, }, + }); err != nil { + return err } - case *ssproto.LightBlockResponse: var height int64 if msg.LightBlock != nil { height = msg.LightBlock.SignedHeader.Header.Height } - r.Logger.Info("received light block response", "peer", envelope.From, "height", height) - if err := r.dispatcher.Respond(msg.LightBlock, envelope.From); err != nil { - r.Logger.Error("error processing light block response", "err", err, "height", height) + r.logger.Info("received light block response", "peer", envelope.From, "height", height) + if err := r.dispatcher.Respond(ctx, msg.LightBlock, envelope.From); err != nil { + if errors.Is(err, context.Canceled) { + return err + } + r.logger.Error("error processing light block response", "err", err, "height", height) } default: @@ -776,40 +853,43 @@ func (r *Reactor) handleLightBlockMessage(envelope p2p.Envelope) error { return nil } -func (r *Reactor) handleParamsMessage(envelope p2p.Envelope) error { +func (r *Reactor) handleParamsMessage(ctx context.Context, envelope *p2p.Envelope, paramsCh *p2p.Channel) error { switch msg := envelope.Message.(type) { case *ssproto.ParamsRequest: - r.Logger.Debug("received consensus params request", "height", msg.Height) + r.logger.Debug("received consensus params request", "height", msg.Height) cp, err := r.stateStore.LoadConsensusParams(int64(msg.Height)) if err != nil { - r.Logger.Error("failed to fetch requested consensus params", "err", err, "height", msg.Height) + r.logger.Error("failed to fetch requested consensus params", "err", err, "height", msg.Height) return nil } cpproto := cp.ToProto() - r.paramsCh.Out <- p2p.Envelope{ + if err := paramsCh.Send(ctx, p2p.Envelope{ To: envelope.From, Message: &ssproto.ParamsResponse{ Height: msg.Height, ConsensusParams: cpproto, }, + }); err != nil { + return err } - case *ssproto.ParamsResponse: r.mtx.RLock() defer r.mtx.RUnlock() - r.Logger.Debug("received consensus params response", "height", msg.Height) + r.logger.Debug("received consensus params response", "height", 
msg.Height) cp := types.ConsensusParamsFromProto(msg.ConsensusParams) if sp, ok := r.stateProvider.(*stateProviderP2P); ok { select { case sp.paramsRecvCh <- cp: + case <-ctx.Done(): + return ctx.Err() case <-time.After(time.Second): return errors.New("failed to send consensus params, stateprovider not ready for response") } } else { - r.Logger.Debug("received unexpected params response; using RPC state provider", "peer", envelope.From) + r.logger.Debug("received unexpected params response; using RPC state provider", "peer", envelope.From) } default: @@ -822,11 +902,11 @@ func (r *Reactor) handleParamsMessage(envelope p2p.Envelope) error { // handleMessage handles an Envelope sent from a peer on a specific p2p Channel. // It will handle errors and any possible panics gracefully. A caller can handle // any error returned by sending a PeerError on the respective channel. -func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) { +func (r *Reactor) handleMessage(ctx context.Context, envelope *p2p.Envelope, chans map[p2p.ChannelID]*p2p.Channel) (err error) { defer func() { if e := recover(); e != nil { err = fmt.Errorf("panic in processing message: %v", e) - r.Logger.Error( + r.logger.Error( "recovering from processing message panic", "err", err, "stack", string(debug.Stack()), @@ -834,80 +914,71 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err } }() - // r.Logger.Debug("received message", "msg", reflect.TypeOf(envelope.Message), "peer", envelope.From) + //r.logger.Debug("received message", "msg", reflect.TypeOf(envelope.Message), "peer", envelope.From) - switch chID { + switch envelope.ChannelID { case SnapshotChannel: - err = r.handleSnapshotMessage(envelope) - + err = r.handleSnapshotMessage(ctx, envelope, chans[SnapshotChannel]) case ChunkChannel: - err = r.handleChunkMessage(envelope) - + err = r.handleChunkMessage(ctx, envelope, chans[ChunkChannel]) case LightBlockChannel: - err = r.handleLightBlockMessage(envelope) - + err = r.handleLightBlockMessage(ctx, envelope, chans[LightBlockChannel]) case ParamsChannel: - err = r.handleParamsMessage(envelope) - + err = r.handleParamsMessage(ctx, envelope, chans[ParamsChannel]) default: - err = fmt.Errorf("unknown channel ID (%d) for envelope (%v)", chID, envelope) + err = fmt.Errorf("unknown channel ID (%d) for envelope (%v)", envelope.ChannelID, envelope) } return err } -// processSnapshotCh initiates a blocking process where we listen for and handle -// envelopes on the SnapshotChannel. -func (r *Reactor) processSnapshotCh() { - r.processCh(r.snapshotCh, "snapshot") -} - -// processChunkCh initiates a blocking process where we listen for and handle -// envelopes on the ChunkChannel. -func (r *Reactor) processChunkCh() { - r.processCh(r.chunkCh, "chunk") -} - -// processBlockCh initiates a blocking process where we listen for and handle -// envelopes on the LightBlockChannel. -func (r *Reactor) processBlockCh() { - r.processCh(r.blockCh, "light block") -} - -func (r *Reactor) processParamsCh() { - r.processCh(r.paramsCh, "consensus params") -} - // processCh routes state sync messages to their respective handlers. Any error // encountered during message execution will result in a PeerError being sent on // the respective channel. When the reactor is stopped, we will catch the signal // and close the p2p Channel gracefully. 
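handleMessage keeps its deferred-recover guard through the refactor: a panicking handler is converted into an ordinary error that the processing loop can report as a PeerError instead of crashing the reactor. A minimal reproduction of the pattern:

package main

import "fmt"

// handle converts a panic in work() into a returned error via the named
// return value and a deferred recover, exactly the structure used above.
func handle(work func()) (err error) {
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf("panic in processing message: %v", e)
		}
	}()
	work()
	return nil
}

func main() {
	fmt.Println(handle(func() { panic("malformed envelope") }))
	// panic in processing message: malformed envelope
	fmt.Println(handle(func() {})) // <nil>
}
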
-func (r *Reactor) processCh(ch *p2p.Channel, chName string) { - defer ch.Close() +func (r *Reactor) processChannels(ctx context.Context, chanTable map[p2p.ChannelID]*p2p.Channel) { + // make sure that the iterator gets cleaned up in case of error + ctx, cancel := context.WithCancel(ctx) + defer cancel() - for { - select { - case envelope := <-ch.In: - if err := r.handleMessage(ch.ID, envelope); err != nil { - r.Logger.Error(fmt.Sprintf("failed to process %s message", chName), - "ch_id", ch.ID, "envelope", envelope, "err", err) - ch.Error <- p2p.PeerError{ - NodeID: envelope.From, - Err: err, - } - } + chs := make([]*p2p.Channel, 0, len(chanTable)) + for key := range chanTable { + chs = append(chs, chanTable[key]) + } - case <-r.closeCh: - r.Logger.Debug(fmt.Sprintf("stopped listening on %s channel; closing...", chName)) - return + iter := p2p.MergedChannelIterator(ctx, chs...) + for iter.Next(ctx) { + envelope := iter.Envelope() + if err := r.handleMessage(ctx, envelope, chanTable); err != nil { + ch, ok := chanTable[envelope.ChannelID] + if !ok { + r.logger.Error("received impossible message", + "envelope_from", envelope.From, + "envelope_ch", envelope.ChannelID, + "num_chs", len(chanTable), + "err", err, + ) + return + } + r.logger.Error("failed to process message", + "err", err, + "channel", ch.String(), + "ch_id", envelope.ChannelID, + "envelope", envelope) + if serr := ch.SendError(ctx, p2p.PeerError{ + NodeID: envelope.From, + Err: err, + }); serr != nil { + return + } } } } // processPeerUpdate processes a PeerUpdate, returning an error upon failing to // handle the PeerUpdate or if a panic is recovered. -func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { - r.Logger.Info("received peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status) +func (r *Reactor) processPeerUpdate(ctx context.Context, peerUpdate p2p.PeerUpdate) { + r.logger.Info("received peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status) switch peerUpdate.Status { case p2p.PeerStatusUp: @@ -919,7 +990,7 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { r.peers.Append(peerUpdate.NodeID) } else { - r.Logger.Error("could not use peer for statesync", "peer", peerUpdate.NodeID) + r.logger.Error("could not use peer for statesync", "peer", peerUpdate.NodeID) } case p2p.PeerStatusDown: @@ -928,6 +999,7 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { r.mtx.Lock() defer r.mtx.Unlock() + if r.syncer == nil { return } @@ -936,10 +1008,11 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { case p2p.PeerStatusUp: newProvider := NewBlockProvider(peerUpdate.NodeID, r.chainID, r.dispatcher) + r.providers[peerUpdate.NodeID] = newProvider - err := r.syncer.AddPeer(peerUpdate.NodeID) + err := r.syncer.AddPeer(ctx, peerUpdate.NodeID) if err != nil { - r.Logger.Error("error adding peer to syncer", "error", err) + r.logger.Error("error adding peer to syncer", "error", err) return } if sp, ok := r.stateProvider.(*stateProviderP2P); ok { @@ -952,30 +1025,26 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { delete(r.providers, peerUpdate.NodeID) r.syncer.RemovePeer(peerUpdate.NodeID) } - r.Logger.Info("processed peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status) + r.logger.Info("processed peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status) } // processPeerUpdates initiates a blocking process where we listen for and handle // PeerUpdate messages. 
When the reactor is stopped, we will catch the signal and // close the p2p PeerUpdatesCh gracefully. -func (r *Reactor) processPeerUpdates() { - defer r.peerUpdates.Close() - +func (r *Reactor) processPeerUpdates(ctx context.Context, peerUpdates *p2p.PeerUpdates) { for { select { - case peerUpdate := <-r.peerUpdates.Updates(): - r.processPeerUpdate(peerUpdate) - - case <-r.closeCh: - r.Logger.Debug("stopped listening on peer updates channel; closing...") + case <-ctx.Done(): return + case peerUpdate := <-peerUpdates.Updates(): + r.processPeerUpdate(ctx, peerUpdate) } } } // recentSnapshots fetches the n most recent snapshots from the app -func (r *Reactor) recentSnapshots(n uint32) ([]*snapshot, error) { - resp, err := r.conn.ListSnapshotsSync(context.Background(), abci.RequestListSnapshots{}) +func (r *Reactor) recentSnapshots(ctx context.Context, n uint32) ([]*snapshot, error) { + resp, err := r.conn.ListSnapshots(ctx, &abci.RequestListSnapshots{}) if err != nil { return nil, err } @@ -1057,13 +1126,10 @@ func (r *Reactor) waitForEnoughPeers(ctx context.Context, numPeers int) error { case <-ctx.Done(): return fmt.Errorf("operation canceled while waiting for peers after %.2fs [%d/%d]", time.Since(startAt).Seconds(), r.peers.Len(), numPeers) - case <-r.closeCh: - return fmt.Errorf("shutdown while waiting for peers after %.2fs [%d/%d]", - time.Since(startAt).Seconds(), r.peers.Len(), numPeers) case <-t.C: continue case <-logT.C: - r.Logger.Info("waiting for sufficient peers to start statesync", + r.logger.Info("waiting for sufficient peers to start statesync", "duration", time.Since(startAt).String(), "target", numPeers, "peers", r.peers.Len(), @@ -1075,36 +1141,6 @@ func (r *Reactor) waitForEnoughPeers(ctx context.Context, numPeers int) error { return nil } -func (r *Reactor) initStateProvider(ctx context.Context, chainID string, initialHeight int64) error { - var err error - spLogger := r.Logger.With("module", "stateprovider") - spLogger.Info("initializing state provider", "trustPeriod", r.cfg.TrustPeriod, - "trustHeight", r.cfg.TrustHeight, "useP2P", r.cfg.UseP2P) - - if r.cfg.UseP2P { - if err := r.waitForEnoughPeers(ctx, 2); err != nil { - return err - } - - peers := r.peers.All() - providers := make([]provider.Provider, len(peers)) - for idx, p := range peers { - providers[idx] = NewBlockProvider(p, chainID, r.dispatcher) - } - - r.stateProvider, err = NewP2PStateProvider(ctx, chainID, initialHeight, r.cfg.TrustHeight, providers, r.paramsCh.Out, spLogger, r.dashCoreClient) - if err != nil { - return fmt.Errorf("failed to initialize P2P state provider: %w", err) - } - } else { - r.stateProvider, err = NewRPCStateProvider(ctx, chainID, initialHeight, r.cfg.RPCServers, r.cfg.TrustHeight, spLogger, r.dashCoreClient) - if err != nil { - return fmt.Errorf("failed to initialize RPC state provider: %w", err) - } - } - return nil -} - func (r *Reactor) TotalSnapshots() int64 { r.mtx.RLock() defer r.mtx.RUnlock() diff --git a/internal/statesync/reactor_test.go b/internal/statesync/reactor_test.go index a77c9c5918..e64a72096c 100644 --- a/internal/statesync/reactor_test.go +++ b/internal/statesync/reactor_test.go @@ -8,19 +8,19 @@ import ( "testing" "time" + "github.com/dashevo/dashd-go/btcjson" "github.com/fortytw2/leaktest" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" - "github.com/dashevo/dashd-go/btcjson" + clientmocks "github.com/tendermint/tendermint/abci/client/mocks" abci "github.com/tendermint/tendermint/abci/types" 
"github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" - dashcore "github.com/tendermint/tendermint/dashcore/rpc" + dashcore "github.com/tendermint/tendermint/dash/core" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/proxy" - proxymocks "github.com/tendermint/tendermint/internal/proxy/mocks" smmocks "github.com/tendermint/tendermint/internal/state/mocks" "github.com/tendermint/tendermint/internal/statesync/mocks" "github.com/tendermint/tendermint/internal/store" @@ -33,8 +33,9 @@ import ( ) const ( - chainID = "test-chain" - llmqType = btcjson.LLMQType_5_60 + chainID = "test-chain" + llmqType = btcjson.LLMQType_5_60 + testAppVersion = 9 ) var ( @@ -45,8 +46,7 @@ type reactorTestSuite struct { reactor *Reactor syncer *syncer - conn *proxymocks.AppConnSnapshot - connQuery *proxymocks.AppConnQuery + conn *clientmocks.Client stateProvider *mocks.StateProvider snapshotChannel *p2p.Channel @@ -80,22 +80,16 @@ type reactorTestSuite struct { } func setup( + ctx context.Context, t *testing.T, - conn *proxymocks.AppConnSnapshot, - connQuery *proxymocks.AppConnQuery, + conn *clientmocks.Client, stateProvider *mocks.StateProvider, chBuf uint, ) *reactorTestSuite { t.Helper() if conn == nil { - conn = &proxymocks.AppConnSnapshot{} - } - if connQuery == nil { - connQuery = &proxymocks.AppConnQuery{} - } - if stateProvider == nil { - stateProvider = &mocks.StateProvider{} + conn = &clientmocks.Client{} } rts := &reactorTestSuite{ @@ -112,7 +106,6 @@ func setup( paramsOutCh: make(chan p2p.Envelope, chBuf), paramsPeerErrCh: make(chan p2p.PeerError, chBuf), conn: conn, - connQuery: connQuery, stateProvider: stateProvider, } @@ -159,66 +152,85 @@ func setup( rts.privVal = types.NewMockPV() rts.dashcoreClient = dashcore.NewMockClient(chainID, llmqType, rts.privVal, false) + chCreator := func(ctx context.Context, desc *p2p.ChannelDescriptor) (*p2p.Channel, error) { + switch desc.ID { + case SnapshotChannel: + return rts.snapshotChannel, nil + case ChunkChannel: + return rts.chunkChannel, nil + case LightBlockChannel: + return rts.blockChannel, nil + case ParamsChannel: + return rts.paramsChannel, nil + default: + return nil, fmt.Errorf("invalid channel; %v", desc.ID) + } + } + + logger := log.NewNopLogger() + rts.reactor = NewReactor( factory.DefaultTestChainID, 1, *cfg, - log.TestingLogger(), + logger.With("component", "reactor"), conn, - connQuery, - rts.snapshotChannel, - rts.chunkChannel, - rts.blockChannel, - rts.paramsChannel, - rts.peerUpdates, + chCreator, + func(context.Context) *p2p.PeerUpdates { return rts.peerUpdates }, rts.stateStore, rts.blockStore, "", m, + nil, // eventbus can be nil + nil, // post-sync-hook + false, // run Sync during Start() rts.dashcoreClient, nil, ) - rts.syncer = newSyncer( - *cfg, - log.NewNopLogger(), - conn, - connQuery, - stateProvider, - rts.snapshotOutCh, - rts.chunkOutCh, - rts.snapshotChannel.Done(), - "", - rts.reactor.metrics, - ) + rts.syncer = &syncer{ + logger: logger, + stateProvider: stateProvider, + conn: conn, + snapshots: newSnapshotPool(), + snapshotCh: rts.snapshotChannel, + chunkCh: rts.chunkChannel, + tempDir: t.TempDir(), + fetchers: cfg.Fetchers, + retryTimeout: cfg.ChunkRequestTimeout, + metrics: rts.reactor.metrics, + } - require.NoError(t, rts.reactor.Start()) + ctx, cancel := context.WithCancel(ctx) + + require.NoError(t, rts.reactor.Start(ctx)) require.True(t, rts.reactor.IsRunning()) - t.Cleanup(func() { - require.NoError(t, rts.reactor.Stop()) - rts.reactor.Wait() - 
require.False(t, rts.reactor.IsRunning()) - }) + t.Cleanup(cancel) + t.Cleanup(rts.reactor.Wait) + t.Cleanup(leaktest.Check(t)) return rts } func TestReactor_Sync(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() + const snapshotHeight = 7 - rts := setup(t, nil, nil, nil, 2) - chain := buildLightBlockChain(t, 1, 10, time.Now(), rts.privVal) + rts := setup(ctx, t, nil, nil, 100) + chain := buildLightBlockChain(ctx, t, 1, 10, time.Now(), rts.privVal) // app accepts any snapshot - rts.conn.On("OfferSnapshotSync", ctx, mock.AnythingOfType("types.RequestOfferSnapshot")). + rts.conn.On("OfferSnapshot", ctx, mock.IsType(&abci.RequestOfferSnapshot{})). Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ACCEPT}, nil) // app accepts every chunk - rts.conn.On("ApplySnapshotChunkSync", ctx, mock.AnythingOfType("types.RequestApplySnapshotChunk")). + rts.conn.On("ApplySnapshotChunk", ctx, mock.IsType(&abci.RequestApplySnapshotChunk{})). Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) // app query returns valid state app hash - rts.connQuery.On("InfoSync", ctx, proxy.RequestInfo).Return(&abci.ResponseInfo{ - AppVersion: 0, + rts.conn.On("Info", mock.Anything, &proxy.RequestInfo).Return(&abci.ResponseInfo{ + AppVersion: testAppVersion, LastBlockHeight: snapshotHeight, LastBlockAppHash: chain[snapshotHeight+1].AppHash, }, nil) @@ -230,10 +242,9 @@ func TestReactor_Sync(t *testing.T) { closeCh := make(chan struct{}) defer close(closeCh) - go handleLightBlockRequests(t, chain, rts.blockOutCh, - rts.blockInCh, closeCh, 0) - go graduallyAddPeers(rts.peerUpdateCh, closeCh, 1*time.Second) - go handleSnapshotRequests(t, rts.snapshotOutCh, rts.snapshotInCh, closeCh, []snapshot{ + go handleLightBlockRequests(ctx, t, chain, rts.blockOutCh, rts.blockInCh, closeCh, 0) + go graduallyAddPeers(ctx, t, rts.peerUpdateCh, closeCh, 1*time.Second) + go handleSnapshotRequests(ctx, t, rts.snapshotOutCh, rts.snapshotInCh, closeCh, []snapshot{ { Height: uint64(snapshotHeight), Format: 1, @@ -241,9 +252,9 @@ func TestReactor_Sync(t *testing.T) { }, }) - go handleChunkRequests(t, rts.chunkOutCh, rts.chunkInCh, closeCh, []byte("abc")) + go handleChunkRequests(ctx, t, rts.chunkOutCh, rts.chunkInCh, closeCh, []byte("abc")) - go handleConsensusParamsRequest(t, rts.paramsOutCh, rts.paramsInCh, closeCh) + go handleConsensusParamsRequest(ctx, t, rts.paramsOutCh, rts.paramsInCh, closeCh) // update the config to use the p2p provider rts.reactor.cfg.UseP2P = true @@ -252,16 +263,20 @@ func TestReactor_Sync(t *testing.T) { rts.reactor.cfg.DiscoveryTime = 1 * time.Second // Run state sync - _, err := rts.reactor.Sync(context.Background()) + _, err := rts.reactor.Sync(ctx) require.NoError(t, err) } func TestReactor_ChunkRequest_InvalidRequest(t *testing.T) { - rts := setup(t, nil, nil, nil, 2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, nil, nil, 2) rts.chunkInCh <- p2p.Envelope{ - From: "aa", - Message: &ssproto.SnapshotsRequest{}, + From: types.NodeID("aa"), + ChannelID: ChunkChannel, + Message: &ssproto.SnapshotsRequest{}, } response := <-rts.chunkPeerErrCh @@ -299,23 +314,28 @@ func TestReactor_ChunkRequest(t *testing.T) { }, } - for name, tc := range testcases { - tc := tc + bctx, bcancel := context.WithCancel(context.Background()) + defer bcancel() + for name, tc := range testcases { t.Run(name, func(t *testing.T) { + ctx, cancel := context.WithCancel(bctx) + 
defer cancel() + // mock ABCI connection to return local snapshots - conn := &proxymocks.AppConnSnapshot{} - conn.On("LoadSnapshotChunkSync", context.Background(), abci.RequestLoadSnapshotChunk{ + conn := &clientmocks.Client{} + conn.On("LoadSnapshotChunk", mock.Anything, &abci.RequestLoadSnapshotChunk{ Height: tc.request.Height, Format: tc.request.Format, Chunk: tc.request.Index, }).Return(&abci.ResponseLoadSnapshotChunk{Chunk: tc.chunk}, nil) - rts := setup(t, conn, nil, nil, 2) + rts := setup(ctx, t, conn, nil, 2) rts.chunkInCh <- p2p.Envelope{ - From: "aa", - Message: tc.request, + From: types.NodeID("aa"), + ChannelID: ChunkChannel, + Message: tc.request, } response := <-rts.chunkOutCh @@ -328,11 +348,15 @@ func TestReactor_ChunkRequest(t *testing.T) { } func TestReactor_SnapshotsRequest_InvalidRequest(t *testing.T) { - rts := setup(t, nil, nil, nil, 2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, nil, nil, 2) rts.snapshotInCh <- p2p.Envelope{ - From: "aa", - Message: &ssproto.ChunkRequest{}, + From: types.NodeID("aa"), + ChannelID: SnapshotChannel, + Message: &ssproto.ChunkRequest{}, } response := <-rts.snapshotPeerErrCh @@ -377,26 +401,32 @@ func TestReactor_SnapshotsRequest(t *testing.T) { }, }, } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() for name, tc := range testcases { tc := tc t.Run(name, func(t *testing.T) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + // mock ABCI connection to return local snapshots - conn := &proxymocks.AppConnSnapshot{} - conn.On("ListSnapshotsSync", context.Background(), abci.RequestListSnapshots{}).Return(&abci.ResponseListSnapshots{ + conn := &clientmocks.Client{} + conn.On("ListSnapshots", mock.Anything, &abci.RequestListSnapshots{}).Return(&abci.ResponseListSnapshots{ Snapshots: tc.snapshots, }, nil) - rts := setup(t, conn, nil, nil, 100) + rts := setup(ctx, t, conn, nil, 100) rts.snapshotInCh <- p2p.Envelope{ - From: "aa", - Message: &ssproto.SnapshotsRequest{}, + From: types.NodeID("aa"), + ChannelID: SnapshotChannel, + Message: &ssproto.SnapshotsRequest{}, } if len(tc.expectResponses) > 0 { - retryUntil(t, func() bool { return len(rts.snapshotOutCh) == len(tc.expectResponses) }, time.Second) + retryUntil(ctx, t, func() bool { return len(rts.snapshotOutCh) == len(tc.expectResponses) }, time.Second) } responses := make([]*ssproto.SnapshotsResponse, len(tc.expectResponses)) @@ -412,10 +442,14 @@ func TestReactor_SnapshotsRequest(t *testing.T) { } func TestReactor_LightBlockResponse(t *testing.T) { - rts := setup(t, nil, nil, nil, 2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, nil, nil, 2) var height int64 = 10 - h := factory.MakeRandomHeader() + // generates a random header + h := factory.MakeHeader(t, &types.Header{}) h.Height = height blockID := factory.MakeBlockIDWithHash(h.Hash()) stateID := types.StateID{ @@ -423,7 +457,7 @@ func TestReactor_LightBlockResponse(t *testing.T) { LastAppHash: h.AppHash, } vals, pv := types.RandValidatorSet(1) - vote, err := factory.MakeVote(pv[0], vals, h.ChainID, 0, h.Height, 0, 2, + vote, err := factory.MakeVote(ctx, pv[0], vals, h.ChainID, 0, h.Height, 0, 2, blockID, stateID) require.NoError(t, err) @@ -449,7 +483,8 @@ func TestReactor_LightBlockResponse(t *testing.T) { rts.stateStore.On("LoadValidators", height).Return(vals, nil) rts.blockInCh <- p2p.Envelope{ - From: "aa", + From: types.NodeID("aa"), + ChannelID: LightBlockChannel, Message: 
&ssproto.LightBlockRequest{ Height: 10, }, @@ -470,7 +505,10 @@ func TestReactor_LightBlockResponse(t *testing.T) { } func TestReactor_BlockProviders(t *testing.T) { - rts := setup(t, nil, nil, nil, 2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, nil, nil, 2) rts.peerUpdateCh <- p2p.PeerUpdate{ NodeID: "aa", Status: p2p.PeerStatusUp, @@ -495,8 +533,8 @@ func TestReactor_BlockProviders(t *testing.T) { closeCh := make(chan struct{}) defer close(closeCh) - chain := buildLightBlockChain(t, 1, 10, time.Now(), rts.privVal) - go handleLightBlockRequests(t, chain, rts.blockOutCh, rts.blockInCh, closeCh, 0) + chain := buildLightBlockChain(ctx, t, 1, 10, time.Now(), rts.privVal) + go handleLightBlockRequests(ctx, t, chain, rts.blockOutCh, rts.blockInCh, closeCh, 0) peers := rts.reactor.peers.All() require.Len(t, peers, 2) @@ -513,7 +551,7 @@ func TestReactor_BlockProviders(t *testing.T) { go func(t *testing.T, p provider.Provider) { defer wg.Done() for height := 2; height < 10; height++ { - lb, err := p.LightBlock(context.Background(), int64(height)) + lb, err := p.LightBlock(ctx, int64(height)) require.NoError(t, err) require.NotNil(t, lb) require.Equal(t, height, int(lb.Height)) @@ -521,7 +559,6 @@ func TestReactor_BlockProviders(t *testing.T) { }(t, p) } - ctx, cancel := context.WithCancel(context.Background()) go func() { wg.Wait(); cancel() }() select { @@ -535,9 +572,10 @@ func TestReactor_BlockProviders(t *testing.T) { } func TestReactor_StateProviderP2P(t *testing.T) { - t.Cleanup(leaktest.CheckTimeout(t, 1*time.Minute)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - rts := setup(t, nil, nil, nil, 2) + rts := setup(ctx, t, nil, nil, 2) // make syncer non nil else test won't think we are state syncing rts.reactor.syncer = rts.syncer peerA := types.NodeID(strings.Repeat("a", 2*types.NodeIDByteLength)) @@ -554,9 +592,9 @@ func TestReactor_StateProviderP2P(t *testing.T) { closeCh := make(chan struct{}) defer close(closeCh) - chain := buildLightBlockChain(t, 1, 10, time.Now(), rts.privVal) - go handleLightBlockRequests(t, chain, rts.blockOutCh, rts.blockInCh, closeCh, 0) - go handleConsensusParamsRequest(t, rts.paramsOutCh, rts.paramsInCh, closeCh) + chain := buildLightBlockChain(ctx, t, 1, 10, time.Now(), rts.privVal) + go handleLightBlockRequests(ctx, t, chain, rts.blockOutCh, rts.blockInCh, closeCh, 0) + go handleConsensusParamsRequest(ctx, t, rts.paramsOutCh, rts.paramsInCh, closeCh) rts.reactor.cfg.UseP2P = true rts.reactor.cfg.TrustHeight = 1 @@ -569,10 +607,7 @@ func TestReactor_StateProviderP2P(t *testing.T) { } require.True(t, rts.reactor.peers.Len() >= 2, "peer network not configured") - bctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - ictx, cancel := context.WithTimeout(bctx, time.Second) + ictx, cancel := context.WithTimeout(ctx, time.Second) defer cancel() rts.reactor.mtx.Lock() @@ -605,13 +640,19 @@ func TestReactor_StateProviderP2P(t *testing.T) { } func TestReactor_Backfill(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // test backfill algorithm with varying failure rates [0, 10] failureRates := []int{0, 2, 9} for _, failureRate := range failureRates { failureRate := failureRate t.Run(fmt.Sprintf("failure rate: %d", failureRate), func(t *testing.T) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + t.Cleanup(leaktest.CheckTimeout(t, 1*time.Minute)) - rts := setup(t, nil, nil, nil, 21) + rts := setup(ctx, t, nil, 
nil, 21) var ( startHeight int64 = 20 @@ -643,15 +684,15 @@ func TestReactor_Backfill(t *testing.T) { return nil }) - chain := buildLightBlockChain(t, stopHeight-1, startHeight+1, stopTime, rts.privVal) + chain := buildLightBlockChain(ctx, t, stopHeight-1, startHeight+1, stopTime, rts.privVal) closeCh := make(chan struct{}) defer close(closeCh) - go handleLightBlockRequests(t, chain, rts.blockOutCh, + go handleLightBlockRequests(ctx, t, chain, rts.blockOutCh, rts.blockInCh, closeCh, failureRate) err := rts.reactor.backfill( - context.Background(), + ctx, factory.DefaultTestChainID, startHeight, stopHeight, @@ -688,8 +729,8 @@ func TestReactor_Backfill(t *testing.T) { // retryUntil will continue to evaluate fn and will return successfully when true // or fail when the timeout is reached. -func retryUntil(t *testing.T, fn func() bool, timeout time.Duration) { - ctx, cancel := context.WithTimeout(context.Background(), timeout) +func retryUntil(ctx context.Context, t *testing.T, fn func() bool, timeout time.Duration) { + ctx, cancel := context.WithTimeout(ctx, timeout) defer cancel() for { @@ -700,7 +741,9 @@ func retryUntil(t *testing.T, fn func() bool, timeout time.Duration) { } } -func handleLightBlockRequests(t *testing.T, +func handleLightBlockRequests( + ctx context.Context, + t *testing.T, chain map[int64]*types.LightBlock, receiving chan p2p.Envelope, sending chan p2p.Envelope, @@ -710,36 +753,53 @@ func handleLightBlockRequests(t *testing.T, errorCount := 0 for { select { + case <-ctx.Done(): + return case envelope := <-receiving: if msg, ok := envelope.Message.(*ssproto.LightBlockRequest); ok { if requests%10 >= failureRate { lb, err := chain[int64(msg.Height)].ToProto() require.NoError(t, err) - sending <- p2p.Envelope{ - From: envelope.To, + select { + case sending <- p2p.Envelope{ + From: envelope.To, + ChannelID: LightBlockChannel, Message: &ssproto.LightBlockResponse{ LightBlock: lb, }, + }: + case <-ctx.Done(): + return } } else { switch errorCount % 3 { case 0: // send a different block vals, pv := types.RandValidatorSet(3) - _, _, lb := mockLB(t, int64(msg.Height), factory.DefaultTestTime, factory.MakeBlockID(), vals, pv) + _, _, lb := mockLB(ctx, t, int64(msg.Height), factory.DefaultTestTime, factory.MakeBlockID(), vals, pv) differentLB, err := lb.ToProto() require.NoError(t, err) - sending <- p2p.Envelope{ - From: envelope.To, + select { + case sending <- p2p.Envelope{ + From: envelope.To, + ChannelID: LightBlockChannel, Message: &ssproto.LightBlockResponse{ LightBlock: differentLB, }, + }: + case <-ctx.Done(): + return } case 1: // send nil block i.e.
pretend we don't have it - sending <- p2p.Envelope{ - From: envelope.To, + select { + case sending <- p2p.Envelope{ + From: envelope.To, + ChannelID: LightBlockChannel, Message: &ssproto.LightBlockResponse{ LightBlock: nil, }, + }: + case <-ctx.Done(): + return } case 2: // don't do anything } @@ -753,22 +813,38 @@ func handleLightBlockRequests(t *testing.T, } } -func handleConsensusParamsRequest(t *testing.T, receiving, sending chan p2p.Envelope, closeCh chan struct{}) { +func handleConsensusParamsRequest( + ctx context.Context, + t *testing.T, + receiving, sending chan p2p.Envelope, + closeCh chan struct{}, +) { t.Helper() params := types.DefaultConsensusParams() paramsProto := params.ToProto() for { select { + case <-ctx.Done(): + return case envelope := <-receiving: - t.Log("received consensus params request") msg, ok := envelope.Message.(*ssproto.ParamsRequest) - require.True(t, ok) - sending <- p2p.Envelope{ - From: envelope.To, + if !ok { + t.Errorf("message was %T which is not a params request", envelope.Message) + return + } + select { + case sending <- p2p.Envelope{ + From: envelope.To, + ChannelID: ParamsChannel, Message: &ssproto.ParamsResponse{ Height: msg.Height, ConsensusParams: paramsProto, }, + }: + case <-ctx.Done(): + return + case <-closeCh: + return } case <-closeCh: @@ -777,7 +853,8 @@ func handleConsensusParamsRequest(t *testing.T, receiving, sending chan p2p.Enve } } -func buildLightBlockChain(t *testing.T, fromHeight, toHeight int64, startTime time.Time, privVal *types.MockPV) map[int64]*types.LightBlock { +func buildLightBlockChain(ctx context.Context, t *testing.T, fromHeight, toHeight int64, startTime time.Time, privVal *types.MockPV) map[int64]*types.LightBlock { + t.Helper() chain := make(map[int64]*types.LightBlock, toHeight-fromHeight) lastBlockID := factory.MakeBlockID() blockTime := startTime.Add(time.Duration(fromHeight-toHeight) * time.Minute) @@ -785,22 +862,24 @@ func buildLightBlockChain(t *testing.T, fromHeight, toHeight int64, startTime ti for height := fromHeight; height < toHeight; height++ { pk, _ := pv[0].GetPrivateKey(context.Background(), vals.QuorumHash) privVal.UpdatePrivateKey(context.Background(), pk, vals.QuorumHash, vals.ThresholdPublicKey, height) - vals, pv, chain[height] = mockLB(t, height, blockTime, lastBlockID, vals, pv) + vals, pv, chain[height] = mockLB(ctx, t, height, blockTime, lastBlockID, vals, pv) lastBlockID = factory.MakeBlockIDWithHash(chain[height].Header.Hash()) blockTime = blockTime.Add(1 * time.Minute) } return chain } -func mockLB(t *testing.T, height int64, time time.Time, lastBlockID types.BlockID, +func mockLB(ctx context.Context, t *testing.T, height int64, time time.Time, lastBlockID types.BlockID, currentVals *types.ValidatorSet, currentPrivVals []types.PrivValidator, ) (*types.ValidatorSet, []types.PrivValidator, *types.LightBlock) { - header, err := factory.MakeHeader(&types.Header{ + t.Helper() + header := factory.MakeHeader(t, &types.Header{ Height: height, LastBlockID: lastBlockID, Time: time, }) - require.NoError(t, err) + header.Version.App = testAppVersion + nextVals, nextPrivVals := types.RandValidatorSet(3) header.ValidatorsHash = currentVals.Hash() header.NextValidatorsHash = nextVals.Hash() @@ -811,7 +890,7 @@ func mockLB(t *testing.T, height int64, time time.Time, lastBlockID types.BlockI LastAppHash: header.AppHash, } voteSet := types.NewVoteSet(factory.DefaultTestChainID, height, 0, tmproto.PrecommitType, currentVals, stateID) - commit, err := factory.MakeCommit(lastBlockID, height, 0, voteSet, 
currentVals, currentPrivVals, stateID) + commit, err := factory.MakeCommit(ctx, lastBlockID, height, 0, voteSet, currentVals, currentPrivVals, stateID) require.NoError(t, err) return nextVals, nextPrivVals, &types.LightBlock{ SignedHeader: &types.SignedHeader{ @@ -825,6 +904,8 @@ func mockLB(t *testing.T, height int64, time time.Time, lastBlockID types.BlockI // graduallyAddPeers delivers a new randomly-generated peer update on peerUpdateCh once // per interval, until closeCh is closed. Each peer update is assigned a random node ID. func graduallyAddPeers( + ctx context.Context, + t *testing.T, peerUpdateCh chan p2p.PeerUpdate, closeCh chan struct{}, interval time.Duration, @@ -832,9 +913,13 @@ func graduallyAddPeers( ticker := time.NewTicker(interval) for { select { + case <-ctx.Done(): + return + case <-closeCh: + return case <-ticker.C: peerUpdateCh <- p2p.PeerUpdate{ - NodeID: factory.RandomNodeID(), + NodeID: factory.RandomNodeID(t), Status: p2p.PeerStatusUp, Channels: p2p.ChannelIDSet{ SnapshotChannel: struct{}{}, @@ -843,13 +928,12 @@ func graduallyAddPeers( ParamsChannel: struct{}{}, }, } - case <-closeCh: - return } } } func handleSnapshotRequests( + ctx context.Context, t *testing.T, receivingCh chan p2p.Envelope, sendingCh chan p2p.Envelope, @@ -859,12 +943,17 @@ func handleSnapshotRequests( t.Helper() for { select { + case <-ctx.Done(): + return + case <-closeCh: + return case envelope := <-receivingCh: _, ok := envelope.Message.(*ssproto.SnapshotsRequest) require.True(t, ok) for _, snapshot := range snapshots { sendingCh <- p2p.Envelope{ - From: envelope.To, + From: envelope.To, + ChannelID: SnapshotChannel, Message: &ssproto.SnapshotsResponse{ Height: snapshot.Height, Format: snapshot.Format, @@ -874,13 +963,12 @@ func handleSnapshotRequests( }, } } - case <-closeCh: - return } } } func handleChunkRequests( + ctx context.Context, t *testing.T, receivingCh chan p2p.Envelope, sendingCh chan p2p.Envelope, @@ -890,11 +978,16 @@ func handleChunkRequests( t.Helper() for { select { + case <-ctx.Done(): + return + case <-closeCh: + return case envelope := <-receivingCh: msg, ok := envelope.Message.(*ssproto.ChunkRequest) require.True(t, ok) sendingCh <- p2p.Envelope{ - From: envelope.To, + From: envelope.To, + ChannelID: ChunkChannel, Message: &ssproto.ChunkResponse{ Height: msg.Height, Format: msg.Format, @@ -904,8 +997,6 @@ func handleChunkRequests( }, } - case <-closeCh: - return } } } diff --git a/internal/statesync/snapshots.go b/internal/statesync/snapshots.go index 4c0f4966b1..bc309bce46 100644 --- a/internal/statesync/snapshots.go +++ b/internal/statesync/snapshots.go @@ -6,8 +6,8 @@ import ( "math/rand" "sort" "strings" + "sync" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" tmbytes "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/types" ) @@ -46,7 +46,7 @@ func (s *snapshot) Key() snapshotKey { // snapshotPool discovers and aggregates snapshots across peers. 
type snapshotPool struct { - tmsync.Mutex + sync.Mutex snapshots map[snapshotKey]*snapshot snapshotPeers map[snapshotKey]map[types.NodeID]types.NodeID diff --git a/internal/statesync/stateprovider.go b/internal/statesync/stateprovider.go index b190e98e15..6ca671ee7c 100644 --- a/internal/statesync/stateprovider.go +++ b/internal/statesync/stateprovider.go @@ -11,8 +11,7 @@ import ( dbm "github.com/tendermint/tm-db" - dashcore "github.com/tendermint/tendermint/dashcore/rpc" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" + dashcore "github.com/tendermint/tendermint/dash/core" "github.com/tendermint/tendermint/internal/p2p" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/libs/log" @@ -42,10 +41,11 @@ type StateProvider interface { } type stateProviderRPC struct { - tmsync.Mutex // light.Client is not concurrency-safe + sync.Mutex // light.Client is not concurrency-safe lc *light.Client initialHeight int64 providers map[lightprovider.Provider]string + logger log.Logger } // NewRPCStateProvider creates a new StateProvider using a light client and RPC clients. @@ -82,6 +82,7 @@ func NewRPCStateProvider( return nil, err } return &stateProviderRPC{ + logger: logger, lc: lc, initialHeight: initialHeight, providers: providerRemotes, @@ -89,7 +90,7 @@ func NewRPCStateProvider( } func (s *stateProviderRPC) verifyLightBlockAtHeight(ctx context.Context, height uint64, ts time.Time) (*types.LightBlock, error) { - ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + ctx, cancel := context.WithTimeout(ctx, 20*time.Second) defer cancel() return s.lc.VerifyLightBlockAtHeight(ctx, int64(height), ts) } @@ -187,7 +188,7 @@ func (s *stateProviderRPC) State(ctx context.Context, height uint64) (sm.State, if err != nil { return sm.State{}, fmt.Errorf("unable to create RPC client: %w", err) } - rpcclient := lightrpc.NewClient(primaryRPC, s.lc) + rpcclient := lightrpc.NewClient(s.logger, primaryRPC, s.lc) result, err := rpcclient.ConsensusParams(ctx, &currentLightBlock.Height) if err != nil { return sm.State{}, fmt.Errorf("unable to fetch consensus parameters for height %v: %w", @@ -208,10 +209,10 @@ func rpcClient(server string) (*rpchttp.HTTP, error) { } type stateProviderP2P struct { - tmsync.Mutex // light.Client is not concurrency-safe + sync.Mutex // light.Client is not concurrency-safe lc *light.Client initialHeight int64 - paramsSendCh chan<- p2p.Envelope + paramsSendCh *p2p.Channel paramsRecvCh chan types.ConsensusParams } @@ -223,7 +224,7 @@ func NewP2PStateProvider( initialHeight int64, trustHeight int64, providers []lightprovider.Provider, - paramsSendCh chan<- p2p.Envelope, + paramsSendCh *p2p.Channel, logger log.Logger, dashCoreClient dashcore.Client, ) (StateProvider, error) { @@ -246,7 +247,7 @@ func NewP2PStateProvider( } func (s *stateProviderP2P) verifyLightBlockAtHeight(ctx context.Context, height uint64, ts time.Time) (*types.LightBlock, error) { - ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + ctx, cancel := context.WithTimeout(ctx, 20*time.Second) defer cancel() return s.lc.VerifyLightBlockAtHeight(ctx, int64(height), ts) } @@ -385,7 +386,7 @@ func (s *stateProviderP2P) consensusParams(ctx context.Context, height int64) (t } wg.Add(1) - go func(p *BlockProvider, peer types.NodeID, requestCh chan<- p2p.Envelope, responseCh <-chan types.ConsensusParams) { + go func(peer types.NodeID) { defer wg.Done() timer := time.NewTimer(0) @@ -394,14 +395,17 @@ func (s *stateProviderP2P) consensusParams(ctx context.Context, height int64) (t for
{ iterCount++ - select { - case s.paramsSendCh <- p2p.Envelope{ + if err := s.paramsSendCh.Send(ctx, p2p.Envelope{ To: peer, Message: &ssproto.ParamsRequest{ Height: uint64(height), }, - }: - case <-ctx.Done(): + }); err != nil { + // this only errors if + // the context is + // canceled which we + // don't need to + // propagate here return } @@ -414,7 +418,7 @@ func (s *stateProviderP2P) consensusParams(ctx context.Context, height int64) (t continue case <-ctx.Done(): return - case params, ok := <-responseCh: + case params, ok := <-s.paramsRecvCh: if !ok { return } @@ -427,7 +431,7 @@ func (s *stateProviderP2P) consensusParams(ctx context.Context, height int64) (t } } - }(p, peer, s.paramsSendCh, s.paramsRecvCh) + }(peer) } sig := make(chan struct{}) go func() { wg.Wait(); close(sig) }() @@ -462,4 +466,5 @@ func (s *stateProviderP2P) consensusParams(ctx context.Context, height int64) (t return param, nil } } + } diff --git a/internal/statesync/syncer.go b/internal/statesync/syncer.go index 4dd0d2778b..6745abf6be 100644 --- a/internal/statesync/syncer.go +++ b/internal/statesync/syncer.go @@ -5,11 +5,11 @@ import ( "context" "errors" "fmt" + "sync" "time" + abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/config" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/proxy" sm "github.com/tendermint/tendermint/internal/state" @@ -54,52 +54,21 @@ var ( type syncer struct { logger log.Logger stateProvider StateProvider - conn proxy.AppConnSnapshot - connQuery proxy.AppConnQuery + conn abciclient.Client snapshots *snapshotPool - snapshotCh chan<- p2p.Envelope - chunkCh chan<- p2p.Envelope + snapshotCh *p2p.Channel + chunkCh *p2p.Channel tempDir string fetchers int32 retryTimeout time.Duration - mtx tmsync.RWMutex + mtx sync.RWMutex chunks *chunkQueue metrics *Metrics avgChunkTime int64 lastSyncedSnapshotHeight int64 processingSnapshot *snapshot - closeCh <-chan struct{} -} - -// newSyncer creates a new syncer. -func newSyncer( - cfg config.StateSyncConfig, - logger log.Logger, - conn proxy.AppConnSnapshot, - connQuery proxy.AppConnQuery, - stateProvider StateProvider, - snapshotCh chan<- p2p.Envelope, - chunkCh chan<- p2p.Envelope, - closeCh <-chan struct{}, - tempDir string, - metrics *Metrics, -) *syncer { - return &syncer{ - logger: logger, - stateProvider: stateProvider, - conn: conn, - connQuery: connQuery, - snapshots: newSnapshotPool(), - snapshotCh: snapshotCh, - chunkCh: chunkCh, - tempDir: tempDir, - fetchers: cfg.Fetchers, - retryTimeout: cfg.ChunkRequestTimeout, - metrics: metrics, - closeCh: closeCh, - } } // AddChunk adds a chunk to the chunk queue, if any. It returns false if the chunk has already @@ -141,29 +110,13 @@ func (s *syncer) AddSnapshot(peerID types.NodeID, snapshot *snapshot) (bool, err // AddPeer adds a peer to the pool. For now we just keep it simple and send a // single request to discover snapshots, later we may want to do retries and stuff. -func (s *syncer) AddPeer(peerID types.NodeID) (err error) { - defer func() { - // TODO: remove panic recover once AddPeer can no longer accientally send on - // closed channel. - // This recover was added to protect against the p2p message being sent - // to the snapshot channel after the snapshot channel was closed. 
- if r := recover(); r != nil { - err = fmt.Errorf("panic sending peer snapshot request: %v", r) - } - }() - + s.logger.Debug("Requesting snapshots from peer", "peer", peerID) - msg := p2p.Envelope{ + return s.snapshotCh.Send(ctx, p2p.Envelope{ To: peerID, Message: &ssproto.SnapshotsRequest{}, - } - - select { - case <-s.closeCh: - case s.snapshotCh <- msg: - } - return err + }) } // RemovePeer removes a peer from the pool. @@ -178,14 +131,16 @@ func (s *syncer) RemovePeer(peerID types.NodeID) { func (s *syncer) SyncAny( ctx context.Context, discoveryTime time.Duration, - requestSnapshots func(), + requestSnapshots func() error, ) (sm.State, *types.Commit, error) { if discoveryTime != 0 && discoveryTime < minimumDiscoveryTime { discoveryTime = minimumDiscoveryTime } if discoveryTime > 0 { - requestSnapshots() + if err := requestSnapshots(); err != nil { + return sm.State{}, nil, err + } s.logger.Info(fmt.Sprintf("Discovering snapshots for %v", discoveryTime)) time.Sleep(discoveryTime) } @@ -365,7 +320,7 @@ func (s *syncer) Sync(ctx context.Context, snapshot *snapshot, chunks *chunkQueu return sm.State{}, nil, err } - // Verify app hash and version + // Verify app hash and app version if err := s.verifyApp(ctx, snapshot, state.Version.Consensus.App); err != nil { return sm.State{}, nil, err } @@ -382,7 +337,7 @@ func (s *syncer) Sync(ctx context.Context, snapshot *snapshot, chunks *chunkQueu func (s *syncer) offerSnapshot(ctx context.Context, snapshot *snapshot) error { s.logger.Info("Offering snapshot to ABCI app", "height", snapshot.Height, "format", snapshot.Format, "hash", snapshot.Hash) - resp, err := s.conn.OfferSnapshotSync(ctx, abci.RequestOfferSnapshot{ + resp, err := s.conn.OfferSnapshot(ctx, &abci.RequestOfferSnapshot{ Snapshot: &abci.Snapshot{ Height: snapshot.Height, Format: snapshot.Format, @@ -424,7 +379,7 @@ func (s *syncer) applyChunks(ctx context.Context, chunks *chunkQueue, start time return fmt.Errorf("failed to fetch chunk: %w", err) } - resp, err := s.conn.ApplySnapshotChunkSync(ctx, abci.RequestApplySnapshotChunk{ + resp, err := s.conn.ApplySnapshotChunk(ctx, &abci.RequestApplySnapshotChunk{ Index: chunk.Index, Chunk: chunk.Chunk, Sender: string(chunk.Sender), @@ -492,8 +447,6 @@ func (s *syncer) fetchChunks(ctx context.Context, snapshot *snapshot, chunks *ch select { case <-ctx.Done(): return - case <-s.closeCh: - return case <-time.After(2 * time.Second): continue } @@ -509,7 +462,9 @@ func (s *syncer) fetchChunks(ctx context.Context, snapshot *snapshot, chunks *ch ticker := time.NewTicker(s.retryTimeout) defer ticker.Stop() - s.requestChunk(snapshot, index) + if err := s.requestChunk(ctx, snapshot, index); err != nil { + return + } select { case <-chunks.WaitFor(index): @@ -520,8 +475,6 @@ func (s *syncer) fetchChunks(ctx context.Context, snapshot *snapshot, chunks *ch case <-ctx.Done(): return - case <-s.closeCh: - return } ticker.Stop() @@ -529,12 +482,16 @@ func (s *syncer) fetchChunks(ctx context.Context, snapshot *snapshot, chunks *ch } // requestChunk requests a chunk from a peer.
-func (s *syncer) requestChunk(snapshot *snapshot, chunk uint32) { +// +// It returns nil if there are no peers for the given snapshot, or if the +// request was made successfully; it returns an error if the request could +// not be completed. +func (s *syncer) requestChunk(ctx context.Context, snapshot *snapshot, chunk uint32) error { peer := s.snapshots.GetPeer(snapshot) if peer == "" { s.logger.Error("No valid peers found for snapshot", "height", snapshot.Height, "format", snapshot.Format, "hash", snapshot.Hash) - return + return nil } s.logger.Debug( @@ -554,15 +511,15 @@ func (s *syncer) requestChunk(snapshot *snapshot, chunk uint32) { }, } - select { - case s.chunkCh <- msg: - case <-s.closeCh: + if err := s.chunkCh.Send(ctx, msg); err != nil { + return err } + return nil } // verifyApp verifies the sync, checking the app hash, last block height and app version func (s *syncer) verifyApp(ctx context.Context, snapshot *snapshot, appVersion uint64) error { - resp, err := s.connQuery.InfoSync(ctx, proxy.RequestInfo) + resp, err := s.conn.Info(ctx, &proxy.RequestInfo) if err != nil { return fmt.Errorf("failed to query ABCI app for appHash: %w", err) } diff --git a/internal/statesync/syncer_test.go b/internal/statesync/syncer_test.go index e9864ecd85..85abe23440 100644 --- a/internal/statesync/syncer_test.go +++ b/internal/statesync/syncer_test.go @@ -11,11 +11,10 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + clientmocks "github.com/tendermint/tendermint/abci/client/mocks" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/proxy" - proxymocks "github.com/tendermint/tendermint/internal/proxy/mocks" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/statesync/mocks" ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" @@ -23,11 +22,10 @@ import ( "github.com/tendermint/tendermint/version" ) -var ctx = context.Background() - -const testAppVersion = 9 - func TestSyncer_SyncAny(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + state := sm.State{ ChainID: "chain", Version: sm.Version{ @@ -45,13 +43,13 @@ func TestSyncer_SyncAny(t *testing.T) { AppHash: []byte("app_hash"), LastValidators: &types.ValidatorSet{ - Proposer: &types.Validator{ProTxHash: crypto.Sha256([]byte("val1"))}, + Proposer: &types.Validator{ProTxHash: crypto.Checksum([]byte("val1"))}, }, Validators: &types.ValidatorSet{ - Proposer: &types.Validator{ProTxHash: crypto.Sha256([]byte("val2"))}, + Proposer: &types.Validator{ProTxHash: crypto.Checksum([]byte("val2"))}, }, NextValidators: &types.ValidatorSet{ - Proposer: &types.Validator{ProTxHash: crypto.Sha256([]byte("val3"))}, + Proposer: &types.Validator{ProTxHash: crypto.Checksum([]byte("val3"))}, }, ConsensusParams: *types.DefaultConsensusParams(), @@ -71,13 +69,12 @@ func TestSyncer_SyncAny(t *testing.T) { stateProvider.On("AppHash", mock.Anything, uint64(2)).Return([]byte("app_hash_2"), nil) stateProvider.On("Commit", mock.Anything, uint64(1)).Return(commit, nil) stateProvider.On("State", mock.Anything, uint64(1)).Return(state, nil) - connSnapshot := &proxymocks.AppConnSnapshot{} - connQuery := &proxymocks.AppConnQuery{} + conn := &clientmocks.Client{} peerAID := types.NodeID("aa") peerBID := types.NodeID("bb") peerCID := types.NodeID("cc") - rts := setup(t, connSnapshot, connQuery, stateProvider, 3) + rts
:= setup(ctx, t, conn, stateProvider, 4) rts.reactor.syncer = rts.syncer @@ -86,13 +83,13 @@ func TestSyncer_SyncAny(t *testing.T) { require.Error(t, err) // Adding a couple of peers should trigger snapshot discovery messages - err = rts.syncer.AddPeer(peerAID) + err = rts.syncer.AddPeer(ctx, peerAID) require.NoError(t, err) e := <-rts.snapshotOutCh require.Equal(t, &ssproto.SnapshotsRequest{}, e.Message) require.Equal(t, peerAID, e.To) - err = rts.syncer.AddPeer(peerBID) + err = rts.syncer.AddPeer(ctx, peerBID) require.NoError(t, err) e = <-rts.snapshotOutCh require.Equal(t, &ssproto.SnapshotsRequest{}, e.Message) @@ -119,7 +116,7 @@ func TestSyncer_SyncAny(t *testing.T) { // We start a sync, with peers sending back chunks when requested. We first reject the snapshot // with height 2 format 2, and accept the snapshot at height 1. - connSnapshot.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + conn.On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ Snapshot: &abci.Snapshot{ Height: 2, Format: 2, @@ -128,7 +125,7 @@ func TestSyncer_SyncAny(t *testing.T) { }, AppHash: []byte("app_hash_2"), }).Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT_FORMAT}, nil) - connSnapshot.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + conn.On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ Snapshot: &abci.Snapshot{ Height: s.Height, Format: s.Format, @@ -140,62 +137,73 @@ func TestSyncer_SyncAny(t *testing.T) { }).Times(2).Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ACCEPT}, nil) chunkRequests := make(map[uint32]int) - chunkRequestsMtx := tmsync.Mutex{} + chunkRequestsMtx := sync.Mutex{} - var wg sync.WaitGroup - wg.Add(4) + chunkProcessDone := make(chan struct{}) go func() { - for e := range rts.chunkOutCh { - msg, ok := e.Message.(*ssproto.ChunkRequest) - assert.True(t, ok) - - assert.EqualValues(t, 1, msg.Height) - assert.EqualValues(t, 1, msg.Format) - assert.LessOrEqual(t, msg.Index, uint32(len(chunks))) - - added, err := rts.syncer.AddChunk(chunks[msg.Index]) - assert.NoError(t, err) - assert.True(t, added) - - chunkRequestsMtx.Lock() - chunkRequests[msg.Index]++ - chunkRequestsMtx.Unlock() + defer close(chunkProcessDone) + var seen int + for { + if seen >= 4 { + return + } - wg.Done() + select { + case <-ctx.Done(): + t.Logf("sent %d chunks", seen) + return + case e := <-rts.chunkOutCh: + msg, ok := e.Message.(*ssproto.ChunkRequest) + assert.True(t, ok) + + assert.EqualValues(t, 1, msg.Height) + assert.EqualValues(t, 1, msg.Format) + assert.LessOrEqual(t, msg.Index, uint32(len(chunks))) + + added, err := rts.syncer.AddChunk(chunks[msg.Index]) + assert.NoError(t, err) + assert.True(t, added) + + chunkRequestsMtx.Lock() + chunkRequests[msg.Index]++ + chunkRequestsMtx.Unlock() + seen++ + t.Logf("added chunk (%d of 4): %d", seen, msg.Index) + } } }() // The first time we're applying chunk 2 we tell it to retry the snapshot and discard chunk 1, // which should cause it to keep the existing chunk 0 and 2, and restart restoration from // beginning. We also wait for a little while, to exercise the retry logic in fetchChunks(). 
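The mock expectations below emulate an application that accepts chunks 0 and 1 but, on first seeing chunk 2, asks the syncer to restart restoration and refetch chunk 1. For orientation, a hedged sketch of the application-side handler such responses could come from; exampleApp and its sawChunk2 field are hypothetical, and the signature is simplified rather than the exact abci.Application interface:

package example

import (
	abci "github.com/tendermint/tendermint/abci/types"
)

// exampleApp is a hypothetical ABCI application, present only to illustrate
// the RETRY_SNAPSHOT/RefetchChunks responses the mocks below return.
type exampleApp struct {
	sawChunk2 bool
}

// ApplySnapshotChunk accepts every chunk except the first occurrence of
// chunk 2, where it tells the syncer to restart restoration while keeping
// chunks 0 and 2 and discarding/re-requesting chunk 1.
func (app *exampleApp) ApplySnapshotChunk(
	req *abci.RequestApplySnapshotChunk,
) *abci.ResponseApplySnapshotChunk {
	if req.Index == 2 && !app.sawChunk2 {
		app.sawChunk2 = true
		return &abci.ResponseApplySnapshotChunk{
			Result:        abci.ResponseApplySnapshotChunk_RETRY_SNAPSHOT,
			RefetchChunks: []uint32{1}, // keep chunks 0 and 2, refetch 1
		}
	}
	return &abci.ResponseApplySnapshotChunk{
		Result: abci.ResponseApplySnapshotChunk_ACCEPT,
	}
}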
- connSnapshot.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + conn.On("ApplySnapshotChunk", mock.Anything, &abci.RequestApplySnapshotChunk{ Index: 2, Chunk: []byte{1, 1, 2}, - }).Once().Run(func(args mock.Arguments) { time.Sleep(2 * time.Second) }).Return( + }).Once().Run(func(args mock.Arguments) { time.Sleep(1 * time.Second) }).Return( &abci.ResponseApplySnapshotChunk{ Result: abci.ResponseApplySnapshotChunk_RETRY_SNAPSHOT, RefetchChunks: []uint32{1}, }, nil) - connSnapshot.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + conn.On("ApplySnapshotChunk", mock.Anything, &abci.RequestApplySnapshotChunk{ Index: 0, Chunk: []byte{1, 1, 0}, }).Times(2).Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - connSnapshot.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + conn.On("ApplySnapshotChunk", mock.Anything, &abci.RequestApplySnapshotChunk{ Index: 1, Chunk: []byte{1, 1, 1}, }).Times(2).Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - connSnapshot.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + conn.On("ApplySnapshotChunk", mock.Anything, &abci.RequestApplySnapshotChunk{ Index: 2, Chunk: []byte{1, 1, 2}, }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - connQuery.On("InfoSync", ctx, proxy.RequestInfo).Return(&abci.ResponseInfo{ + conn.On("Info", mock.Anything, &proxy.RequestInfo).Return(&abci.ResponseInfo{ AppVersion: testAppVersion, LastBlockHeight: 1, LastBlockAppHash: []byte("app_hash"), }, nil) - newState, lastCommit, err := rts.syncer.SyncAny(ctx, 0, func() {}) + newState, lastCommit, err := rts.syncer.SyncAny(ctx, 0, func() error { return nil }) require.NoError(t, err) - wg.Wait() + <-chunkProcessDone chunkRequestsMtx.Lock() require.Equal(t, map[uint32]int{0: 1, 1: 2, 2: 1}, chunkRequests) @@ -215,17 +223,19 @@ func TestSyncer_SyncAny(t *testing.T) { require.Equal(t, int64(len(rts.syncer.snapshots.snapshots)), rts.reactor.TotalSnapshots()) require.Equal(t, int64(0), rts.reactor.SnapshotChunksCount()) - connSnapshot.AssertExpectations(t) - connQuery.AssertExpectations(t) + conn.AssertExpectations(t) } func TestSyncer_SyncAny_noSnapshots(t *testing.T) { stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - rts := setup(t, nil, nil, stateProvider, 2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, nil, stateProvider, 2) - _, _, err := rts.syncer.SyncAny(ctx, 0, func() {}) + _, _, err := rts.syncer.SyncAny(ctx, 0, func() error { return nil }) require.Equal(t, errNoSnapshots, err) } @@ -233,7 +243,10 @@ func TestSyncer_SyncAny_abort(t *testing.T) { stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - rts := setup(t, nil, nil, stateProvider, 2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, nil, stateProvider, 2) s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} peerID := types.NodeID("aa") @@ -241,11 +254,11 @@ func TestSyncer_SyncAny_abort(t *testing.T) { _, err := rts.syncer.AddSnapshot(peerID, s) require.NoError(t, err) - rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ Snapshot: toABCI(s), AppHash: 
[]byte("app_hash"), }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ABORT}, nil) - _, _, err = rts.syncer.SyncAny(ctx, 0, func() {}) + _, _, err = rts.syncer.SyncAny(ctx, 0, func() error { return nil }) require.Equal(t, errAbort, err) rts.conn.AssertExpectations(t) } @@ -254,7 +267,10 @@ func TestSyncer_SyncAny_reject(t *testing.T) { stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - rts := setup(t, nil, nil, stateProvider, 2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, nil, stateProvider, 2) // s22 is tried first, then s12, then s11, then errNoSnapshots s22 := &snapshot{Height: 2, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}} @@ -272,19 +288,19 @@ func TestSyncer_SyncAny_reject(t *testing.T) { _, err = rts.syncer.AddSnapshot(peerID, s11) require.NoError(t, err) - rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ Snapshot: toABCI(s22), AppHash: []byte("app_hash"), }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil) - rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ Snapshot: toABCI(s12), AppHash: []byte("app_hash"), }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil) - rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ Snapshot: toABCI(s11), AppHash: []byte("app_hash"), }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil) - _, _, err = rts.syncer.SyncAny(ctx, 0, func() {}) + _, _, err = rts.syncer.SyncAny(ctx, 0, func() error { return nil }) require.Equal(t, errNoSnapshots, err) rts.conn.AssertExpectations(t) } @@ -293,7 +309,10 @@ func TestSyncer_SyncAny_reject_format(t *testing.T) { stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - rts := setup(t, nil, nil, stateProvider, 2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, nil, stateProvider, 2) // s22 is tried first, which reject s22 and s12, then s11 will abort. 
s22 := &snapshot{Height: 2, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}} @@ -311,15 +330,15 @@ func TestSyncer_SyncAny_reject_format(t *testing.T) { _, err = rts.syncer.AddSnapshot(peerID, s11) require.NoError(t, err) - rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ Snapshot: toABCI(s22), AppHash: []byte("app_hash"), }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT_FORMAT}, nil) - rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ Snapshot: toABCI(s11), AppHash: []byte("app_hash"), }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ABORT}, nil) - _, _, err = rts.syncer.SyncAny(ctx, 0, func() {}) + _, _, err = rts.syncer.SyncAny(ctx, 0, func() error { return nil }) require.Equal(t, errAbort, err) rts.conn.AssertExpectations(t) } @@ -328,7 +347,10 @@ func TestSyncer_SyncAny_reject_sender(t *testing.T) { stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - rts := setup(t, nil, nil, stateProvider, 2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, nil, stateProvider, 2) peerAID := types.NodeID("aa") peerBID := types.NodeID("bb") @@ -357,15 +379,15 @@ func TestSyncer_SyncAny_reject_sender(t *testing.T) { _, err = rts.syncer.AddSnapshot(peerCID, sbc) require.NoError(t, err) - rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ Snapshot: toABCI(sbc), AppHash: []byte("app_hash"), }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT_SENDER}, nil) - rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ Snapshot: toABCI(sa), AppHash: []byte("app_hash"), }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil) - _, _, err = rts.syncer.SyncAny(ctx, 0, func() {}) + _, _, err = rts.syncer.SyncAny(ctx, 0, func() error { return nil }) require.Equal(t, errNoSnapshots, err) rts.conn.AssertExpectations(t) } @@ -374,7 +396,10 @@ func TestSyncer_SyncAny_abciError(t *testing.T) { stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - rts := setup(t, nil, nil, stateProvider, 2) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + rts := setup(ctx, t, nil, stateProvider, 2) errBoom := errors.New("boom") s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} @@ -384,11 +409,11 @@ func TestSyncer_SyncAny_abciError(t *testing.T) { _, err := rts.syncer.AddSnapshot(peerID, s) require.NoError(t, err) - rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ Snapshot: toABCI(s), AppHash: []byte("app_hash"), }).Once().Return(nil, errBoom) - _, _, err = rts.syncer.SyncAny(ctx, 0, func() {}) + _, _, err = rts.syncer.SyncAny(ctx, 0, func() error { return nil }) require.True(t, errors.Is(err, errBoom)) rts.conn.AssertExpectations(t) } @@ -411,16 +436,23 @@ func TestSyncer_offerSnapshot(t *testing.T) { "error": {0, boom, boom}, "unknown non-zero": {9, nil, unknownErr}, } + + ctx, cancel := 
context.WithCancel(context.Background()) + defer cancel() + for name, tc := range testcases { tc := tc t.Run(name, func(t *testing.T) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - rts := setup(t, nil, nil, stateProvider, 2) + rts := setup(ctx, t, nil, stateProvider, 2) s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}, trustedAppHash: []byte("app_hash")} - rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{ + rts.conn.On("OfferSnapshot", mock.Anything, &abci.RequestOfferSnapshot{ Snapshot: toABCI(s), AppHash: []byte("app_hash"), }).Return(&abci.ResponseOfferSnapshot{Result: tc.result}, tc.err) @@ -457,16 +489,23 @@ func TestSyncer_applyChunks_Results(t *testing.T) { "error": {0, boom, boom}, "unknown non-zero": {9, nil, unknownErr}, } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for name, tc := range testcases { tc := tc t.Run(name, func(t *testing.T) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - rts := setup(t, nil, nil, stateProvider, 2) + rts := setup(ctx, t, nil, stateProvider, 2) body := []byte{1, 2, 3} - chunks, err := newChunkQueue(&snapshot{Height: 1, Format: 1, Chunks: 1}, "") + chunks, err := newChunkQueue(&snapshot{Height: 1, Format: 1, Chunks: 1}, t.TempDir()) require.NoError(t, err) fetchStartTime := time.Now() @@ -474,11 +513,11 @@ func TestSyncer_applyChunks_Results(t *testing.T) { _, err = chunks.Add(&chunk{Height: 1, Format: 1, Index: 0, Chunk: body}) require.NoError(t, err) - rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + rts.conn.On("ApplySnapshotChunk", mock.Anything, &abci.RequestApplySnapshotChunk{ Index: 0, Chunk: body, }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: tc.result}, tc.err) if tc.result == abci.ResponseApplySnapshotChunk_RETRY { - rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + rts.conn.On("ApplySnapshotChunk", mock.Anything, &abci.RequestApplySnapshotChunk{ Index: 0, Chunk: body, }).Once().Return(&abci.ResponseApplySnapshotChunk{ Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) @@ -511,15 +550,21 @@ func TestSyncer_applyChunks_RefetchChunks(t *testing.T) { "retry_snapshot": {abci.ResponseApplySnapshotChunk_RETRY_SNAPSHOT}, "reject_snapshot": {abci.ResponseApplySnapshotChunk_REJECT_SNAPSHOT}, } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for name, tc := range testcases { tc := tc t.Run(name, func(t *testing.T) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - rts := setup(t, nil, nil, stateProvider, 2) + rts := setup(ctx, t, nil, stateProvider, 2) - chunks, err := newChunkQueue(&snapshot{Height: 1, Format: 1, Chunks: 3}, "") + chunks, err := newChunkQueue(&snapshot{Height: 1, Format: 1, Chunks: 3}, t.TempDir()) require.NoError(t, err) fetchStartTime := time.Now() @@ -535,13 +580,13 @@ func TestSyncer_applyChunks_RefetchChunks(t *testing.T) { require.NoError(t, err) // The first two chunks are accepted, before the last one asks for 1 to be refetched - rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + 
rts.conn.On("ApplySnapshotChunk", mock.Anything, &abci.RequestApplySnapshotChunk{ Index: 0, Chunk: []byte{0}, }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + rts.conn.On("ApplySnapshotChunk", mock.Anything, &abci.RequestApplySnapshotChunk{ Index: 1, Chunk: []byte{1}, }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + rts.conn.On("ApplySnapshotChunk", mock.Anything, &abci.RequestApplySnapshotChunk{ Index: 2, Chunk: []byte{2}, }).Once().Return(&abci.ResponseApplySnapshotChunk{ Result: tc.result, @@ -576,13 +621,19 @@ func TestSyncer_applyChunks_RejectSenders(t *testing.T) { "retry_snapshot": {abci.ResponseApplySnapshotChunk_RETRY_SNAPSHOT}, "reject_snapshot": {abci.ResponseApplySnapshotChunk_REJECT_SNAPSHOT}, } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for name, tc := range testcases { tc := tc t.Run(name, func(t *testing.T) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - rts := setup(t, nil, nil, stateProvider, 2) + rts := setup(ctx, t, nil, stateProvider, 2) // Set up three peers across two snapshots, and ask for one of them to be banned. // It should be banned from all snapshots. @@ -611,7 +662,7 @@ func TestSyncer_applyChunks_RejectSenders(t *testing.T) { _, err = rts.syncer.AddSnapshot(peerCID, s2) require.NoError(t, err) - chunks, err := newChunkQueue(s1, "") + chunks, err := newChunkQueue(s1, t.TempDir()) require.NoError(t, err) fetchStartTime := time.Now() @@ -629,13 +680,13 @@ func TestSyncer_applyChunks_RejectSenders(t *testing.T) { require.NoError(t, err) // The first two chunks are accepted, before the last one asks for b sender to be rejected - rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + rts.conn.On("ApplySnapshotChunk", mock.Anything, &abci.RequestApplySnapshotChunk{ Index: 0, Chunk: []byte{0}, Sender: "aa", }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + rts.conn.On("ApplySnapshotChunk", mock.Anything, &abci.RequestApplySnapshotChunk{ Index: 1, Chunk: []byte{1}, Sender: "bb", }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) - rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + rts.conn.On("ApplySnapshotChunk", mock.Anything, &abci.RequestApplySnapshotChunk{ Index: 2, Chunk: []byte{2}, Sender: "cc", }).Once().Return(&abci.ResponseApplySnapshotChunk{ Result: tc.result, @@ -644,7 +695,7 @@ func TestSyncer_applyChunks_RejectSenders(t *testing.T) { // On retry, the last chunk will be tried again, so we just accept it then. 
if tc.result == abci.ResponseApplySnapshotChunk_RETRY { - rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{ + rts.conn.On("ApplySnapshotChunk", mock.Anything, &abci.RequestApplySnapshotChunk{ Index: 2, Chunk: []byte{2}, Sender: "cc", }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) } @@ -681,42 +732,49 @@ func TestSyncer_verifyApp(t *testing.T) { testcases := map[string]struct { response *abci.ResponseInfo + err error expectErr error }{ "verified": {&abci.ResponseInfo{ LastBlockHeight: 3, LastBlockAppHash: []byte("app_hash"), AppVersion: appVersion, - }, nil}, + }, nil, nil}, "invalid app version": {&abci.ResponseInfo{ LastBlockHeight: 3, LastBlockAppHash: []byte("app_hash"), AppVersion: 2, - }, appVersionMismatchErr}, + }, nil, appVersionMismatchErr}, "invalid height": {&abci.ResponseInfo{ LastBlockHeight: 5, LastBlockAppHash: []byte("app_hash"), AppVersion: appVersion, - }, errVerifyFailed}, + }, nil, errVerifyFailed}, "invalid hash": {&abci.ResponseInfo{ LastBlockHeight: 3, LastBlockAppHash: []byte("xxx"), AppVersion: appVersion, - }, errVerifyFailed}, - "error": {nil, boom}, + }, nil, errVerifyFailed}, + "error": {nil, boom, boom}, } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for name, tc := range testcases { tc := tc t.Run(name, func(t *testing.T) { - rts := setup(t, nil, nil, nil, 2) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + rts := setup(ctx, t, nil, nil, 2) - rts.connQuery.On("InfoSync", mock.Anything, proxy.RequestInfo).Return(tc.response, tc.expectErr) + rts.conn.On("Info", mock.Anything, &proxy.RequestInfo).Return(tc.response, tc.err) err := rts.syncer.verifyApp(ctx, s, appVersion) - if tc.expectErr != nil { - require.ErrorIs(t, err, tc.expectErr) - } else { - require.NoError(t, err) + unwrapped := errors.Unwrap(err) + if unwrapped != nil { + err = unwrapped } + require.Equal(t, tc.expectErr, err) }) } } diff --git a/internal/store/store.go b/internal/store/store.go index 4c7a92e29a..39257c5194 100644 --- a/internal/store/store.go +++ b/internal/store/store.go @@ -157,7 +157,7 @@ func (bs *BlockStore) LoadBlock(height int64) *types.Block { if err != nil { // NOTE: The existence of meta should imply the existence of the // block. So, make sure meta is only saved after blocks are saved. 
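// Editor's note: the store.go hunks below replace panic(fmt.Sprintf(...)) with
// panic(fmt.Errorf("...: %w", err)). The payoff is that the panic value is an
// error that still wraps its cause, so a recover() handler can classify it with
// errors.Is/errors.As instead of matching strings. A minimal sketch under that
// assumption (errCorrupt and the function names are hypothetical):
//
//	import (
//		"errors"
//		"fmt"
//	)
//
//	var errCorrupt = errors.New("corrupt entry")
//
//	func mustLoad() { panic(fmt.Errorf("error reading block: %w", errCorrupt)) }
//
//	func load() (err error) {
//		defer func() {
//			if r := recover(); r != nil {
//				if e, ok := r.(error); ok && errors.Is(e, errCorrupt) {
//					err = e // recovered with the original cause intact
//					return
//				}
//				panic(r) // not an error we understand; re-panic
//			}
//		}()
//		mustLoad()
//		return nil
//	}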
- panic(fmt.Sprintf("Error reading block: %v", err)) + panic(fmt.Errorf("error reading block: %w", err)) } block, err := types.BlockFromProto(pbb) @@ -229,7 +229,7 @@ func (bs *BlockStore) LoadBlockPart(height int64, index int) *types.Part { } part, err := types.PartFromProto(pbpart) if err != nil { - panic(fmt.Sprintf("Error reading block part: %v", err)) + panic(fmt.Errorf("error reading block part: %w", err)) } return part @@ -281,7 +281,7 @@ func (bs *BlockStore) LoadBlockCommit(height int64) *types.Commit { } commit, err := types.CommitFromProto(pbc) if err != nil { - panic(fmt.Sprintf("Error reading block commit: %v", err)) + panic(fmt.Errorf("error reading block commit: %w", err)) } return commit } @@ -301,7 +301,7 @@ func (bs *BlockStore) LoadSeenCommit() *types.Commit { } err = proto.Unmarshal(bz, pbc) if err != nil { - panic(fmt.Sprintf("error reading block seen commit: %v", err)) + panic(fmt.Errorf("error reading block seen commit: %w", err)) } commit, err := types.CommitFromProto(pbc) diff --git a/internal/store/store_test.go b/internal/store/store_test.go index 8960bdabac..4c51fc9a9d 100644 --- a/internal/store/store_test.go +++ b/internal/store/store_test.go @@ -9,30 +9,24 @@ import ( "testing" "time" - "github.com/tendermint/tendermint/crypto/bls12381" - tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto/bls12381" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/state/test/factory" - "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" tmtime "github.com/tendermint/tendermint/libs/time" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/version" ) -// A cleanupFunc cleans up any config / test files created for a particular -// test. 
-type cleanupFunc func() - // make a Commit with a single vote containing just the height and a timestamp -func makeTestCommit(height int64, timestamp time.Time) *types.Commit { +func makeTestCommit(state sm.State, height int64, timestamp time.Time) *types.Commit { blockID := types.BlockID{Hash: []byte(""), PartSetHeader: types.PartSetHeader{Hash: []byte(""), Total: 2}} stateID := types.RandStateID().WithHeight(height - 1) goodVote := &types.Vote{ @@ -61,54 +55,24 @@ func makeTestCommit(height int64, timestamp time.Time) *types.Commit { goodVote.BlockSignature, goodVote.StateSignature) } -func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore, cleanupFunc) { - cfg, err := config.ResetTestRoot("blockchain_reactor_test") - if err != nil { - panic(err) - } +func makeStateAndBlockStore(t *testing.T, dir string) (sm.State, *BlockStore) { + cfg, err := config.ResetTestRoot(dir, "blockchain_reactor_test") + require.NoError(t, err) blockDB := dbm.NewMemDB() state, err := sm.MakeGenesisStateFromFile(cfg.GenesisFile()) - if err != nil { - panic(fmt.Errorf("error constructing state from genesis file: %w", err)) - } - return state, NewBlockStore(blockDB), func() { os.RemoveAll(cfg.RootDir) } + require.NoError(t, err) + t.Cleanup(func() { _ = os.RemoveAll(cfg.RootDir) }) + return state, NewBlockStore(blockDB) } -func freshBlockStore() (*BlockStore, dbm.DB) { +func newInMemoryBlockStore() (*BlockStore, dbm.DB) { db := dbm.NewMemDB() return NewBlockStore(db), db } -var ( - state sm.State - block *types.Block - partSet *types.PartSet - part1 *types.Part - part2 *types.Part - seenCommit1 *types.Commit -) - -func TestMain(m *testing.M) { - var cleanup cleanupFunc - state, _, cleanup = makeStateAndBlockStore(log.NewNopLogger()) - var err error - block, err = factory.MakeBlock(state, 1, new(types.Commit), nil, 0) - if err != nil { - panic(err) - } - partSet = block.MakePartSet(2) - part1 = partSet.GetPart(0) - part2 = partSet.GetPart(1) - seenCommit1 = makeTestCommit(10, tmtime.Now()) - code := m.Run() - cleanup() - os.Exit(code) -} - // TODO: This test should be simplified ... 
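// Editor's note: the test rewrite that follows drops the TestMain above, along
// with its package-level fixtures, in favor of per-test construction. That is
// the shape Go's t.TempDir and t.Cleanup are designed for: every test gets
// isolated state, and teardown runs even when the test fails. A minimal sketch
// of the pattern (Store and OpenStore are hypothetical stand-ins):
//
//	func newFixture(t *testing.T) *Store {
//		t.Helper()
//		dir := t.TempDir() // deleted automatically when the test finishes
//		s, err := OpenStore(dir)
//		require.NoError(t, err)
//		t.Cleanup(func() { s.Close() }) // cleanups run LIFO after the test
//		return s
//	}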
func TestBlockStoreSaveLoadBlock(t *testing.T) { - state, bs, cleanup := makeStateAndBlockStore(log.NewNopLogger()) - defer cleanup() + state, bs := makeStateAndBlockStore(t, t.TempDir()) require.Equal(t, bs.Base(), int64(0), "initially the base should be zero") require.Equal(t, bs.Height(), int64(0), "initially the height should be zero") @@ -123,9 +87,12 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { // save a block block, err := factory.MakeBlock(state, bs.Height()+1, new(types.Commit), nil, 0) require.NoError(t, err) - validPartSet := block.MakePartSet(2) - seenCommit := makeTestCommit(10, tmtime.Now()) - bs.SaveBlock(block, partSet, seenCommit) + validPartSet, err := block.MakePartSet(2) + require.NoError(t, err) + part2 := validPartSet.GetPart(1) + + seenCommit := makeTestCommit(state, 10, tmtime.Now()) + bs.SaveBlock(block, validPartSet, seenCommit) require.EqualValues(t, 1, bs.Base(), "expecting the new height to be changed") require.EqualValues(t, block.Header.Height, bs.Height(), "expecting the new height to be changed") @@ -143,7 +110,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { } // End of setup, test data - commitAtH10 := makeTestCommit(10, tmtime.Now()) + commitAtH10 := makeTestCommit(state, 10, tmtime.Now()) tuples := []struct { block *types.Block parts *types.PartSet @@ -160,7 +127,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { { block: newBlock(header1, commitAtH10), parts: validPartSet, - seenCommit: seenCommit1, + seenCommit: seenCommit, }, { @@ -176,10 +143,10 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { ChainID: "block_test", Time: tmtime.Now(), ProposerProTxHash: tmrand.Bytes(crypto.DefaultHashSize)}, - makeTestCommit(5, tmtime.Now()), + makeTestCommit(state, 5, tmtime.Now()), ), parts: validPartSet, - seenCommit: makeTestCommit(5, tmtime.Now()), + seenCommit: makeTestCommit(state, 5, tmtime.Now()), }, { @@ -191,7 +158,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { { block: newBlock(header1, commitAtH10), parts: validPartSet, - seenCommit: seenCommit1, + seenCommit: seenCommit, corruptCommitInDB: true, // Corrupt the DB's commit entry wantPanic: "error reading block commit", }, @@ -199,7 +166,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { { block: newBlock(header1, commitAtH10), parts: validPartSet, - seenCommit: seenCommit1, + seenCommit: seenCommit, wantPanic: "unmarshal to tmproto.BlockMeta", corruptBlockInDB: true, // Corrupt the DB's block entry }, @@ -207,7 +174,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { { block: newBlock(header1, commitAtH10), parts: validPartSet, - seenCommit: seenCommit1, + seenCommit: seenCommit, // Expecting no error and we want a nil back eraseSeenCommitInDB: true, @@ -216,7 +183,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { { block: newBlock(header1, commitAtH10), parts: validPartSet, - seenCommit: seenCommit1, + seenCommit: seenCommit, corruptSeenCommitInDB: true, wantPanic: "error reading block seen commit", @@ -225,7 +192,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { { block: newBlock(header1, commitAtH10), parts: validPartSet, - seenCommit: seenCommit1, + seenCommit: seenCommit, // Expecting no error and we want a nil back eraseCommitInDB: true, @@ -242,7 +209,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { for i, tuple := range tuples { tuple := tuple - bs, db := freshBlockStore() + bs, db := newInMemoryBlockStore() // SaveBlock res, err, panicErr := doFn(func() (interface{}, error) { bs.SaveBlock(tuple.block, tuple.parts, tuple.seenCommit) @@ -298,7 +265,7 @@ 
func TestBlockStoreSaveLoadBlock(t *testing.T) { } assert.Nil(t, panicErr, "#%d: unexpected panic", i) - assert.Nil(t, err, "#%d: expecting a non-nil error", i) + assert.NoError(t, err, "#%d: unexpected error", i) qua, ok := res.(*quad) if !ok || qua == nil { t.Errorf("#%d: got nil quad back; gotType=%T", i, res) @@ -316,7 +283,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { } func TestLoadBaseMeta(t *testing.T) { - cfg, err := config.ResetTestRoot("blockchain_reactor_test") + cfg, err := config.ResetTestRoot(t.TempDir(), "blockchain_reactor_test") require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) @@ -328,8 +295,9 @@ state.LastBlockHeight = h - 1 block, err := factory.MakeBlock(state, h, new(types.Commit), nil, 0) require.NoError(t, err) - partSet := block.MakePartSet(2) - seenCommit := makeTestCommit(h, tmtime.Now()) + partSet, err := block.MakePartSet(2) + require.NoError(t, err) + seenCommit := makeTestCommit(state, h, tmtime.Now()) bs.SaveBlock(block, partSet, seenCommit) } @@ -343,13 +311,19 @@ } func TestLoadBlockPart(t *testing.T) { - bs, db := freshBlockStore() - height, index := int64(10), 1 + cfg, err := config.ResetTestRoot(t.TempDir(), "blockchain_reactor_test") + require.NoError(t, err) + + bs, db := newInMemoryBlockStore() + const height, index = 10, 1 loadPart := func() (interface{}, error) { part := bs.LoadBlockPart(height, index) return part, nil } + state, err := sm.MakeGenesisStateFromFile(cfg.GenesisFile()) + require.NoError(t, err) + // Initially no contents. // 1. Requesting for a non-existent block shouldn't fail res, _, panicErr := doFn(loadPart) @@ -357,13 +331,20 @@ require.Nil(t, res, "a non-existent block part should return nil") // 2. Next save a corrupted block then try to load it - err := db.Set(blockPartKey(height, index), []byte("Tendermint")) + err = db.Set(blockPartKey(height, index), []byte("Tendermint")) require.NoError(t, err) res, _, panicErr = doFn(loadPart) require.NotNil(t, panicErr, "expecting a non-nil panic") require.Contains(t, panicErr.Error(), "unmarshal to tmproto.Part failed") // 3. 
A good block serialized and saved to the DB should be retrievable + block, err := factory.MakeBlock(state, 1, new(types.Commit), nil, 0) + require.NoError(t, err) + partSet, err := block.MakePartSet(2) + require.NoError(t, err) + part1 := partSet.GetPart(0) + pb1, err := part1.ToProto() require.NoError(t, err) err = db.Set(blockPartKey(height, index), mustEncode(pb1)) @@ -376,7 +357,7 @@ } func TestPruneBlocks(t *testing.T) { - cfg, err := config.ResetTestRoot("blockchain_reactor_test") + cfg, err := config.ResetTestRoot(t.TempDir(), "blockchain_reactor_test") require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) @@ -396,8 +377,9 @@ state.LastBlockHeight = h - 1 block, err := factory.MakeBlock(state, h, new(types.Commit), nil, 0) require.NoError(t, err) - partSet := block.MakePartSet(2) - seenCommit := makeTestCommit(h, tmtime.Now()) + partSet, err := block.MakePartSet(2) + require.NoError(t, err) + seenCommit := makeTestCommit(state, h, tmtime.Now()) bs.SaveBlock(block, partSet, seenCommit) } @@ -458,7 +440,7 @@ } func TestLoadBlockMeta(t *testing.T) { - bs, db := freshBlockStore() + bs, db := newInMemoryBlockStore() height := int64(10) loadMeta := func() (interface{}, error) { meta := bs.LoadBlockMeta(height) @@ -497,14 +479,14 @@ } func TestBlockFetchAtHeight(t *testing.T) { - state, bs, cleanup := makeStateAndBlockStore(log.NewNopLogger()) - defer cleanup() + state, bs := makeStateAndBlockStore(t, t.TempDir()) require.Equal(t, bs.Height(), int64(0), "initially the height should be zero") block, err := factory.MakeBlock(state, bs.Height()+1, new(types.Commit), nil, 0) require.NoError(t, err) - partSet := block.MakePartSet(2) - seenCommit := makeTestCommit(10, tmtime.Now()) + partSet, err := block.MakePartSet(2) + require.NoError(t, err) + seenCommit := makeTestCommit(state, 10, tmtime.Now()) bs.SaveBlock(block, partSet, seenCommit) require.Equal(t, bs.Height(), block.Header.Height, "expecting the new height to be changed") @@ -526,9 +508,9 @@ } func TestSeenAndCanonicalCommit(t *testing.T) { - bs, _ := freshBlockStore() + state, store := makeStateAndBlockStore(t, t.TempDir()) loadCommit := func() (interface{}, error) { - meta := bs.LoadSeenCommit() + meta := store.LoadSeenCommit() return meta, nil } @@ -542,19 +524,20 @@ // are persisted. 
for h := int64(3); h <= 5; h++ { state.LastBlockHeight = h - 1 - blockCommit := makeTestCommit(h-1, tmtime.Now()) + blockCommit := makeTestCommit(state, h-1, tmtime.Now()) block, err := factory.MakeBlock(state, h, blockCommit, nil, 0) require.NoError(t, err) - partSet := block.MakePartSet(2) - seenCommit := makeTestCommit(h, tmtime.Now()) - bs.SaveBlock(block, partSet, seenCommit) - c3 := bs.LoadSeenCommit() + partSet, err := block.MakePartSet(2) + require.NoError(t, err) + seenCommit := makeTestCommit(state, h, tmtime.Now()) + store.SaveBlock(block, partSet, seenCommit) + c3 := store.LoadSeenCommit() require.NotNil(t, c3) require.Equal(t, h, c3.Height) require.Equal(t, seenCommit.Hash(), c3.Hash()) - c5 := bs.LoadBlockCommit(h) + c5 := store.LoadBlockCommit(h) require.Nil(t, c5) - c6 := bs.LoadBlockCommit(h - 1) + c6 := store.LoadBlockCommit(h - 1) require.Equal(t, blockCommit.Hash(), c6.Hash()) } diff --git a/internal/test/factory/block.go b/internal/test/factory/block.go index b4ad5b439c..81ec8a49ce 100644 --- a/internal/test/factory/block.go +++ b/internal/test/factory/block.go @@ -1,10 +1,12 @@ package factory import ( + "testing" "time" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/crypto/tmhash" "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/version" ) @@ -17,15 +19,12 @@ var ( DefaultTestTime = time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC) ) -func MakeVersion() version.Consensus { - return version.Consensus{ - Block: version.BlockProtocol, - App: 1, - } +func RandomAddress() []byte { + return crypto.CRandBytes(crypto.AddressSize) } func RandomHash() []byte { - return crypto.CRandBytes(tmhash.Size) + return crypto.CRandBytes(crypto.HashSize) } func MakeBlockID() types.BlockID { @@ -44,14 +43,15 @@ func MakeBlockIDWithHash(hash []byte) types.BlockID { // MakeHeader fills the rest of the contents of the header such that it passes // validate basic -func MakeHeader(h *types.Header) (*types.Header, error) { +func MakeHeader(t *testing.T, h *types.Header) *types.Header { + t.Helper() if h.Version.Block == 0 { h.Version.Block = version.BlockProtocol } if h.Height == 0 { h.Height = 1 } - if h.LastBlockID.IsZero() { + if h.LastBlockID.IsNil() { h.LastBlockID = MakeBlockID() } if h.ChainID == "" { @@ -85,13 +85,7 @@ func MakeHeader(h *types.Header) (*types.Header, error) { h.ProposerProTxHash = crypto.RandProTxHash() } - return h, h.ValidateBasic() -} + require.NoError(t, h.ValidateBasic()) -func MakeRandomHeader() *types.Header { - h, err := MakeHeader(&types.Header{}) - if err != nil { - panic(err) - } return h } diff --git a/internal/test/factory/commit.go b/internal/test/factory/commit.go index 92f3da5925..51b22f1b0e 100644 --- a/internal/test/factory/commit.go +++ b/internal/test/factory/commit.go @@ -2,23 +2,26 @@ package factory import ( "context" - "fmt" - "github.com/dashevo/dashd-go/btcjson" - "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/libs/log" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" ) -func MakeCommit(blockID types.BlockID, height int64, round int32, voteSet *types.VoteSet, - validatorSet *types.ValidatorSet, validators []types.PrivValidator, stateID types.StateID) (*types.Commit, error) { - +func MakeCommit( + ctx context.Context, + blockID types.BlockID, + height int64, + round int32, + voteSet *types.VoteSet, + validatorSet *types.ValidatorSet, + validators 
[]types.PrivValidator, + stateID types.StateID, +) (*types.Commit, error) { // all sign for i := 0; i < len(validators); i++ { - proTxHash, err := validators[i].GetProTxHash(context.Background()) + proTxHash, err := validators[i].GetProTxHash(ctx) if err != nil { - return nil, fmt.Errorf("can't get pubkey: %w", err) + return nil, err } vote := &types.Vote{ ValidatorProTxHash: proTxHash, @@ -28,23 +31,19 @@ func MakeCommit(blockID types.BlockID, height int64, round int32, voteSet *types Type: tmproto.PrecommitType, BlockID: blockID, } - _, err = signAddVote(validators[i], vote, voteSet, validatorSet.QuorumType, validatorSet.QuorumHash, stateID) - if err != nil { + + v := vote.ToProto() + + if err := validators[i].SignVote(ctx, voteSet.ChainID(), validatorSet.QuorumType, validatorSet.QuorumHash, v, stateID, nil); err != nil { + return nil, err + } + vote.StateSignature = v.StateSignature + vote.BlockSignature = v.BlockSignature + vote.ExtensionSignature = v.ExtensionSignature + if _, err := voteSet.AddVote(vote); err != nil { return nil, err } } return voteSet.MakeCommit(), nil } - -func signAddVote(privVal types.PrivValidator, vote *types.Vote, voteSet *types.VoteSet, quorumType btcjson.LLMQType, - quorumHash crypto.QuorumHash, stateID types.StateID) (signed bool, err error) { - v := vote.ToProto() - err = privVal.SignVote(context.Background(), voteSet.ChainID(), quorumType, quorumHash, v, stateID, log.TestingLogger()) - if err != nil { - return false, err - } - vote.StateSignature = v.StateSignature - vote.BlockSignature = v.BlockSignature - return voteSet.AddVote(vote) -} diff --git a/internal/test/factory/factory_test.go b/internal/test/factory/factory_test.go index 02c6a0de11..6cdc2aed96 100644 --- a/internal/test/factory/factory_test.go +++ b/internal/test/factory/factory_test.go @@ -3,15 +3,13 @@ package factory import ( "testing" - "github.com/stretchr/testify/assert" "github.com/tendermint/tendermint/types" ) func TestMakeHeader(t *testing.T) { - _, err := MakeHeader(&types.Header{}) - assert.NoError(t, err) + MakeHeader(t, &types.Header{}) } func TestRandomNodeID(t *testing.T) { - assert.NotPanics(t, func() { RandomNodeID() }) + RandomNodeID(t) } diff --git a/internal/test/factory/genesis.go b/internal/test/factory/genesis.go index 48500584af..a90d6641e0 100644 --- a/internal/test/factory/genesis.go +++ b/internal/test/factory/genesis.go @@ -14,6 +14,7 @@ func RandGenesisDoc( cfg *config.Config, numValidators int, initialHeight int64, + consensusParams *types.ConsensusParams, ) (*types.GenesisDoc, []types.PrivValidator) { validators := make([]types.GenesisValidator, 0, numValidators) privValidators := make([]types.PrivValidator, 0, numValidators) @@ -42,10 +43,13 @@ func RandGenesisDoc( coreChainLock := types.NewMockChainLock(2) return &types.GenesisDoc{ - GenesisTime: tmtime.Now(), - InitialHeight: initialHeight, - ChainID: cfg.ChainID(), - Validators: validators, + GenesisTime: tmtime.Now(), + InitialHeight: initialHeight, + ChainID: cfg.ChainID(), + Validators: validators, + ConsensusParams: consensusParams, + + // dash fields InitialCoreChainLockedHeight: 1, InitialProposalCoreChainLock: coreChainLock.ToProto(), ThresholdPublicKey: ld.ThresholdPubKey, diff --git a/internal/test/factory/p2p.go b/internal/test/factory/p2p.go index 34c139f588..e2edcba6a4 100644 --- a/internal/test/factory/p2p.go +++ b/internal/test/factory/p2p.go @@ -3,25 +3,30 @@ package factory import ( "encoding/hex" "strings" + "testing" + + "github.com/stretchr/testify/require" 
"github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/types" ) // NodeID returns a valid NodeID based on an inputted string -func NodeID(str string) types.NodeID { +func NodeID(t *testing.T, str string) types.NodeID { + t.Helper() + id, err := types.NewNodeID(strings.Repeat(str, 2*types.NodeIDByteLength)) - if err != nil { - panic(err) - } + require.NoError(t, err) + return id } // RandomNodeID returns a randomly generated valid NodeID -func RandomNodeID() types.NodeID { +func RandomNodeID(t *testing.T) types.NodeID { + t.Helper() + id, err := types.NewNodeID(hex.EncodeToString(rand.Bytes(types.NodeIDByteLength))) - if err != nil { - panic(err) - } + require.NoError(t, err) + return id } diff --git a/internal/test/factory/params.go b/internal/test/factory/params.go new file mode 100644 index 0000000000..dda8e2b3ca --- /dev/null +++ b/internal/test/factory/params.go @@ -0,0 +1,22 @@ +package factory + +import ( + "time" + + "github.com/tendermint/tendermint/types" +) + +// ConsensusParams returns a default set of ConsensusParams that are suitable +// for use in testing +func ConsensusParams() *types.ConsensusParams { + c := types.DefaultConsensusParams() + c.Timeout = types.TimeoutParams{ + Commit: 10 * time.Millisecond, + Propose: 40 * time.Millisecond, + ProposeDelta: 1 * time.Millisecond, + Vote: 10 * time.Millisecond, + VoteDelta: 1 * time.Millisecond, + BypassCommitTimeout: true, + } + return c +} diff --git a/internal/test/factory/tx.go b/internal/test/factory/tx.go index c97aeefc96..725f3c720d 100644 --- a/internal/test/factory/tx.go +++ b/internal/test/factory/tx.go @@ -2,15 +2,10 @@ package factory import "github.com/tendermint/tendermint/types" -// MakeTxs is a helper function to generate mock transactions by given the block height -// and the transaction numbers. 
-func MakeTxs(height int64, num int) (txs []types.Tx) { - for i := 0; i < num; i++ { - txs = append(txs, types.Tx([]byte{byte(height), byte(i)})) +func MakeNTxs(height, n int64) []types.Tx { + txs := make([]types.Tx, n) + for i := range txs { + txs[i] = types.Tx([]byte{byte(height), byte(i / 256), byte(i % 256)}) } return txs } - -func MakeTenTxs(height int64) (txs []types.Tx) { - return MakeTxs(height, 10) -} diff --git a/internal/test/factory/vote.go b/internal/test/factory/vote.go index 263adc2285..c07968cf51 100644 --- a/internal/test/factory/vote.go +++ b/internal/test/factory/vote.go @@ -8,6 +8,7 @@ import ( ) func MakeVote( + ctx context.Context, val types.PrivValidator, valSet *types.ValidatorSet, chainID string, @@ -18,10 +19,11 @@ func MakeVote( blockID types.BlockID, stateID types.StateID, ) (*types.Vote, error) { - proTxHash, err := val.GetProTxHash(context.Background()) + proTxHash, err := val.GetProTxHash(ctx) if err != nil { return nil, err } + v := &types.Vote{ ValidatorProTxHash: proTxHash, ValidatorIndex: valIndex, @@ -33,12 +35,12 @@ func MakeVote( vpb := v.ToProto() - if err := val.SignVote(context.Background(), chainID, valSet.QuorumType, valSet.QuorumHash, vpb, stateID, nil); err != nil { + if err := val.SignVote(ctx, chainID, valSet.QuorumType, valSet.QuorumHash, vpb, stateID, nil); err != nil { return nil, err } v.BlockSignature = vpb.BlockSignature v.StateSignature = vpb.StateSignature - + v.ExtensionSignature = vpb.ExtensionSignature return v, nil } diff --git a/libs/bits/bit_array.go b/libs/bits/bit_array.go index acf5b40c13..ba7e968d5e 100644 --- a/libs/bits/bit_array.go +++ b/libs/bits/bit_array.go @@ -5,19 +5,18 @@ import ( "errors" "fmt" "math" - mrand "math/rand" + "math/rand" "regexp" "strings" + "sync" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" tmmath "github.com/tendermint/tendermint/libs/math" - tmrand "github.com/tendermint/tendermint/libs/rand" tmprotobits "github.com/tendermint/tendermint/proto/tendermint/libs/bits" ) // BitArray is a thread-safe implementation of a bit array. type BitArray struct { - mtx tmsync.RWMutex + mtx sync.RWMutex Bits int `json:"bits"` // NOTE: persisted via reflect, must be exported Elems []uint64 `json:"elems"` // NOTE: persisted via reflect, must be exported } @@ -25,14 +24,24 @@ type BitArray struct { // NewBitArray returns a new bit array. // It returns nil if the number of bits is zero. func NewBitArray(bits int) *BitArray { - // Reseed non-deterministically. - tmrand.Reseed() if bits <= 0 { return nil } - return &BitArray{ - Bits: bits, - Elems: make([]uint64, numElems(bits)), + bA := &BitArray{} + bA.reset(bits) + return bA +} + +// reset changes size of BitArray to `bits` and re-allocates (zeroed) data buffer +func (bA *BitArray) reset(bits int) { + bA.mtx.Lock() + defer bA.mtx.Unlock() + + bA.Bits = bits + if bits == 0 { + bA.Elems = nil + } else { + bA.Elems = make([]uint64, numElems(bits)) } } @@ -63,7 +72,7 @@ func (bA *BitArray) getIndex(i int) bool { } // SetIndex sets the bit at index i within the bit array. -// The behavior is undefined if i >= bA.Bits +// This method returns false if i is out of range of the BitArray. 
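// Editor's note: with the setIndex hunk below, out-of-range writes (including
// negative indexes) are reported through the boolean return value instead of
// being undefined behavior. A short usage sketch against the API in this file:
//
//	ba := NewBitArray(8)
//	ok := ba.SetIndex(3, true) // true: index is in range
//	_ = ba.SetIndex(-1, true)  // false: negative index is rejected
//	_ = ba.SetIndex(8, true)   // false: one past the last valid index (Bits-1)
//	_ = ok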
func (bA *BitArray) SetIndex(i int, v bool) bool { if bA == nil { return false @@ -74,7 +83,7 @@ } func (bA *BitArray) setIndex(i int, v bool) bool { - if i >= bA.Bits { + if i < 0 || i >= bA.Bits { return false } if v { @@ -253,8 +262,14 @@ func (bA *BitArray) PickRandom() (int, bool) { if len(trueIndices) == 0 { // no bits set to true return 0, false } + + // NOTE: using the default math/rand might result in some + // amount of determinism here. It would be possible to use + // rand.New(rand.NewSource(time.Now().Unix())).Intn() to + // counteract this possibility if it proved to be material. + // // nolint:gosec // G404: Use of weak random number generator - return trueIndices[mrand.Intn(len(trueIndices))], true + return trueIndices[rand.Intn(len(trueIndices))], true } func (bA *BitArray) getTrueIndices() []int { @@ -404,8 +419,7 @@ func (bA *BitArray) UnmarshalJSON(bz []byte) error { if b == "null" { // This is required e.g. for encoding/json when decoding // into a pointer with pre-allocated BitArray. - bA.Bits = 0 - bA.Elems = nil + bA.reset(0) return nil } @@ -415,16 +429,15 @@ return fmt.Errorf("bitArray in JSON should be a string of format %q but got %s", bitArrayJSONRegexp.String(), b) } bits := match[1] - - // Construct new BitArray and copy over. numBits := len(bits) - bA2 := NewBitArray(numBits) + + bA.reset(numBits) for i := 0; i < numBits; i++ { if bits[i] == 'x' { - bA2.SetIndex(i, true) + bA.SetIndex(i, true) } } - *bA = *bA2 //nolint:govet + return nil } diff --git a/libs/bits/bit_array_test.go b/libs/bits/bit_array_test.go index 96f2e2257f..e4bf45c387 100644 --- a/libs/bits/bit_array_test.go +++ b/libs/bits/bit_array_test.go @@ -3,7 +3,6 @@ package bits import ( "bytes" "encoding/json" - "fmt" "math" "testing" @@ -14,25 +13,25 @@ import ( tmprotobits "github.com/tendermint/tendermint/proto/tendermint/libs/bits" ) -func randBitArray(bits int) (*BitArray, []byte) { +func randBitArray(bits int) *BitArray { src := tmrand.Bytes((bits + 7) / 8) bA := NewBitArray(bits) for i := 0; i < len(src); i++ { for j := 0; j < 8; j++ { if i*8+j >= bits { - return bA, src + return bA } setBit := src[i]&(1<<uint(j)) > 0 bA.SetIndex(i*8+j, setBit) } } - return bA, src + return bA } func TestAnd(t *testing.T) { - bA1, _ := randBitArray(51) - bA2, _ := randBitArray(31) + bA1 := randBitArray(51) + bA2 := randBitArray(31) bA3 := bA1.And(bA2) var bNil *BitArray @@ -55,9 +54,8 @@ } func TestOr(t *testing.T) { - - bA1, _ := randBitArray(51) - bA2, _ := randBitArray(31) + bA1 := randBitArray(51) + bA2 := randBitArray(31) bA3 := bA1.Or(bA2) bNil := (*BitArray)(nil) @@ -99,11 +97,11 @@ func TestSub(t *testing.T) { for _, tc := range testCases { var bA *BitArray err := json.Unmarshal([]byte(tc.initBA), &bA) - require.Nil(t, err) + require.NoError(t, err) var o *BitArray err = json.Unmarshal([]byte(tc.subtractingBA), &o) - require.Nil(t, err) + require.NoError(t, err) got, _ := json.Marshal(bA.Sub(o)) require.Equal( @@ -149,9 +147,8 @@ func TestBytes(t *testing.T) { bA := NewBitArray(4) bA.SetIndex(0, true) check := func(bA *BitArray, bz []byte) { - if !bytes.Equal(bA.Bytes(), bz) { - panic(fmt.Sprintf("Expected %X but got %X", bz, bA.Bytes())) - } + require.True(t, bytes.Equal(bA.Bytes(), bz), + "Expected %X but got %X", bz, bA.Bytes()) } check(bA, []byte{0x01}) bA.SetIndex(3, true) @@ -172,6 +169,8 @@ check(bA, []byte{0x80, 0x01}) bA.SetIndex(9, 
true) check(bA, []byte{0x80, 0x03}) + + require.False(t, bA.SetIndex(-1, true)) } func TestEmptyFull(t *testing.T) { @@ -191,10 +190,7 @@ func TestEmptyFull(t *testing.T) { } func TestUpdateNeverPanics(t *testing.T) { - newRandBitArray := func(n int) *BitArray { - ba, _ := randBitArray(n) - return ba - } + newRandBitArray := func(n int) *BitArray { return randBitArray(n) } pairs := []struct { a, b *BitArray }{ diff --git a/libs/bytes/bytes.go b/libs/bytes/bytes.go index f1a441bd47..d2a5e1a71f 100644 --- a/libs/bytes/bytes.go +++ b/libs/bytes/bytes.go @@ -1,21 +1,16 @@ package bytes import ( - "bytes" + "encoding/base64" "encoding/hex" - "encoding/json" "fmt" "strings" ) -// HexBytes enables HEX-encoding for json/encoding. +// HexBytes is a wrapper around []byte that encodes data as hexadecimal strings +// for use in JSON. type HexBytes []byte -var ( - _ json.Marshaler = HexBytes{} - _ json.Unmarshaler = &HexBytes{} -) - // Marshal needed for protobuf compatibility func (bz HexBytes) Marshal() ([]byte, error) { return bz, nil @@ -27,41 +22,30 @@ func (bz *HexBytes) Unmarshal(data []byte) error { return nil } -// MarshalJSON implements the json.Marshaler interface. The encoding is a JSON -// quoted string of hexadecimal digits. -func (bz HexBytes) MarshalJSON() ([]byte, error) { - size := hex.EncodedLen(len(bz)) + 2 // +2 for quotation marks - buf := make([]byte, size) - hex.Encode(buf[1:], []byte(bz)) - buf[0] = '"' - buf[size-1] = '"' - - // Ensure letter digits are capitalized. - for i := 1; i < size-1; i++ { - if buf[i] >= 'a' && buf[i] <= 'f' { - buf[i] = 'A' + (buf[i] - 'a') - } - } - return buf, nil +// MarshalText encodes a HexBytes value as hexadecimal digits. +// This method is used by json.Marshal. +func (bz HexBytes) MarshalText() ([]byte, error) { + enc := hex.EncodeToString([]byte(bz)) + return []byte(strings.ToUpper(enc)), nil } -// UnmarshalJSON implements the json.Umarshaler interface. -func (bz *HexBytes) UnmarshalJSON(data []byte) error { - if bytes.Equal(data, []byte("null")) { +// UnmarshalText handles decoding of HexBytes from JSON strings. +// This method is used by json.Unmarshal. +// It allows decoding of both hex and base64-encoded byte arrays. 
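// Editor's note: switching HexBytes from MarshalJSON/UnmarshalJSON to
// MarshalText/UnmarshalText lets encoding/json handle the quoting, and the
// decoder that follows falls back to base64 when hex decoding fails. A
// round-trip sketch of the behavior defined in this file, written as if from
// within this package:
//
//	var hb HexBytes
//	_ = json.Unmarshal([]byte(`"68656C6C6F"`), &hb) // hex digits decode to "hello"
//	_ = json.Unmarshal([]byte(`"aGVsbG8="`), &hb)   // not valid hex, so base64: "hello"
//	out, _ := json.Marshal(HexBytes("hello"))       // `"68656C6C6F"`: upper-case hex
//	_ = out
//
// Note the ambiguity this buys: an input that happens to be valid hex is always
// treated as hex, never as base64.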
+func (bz *HexBytes) UnmarshalText(data []byte) error { + input := string(data) + if input == "" || input == "null" { return nil } - - if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' { - return fmt.Errorf("invalid hex string: %s", data) - } - - bz2, err := hex.DecodeString(string(data[1 : len(data)-1])) + dec, err := hex.DecodeString(input) if err != nil { - return err - } - - *bz = bz2 + dec, err = base64.StdEncoding.DecodeString(input) + if err != nil { + return err + } + } + *bz = HexBytes(dec) return nil } diff --git a/libs/bytes/bytes_test.go b/libs/bytes/bytes_test.go index 6a9ca7c3d7..fb1200a04c 100644 --- a/libs/bytes/bytes_test.go +++ b/libs/bytes/bytes_test.go @@ -14,12 +14,12 @@ func TestMarshal(t *testing.T) { bz := []byte("hello world") dataB := HexBytes(bz) bz2, err := dataB.Marshal() - assert.Nil(t, err) + assert.NoError(t, err) assert.Equal(t, bz, bz2) var dataB2 HexBytes err = (&dataB2).Unmarshal(bz) - assert.Nil(t, err) + assert.NoError(t, err) assert.Equal(t, dataB, dataB2) } @@ -61,7 +61,7 @@ func TestJSONMarshal(t *testing.T) { t.Fatal(err) } assert.Equal(t, ts2.B1, tc.input) - assert.Equal(t, ts2.B2, HexBytes(tc.input)) + assert.Equal(t, string(ts2.B2), string(tc.input)) }) } } diff --git a/libs/cli/helper.go b/libs/cli/helper.go index 4b87bd60be..6f723ac024 100644 --- a/libs/cli/helper.go +++ b/libs/cli/helper.go @@ -1,30 +1,20 @@ package cli import ( - "bytes" + "context" "fmt" - "io" - "io/ioutil" "os" - "path/filepath" + "runtime" "github.com/spf13/cobra" + "github.com/spf13/viper" ) -// WriteConfigVals writes a toml file with the given values. -// It returns an error if writing was impossible. -func WriteConfigVals(dir string, vals map[string]string) error { - data := "" - for k, v := range vals { - data += fmt.Sprintf("%s = \"%s\"\n", k, v) - } - cfile := filepath.Join(dir, "config.toml") - return ioutil.WriteFile(cfile, []byte(data), 0600) -} - // RunWithArgs executes the given command with the specified command line args // and environmental variables set. It returns any error returned from cmd.Execute() -func RunWithArgs(cmd Executable, args []string, env map[string]string) error { +// +// This is only used in testing. +func RunWithArgs(ctx context.Context, cmd *cobra.Command, args []string, env map[string]string) error { oargs := os.Args oenv := map[string]string{} // defer returns the environment back to normal @@ -47,82 +37,24 @@ func RunWithArgs(cmd Executable, args []string, env map[string]string) error { } // and finally run the command - return cmd.Execute() + return RunWithTrace(ctx, cmd) } -// RunCaptureWithArgs executes the given command with the specified command -// line args and environmental variables set. 
It returns string fields -// representing output written to stdout and stderr, additionally any error -// from cmd.Execute() is also returned -func RunCaptureWithArgs(cmd Executable, args []string, env map[string]string) (stdout, stderr string, err error) { - oldout, olderr := os.Stdout, os.Stderr // keep backup of the real stdout - rOut, wOut, _ := os.Pipe() - rErr, wErr, _ := os.Pipe() - os.Stdout, os.Stderr = wOut, wErr - defer func() { - os.Stdout, os.Stderr = oldout, olderr // restoring the real stdout - }() - - // copy the output in a separate goroutine so printing can't block indefinitely - copyStd := func(reader *os.File) *(chan string) { - stdC := make(chan string) - go func() { - var buf bytes.Buffer - // io.Copy will end when we call reader.Close() below - io.Copy(&buf, reader) //nolint:errcheck //ignore error - stdC <- buf.String() - }() - return &stdC - } - outC := copyStd(rOut) - errC := copyStd(rErr) - - // now run the command - err = RunWithArgs(cmd, args, env) - - // and grab the stdout to return - wOut.Close() - wErr.Close() - stdout = <-*outC - stderr = <-*errC - return stdout, stderr, err -} - -// NewCompletionCmd returns a cobra.Command that generates bash and zsh -// completion scripts for the given root command. If hidden is true, the -// command will not show up in the root command's list of available commands. -func NewCompletionCmd(rootCmd *cobra.Command, hidden bool) *cobra.Command { - flagZsh := "zsh" - cmd := &cobra.Command{ - Use: "completion", - Short: "Generate shell completion scripts", - Long: fmt.Sprintf(`Generate Bash and Zsh completion scripts and print them to STDOUT. - -Once saved to file, a completion script can be loaded in the shell's -current session as shown: - - $ . <(%s completion) - -To configure your bash shell to load completions for each session add to -your $HOME/.bashrc or $HOME/.profile the following instruction: +func RunWithTrace(ctx context.Context, cmd *cobra.Command) error { + cmd.SilenceUsage = true + cmd.SilenceErrors = true + + if err := cmd.ExecuteContext(ctx); err != nil { + if viper.GetBool(TraceFlag) { + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + fmt.Fprintf(os.Stderr, "ERROR: %v\n%s\n", err, buf) + } else { + fmt.Fprintf(os.Stderr, "ERROR: %v\n", err) + } - . 
<(%s completion) -`, rootCmd.Use, rootCmd.Use), - RunE: func(cmd *cobra.Command, _ []string) error { - zsh, err := cmd.Flags().GetBool(flagZsh) - if err != nil { - return err - } - if zsh { - return rootCmd.GenZshCompletion(cmd.OutOrStdout()) - } - return rootCmd.GenBashCompletion(cmd.OutOrStdout()) - }, - Hidden: hidden, - Args: cobra.NoArgs, + return err } - - cmd.Flags().Bool(flagZsh, false, "Generate Zsh completion script") - - return cmd + return nil } diff --git a/libs/cli/setup.go b/libs/cli/setup.go index e4955dcf41..54ea90358e 100644 --- a/libs/cli/setup.go +++ b/libs/cli/setup.go @@ -1,10 +1,8 @@ package cli import ( - "fmt" "os" "path/filepath" - "runtime" "strings" "github.com/spf13/cobra" @@ -12,51 +10,27 @@ import ( ) const ( - HomeFlag = "home" - TraceFlag = "trace" - OutputFlag = "output" - EncodingFlag = "encoding" + HomeFlag = "home" + TraceFlag = "trace" + OutputFlag = "output" // used in the cli ) -// Executable is the minimal interface to *corba.Command, so we can -// wrap if desired before the test -type Executable interface { - Execute() error -} - // PrepareBaseCmd is meant for tendermint and other servers -func PrepareBaseCmd(cmd *cobra.Command, envPrefix, defaultHome string) Executor { - cobra.OnInitialize(func() { initEnv(envPrefix) }) +func PrepareBaseCmd(cmd *cobra.Command, envPrefix, defaultHome string) *cobra.Command { + // the primary caller of this command is in the SDK and + // returning the cobra.Command object avoids breaking that + // code. In the long term, the SDK could avoid this entirely. + cobra.OnInitialize(func() { InitEnv(envPrefix) }) cmd.PersistentFlags().StringP(HomeFlag, "", defaultHome, "directory for config and data") cmd.PersistentFlags().Bool(TraceFlag, false, "print out full stack trace on errors") - cmd.PersistentPreRunE = concatCobraCmdFuncs(bindFlagsLoadViper, cmd.PersistentPreRunE) - return Executor{cmd, os.Exit} -} - -// PrepareMainCmd is meant for client side libs that want some more flags -// -// This adds --encoding (hex, btc, base64) and --output (text, json) to -// the command. These only really make sense in interactive commands. -func PrepareMainCmd(cmd *cobra.Command, envPrefix, defaultHome string) Executor { - cmd.PersistentFlags().StringP(EncodingFlag, "e", "hex", "Binary encoding (hex|b64|btc)") - cmd.PersistentFlags().StringP(OutputFlag, "o", "text", "Output format (text|json)") - cmd.PersistentPreRunE = concatCobraCmdFuncs(validateOutput, cmd.PersistentPreRunE) - return PrepareBaseCmd(cmd, envPrefix, defaultHome) -} - -// initEnv sets to use ENV variables if set. -func initEnv(prefix string) { - copyEnvVars(prefix) - - // env variables with TM prefix (eg. TM_ROOT) - viper.SetEnvPrefix(prefix) - viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_", "-", "_")) - viper.AutomaticEnv() + cmd.PersistentPreRunE = concatCobraCmdFuncs(BindFlagsLoadViper, cmd.PersistentPreRunE) + return cmd } -// This copies all variables like TMROOT to TM_ROOT, -// so we can support both formats for the user -func copyEnvVars(prefix string) { +// InitEnv sets to use ENV variables if set. 
+func InitEnv(prefix string) { + // This copies all variables like TMROOT to TM_ROOT, + // so we can support both formats for the user prefix = strings.ToUpper(prefix) ps := prefix + "_" for _, e := range os.Environ() { @@ -69,42 +43,11 @@ func copyEnvVars(prefix string) { } } } -} - -// Executor wraps the cobra Command with a nicer Execute method -type Executor struct { - *cobra.Command - Exit func(int) // this is os.Exit by default, override in tests -} -type ExitCoder interface { - ExitCode() int -} - -// execute adds all child commands to the root command sets flags appropriately. -// This is called by main.main(). It only needs to happen once to the rootCmd. -func (e Executor) Execute() error { - e.SilenceUsage = true - e.SilenceErrors = true - err := e.Command.Execute() - if err != nil { - if viper.GetBool(TraceFlag) { - const size = 64 << 10 - buf := make([]byte, size) - buf = buf[:runtime.Stack(buf, false)] - fmt.Fprintf(os.Stderr, "ERROR: %v\n%s\n", err, buf) - } else { - fmt.Fprintf(os.Stderr, "ERROR: %v\n", err) - } - - // return error code 1 by default, can override it with a special error type - exitCode := 1 - if ec, ok := err.(ExitCoder); ok { - exitCode = ec.ExitCode() - } - e.Exit(exitCode) - } - return err + // env variables with TM prefix (eg. TM_ROOT) + viper.SetEnvPrefix(prefix) + viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_", "-", "_")) + viper.AutomaticEnv() } type cobraCmdFunc func(cmd *cobra.Command, args []string) error @@ -125,7 +68,7 @@ func concatCobraCmdFuncs(fs ...cobraCmdFunc) cobraCmdFunc { } // Bind all flags and read the config into viper -func bindFlagsLoadViper(cmd *cobra.Command, args []string) error { +func BindFlagsLoadViper(cmd *cobra.Command, args []string) error { // cmd.Flags() includes flags from this command and all persistent flags from the parent if err := viper.BindPFlags(cmd.Flags()); err != nil { return err @@ -147,14 +90,3 @@ func bindFlagsLoadViper(cmd *cobra.Command, args []string) error { } return nil } - -func validateOutput(cmd *cobra.Command, args []string) error { - // validate output format - output := viper.GetString(OutputFlag) - switch output { - case "text", "json": - default: - return fmt.Errorf("unsupported output format: %s", output) - } - return nil -} diff --git a/libs/cli/setup_test.go b/libs/cli/setup_test.go index 0cb3223446..9198485ef2 100644 --- a/libs/cli/setup_test.go +++ b/libs/cli/setup_test.go @@ -1,8 +1,12 @@ package cli import ( + "bytes" + "context" "fmt" - "io/ioutil" + "io" + "os" + "path/filepath" "strconv" "strings" "testing" @@ -14,6 +18,9 @@ import ( ) func TestSetupEnv(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cases := []struct { args []string env map[string]string @@ -44,31 +51,37 @@ func TestSetupEnv(t *testing.T) { } demo.Flags().String("foobar", "", "Some test value from config") cmd := PrepareBaseCmd(demo, "DEMO", "/qwerty/asdfgh") // some missing dir.. - cmd.Exit = func(int) {} viper.Reset() args := append([]string{cmd.Use}, tc.args...) - err := RunWithArgs(cmd, args, tc.env) - require.Nil(t, err, i) + err := RunWithArgs(ctx, cmd, args, tc.env) + require.NoError(t, err, i) assert.Equal(t, tc.expected, foo, i) } } -func tempDir() string { - cdir, err := ioutil.TempDir("", "test-cli") - if err != nil { - panic(err) +// writeConfigVals writes a toml file with the given values. +// It returns an error if writing was impossible. 
+func writeConfigVals(dir string, vals map[string]string) error { + lines := make([]string, 0, len(vals)) + for k, v := range vals { + lines = append(lines, fmt.Sprintf("%s = %q", k, v)) } - return cdir + data := strings.Join(lines, "\n") + cfile := filepath.Join(dir, "config.toml") + return os.WriteFile(cfile, []byte(data), 0600) } func TestSetupConfig(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // we pre-create two config files we can refer to in the rest of // the test cases. cval1 := "fubble" - conf1 := tempDir() - err := WriteConfigVals(conf1, map[string]string{"boo": cval1}) - require.Nil(t, err) + conf1 := t.TempDir() + err := writeConfigVals(conf1, map[string]string{"boo": cval1}) + require.NoError(t, err) cases := []struct { args []string @@ -104,12 +117,11 @@ func TestSetupConfig(t *testing.T) { boo.Flags().String("boo", "", "Some test value from config") boo.Flags().String("two-words", "", "Check out env handling -") cmd := PrepareBaseCmd(boo, "RD", "/qwerty/asdfgh") // some missing dir... - cmd.Exit = func(int) {} viper.Reset() args := append([]string{cmd.Use}, tc.args...) - err := RunWithArgs(cmd, args, tc.env) - require.Nil(t, err, i) + err := RunWithArgs(ctx, cmd, args, tc.env) + require.NoError(t, err, i) assert.Equal(t, tc.expected, foo, i) assert.Equal(t, tc.expectedTwo, two, i) } @@ -122,16 +134,19 @@ type DemoConfig struct { } func TestSetupUnmarshal(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // we pre-create two config files we can refer to in the rest of // the test cases. cval1, cval2 := "someone", "else" - conf1 := tempDir() - err := WriteConfigVals(conf1, map[string]string{"name": cval1}) - require.Nil(t, err) + conf1 := t.TempDir() + err := writeConfigVals(conf1, map[string]string{"name": cval1}) + require.NoError(t, err) // even with some ignored fields, should be no problem - conf2 := tempDir() - err = WriteConfigVals(conf2, map[string]string{"name": cval2, "foo": "bar"}) - require.Nil(t, err) + conf2 := t.TempDir() + err = writeConfigVals(conf2, map[string]string{"name": cval2, "foo": "bar"}) + require.NoError(t, err) // unused is not declared on a flag and remains from base base := DemoConfig{ @@ -183,17 +198,19 @@ func TestSetupUnmarshal(t *testing.T) { // from the default config here marsh.Flags().Int("age", base.Age, "Some test value from config") cmd := PrepareBaseCmd(marsh, "MR", "/qwerty/asdfgh") // some missing dir... - cmd.Exit = func(int) {} viper.Reset() args := append([]string{cmd.Use}, tc.args...) - err := RunWithArgs(cmd, args, tc.env) - require.Nil(t, err, i) + err := RunWithArgs(ctx, cmd, args, tc.env) + require.NoError(t, err, i) assert.Equal(t, tc.expected, cfg, i) } } func TestSetupTrace(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cases := []struct { args []string env map[string]string @@ -216,18 +233,16 @@ func TestSetupTrace(t *testing.T) { }, } cmd := PrepareBaseCmd(trace, "DBG", "/qwerty/asdfgh") // some missing dir.. - cmd.Exit = func(int) {} viper.Reset() args := append([]string{cmd.Use}, tc.args...) 
- stdout, stderr, err := RunCaptureWithArgs(cmd, args, tc.env) - require.NotNil(t, err, i) + stdout, stderr, err := runCaptureWithArgs(ctx, cmd, args, tc.env) + require.Error(t, err, i) require.Equal(t, "", stdout, i) require.NotEqual(t, "", stderr, i) msg := strings.Split(stderr, "\n") desired := fmt.Sprintf("ERROR: %s", tc.expected) - assert.Equal(t, desired, msg[0], i) - t.Log(msg) + assert.Equal(t, desired, msg[0], i, msg) if tc.long && assert.True(t, len(msg) > 2, i) { // the next line starts the stack trace... assert.Contains(t, stderr, "TestSetupTrace", i) @@ -235,3 +250,44 @@ func TestSetupTrace(t *testing.T) { } } } + +// runCaptureWithArgs executes the given command with the specified command +// line args and environmental variables set. It returns string fields +// representing output written to stdout and stderr, additionally any error +// from cmd.Execute() is also returned +func runCaptureWithArgs(ctx context.Context, cmd *cobra.Command, args []string, env map[string]string) (stdout, stderr string, err error) { + oldout, olderr := os.Stdout, os.Stderr // keep backup of the real stdout + rOut, wOut, _ := os.Pipe() + rErr, wErr, _ := os.Pipe() + os.Stdout, os.Stderr = wOut, wErr + defer func() { + os.Stdout, os.Stderr = oldout, olderr // restoring the real stdout + }() + + // copy the output in a separate goroutine so printing can't block indefinitely + copyStd := func(reader *os.File) *(chan string) { + stdC := make(chan string) + go func() { + var buf bytes.Buffer + // io.Copy will end when we call reader.Close() below + io.Copy(&buf, reader) //nolint:errcheck //ignore error + select { + case <-cmd.Context().Done(): + case stdC <- buf.String(): + } + }() + return &stdC + } + outC := copyStd(rOut) + errC := copyStd(rErr) + + // now run the command + err = RunWithArgs(ctx, cmd, args, env) + + // and grab the stdout to return + wOut.Close() + wErr.Close() + stdout = <-*outC + stderr = <-*errC + return stdout, stderr, err +} diff --git a/libs/cmap/cmap.go b/libs/cmap/cmap.go deleted file mode 100644 index 5aa82e807c..0000000000 --- a/libs/cmap/cmap.go +++ /dev/null @@ -1,91 +0,0 @@ -package cmap - -import ( - tmsync "github.com/tendermint/tendermint/internal/libs/sync" -) - -// CMap is a goroutine-safe map -type CMap struct { - m map[string]interface{} - l tmsync.Mutex -} - -func NewCMap() *CMap { - return &CMap{ - m: make(map[string]interface{}), - } -} - -func (cm *CMap) Set(key string, value interface{}) { - cm.l.Lock() - cm.m[key] = value - cm.l.Unlock() -} - -// GetOrSet returns the existing value if present. Othewise, it stores `newValue` and returns it. 
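// Editor's note: with libs/cmap deleted here, the closest standard-library
// replacement for CMap.GetOrSet is sync.Map.LoadOrStore, which has the same
// read-or-insert semantics the deleted tests exercised. A sketch (not part of
// this patch) for any caller migrating off CMap:
//
//	var m sync.Map
//	v, loaded := m.LoadOrStore("key", 1000) // stored: v == 1000, loaded == false
//	v, loaded = m.LoadOrStore("key", 2000)  // already present: v == 1000, loaded == true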
-func (cm *CMap) GetOrSet(key string, newValue interface{}) (value interface{}, alreadyExists bool) { - - cm.l.Lock() - defer cm.l.Unlock() - - if v, ok := cm.m[key]; ok { - return v, true - } - - cm.m[key] = newValue - return newValue, false -} - -func (cm *CMap) Get(key string) interface{} { - cm.l.Lock() - val := cm.m[key] - cm.l.Unlock() - return val -} - -func (cm *CMap) Has(key string) bool { - cm.l.Lock() - _, ok := cm.m[key] - cm.l.Unlock() - return ok -} - -func (cm *CMap) Delete(key string) { - cm.l.Lock() - delete(cm.m, key) - cm.l.Unlock() -} - -func (cm *CMap) Size() int { - cm.l.Lock() - size := len(cm.m) - cm.l.Unlock() - return size -} - -func (cm *CMap) Clear() { - cm.l.Lock() - cm.m = make(map[string]interface{}) - cm.l.Unlock() -} - -func (cm *CMap) Keys() []string { - cm.l.Lock() - - keys := make([]string, 0, len(cm.m)) - for k := range cm.m { - keys = append(keys, k) - } - cm.l.Unlock() - return keys -} - -func (cm *CMap) Values() []interface{} { - cm.l.Lock() - items := make([]interface{}, 0, len(cm.m)) - for _, v := range cm.m { - items = append(items, v) - } - cm.l.Unlock() - return items -} diff --git a/libs/cmap/cmap_test.go b/libs/cmap/cmap_test.go deleted file mode 100644 index 68a052bdb7..0000000000 --- a/libs/cmap/cmap_test.go +++ /dev/null @@ -1,113 +0,0 @@ -package cmap - -import ( - "fmt" - "strings" - "sync" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestIterateKeysWithValues(t *testing.T) { - cmap := NewCMap() - - for i := 1; i <= 10; i++ { - cmap.Set(fmt.Sprintf("key%d", i), fmt.Sprintf("value%d", i)) - } - - // Testing size - assert.Equal(t, 10, cmap.Size()) - assert.Equal(t, 10, len(cmap.Keys())) - assert.Equal(t, 10, len(cmap.Values())) - - // Iterating Keys, checking for matching Value - for _, key := range cmap.Keys() { - val := strings.ReplaceAll(key, "key", "value") - assert.Equal(t, val, cmap.Get(key)) - } - - // Test if all keys are within []Keys() - keys := cmap.Keys() - for i := 1; i <= 10; i++ { - assert.Contains(t, keys, fmt.Sprintf("key%d", i), "cmap.Keys() should contain key") - } - - // Delete 1 Key - cmap.Delete("key1") - - assert.NotEqual( - t, - len(keys), - len(cmap.Keys()), - "[]keys and []Keys() should not be equal, they are copies, one item was removed", - ) -} - -func TestContains(t *testing.T) { - cmap := NewCMap() - - cmap.Set("key1", "value1") - - // Test for known values - assert.True(t, cmap.Has("key1")) - assert.Equal(t, "value1", cmap.Get("key1")) - - // Test for unknown values - assert.False(t, cmap.Has("key2")) - assert.Nil(t, cmap.Get("key2")) -} - -func BenchmarkCMapHas(b *testing.B) { - m := NewCMap() - for i := 0; i < 1000; i++ { - m.Set(string(rune(i)), i) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - m.Has(string(rune(i))) - } -} - -func TestCMap_GetOrSet_Parallel(t *testing.T) { - - tests := []struct { - name string - newValue interface{} - parallelism int - }{ - {"test1", "a", 4}, - {"test2", "a", 40}, - {"test3", "a", 1}, - } - - //nolint:scopelint - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - cm := NewCMap() - - wg := sync.WaitGroup{} - wg.Add(tt.parallelism) - for i := 0; i < tt.parallelism; i++ { - go func() { - defer wg.Done() - gotValue, _ := cm.GetOrSet(tt.name, tt.newValue) - assert.EqualValues(t, tt.newValue, gotValue) - }() - } - wg.Wait() - }) - } -} - -func TestCMap_GetOrSet_Exists(t *testing.T) { - cm := NewCMap() - - gotValue, exists := cm.GetOrSet("key", 1000) - assert.False(t, exists) - assert.EqualValues(t, 1000, gotValue) - - gotValue, exists = 
cm.GetOrSet("key", 2000) - assert.True(t, exists) - assert.EqualValues(t, 1000, gotValue) -} diff --git a/libs/events/Makefile b/libs/events/Makefile deleted file mode 100644 index 39292f8196..0000000000 --- a/libs/events/Makefile +++ /dev/null @@ -1,9 +0,0 @@ -.PHONY: docs -REPO:=github.com/dashevo/tenderdash/libs/events - -docs: - @go get github.com/davecheney/godoc2md - godoc2md $(REPO) > README.md - -test: - go test -v ./... diff --git a/libs/events/README.md b/libs/events/README.md deleted file mode 100644 index 9c4d7dc558..0000000000 --- a/libs/events/README.md +++ /dev/null @@ -1,193 +0,0 @@ - - -# events - -`import "github.com/tendermint/tendermint/libs/events"` - -* [Overview](#pkg-overview) -* [Index](#pkg-index) - -## Overview - -Pub-Sub in go with event caching - -## Index - -* [type EventCache](#EventCache) - * [func NewEventCache(evsw Fireable) *EventCache](#NewEventCache) - * [func (evc *EventCache) FireEvent(event string, data EventData)](#EventCache.FireEvent) - * [func (evc *EventCache) Flush()](#EventCache.Flush) -* [type EventCallback](#EventCallback) -* [type EventData](#EventData) -* [type EventSwitch](#EventSwitch) - * [func NewEventSwitch() EventSwitch](#NewEventSwitch) -* [type Eventable](#Eventable) -* [type Fireable](#Fireable) - - -### Package files - -[event_cache.go](/src/github.com/tendermint/tendermint/libs/events/event_cache.go) [events.go](/src/github.com/tendermint/tendermint/libs/events/events.go) - - - - - - -## Type [EventCache](/src/target/event_cache.go?s=116:179#L5) - -``` go -type EventCache struct { - // contains filtered or unexported fields -} -``` - -An EventCache buffers events for a Fireable -All events are cached. Filtering happens on Flush - - - - - - - -### func [NewEventCache](/src/target/event_cache.go?s=239:284#L11) - -``` go -func NewEventCache(evsw Fireable) *EventCache -``` - -Create a new EventCache with an EventSwitch as backend - - - - - -### func (\*EventCache) [FireEvent](/src/target/event_cache.go?s=449:511#L24) - -``` go -func (evc *EventCache) FireEvent(event string, data EventData) -``` - -Cache an event to be fired upon finality. - - - - -### func (\*EventCache) [Flush](/src/target/event_cache.go?s=735:765#L31) - -``` go -func (evc *EventCache) Flush() -``` - -Fire events by running evsw.FireEvent on all cached events. Blocks. 
-Clears cached events - - - - -## Type [EventCallback](/src/target/events.go?s=4201:4240#L185) - -``` go -type EventCallback func(data EventData) -``` - - - - - - - - - -## Type [EventData](/src/target/events.go?s=243:294#L14) - -``` go -type EventData interface { -} -``` - -Generic event data can be typed and registered with tendermint/go-amino -via concrete implementation of this interface - - - - - - - - - - -## Type [EventSwitch](/src/target/events.go?s=560:771#L29) - -``` go -type EventSwitch interface { - service.Service - Fireable - - AddListenerForEvent(listenerID, event string, cb EventCallback) - RemoveListenerForEvent(event string, listenerID string) - RemoveListener(listenerID string) -} -``` - - - - - - -### func [NewEventSwitch](/src/target/events.go?s=917:950#L46) - -``` go -func NewEventSwitch() EventSwitch -``` - - - - -## Type [Eventable](/src/target/events.go?s=378:440#L20) - -``` go -type Eventable interface { - SetEventSwitch(evsw EventSwitch) -} -``` - -reactors and other modules should export -this interface to become eventable - - - - - - - - - - -## Type [Fireable](/src/target/events.go?s=490:558#L25) - -``` go -type Fireable interface { - FireEvent(event string, data EventData) -} -``` - -an event switch or cache implements fireable - - - - - - - - - - - - - - -- - - -Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md) diff --git a/libs/events/event_cache.go b/libs/events/event_cache.go deleted file mode 100644 index f508e873da..0000000000 --- a/libs/events/event_cache.go +++ /dev/null @@ -1,37 +0,0 @@ -package events - -// An EventCache buffers events for a Fireable -// All events are cached. Filtering happens on Flush -type EventCache struct { - evsw Fireable - events []eventInfo -} - -// Create a new EventCache with an EventSwitch as backend -func NewEventCache(evsw Fireable) *EventCache { - return &EventCache{ - evsw: evsw, - } -} - -// a cached event -type eventInfo struct { - event string - data EventData -} - -// Cache an event to be fired upon finality. -func (evc *EventCache) FireEvent(event string, data EventData) { - // append to list (go will grow our backing array exponentially) - evc.events = append(evc.events, eventInfo{event, data}) -} - -// Fire events by running evsw.FireEvent on all cached events. Blocks. 
-// Clears cached events -func (evc *EventCache) Flush() { - for _, ei := range evc.events { - evc.evsw.FireEvent(ei.event, ei.data) - } - // Clear the buffer, since we only add to it with append it's safe to just set it to nil and maybe safe an allocation - evc.events = nil -} diff --git a/libs/events/event_cache_test.go b/libs/events/event_cache_test.go deleted file mode 100644 index d6199bc800..0000000000 --- a/libs/events/event_cache_test.go +++ /dev/null @@ -1,41 +0,0 @@ -package events - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestEventCache_Flush(t *testing.T) { - evsw := NewEventSwitch() - err := evsw.Start() - require.NoError(t, err) - - err = evsw.AddListenerForEvent("nothingness", "", func(data EventData) { - // Check we are not initializing an empty buffer full of zeroed eventInfos in the EventCache - require.FailNow(t, "We should never receive a message on this switch since none are fired") - }) - require.NoError(t, err) - - evc := NewEventCache(evsw) - evc.Flush() - // Check after reset - evc.Flush() - fail := true - pass := false - err = evsw.AddListenerForEvent("somethingness", "something", func(data EventData) { - if fail { - require.FailNow(t, "Shouldn't see a message until flushed") - } - pass = true - }) - require.NoError(t, err) - - evc.FireEvent("something", struct{ int }{1}) - evc.FireEvent("something", struct{ int }{2}) - evc.FireEvent("something", struct{ int }{3}) - fail = false - evc.Flush() - assert.True(t, pass) -} diff --git a/libs/events/events.go b/libs/events/events.go index 146a9cfa7c..1b8db09c43 100644 --- a/libs/events/events.go +++ b/libs/events/events.go @@ -2,22 +2,9 @@ package events import ( - "fmt" - - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/libs/service" + "sync" ) -// ErrListenerWasRemoved is returned by AddEvent if the listener was removed. -type ErrListenerWasRemoved struct { - listenerID string -} - -// Error implements the error interface. -func (e ErrListenerWasRemoved) Error() string { - return fmt.Sprintf("listener #%s was removed", e.listenerID) -} - // EventData is a generic event data can be typed and registered with // tendermint/go-amino via concrete implementation of this interface. type EventData interface{} @@ -43,37 +30,22 @@ type Fireable interface { // They can be removed by calling either RemoveListenerForEvent or // RemoveListener (for all events). type EventSwitch interface { - service.Service Fireable - AddListenerForEvent(listenerID, eventValue string, cb EventCallback) error - RemoveListenerForEvent(event string, listenerID string) - RemoveListener(listenerID string) } type eventSwitch struct { - service.BaseService - - mtx tmsync.RWMutex + mtx sync.RWMutex eventCells map[string]*eventCell - listeners map[string]*eventListener } func NewEventSwitch() EventSwitch { evsw := &eventSwitch{ eventCells: make(map[string]*eventCell), - listeners: make(map[string]*eventListener), } - evsw.BaseService = *service.NewBaseService(nil, "EventSwitch", evsw) return evsw } -func (evsw *eventSwitch) OnStart() error { - return nil -} - -func (evsw *eventSwitch) OnStop() {} - func (evsw *eventSwitch) AddListenerForEvent(listenerID, eventValue string, cb EventCallback) error { // Get/Create eventCell and listener. 
evsw.mtx.Lock() @@ -83,69 +55,12 @@ func (evsw *eventSwitch) AddListenerForEvent(listenerID, eventValue string, cb E eventCell = newEventCell() evsw.eventCells[eventValue] = eventCell } - - listener := evsw.listeners[listenerID] - if listener == nil { - listener = newEventListener(listenerID) - evsw.listeners[listenerID] = listener - } - evsw.mtx.Unlock() - if err := listener.AddEvent(eventValue); err != nil { - return err - } - - eventCell.AddListener(listenerID, cb) + eventCell.addListener(listenerID, cb) return nil } -func (evsw *eventSwitch) RemoveListener(listenerID string) { - // Get and remove listener. - evsw.mtx.RLock() - listener := evsw.listeners[listenerID] - evsw.mtx.RUnlock() - if listener == nil { - return - } - - evsw.mtx.Lock() - delete(evsw.listeners, listenerID) - evsw.mtx.Unlock() - - // Remove callback for each event. - listener.SetRemoved() - for _, event := range listener.GetEvents() { - evsw.RemoveListenerForEvent(event, listenerID) - } -} - -func (evsw *eventSwitch) RemoveListenerForEvent(event string, listenerID string) { - // Get eventCell - evsw.mtx.Lock() - eventCell := evsw.eventCells[event] - evsw.mtx.Unlock() - - if eventCell == nil { - return - } - - // Remove listenerID from eventCell - numListeners := eventCell.RemoveListener(listenerID) - - // Maybe garbage collect eventCell. - if numListeners == 0 { - // Lock again and double check. - evsw.mtx.Lock() // OUTER LOCK - eventCell.mtx.Lock() // INNER LOCK - if len(eventCell.listeners) == 0 { - delete(evsw.eventCells, event) - } - eventCell.mtx.Unlock() // INNER LOCK - evsw.mtx.Unlock() // OUTER LOCK - } -} - func (evsw *eventSwitch) FireEvent(event string, data EventData) { // Get the eventCell evsw.mtx.RLock() @@ -157,14 +72,16 @@ func (evsw *eventSwitch) FireEvent(event string, data EventData) { } // Fire event for all listeners in eventCell - eventCell.FireEvent(data) + eventCell.fireEvent(data) } //----------------------------------------------------------------------------- +type EventCallback func(data EventData) error + // eventCell handles keeping track of listener callbacks for a given event. 
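Editor's note on the callback contract introduced just above: `EventCallback` now returns an error, and the switch no longer embeds `service.Service`, so there is no Start/Stop lifecycle. A minimal, self-contained sketch of a caller using the slimmed-down switch — the import path, listener ID, and event name are illustrative assumptions, not taken from this diff:

```go
package main

import (
	"context"
	"fmt"

	"github.com/tendermint/tendermint/libs/events" // adjust to the fork's module path
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	evsw := events.NewEventSwitch() // no Start/Stop: the service wrapper is gone

	// Callbacks now return an error; returning ctx.Err() lets a listener
	// stop cleanly once the caller is done.
	if err := evsw.AddListenerForEvent("printer", "block", func(data events.EventData) error {
		if ctx.Err() != nil {
			return ctx.Err()
		}
		fmt.Println("received:", data)
		return nil
	}); err != nil {
		panic(err)
	}

	// FireEvent delivers synchronously to the callbacks registered for "block".
	evsw.FireEvent("block", "some data")
}
```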
type eventCell struct { - mtx tmsync.RWMutex + mtx sync.RWMutex listeners map[string]EventCallback } @@ -174,21 +91,13 @@ func newEventCell() *eventCell { } } -func (cell *eventCell) AddListener(listenerID string, cb EventCallback) { +func (cell *eventCell) addListener(listenerID string, cb EventCallback) { cell.mtx.Lock() + defer cell.mtx.Unlock() cell.listeners[listenerID] = cb - cell.mtx.Unlock() -} - -func (cell *eventCell) RemoveListener(listenerID string) int { - cell.mtx.Lock() - delete(cell.listeners, listenerID) - numListeners := len(cell.listeners) - cell.mtx.Unlock() - return numListeners } -func (cell *eventCell) FireEvent(data EventData) { +func (cell *eventCell) fireEvent(data EventData) { cell.mtx.RLock() eventCallbacks := make([]EventCallback, 0, len(cell.listeners)) for _, cb := range cell.listeners { @@ -197,53 +106,9 @@ func (cell *eventCell) FireEvent(data EventData) { cell.mtx.RUnlock() for _, cb := range eventCallbacks { - cb(data) - } -} - -//----------------------------------------------------------------------------- - -type EventCallback func(data EventData) - -type eventListener struct { - id string - - mtx tmsync.RWMutex - removed bool - events []string -} - -func newEventListener(id string) *eventListener { - return &eventListener{ - id: id, - removed: false, - events: nil, - } -} - -func (evl *eventListener) AddEvent(event string) error { - evl.mtx.Lock() - - if evl.removed { - evl.mtx.Unlock() - return ErrListenerWasRemoved{listenerID: evl.id} + if err := cb(data); err != nil { + // should we log or abort here? + continue + } } - - evl.events = append(evl.events, event) - evl.mtx.Unlock() - return nil -} - -func (evl *eventListener) GetEvents() []string { - evl.mtx.RLock() - events := make([]string, len(evl.events)) - copy(events, evl.events) - evl.mtx.RUnlock() - return events -} - -func (evl *eventListener) SetRemoved() { - evl.mtx.Lock() - evl.removed = true - evl.mtx.Unlock() } diff --git a/libs/events/events_test.go b/libs/events/events_test.go index 9e21e02351..17f8c56d1e 100644 --- a/libs/events/events_test.go +++ b/libs/events/events_test.go @@ -1,36 +1,34 @@ package events import ( + "context" "fmt" + "math/rand" "testing" "time" - "github.com/stretchr/testify/assert" + "github.com/fortytw2/leaktest" "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/libs/rand" ) // TestAddListenerForEventFireOnce sets up an EventSwitch, subscribes a single // listener to an event, and sends a string "data". func TestAddListenerForEventFireOnce(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + evsw := NewEventSwitch() - err := evsw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := evsw.Stop(); err != nil { - t.Error(err) - } - }) messages := make(chan EventData) - err = evsw.AddListenerForEvent("listener", "event", - func(data EventData) { - // test there's no deadlock if we remove the listener inside a callback - evsw.RemoveListener("listener") - messages <- data - }) - require.NoError(t, err) + require.NoError(t, evsw.AddListenerForEvent("listener", "event", + func(data EventData) error { + select { + case messages <- data: + return nil + case <-ctx.Done(): + return ctx.Err() + } + })) go evsw.FireEvent("event", "data") received := <-messages if received != "data" { @@ -41,28 +39,28 @@ func TestAddListenerForEventFireOnce(t *testing.T) { // TestAddListenerForEventFireMany sets up an EventSwitch, subscribes a single // listener to an event, and sends a thousand integers. 
func TestAddListenerForEventFireMany(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + evsw := NewEventSwitch() - err := evsw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := evsw.Stop(); err != nil { - t.Error(err) - } - }) doneSum := make(chan uint64) doneSending := make(chan uint64) numbers := make(chan uint64, 4) // subscribe one listener for one event - err = evsw.AddListenerForEvent("listener", "event", - func(data EventData) { - numbers <- data.(uint64) - }) - require.NoError(t, err) + require.NoError(t, evsw.AddListenerForEvent("listener", "event", + func(data EventData) error { + select { + case numbers <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } + })) // collect received events go sumReceivedNumbers(numbers, doneSum) // go fire events - go fireEvents(evsw, "event", doneSending, uint64(1)) + go fireEvents(ctx, evsw, "event", doneSending, uint64(1)) checkSum := <-doneSending close(numbers) eventSum := <-doneSum @@ -75,14 +73,12 @@ func TestAddListenerForEventFireMany(t *testing.T) { // listener to three different events and sends a thousand integers for each // of the three events. func TestAddListenerForDifferentEvents(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + t.Cleanup(leaktest.Check(t)) + evsw := NewEventSwitch() - err := evsw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := evsw.Stop(); err != nil { - t.Error(err) - } - }) doneSum := make(chan uint64) doneSending1 := make(chan uint64) @@ -90,27 +86,39 @@ func TestAddListenerForDifferentEvents(t *testing.T) { doneSending3 := make(chan uint64) numbers := make(chan uint64, 4) // subscribe one listener to three events - err = evsw.AddListenerForEvent("listener", "event1", - func(data EventData) { - numbers <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener", "event2", - func(data EventData) { - numbers <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener", "event3", - func(data EventData) { - numbers <- data.(uint64) - }) - require.NoError(t, err) + require.NoError(t, evsw.AddListenerForEvent("listener", "event1", + func(data EventData) error { + select { + case numbers <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } + })) + require.NoError(t, evsw.AddListenerForEvent("listener", "event2", + func(data EventData) error { + select { + case numbers <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } + })) + require.NoError(t, evsw.AddListenerForEvent("listener", "event3", + func(data EventData) error { + select { + case numbers <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } + })) // collect received events go sumReceivedNumbers(numbers, doneSum) // go fire events - go fireEvents(evsw, "event1", doneSending1, uint64(1)) - go fireEvents(evsw, "event2", doneSending2, uint64(1)) - go fireEvents(evsw, "event3", doneSending3, uint64(1)) + go fireEvents(ctx, evsw, "event1", doneSending1, uint64(1)) + go fireEvents(ctx, evsw, "event2", doneSending2, uint64(1)) + go fireEvents(ctx, evsw, "event3", doneSending3, uint64(1)) var checkSum uint64 checkSum += <-doneSending1 checkSum += <-doneSending2 @@ -127,15 +135,12 @@ func TestAddListenerForDifferentEvents(t *testing.T) { // listener to two of those three events, and then sends a thousand integers // for each of the three events. 
func TestAddDifferentListenerForDifferentEvents(t *testing.T) { - evsw := NewEventSwitch() - err := evsw.Start() - require.NoError(t, err) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - t.Cleanup(func() { - if err := evsw.Stop(); err != nil { - t.Error(err) - } - }) + t.Cleanup(leaktest.Check(t)) + + evsw := NewEventSwitch() doneSum1 := make(chan uint64) doneSum2 := make(chan uint64) @@ -145,39 +150,59 @@ func TestAddDifferentListenerForDifferentEvents(t *testing.T) { numbers1 := make(chan uint64, 4) numbers2 := make(chan uint64, 4) // subscribe two listener to three events - err = evsw.AddListenerForEvent("listener1", "event1", - func(data EventData) { - numbers1 <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener1", "event2", - func(data EventData) { - numbers1 <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener1", "event3", - func(data EventData) { - numbers1 <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener2", "event2", - func(data EventData) { - numbers2 <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener2", "event3", - func(data EventData) { - numbers2 <- data.(uint64) - }) - require.NoError(t, err) + require.NoError(t, evsw.AddListenerForEvent("listener1", "event1", + func(data EventData) error { + select { + case numbers1 <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } + })) + require.NoError(t, evsw.AddListenerForEvent("listener1", "event2", + func(data EventData) error { + select { + case numbers1 <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } + })) + require.NoError(t, evsw.AddListenerForEvent("listener1", "event3", + func(data EventData) error { + select { + case numbers1 <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } + })) + require.NoError(t, evsw.AddListenerForEvent("listener2", "event2", + func(data EventData) error { + select { + case numbers2 <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } + })) + require.NoError(t, evsw.AddListenerForEvent("listener2", "event3", + func(data EventData) error { + select { + case numbers2 <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } + })) // collect received events for listener1 go sumReceivedNumbers(numbers1, doneSum1) // collect received events for listener2 go sumReceivedNumbers(numbers2, doneSum2) // go fire events - go fireEvents(evsw, "event1", doneSending1, uint64(1)) - go fireEvents(evsw, "event2", doneSending2, uint64(1001)) - go fireEvents(evsw, "event3", doneSending3, uint64(2001)) + go fireEvents(ctx, evsw, "event1", doneSending1, uint64(1)) + go fireEvents(ctx, evsw, "event2", doneSending2, uint64(1001)) + go fireEvents(ctx, evsw, "event3", doneSending3, uint64(2001)) checkSumEvent1 := <-doneSending1 checkSumEvent2 := <-doneSending2 checkSumEvent3 := <-doneSending3 @@ -193,164 +218,7 @@ func TestAddDifferentListenerForDifferentEvents(t *testing.T) { } } -func TestAddAndRemoveListenerConcurrency(t *testing.T) { - var ( - stopInputEvent = false - roundCount = 2000 - ) - - evsw := NewEventSwitch() - err := evsw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := evsw.Stop(); err != nil { - t.Error(err) - } - }) - - done1 := make(chan struct{}) - done2 := make(chan struct{}) - - // Must be executed concurrently to uncover the data race. - // 1. 
RemoveListener - go func() { - defer close(done1) - for i := 0; i < roundCount; i++ { - evsw.RemoveListener("listener") - } - }() - - // 2. AddListenerForEvent - go func() { - defer close(done2) - for i := 0; i < roundCount; i++ { - index := i - // we explicitly ignore errors here, since the listener will sometimes be removed - // (that's what we're testing) - _ = evsw.AddListenerForEvent("listener", fmt.Sprintf("event%d", index), - func(data EventData) { - t.Errorf("should not run callback for %d.\n", index) - stopInputEvent = true - }) - } - }() - - <-done1 - <-done2 - - evsw.RemoveListener("listener") // remove the last listener - - for i := 0; i < roundCount && !stopInputEvent; i++ { - evsw.FireEvent(fmt.Sprintf("event%d", i), uint64(1001)) - } -} - -// TestAddAndRemoveListener sets up an EventSwitch, subscribes a listener to -// two events, fires a thousand integers for the first event, then unsubscribes -// the listener and fires a thousand integers for the second event. -func TestAddAndRemoveListener(t *testing.T) { - evsw := NewEventSwitch() - err := evsw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := evsw.Stop(); err != nil { - t.Error(err) - } - }) - - doneSum1 := make(chan uint64) - doneSum2 := make(chan uint64) - doneSending1 := make(chan uint64) - doneSending2 := make(chan uint64) - numbers1 := make(chan uint64, 4) - numbers2 := make(chan uint64, 4) - // subscribe two listener to three events - err = evsw.AddListenerForEvent("listener", "event1", - func(data EventData) { - numbers1 <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener", "event2", - func(data EventData) { - numbers2 <- data.(uint64) - }) - require.NoError(t, err) - // collect received events for event1 - go sumReceivedNumbers(numbers1, doneSum1) - // collect received events for event2 - go sumReceivedNumbers(numbers2, doneSum2) - // go fire events - go fireEvents(evsw, "event1", doneSending1, uint64(1)) - checkSumEvent1 := <-doneSending1 - // after sending all event1, unsubscribe for all events - evsw.RemoveListener("listener") - go fireEvents(evsw, "event2", doneSending2, uint64(1001)) - checkSumEvent2 := <-doneSending2 - close(numbers1) - close(numbers2) - eventSum1 := <-doneSum1 - eventSum2 := <-doneSum2 - if checkSumEvent1 != eventSum1 || - // correct value asserted by preceding tests, suffices to be non-zero - checkSumEvent2 == uint64(0) || - eventSum2 != uint64(0) { - t.Errorf("not all messages sent were received or unsubscription did not register.\n") - } -} - -// TestRemoveListener does basic tests on adding and removing -func TestRemoveListener(t *testing.T) { - evsw := NewEventSwitch() - err := evsw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := evsw.Stop(); err != nil { - t.Error(err) - } - }) - - count := 10 - sum1, sum2 := 0, 0 - // add some listeners and make sure they work - err = evsw.AddListenerForEvent("listener", "event1", - func(data EventData) { - sum1++ - }) - require.NoError(t, err) - - err = evsw.AddListenerForEvent("listener", "event2", - func(data EventData) { - sum2++ - }) - require.NoError(t, err) - - for i := 0; i < count; i++ { - evsw.FireEvent("event1", true) - evsw.FireEvent("event2", true) - } - assert.Equal(t, count, sum1) - assert.Equal(t, count, sum2) - - // remove one by event and make sure it is gone - evsw.RemoveListenerForEvent("event2", "listener") - for i := 0; i < count; i++ { - evsw.FireEvent("event1", true) - evsw.FireEvent("event2", true) - } - assert.Equal(t, count*2, sum1) - 
assert.Equal(t, count, sum2) - - // remove the listener entirely and make sure both gone - evsw.RemoveListener("listener") - for i := 0; i < count; i++ { - evsw.FireEvent("event1", true) - evsw.FireEvent("event2", true) - } - assert.Equal(t, count*2, sum1) - assert.Equal(t, count, sum2) -} - -// TestAddAndRemoveListenersAsync sets up an EventSwitch, subscribes two +// TestManageListenersAsync sets up an EventSwitch, subscribes two // listeners to three events, and fires a thousand integers for each event. // These two listeners serve as the baseline validation while other listeners // are randomly subscribed and unsubscribed. @@ -360,15 +228,11 @@ func TestRemoveListener(t *testing.T) { // at that point subscribed to. // NOTE: it is important to run this test with race conditions tracking on, // `go test -race`, to examine for possible race conditions. -func TestRemoveListenersAsync(t *testing.T) { +func TestManageListenersAsync(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + evsw := NewEventSwitch() - err := evsw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := evsw.Stop(); err != nil { - t.Error(err) - } - }) doneSum1 := make(chan uint64) doneSum2 := make(chan uint64) @@ -378,65 +242,80 @@ func TestRemoveListenersAsync(t *testing.T) { numbers1 := make(chan uint64, 4) numbers2 := make(chan uint64, 4) // subscribe two listener to three events - err = evsw.AddListenerForEvent("listener1", "event1", - func(data EventData) { - numbers1 <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener1", "event2", - func(data EventData) { - numbers1 <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener1", "event3", - func(data EventData) { - numbers1 <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener2", "event1", - func(data EventData) { - numbers2 <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener2", "event2", - func(data EventData) { - numbers2 <- data.(uint64) - }) - require.NoError(t, err) - err = evsw.AddListenerForEvent("listener2", "event3", - func(data EventData) { - numbers2 <- data.(uint64) - }) - require.NoError(t, err) + require.NoError(t, evsw.AddListenerForEvent("listener1", "event1", + func(data EventData) error { + select { + case numbers1 <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } + })) + require.NoError(t, evsw.AddListenerForEvent("listener1", "event2", + func(data EventData) error { + select { + case numbers1 <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } + })) + require.NoError(t, evsw.AddListenerForEvent("listener1", "event3", + func(data EventData) error { + select { + case numbers1 <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } + })) + require.NoError(t, evsw.AddListenerForEvent("listener2", "event1", + func(data EventData) error { + select { + case numbers2 <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } + })) + require.NoError(t, evsw.AddListenerForEvent("listener2", "event2", + func(data EventData) error { + select { + case numbers2 <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } + })) + require.NoError(t, evsw.AddListenerForEvent("listener2", "event3", + func(data EventData) error { + select { + case numbers2 <- data.(uint64): + return nil + case <-ctx.Done(): + return ctx.Err() + } + })) // collect received events for
event1 go sumReceivedNumbers(numbers1, doneSum1) // collect received events for event2 go sumReceivedNumbers(numbers2, doneSum2) addListenersStress := func() { - r1 := rand.NewRand() + r1 := rand.New(rand.NewSource(time.Now().Unix())) r1.Seed(time.Now().UnixNano()) for k := uint16(0); k < 400; k++ { listenerNumber := r1.Intn(100) + 3 eventNumber := r1.Intn(3) + 1 go evsw.AddListenerForEvent(fmt.Sprintf("listener%v", listenerNumber), //nolint:errcheck // ignore for tests fmt.Sprintf("event%v", eventNumber), - func(_ EventData) {}) - } - } - removeListenersStress := func() { - r2 := rand.NewRand() - r2.Seed(time.Now().UnixNano()) - for k := uint16(0); k < 80; k++ { - listenerNumber := r2.Intn(100) + 3 - go evsw.RemoveListener(fmt.Sprintf("listener%v", listenerNumber)) + func(EventData) error { return nil }) } } addListenersStress() // go fire events - go fireEvents(evsw, "event1", doneSending1, uint64(1)) - removeListenersStress() - go fireEvents(evsw, "event2", doneSending2, uint64(1001)) - go fireEvents(evsw, "event3", doneSending3, uint64(2001)) + go fireEvents(ctx, evsw, "event1", doneSending1, uint64(1)) + go fireEvents(ctx, evsw, "event2", doneSending2, uint64(1001)) + go fireEvents(ctx, evsw, "event3", doneSending3, uint64(2001)) checkSumEvent1 := <-doneSending1 checkSumEvent2 := <-doneSending2 checkSumEvent3 := <-doneSending3 @@ -475,13 +354,21 @@ func sumReceivedNumbers(numbers, doneSum chan uint64) { // to `offset` + 999. It additionally returns the addition of all integers // sent on `doneChan` for assertion that all events have been sent, and enabling // the test to assert all events have also been received. -func fireEvents(evsw Fireable, event string, doneChan chan uint64, - offset uint64) { +func fireEvents(ctx context.Context, evsw Fireable, event string, doneChan chan uint64, offset uint64) { + defer close(doneChan) + var sentSum uint64 for i := offset; i <= offset+uint64(999); i++ { - sentSum += i + if ctx.Err() != nil { + break + } + evsw.FireEvent(event, i) + sentSum += i + } + + select { + case <-ctx.Done(): + case doneChan <- sentSum: } - doneChan <- sentSum - close(doneChan) } diff --git a/libs/json/decoder.go b/libs/json/decoder.go deleted file mode 100644 index 86ff27d393..0000000000 --- a/libs/json/decoder.go +++ /dev/null @@ -1,278 +0,0 @@ -package json - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "reflect" -) - -// Unmarshal unmarshals JSON into the given value, using Amino-compatible JSON encoding (strings -// for 64-bit numbers, and type wrappers for registered types). -func Unmarshal(bz []byte, v interface{}) error { - return decode(bz, v) -} - -func decode(bz []byte, v interface{}) error { - if len(bz) == 0 { - return errors.New("cannot decode empty bytes") - } - - rv := reflect.ValueOf(v) - if rv.Kind() != reflect.Ptr { - return errors.New("must decode into a pointer") - } - rv = rv.Elem() - - // If this is a registered type, defer to interface decoder regardless of whether the input is - // an interface or a bare value. This retains Amino's behavior, but is inconsistent with - // behavior in structs where an interface field will get the type wrapper while a bare value - // field will not. 
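As background for the decoder being deleted here: the comment above explains that registered types always get the interface treatment, producing Amino-style `{"type":...,"value":...}` wrappers. A sketch of that behavior in use, reconstructed from the package's own (also deleted) test helpers — the `Vehicle`/`Car` fixtures mirror those helpers:

```go
package main

import (
	"fmt"

	tmjson "github.com/tendermint/tendermint/libs/json" // adjust to the fork's module path
)

type Vehicle interface{ Drive() error }

// Car is registered as a pointer, so decoding into a Vehicle yields *Car.
type Car struct{ Wheels int32 }

func (c *Car) Drive() error { return nil }

func init() {
	tmjson.RegisterType(&Car{}, "vehicle/car")
}

func main() {
	var v Vehicle
	bz := []byte(`{"type":"vehicle/car","value":{"Wheels":4}}`)
	if err := tmjson.Unmarshal(bz, &v); err != nil {
		panic(err)
	}
	fmt.Printf("%#v\n", v) // &main.Car{Wheels:4}
}
```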
- if typeRegistry.name(rv.Type()) != "" { - return decodeReflectInterface(bz, rv) - } - - return decodeReflect(bz, rv) -} - -func decodeReflect(bz []byte, rv reflect.Value) error { - if !rv.CanAddr() { - return errors.New("value is not addressable") - } - - // Handle null for slices, interfaces, and pointers - if bytes.Equal(bz, []byte("null")) { - rv.Set(reflect.Zero(rv.Type())) - return nil - } - - // Dereference-and-construct pointers, to handle nested pointers. - for rv.Kind() == reflect.Ptr { - if rv.IsNil() { - rv.Set(reflect.New(rv.Type().Elem())) - } - rv = rv.Elem() - } - - // Times must be UTC and end with Z - if rv.Type() == timeType { - switch { - case len(bz) < 2 || bz[0] != '"' || bz[len(bz)-1] != '"': - return fmt.Errorf("JSON time must be an RFC3339 string, but got %q", bz) - case bz[len(bz)-2] != 'Z': - return fmt.Errorf("JSON time must be UTC and end with 'Z', but got %q", bz) - } - } - - // If value implements json.Umarshaler, call it. - if rv.Addr().Type().Implements(jsonUnmarshalerType) { - return rv.Addr().Interface().(json.Unmarshaler).UnmarshalJSON(bz) - } - - switch rv.Type().Kind() { - // Decode complex types recursively. - case reflect.Slice, reflect.Array: - return decodeReflectList(bz, rv) - - case reflect.Map: - return decodeReflectMap(bz, rv) - - case reflect.Struct: - return decodeReflectStruct(bz, rv) - - case reflect.Interface: - return decodeReflectInterface(bz, rv) - - // For 64-bit integers, unwrap expected string and defer to stdlib for integer decoding. - case reflect.Int64, reflect.Int, reflect.Uint64, reflect.Uint: - if bz[0] != '"' || bz[len(bz)-1] != '"' { - return fmt.Errorf("invalid 64-bit integer encoding %q, expected string", string(bz)) - } - bz = bz[1 : len(bz)-1] - fallthrough - - // Anything else we defer to the stdlib. - default: - return decodeStdlib(bz, rv) - } -} - -func decodeReflectList(bz []byte, rv reflect.Value) error { - if !rv.CanAddr() { - return errors.New("list value is not addressable") - } - - switch rv.Type().Elem().Kind() { - // Decode base64-encoded bytes using stdlib decoder, via byte slice for arrays. - case reflect.Uint8: - if rv.Type().Kind() == reflect.Array { - var buf []byte - if err := json.Unmarshal(bz, &buf); err != nil { - return err - } - if len(buf) != rv.Len() { - return fmt.Errorf("got %v bytes, expected %v", len(buf), rv.Len()) - } - reflect.Copy(rv, reflect.ValueOf(buf)) - - } else if err := decodeStdlib(bz, rv); err != nil { - return err - } - - // Decode anything else into a raw JSON slice, and decode values recursively. - default: - var rawSlice []json.RawMessage - if err := json.Unmarshal(bz, &rawSlice); err != nil { - return err - } - if rv.Type().Kind() == reflect.Slice { - rv.Set(reflect.MakeSlice(reflect.SliceOf(rv.Type().Elem()), len(rawSlice), len(rawSlice))) - } - if rv.Len() != len(rawSlice) { // arrays of wrong size - return fmt.Errorf("got list of %v elements, expected %v", len(rawSlice), rv.Len()) - } - for i, bz := range rawSlice { - if err := decodeReflect(bz, rv.Index(i)); err != nil { - return err - } - } - } - - // Replace empty slices with nil slices, for Amino compatibility - if rv.Type().Kind() == reflect.Slice && rv.Len() == 0 { - rv.Set(reflect.Zero(rv.Type())) - } - - return nil -} - -func decodeReflectMap(bz []byte, rv reflect.Value) error { - if !rv.CanAddr() { - return errors.New("map value is not addressable") - } - - // Decode into a raw JSON map, using string keys. 
- rawMap := make(map[string]json.RawMessage) - if err := json.Unmarshal(bz, &rawMap); err != nil { - return err - } - if rv.Type().Key().Kind() != reflect.String { - return fmt.Errorf("map keys must be strings, got %v", rv.Type().Key().String()) - } - - // Recursively decode values. - rv.Set(reflect.MakeMapWithSize(rv.Type(), len(rawMap))) - for key, bz := range rawMap { - value := reflect.New(rv.Type().Elem()).Elem() - if err := decodeReflect(bz, value); err != nil { - return err - } - rv.SetMapIndex(reflect.ValueOf(key), value) - } - return nil -} - -func decodeReflectStruct(bz []byte, rv reflect.Value) error { - if !rv.CanAddr() { - return errors.New("struct value is not addressable") - } - sInfo := makeStructInfo(rv.Type()) - - // Decode raw JSON values into a string-keyed map. - rawMap := make(map[string]json.RawMessage) - if err := json.Unmarshal(bz, &rawMap); err != nil { - return err - } - for i, fInfo := range sInfo.fields { - if !fInfo.hidden { - frv := rv.Field(i) - bz := rawMap[fInfo.jsonName] - if len(bz) > 0 { - if err := decodeReflect(bz, frv); err != nil { - return err - } - } else if !fInfo.omitEmpty { - frv.Set(reflect.Zero(frv.Type())) - } - } - } - - return nil -} - -func decodeReflectInterface(bz []byte, rv reflect.Value) error { - if !rv.CanAddr() { - return errors.New("interface value not addressable") - } - - // Decode the interface wrapper. - wrapper := interfaceWrapper{} - if err := json.Unmarshal(bz, &wrapper); err != nil { - return err - } - if wrapper.Type == "" { - return errors.New("interface type cannot be empty") - } - if len(wrapper.Value) == 0 { - return errors.New("interface value cannot be empty") - } - - // Dereference-and-construct pointers, to handle nested pointers. - for rv.Kind() == reflect.Ptr { - if rv.IsNil() { - rv.Set(reflect.New(rv.Type().Elem())) - } - rv = rv.Elem() - } - - // Look up the interface type, and construct a concrete value. - rt, returnPtr := typeRegistry.lookup(wrapper.Type) - if rt == nil { - return fmt.Errorf("unknown type %q", wrapper.Type) - } - - cptr := reflect.New(rt) - crv := cptr.Elem() - if err := decodeReflect(wrapper.Value, crv); err != nil { - return err - } - - // This makes sure interface implementations with pointer receivers (e.g. func (c *Car)) are - // constructed as pointers behind the interface. The types must be registered as pointers with - // RegisterType(). - if rv.Type().Kind() == reflect.Interface && returnPtr { - if !cptr.Type().AssignableTo(rv.Type()) { - return fmt.Errorf("invalid type %q for this value", wrapper.Type) - } - rv.Set(cptr) - } else { - if !crv.Type().AssignableTo(rv.Type()) { - return fmt.Errorf("invalid type %q for this value", wrapper.Type) - } - rv.Set(crv) - } - return nil -} - -func decodeStdlib(bz []byte, rv reflect.Value) error { - if !rv.CanAddr() && rv.Kind() != reflect.Ptr { - return errors.New("value must be addressable or pointer") - } - - // Make sure we are unmarshaling into a pointer. 
- target := rv - if rv.Kind() != reflect.Ptr { - target = reflect.New(rv.Type()) - } - if err := json.Unmarshal(bz, target.Interface()); err != nil { - return err - } - rv.Set(target.Elem()) - return nil -} - -type interfaceWrapper struct { - Type string `json:"type"` - Value json.RawMessage `json:"value"` -} diff --git a/libs/json/decoder_test.go b/libs/json/decoder_test.go deleted file mode 100644 index 41faa10627..0000000000 --- a/libs/json/decoder_test.go +++ /dev/null @@ -1,151 +0,0 @@ -package json_test - -import ( - "reflect" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/libs/json" -) - -func TestUnmarshal(t *testing.T) { - i64Nil := (*int64)(nil) - str := "string" - strPtr := &str - structNil := (*Struct)(nil) - i32 := int32(32) - i64 := int64(64) - - testcases := map[string]struct { - json string - value interface{} - err bool - }{ - "bool true": {"true", true, false}, - "bool false": {"false", false, false}, - "float32": {"3.14", float32(3.14), false}, - "float64": {"3.14", float64(3.14), false}, - "int32": {`32`, int32(32), false}, - "int32 string": {`"32"`, int32(32), true}, - "int32 ptr": {`32`, &i32, false}, - "int64": {`"64"`, int64(64), false}, - "int64 noend": {`"64`, int64(64), true}, - "int64 number": {`64`, int64(64), true}, - "int64 ptr": {`"64"`, &i64, false}, - "int64 ptr nil": {`null`, i64Nil, false}, - "string": {`"foo"`, "foo", false}, - "string noend": {`"foo`, "foo", true}, - "string ptr": {`"string"`, &str, false}, - "slice byte": {`"AQID"`, []byte{1, 2, 3}, false}, - "slice bytes": {`["AQID"]`, [][]byte{{1, 2, 3}}, false}, - "slice int32": {`[1,2,3]`, []int32{1, 2, 3}, false}, - "slice int64": {`["1","2","3"]`, []int64{1, 2, 3}, false}, - "slice int64 number": {`[1,2,3]`, []int64{1, 2, 3}, true}, - "slice int64 ptr": {`["64"]`, []*int64{&i64}, false}, - "slice int64 empty": {`[]`, []int64(nil), false}, - "slice int64 null": {`null`, []int64(nil), false}, - "array byte": {`"AQID"`, [3]byte{1, 2, 3}, false}, - "array byte large": {`"AQID"`, [4]byte{1, 2, 3, 4}, true}, - "array byte small": {`"AQID"`, [2]byte{1, 2}, true}, - "array int32": {`[1,2,3]`, [3]int32{1, 2, 3}, false}, - "array int64": {`["1","2","3"]`, [3]int64{1, 2, 3}, false}, - "array int64 number": {`[1,2,3]`, [3]int64{1, 2, 3}, true}, - "array int64 large": {`["1","2","3"]`, [4]int64{1, 2, 3, 4}, true}, - "array int64 small": {`["1","2","3"]`, [2]int64{1, 2}, true}, - "map bytes": {`{"b":"AQID"}`, map[string][]byte{"b": {1, 2, 3}}, false}, - "map int32": {`{"a":1,"b":2}`, map[string]int32{"a": 1, "b": 2}, false}, - "map int64": {`{"a":"1","b":"2"}`, map[string]int64{"a": 1, "b": 2}, false}, - "map int64 empty": {`{}`, map[string]int64{}, false}, - "map int64 null": {`null`, map[string]int64(nil), false}, - "map int key": {`{}`, map[int]int{}, true}, - "time": {`"2020-06-03T17:35:30Z"`, time.Date(2020, 6, 3, 17, 35, 30, 0, time.UTC), false}, - "time non-utc": {`"2020-06-03T17:35:30+02:00"`, time.Time{}, true}, - "time nozone": {`"2020-06-03T17:35:30"`, time.Time{}, true}, - "car": {`{"type":"vehicle/car","value":{"Wheels":4}}`, Car{Wheels: 4}, false}, - "car ptr": {`{"type":"vehicle/car","value":{"Wheels":4}}`, &Car{Wheels: 4}, false}, - "car iface": {`{"type":"vehicle/car","value":{"Wheels":4}}`, Vehicle(&Car{Wheels: 4}), false}, - "boat": {`{"type":"vehicle/boat","value":{"Sail":true}}`, Boat{Sail: true}, false}, - "boat ptr": {`{"type":"vehicle/boat","value":{"Sail":true}}`, &Boat{Sail: true}, false}, - 
"boat iface": {`{"type":"vehicle/boat","value":{"Sail":true}}`, Vehicle(Boat{Sail: true}), false}, - "boat into car": {`{"type":"vehicle/boat","value":{"Sail":true}}`, Car{}, true}, - "boat into car iface": {`{"type":"vehicle/boat","value":{"Sail":true}}`, Vehicle(&Car{}), true}, - "shoes": {`{"type":"vehicle/shoes","value":{"Soles":"rubber"}}`, Car{}, true}, - "shoes ptr": {`{"type":"vehicle/shoes","value":{"Soles":"rubber"}}`, &Car{}, true}, - "shoes iface": {`{"type":"vehicle/shoes","value":{"Soles":"rubbes"}}`, Vehicle(&Car{}), true}, - "key public": {`{"type":"key/public","value":"AQIDBAUGBwg="}`, PublicKey{1, 2, 3, 4, 5, 6, 7, 8}, false}, - "key wrong": {`{"type":"key/public","value":"AQIDBAUGBwg="}`, PrivateKey{1, 2, 3, 4, 5, 6, 7, 8}, true}, - "key into car": {`{"type":"key/public","value":"AQIDBAUGBwg="}`, Vehicle(&Car{}), true}, - "tags": { - `{"name":"name","OmitEmpty":"foo","Hidden":"bar","tags":{"name":"child"}}`, - Tags{JSONName: "name", OmitEmpty: "foo", Tags: &Tags{JSONName: "child"}}, - false, - }, - "tags ptr": { - `{"name":"name","OmitEmpty":"foo","tags":null}`, - &Tags{JSONName: "name", OmitEmpty: "foo"}, - false, - }, - "tags real name": {`{"JSONName":"name"}`, Tags{}, false}, - "struct": { - `{ - "Bool":true, "Float64":3.14, "Int32":32, "Int64":"64", "Int64Ptr":"64", - "String":"foo", "StringPtrPtr": "string", "Bytes":"AQID", - "Time":"2020-06-02T16:05:13.004346374Z", - "Car":{"Wheels":4}, - "Boat":{"Sail":true}, - "Vehicles":[ - {"type":"vehicle/car","value":{"Wheels":4}}, - {"type":"vehicle/boat","value":{"Sail":true}} - ], - "Child":{ - "Bool":false, "Float64":0, "Int32":0, "Int64":"0", "Int64Ptr":null, - "String":"child", "StringPtrPtr":null, "Bytes":null, - "Time":"0001-01-01T00:00:00Z", - "Car":null, "Boat":{"Sail":false}, "Vehicles":null, "Child":null - }, - "private": "foo", "unknown": "bar" - }`, - Struct{ - Bool: true, Float64: 3.14, Int32: 32, Int64: 64, Int64Ptr: &i64, - String: "foo", StringPtrPtr: &strPtr, Bytes: []byte{1, 2, 3}, - Time: time.Date(2020, 6, 2, 16, 5, 13, 4346374, time.UTC), - Car: &Car{Wheels: 4}, Boat: Boat{Sail: true}, Vehicles: []Vehicle{ - Vehicle(&Car{Wheels: 4}), - Vehicle(Boat{Sail: true}), - }, - Child: &Struct{Bool: false, String: "child"}, - }, - false, - }, - "struct key into vehicle": {`{"Vehicles":[ - {"type":"vehicle/car","value":{"Wheels":4}}, - {"type":"key/public","value":"MTIzNDU2Nzg="} - ]}`, Struct{}, true}, - "struct ptr null": {`null`, structNil, false}, - "custom value": {`{"Value":"foo"}`, CustomValue{}, false}, - "custom ptr": {`"foo"`, &CustomPtr{Value: "custom"}, false}, - "custom ptr value": {`"foo"`, CustomPtr{Value: "custom"}, false}, - "invalid type": {`"foo"`, Struct{}, true}, - } - for name, tc := range testcases { - tc := tc - t.Run(name, func(t *testing.T) { - // Create a target variable as a pointer to the zero value of the tc.value type, - // and wrap it in an empty interface. Decode into that interface. - target := reflect.New(reflect.TypeOf(tc.value)).Interface() - err := json.Unmarshal([]byte(tc.json), target) - if tc.err { - require.Error(t, err) - return - } - require.NoError(t, err) - - // Unwrap the target pointer and get the value behind the interface. 
- actual := reflect.ValueOf(target).Elem().Interface() - assert.Equal(t, tc.value, actual) - }) - } -} diff --git a/libs/json/doc.go b/libs/json/doc.go deleted file mode 100644 index d5ef4047f3..0000000000 --- a/libs/json/doc.go +++ /dev/null @@ -1,99 +0,0 @@ -// Package json provides functions for marshaling and unmarshaling JSON in a format that is -// backwards-compatible with Amino JSON encoding. This mostly differs from encoding/json in -// encoding of integers (64-bit integers are encoded as strings, not numbers), and handling -// of interfaces (wrapped in an interface object with type/value keys). -// -// JSON tags (e.g. `json:"name,omitempty"`) are supported in the same way as encoding/json, as is -// custom marshaling overrides via the json.Marshaler and json.Unmarshaler interfaces. -// -// Note that not all JSON emitted by Tendermint is generated by this library; some is generated by -// encoding/json instead, and kept like that for backwards compatibility. -// -// Encoding of numbers uses strings for 64-bit integers (including unspecified ints), to improve -// compatibility with e.g. Javascript (which uses 64-bit floats for numbers, having 53-bit -// precision): -// -// int32(32) // Output: 32 -// uint32(32) // Output: 32 -// int64(64) // Output: "64" -// uint64(64) // Output: "64" -// int(64) // Output: "64" -// uint(64) // Output: "64" -// -// Encoding of other scalars follows encoding/json: -// -// nil // Output: null -// true // Output: true -// "foo" // Output: "foo" -// "" // Output: "" -// -// Slices and arrays are encoded as encoding/json, including base64-encoding of byte slices -// with additional base64-encoding of byte arrays as well: -// -// []int64(nil) // Output: null -// []int64{} // Output: [] -// []int64{1, 2, 3} // Output: ["1", "2", "3"] -// []int32{1, 2, 3} // Output: [1, 2, 3] -// []byte{1, 2, 3} // Output: "AQID" -// [3]int64{1, 2, 3} // Output: ["1", "2", "3"] -// [3]byte{1, 2, 3} // Output: "AQID" -// -// Maps are encoded as encoding/json, but only strings are allowed as map keys (nil maps are not -// emitted as null, to retain Amino backwards-compatibility): -// -// map[string]int64(nil) // Output: {} -// map[string]int64{} // Output: {} -// map[string]int64{"a":1,"b":2} // Output: {"a":"1","b":"2"} -// map[string]int32{"a":1,"b":2} // Output: {"a":1,"b":2} -// map[bool]int{true:1} // Errors -// -// Times are encoded as encoding/json, in RFC3339Nano format, but requiring UTC time zone (with zero -// times emitted as "0001-01-01T00:00:00Z" as with encoding/json): -// -// time.Date(2020, 6, 8, 16, 21, 28, 123, time.FixedZone("UTC+2", 2*60*60)) -// // Output: "2020-06-08T14:21:28.000000123Z" -// time.Time{} // Output: "0001-01-01T00:00:00Z" -// (*time.Time)(nil) // Output: null -// -// Structs are encoded as encoding/json, supporting JSON tags and ignoring private fields: -// -// type Struct struct{ -// Name string -// Value int32 `json:"value,omitempty"` -// private bool -// } -// -// Struct{Name: "foo", Value: 7, private: true} // Output: {"Name":"foo","value":7} -// Struct{} // Output: {"Name":""} -// -// Registered types are encoded with type wrapper, regardless of whether they are given as interface -// or bare struct, but inside structs they are only emitted with type wrapper for interface fields -// (this follows Amino behavior): -// -// type Vehicle interface { -// Drive() error -// } -// -// type Car struct { -// Wheels int8 -// } -// -// func (c *Car) Drive() error { return nil } -// -// RegisterType(&Car{}, "vehicle/car") -// -// Car{Wheels: 4} 
// Output: {"type":"vehicle/car","value":{"Wheels":4}} -// &Car{Wheels: 4} // Output: {"type":"vehicle/car","value":{"Wheels":4}} -// (*Car)(nil) // Output: null -// Vehicle(Car{Wheels: 4}) // Output: {"type":"vehicle/car","value":{"Wheels":4}} -// Vehicle(nil) // Output: null -// -// type Struct struct { -// Car *Car -// Vehicle Vehicle -// } -// -// Struct{Car: &Car{Wheels: 4}, Vehicle: &Car{Wheels: 4}} -// // Output: {"Car": {"Wheels: 4"}, "Vehicle": {"type":"vehicle/car","value":{"Wheels":4}}} -// -package json diff --git a/libs/json/encoder.go b/libs/json/encoder.go deleted file mode 100644 index 11990e2af6..0000000000 --- a/libs/json/encoder.go +++ /dev/null @@ -1,254 +0,0 @@ -package json - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "reflect" - "strconv" - "time" -) - -var ( - timeType = reflect.TypeOf(time.Time{}) - jsonMarshalerType = reflect.TypeOf(new(json.Marshaler)).Elem() - jsonUnmarshalerType = reflect.TypeOf(new(json.Unmarshaler)).Elem() -) - -// Marshal marshals the value as JSON, using Amino-compatible JSON encoding (strings for -// 64-bit numbers, and type wrappers for registered types). -func Marshal(v interface{}) ([]byte, error) { - buf := new(bytes.Buffer) - err := encode(buf, v) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// MarshalIndent marshals the value as JSON, using the given prefix and indentation. -func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { - bz, err := Marshal(v) - if err != nil { - return nil, err - } - buf := new(bytes.Buffer) - err = json.Indent(buf, bz, prefix, indent) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -func encode(w io.Writer, v interface{}) error { - // Bare nil values can't be reflected, so we must handle them here. - if v == nil { - return writeStr(w, "null") - } - rv := reflect.ValueOf(v) - - // If this is a registered type, defer to interface encoder regardless of whether the input is - // an interface or a bare value. This retains Amino's behavior, but is inconsistent with - // behavior in structs where an interface field will get the type wrapper while a bare value - // field will not. - if typeRegistry.name(rv.Type()) != "" { - return encodeReflectInterface(w, rv) - } - - return encodeReflect(w, rv) -} - -func encodeReflect(w io.Writer, rv reflect.Value) error { - if !rv.IsValid() { - return errors.New("invalid reflect value") - } - - // Recursively dereference if pointer. - for rv.Kind() == reflect.Ptr { - if rv.IsNil() { - return writeStr(w, "null") - } - rv = rv.Elem() - } - - // Convert times to UTC. - if rv.Type() == timeType { - rv = reflect.ValueOf(rv.Interface().(time.Time).Round(0).UTC()) - } - - // If the value implements json.Marshaler, defer to stdlib directly. Since we've already - // dereferenced, we try implementations with both value receiver and pointer receiver. We must - // do this after the time normalization above, and thus after dereferencing. - if rv.Type().Implements(jsonMarshalerType) { - return encodeStdlib(w, rv.Interface()) - } else if rv.CanAddr() && rv.Addr().Type().Implements(jsonMarshalerType) { - return encodeStdlib(w, rv.Addr().Interface()) - } - - switch rv.Type().Kind() { - // Complex types must be recursively encoded. 
- case reflect.Interface: - return encodeReflectInterface(w, rv) - - case reflect.Array, reflect.Slice: - return encodeReflectList(w, rv) - - case reflect.Map: - return encodeReflectMap(w, rv) - - case reflect.Struct: - return encodeReflectStruct(w, rv) - - // 64-bit integers are emitted as strings, to avoid precision problems with e.g. - // Javascript which uses 64-bit floats (having 53-bit precision). - case reflect.Int64, reflect.Int: - return writeStr(w, `"`+strconv.FormatInt(rv.Int(), 10)+`"`) - - case reflect.Uint64, reflect.Uint: - return writeStr(w, `"`+strconv.FormatUint(rv.Uint(), 10)+`"`) - - // For everything else, defer to the stdlib encoding/json encoder - default: - return encodeStdlib(w, rv.Interface()) - } -} - -func encodeReflectList(w io.Writer, rv reflect.Value) error { - // Emit nil slices as null. - if rv.Kind() == reflect.Slice && rv.IsNil() { - return writeStr(w, "null") - } - - // Encode byte slices as base64 with the stdlib encoder. - if rv.Type().Elem().Kind() == reflect.Uint8 { - // Stdlib does not base64-encode byte arrays, only slices, so we copy to slice. - if rv.Type().Kind() == reflect.Array { - slice := reflect.MakeSlice(reflect.SliceOf(rv.Type().Elem()), rv.Len(), rv.Len()) - reflect.Copy(slice, rv) - rv = slice - } - return encodeStdlib(w, rv.Interface()) - } - - // Anything else we recursively encode ourselves. - length := rv.Len() - if err := writeStr(w, "["); err != nil { - return err - } - for i := 0; i < length; i++ { - if err := encodeReflect(w, rv.Index(i)); err != nil { - return err - } - if i < length-1 { - if err := writeStr(w, ","); err != nil { - return err - } - } - } - return writeStr(w, "]") -} - -func encodeReflectMap(w io.Writer, rv reflect.Value) error { - if rv.Type().Key().Kind() != reflect.String { - return errors.New("map key must be string") - } - - // nil maps are not emitted as nil, to retain Amino compatibility. - - if err := writeStr(w, "{"); err != nil { - return err - } - writeComma := false - for _, keyrv := range rv.MapKeys() { - if writeComma { - if err := writeStr(w, ","); err != nil { - return err - } - } - if err := encodeStdlib(w, keyrv.Interface()); err != nil { - return err - } - if err := writeStr(w, ":"); err != nil { - return err - } - if err := encodeReflect(w, rv.MapIndex(keyrv)); err != nil { - return err - } - writeComma = true - } - return writeStr(w, "}") -} - -func encodeReflectStruct(w io.Writer, rv reflect.Value) error { - sInfo := makeStructInfo(rv.Type()) - if err := writeStr(w, "{"); err != nil { - return err - } - writeComma := false - for i, fInfo := range sInfo.fields { - frv := rv.Field(i) - if fInfo.hidden || (fInfo.omitEmpty && frv.IsZero()) { - continue - } - - if writeComma { - if err := writeStr(w, ","); err != nil { - return err - } - } - if err := encodeStdlib(w, fInfo.jsonName); err != nil { - return err - } - if err := writeStr(w, ":"); err != nil { - return err - } - if err := encodeReflect(w, frv); err != nil { - return err - } - writeComma = true - } - return writeStr(w, "}") -} - -func encodeReflectInterface(w io.Writer, rv reflect.Value) error { - // Get concrete value and dereference pointers. 
- for rv.Kind() == reflect.Ptr || rv.Kind() == reflect.Interface { - if rv.IsNil() { - return writeStr(w, "null") - } - rv = rv.Elem() - } - - // Look up the name of the concrete type - name := typeRegistry.name(rv.Type()) - if name == "" { - return fmt.Errorf("cannot encode unregistered type %v", rv.Type()) - } - - // Write value wrapped in interface envelope - if err := writeStr(w, fmt.Sprintf(`{"type":%q,"value":`, name)); err != nil { - return err - } - if err := encodeReflect(w, rv); err != nil { - return err - } - return writeStr(w, "}") -} - -func encodeStdlib(w io.Writer, v interface{}) error { - // Doesn't stream the output because that adds a newline, as per: - // https://golang.org/pkg/encoding/json/#Encoder.Encode - blob, err := json.Marshal(v) - if err != nil { - return err - } - _, err = w.Write(blob) - return err -} - -func writeStr(w io.Writer, s string) error { - _, err := w.Write([]byte(s)) - return err -} diff --git a/libs/json/encoder_test.go b/libs/json/encoder_test.go deleted file mode 100644 index 88eb56f857..0000000000 --- a/libs/json/encoder_test.go +++ /dev/null @@ -1,104 +0,0 @@ -package json_test - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/libs/json" -) - -func TestMarshal(t *testing.T) { - s := "string" - sPtr := &s - i64 := int64(64) - ti := time.Date(2020, 6, 2, 18, 5, 13, 4346374, time.FixedZone("UTC+2", 2*60*60)) - car := &Car{Wheels: 4} - boat := Boat{Sail: true} - - testcases := map[string]struct { - value interface{} - output string - }{ - "nil": {nil, `null`}, - "string": {"foo", `"foo"`}, - "float32": {float32(3.14), `3.14`}, - "float32 neg": {float32(-3.14), `-3.14`}, - "float64": {float64(3.14), `3.14`}, - "float64 neg": {float64(-3.14), `-3.14`}, - "int32": {int32(32), `32`}, - "int64": {int64(64), `"64"`}, - "int64 neg": {int64(-64), `"-64"`}, - "int64 ptr": {&i64, `"64"`}, - "uint64": {uint64(64), `"64"`}, - "time": {ti, `"2020-06-02T16:05:13.004346374Z"`}, - "time empty": {time.Time{}, `"0001-01-01T00:00:00Z"`}, - "time ptr": {&ti, `"2020-06-02T16:05:13.004346374Z"`}, - "customptr": {CustomPtr{Value: "x"}, `{"Value":"x"}`}, // same as encoding/json - "customptr ptr": {&CustomPtr{Value: "x"}, `"custom"`}, - "customvalue": {CustomValue{Value: "x"}, `"custom"`}, - "customvalue ptr": {&CustomValue{Value: "x"}, `"custom"`}, - "slice nil": {[]int(nil), `null`}, - "slice empty": {[]int{}, `[]`}, - "slice bytes": {[]byte{1, 2, 3}, `"AQID"`}, - "slice int64": {[]int64{1, 2, 3}, `["1","2","3"]`}, - "slice int64 ptr": {[]*int64{&i64, nil}, `["64",null]`}, - "array bytes": {[3]byte{1, 2, 3}, `"AQID"`}, - "array int64": {[3]int64{1, 2, 3}, `["1","2","3"]`}, - "map nil": {map[string]int64(nil), `{}`}, // retain Amino compatibility - "map empty": {map[string]int64{}, `{}`}, - "map int64": {map[string]int64{"a": 1, "b": 2, "c": 3}, `{"a":"1","b":"2","c":"3"}`}, - "car": {car, `{"type":"vehicle/car","value":{"Wheels":4}}`}, - "car value": {*car, `{"type":"vehicle/car","value":{"Wheels":4}}`}, - "car iface": {Vehicle(car), `{"type":"vehicle/car","value":{"Wheels":4}}`}, - "car nil": {(*Car)(nil), `null`}, - "boat": {boat, `{"type":"vehicle/boat","value":{"Sail":true}}`}, - "boat ptr": {&boat, `{"type":"vehicle/boat","value":{"Sail":true}}`}, - "boat iface": {Vehicle(boat), `{"type":"vehicle/boat","value":{"Sail":true}}`}, - "key public": {PublicKey{1, 2, 3, 4, 5, 6, 7, 8}, `{"type":"key/public","value":"AQIDBAUGBwg="}`}, - "tags": { - Tags{JSONName: "name", 
OmitEmpty: "foo", Hidden: "bar", Tags: &Tags{JSONName: "child"}}, - `{"name":"name","OmitEmpty":"foo","tags":{"name":"child"}}`, - }, - "tags empty": {Tags{}, `{"name":""}`}, - // The encoding of the Car and Boat fields do not have type wrappers, even though they get - // type wrappers when encoded directly (see "car" and "boat" tests). This is to retain the - // same behavior as Amino. If the field was a Vehicle interface instead, it would get - // type wrappers, as seen in the Vehicles field. - "struct": { - Struct{ - Bool: true, Float64: 3.14, Int32: 32, Int64: 64, Int64Ptr: &i64, - String: "foo", StringPtrPtr: &sPtr, Bytes: []byte{1, 2, 3}, - Time: ti, Car: car, Boat: boat, Vehicles: []Vehicle{car, boat}, - Child: &Struct{Bool: false, String: "child"}, private: "private", - }, - `{ - "Bool":true, "Float64":3.14, "Int32":32, "Int64":"64", "Int64Ptr":"64", - "String":"foo", "StringPtrPtr": "string", "Bytes":"AQID", - "Time":"2020-06-02T16:05:13.004346374Z", - "Car":{"Wheels":4}, - "Boat":{"Sail":true}, - "Vehicles":[ - {"type":"vehicle/car","value":{"Wheels":4}}, - {"type":"vehicle/boat","value":{"Sail":true}} - ], - "Child":{ - "Bool":false, "Float64":0, "Int32":0, "Int64":"0", "Int64Ptr":null, - "String":"child", "StringPtrPtr":null, "Bytes":null, - "Time":"0001-01-01T00:00:00Z", - "Car":null, "Boat":{"Sail":false}, "Vehicles":null, "Child":null - } - }`, - }, - } - for name, tc := range testcases { - tc := tc - t.Run(name, func(t *testing.T) { - bz, err := json.Marshal(tc.value) - require.NoError(t, err) - assert.JSONEq(t, tc.output, string(bz)) - }) - } -} diff --git a/libs/json/helpers_test.go b/libs/json/helpers_test.go deleted file mode 100644 index ccb3c00388..0000000000 --- a/libs/json/helpers_test.go +++ /dev/null @@ -1,91 +0,0 @@ -package json_test - -import ( - "time" - - "github.com/tendermint/tendermint/libs/json" -) - -// Register Car, an instance of the Vehicle interface. -func init() { - json.RegisterType(&Car{}, "vehicle/car") - json.RegisterType(Boat{}, "vehicle/boat") - json.RegisterType(PublicKey{}, "key/public") - json.RegisterType(PrivateKey{}, "key/private") -} - -type Vehicle interface { - Drive() error -} - -// Car is a pointer implementation of Vehicle. -type Car struct { - Wheels int32 -} - -func (c *Car) Drive() error { return nil } - -// Boat is a value implementation of Vehicle. -type Boat struct { - Sail bool -} - -func (b Boat) Drive() error { return nil } - -// These are public and private encryption keys. -type PublicKey [8]byte -type PrivateKey [8]byte - -// Custom has custom marshalers and unmarshalers, taking pointer receivers. -type CustomPtr struct { - Value string -} - -func (c *CustomPtr) MarshalJSON() ([]byte, error) { - return []byte("\"custom\""), nil -} - -func (c *CustomPtr) UnmarshalJSON(bz []byte) error { - c.Value = "custom" - return nil -} - -// CustomValue has custom marshalers and unmarshalers, taking value receivers (which usually doesn't -// make much sense since the unmarshaler can't change anything). -type CustomValue struct { - Value string -} - -func (c CustomValue) MarshalJSON() ([]byte, error) { - return []byte("\"custom\""), nil -} - -func (c CustomValue) UnmarshalJSON(bz []byte) error { - return nil -} - -// Tags tests JSON tags. -type Tags struct { - JSONName string `json:"name"` - OmitEmpty string `json:",omitempty"` - Hidden string `json:"-"` - Tags *Tags `json:"tags,omitempty"` -} - -// Struct tests structs with lots of contents. 
-type Struct struct { - Bool bool - Float64 float64 - Int32 int32 - Int64 int64 - Int64Ptr *int64 - String string - StringPtrPtr **string - Bytes []byte - Time time.Time - Car *Car - Boat Boat - Vehicles []Vehicle - Child *Struct - private string -} diff --git a/libs/json/structs.go b/libs/json/structs.go deleted file mode 100644 index b9521114af..0000000000 --- a/libs/json/structs.go +++ /dev/null @@ -1,88 +0,0 @@ -package json - -import ( - "fmt" - "reflect" - "strings" - "unicode" - - tmsync "github.com/tendermint/tendermint/internal/libs/sync" -) - -var ( - // cache caches struct info. - cache = newStructInfoCache() -) - -// structCache is a cache of struct info. -type structInfoCache struct { - tmsync.RWMutex - structInfos map[reflect.Type]*structInfo -} - -func newStructInfoCache() *structInfoCache { - return &structInfoCache{ - structInfos: make(map[reflect.Type]*structInfo), - } -} - -func (c *structInfoCache) get(rt reflect.Type) *structInfo { - c.RLock() - defer c.RUnlock() - return c.structInfos[rt] -} - -func (c *structInfoCache) set(rt reflect.Type, sInfo *structInfo) { - c.Lock() - defer c.Unlock() - c.structInfos[rt] = sInfo -} - -// structInfo contains JSON info for a struct. -type structInfo struct { - fields []*fieldInfo -} - -// fieldInfo contains JSON info for a struct field. -type fieldInfo struct { - jsonName string - omitEmpty bool - hidden bool -} - -// makeStructInfo generates structInfo for a struct as a reflect.Value. -func makeStructInfo(rt reflect.Type) *structInfo { - if rt.Kind() != reflect.Struct { - panic(fmt.Sprintf("can't make struct info for non-struct value %v", rt)) - } - if sInfo := cache.get(rt); sInfo != nil { - return sInfo - } - fields := make([]*fieldInfo, 0, rt.NumField()) - for i := 0; i < cap(fields); i++ { - frt := rt.Field(i) - fInfo := &fieldInfo{ - jsonName: frt.Name, - omitEmpty: false, - hidden: frt.Name == "" || !unicode.IsUpper(rune(frt.Name[0])), - } - o := frt.Tag.Get("json") - if o == "-" { - fInfo.hidden = true - } else if o != "" { - opts := strings.Split(o, ",") - if opts[0] != "" { - fInfo.jsonName = opts[0] - } - for _, o := range opts[1:] { - if o == "omitempty" { - fInfo.omitEmpty = true - } - } - } - fields = append(fields, fInfo) - } - sInfo := &structInfo{fields: fields} - cache.set(rt, sInfo) - return sInfo -} diff --git a/libs/json/types.go b/libs/json/types.go deleted file mode 100644 index 9f21e81eb8..0000000000 --- a/libs/json/types.go +++ /dev/null @@ -1,109 +0,0 @@ -package json - -import ( - "errors" - "fmt" - "reflect" - - tmsync "github.com/tendermint/tendermint/internal/libs/sync" -) - -var ( - // typeRegistry contains globally registered types for JSON encoding/decoding. - typeRegistry = newTypes() -) - -// RegisterType registers a type for Amino-compatible interface encoding in the global type -// registry. These types will be encoded with a type wrapper `{"type":"","value":}` -// regardless of which interface they are wrapped in (if any). If the type is a pointer, it will -// still be valid both for value and pointer types, but decoding into an interface will generate -// the a value or pointer based on the registered type. -// -// Should only be called in init() functions, as it panics on error. -func RegisterType(_type interface{}, name string) { - if _type == nil { - panic("cannot register nil type") - } - err := typeRegistry.register(name, reflect.ValueOf(_type).Type()) - if err != nil { - panic(err) - } -} - -// typeInfo contains type information. 
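The tag handling deleted in structs.go above reduces to three rules: a `json:"-"` tag (or an unexported name) hides the field, the first tag element overrides the JSON name, and an `omitempty` option is recorded. A runnable sketch of the same rules with plain reflection, reusing the shape of the Tags helper from the deleted tests:

package main

import (
	"fmt"
	"reflect"
	"strings"
	"unicode"
)

// Tags mirrors the deleted test helper.
type Tags struct {
	JSONName  string `json:"name"`
	OmitEmpty string `json:",omitempty"`
	Hidden    string `json:"-"`
}

func main() {
	rt := reflect.TypeOf(Tags{})
	for i := 0; i < rt.NumField(); i++ {
		f := rt.Field(i)
		name := f.Name
		omitEmpty := false
		// Unexported (lowercase) fields are always hidden.
		hidden := f.Name == "" || !unicode.IsUpper(rune(f.Name[0]))
		switch tag := f.Tag.Get("json"); {
		case tag == "-":
			hidden = true
		case tag != "":
			opts := strings.Split(tag, ",")
			if opts[0] != "" {
				name = opts[0]
			}
			for _, o := range opts[1:] {
				if o == "omitempty" {
					omitEmpty = true
				}
			}
		}
		fmt.Printf("%-9s -> name=%q hidden=%v omitempty=%v\n", f.Name, name, hidden, omitEmpty)
	}
}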
-type typeInfo struct { - name string - rt reflect.Type - returnPtr bool -} - -// types is a type registry. It is safe for concurrent use. -type types struct { - tmsync.RWMutex - byType map[reflect.Type]*typeInfo - byName map[string]*typeInfo -} - -// newTypes creates a new type registry. -func newTypes() types { - return types{ - byType: map[reflect.Type]*typeInfo{}, - byName: map[string]*typeInfo{}, - } -} - -// registers the given type with the given name. The name and type must not be registered already. -func (t *types) register(name string, rt reflect.Type) error { - if name == "" { - return errors.New("name cannot be empty") - } - // If this is a pointer type, we recursively resolve until we get a bare type, but register that - // we should return pointers. - returnPtr := false - for rt.Kind() == reflect.Ptr { - returnPtr = true - rt = rt.Elem() - } - tInfo := &typeInfo{ - name: name, - rt: rt, - returnPtr: returnPtr, - } - - t.Lock() - defer t.Unlock() - if _, ok := t.byName[tInfo.name]; ok { - return fmt.Errorf("a type with name %q is already registered", name) - } - if _, ok := t.byType[tInfo.rt]; ok { - return fmt.Errorf("the type %v is already registered", rt) - } - t.byName[name] = tInfo - t.byType[rt] = tInfo - return nil -} - -// lookup looks up a type from a name, or nil if not registered. -func (t *types) lookup(name string) (reflect.Type, bool) { - t.RLock() - defer t.RUnlock() - tInfo := t.byName[name] - if tInfo == nil { - return nil, false - } - return tInfo.rt, tInfo.returnPtr -} - -// name looks up the name of a type, or empty if not registered. Unwraps pointers as necessary. -func (t *types) name(rt reflect.Type) string { - for rt.Kind() == reflect.Ptr { - rt = rt.Elem() - } - t.RLock() - defer t.RUnlock() - tInfo := t.byType[rt] - if tInfo == nil { - return "" - } - return tInfo.name -} diff --git a/libs/log/default.go b/libs/log/default.go index f31ceaada4..d0f610601e 100644 --- a/libs/log/default.go +++ b/libs/log/default.go @@ -14,8 +14,6 @@ var _ Logger = (*defaultLogger)(nil) type defaultLogger struct { zerolog.Logger - - trace bool } // NewDefaultLogger returns a default logger that can be used within Tendermint @@ -26,7 +24,7 @@ type defaultLogger struct { // Since zerolog supports typed structured logging and it is difficult to reflect // that in a generic interface, all logging methods accept a series of key/value // pair tuples, where the key must be a string. -func NewDefaultLogger(format, level string, trace bool) (Logger, error) { +func NewDefaultLogger(format, level string) (Logger, error) { var logWriter io.Writer switch strings.ToLower(format) { case LogFormatPlain, LogFormatText: @@ -57,34 +55,17 @@ func NewDefaultLogger(format, level string, trace bool) (Logger, error) { // make the writer thread-safe logWriter = newSyncWriter(logWriter) - return defaultLogger{ + return &defaultLogger{ Logger: zerolog.New(logWriter).Level(logLevel).With().Timestamp().Logger(), - trace: trace, }, nil } -// MustNewDefaultLogger delegates a call NewDefaultLogger where it panics on -// error. 
-func MustNewDefaultLogger(format, level string, trace bool) Logger { - logger, err := NewDefaultLogger(format, level, trace) - if err != nil { - panic(err) - } - - return logger -} - func (l defaultLogger) Info(msg string, keyVals ...interface{}) { l.Logger.Info().Fields(getLogFields(keyVals...)).Msg(msg) } func (l defaultLogger) Error(msg string, keyVals ...interface{}) { - e := l.Logger.Error() - if l.trace { - e = e.Stack() - } - - e.Fields(getLogFields(keyVals...)).Msg(msg) + l.Logger.Error().Fields(getLogFields(keyVals...)).Msg(msg) } func (l defaultLogger) Debug(msg string, keyVals ...interface{}) { @@ -92,12 +73,33 @@ func (l defaultLogger) Debug(msg string, keyVals ...interface{}) { } func (l defaultLogger) With(keyVals ...interface{}) Logger { - return defaultLogger{ + return &defaultLogger{ Logger: l.Logger.With().Fields(getLogFields(keyVals...)).Logger(), - trace: l.trace, } } +// OverrideWithNewLogger replaces an existing logger's internal logger +// with that of a new logger, making it possible to reconfigure an +// existing logger that has already been propagated to callers. +func OverrideWithNewLogger(logger Logger, format, level string) error { + ol, ok := logger.(*defaultLogger) + if !ok { + return fmt.Errorf("logger %T cannot be overridden", logger) + } + + newLogger, err := NewDefaultLogger(format, level) + if err != nil { + return err + } + nl, ok := newLogger.(*defaultLogger) + if !ok { + return fmt.Errorf("logger %T cannot be overridden by %T", logger, newLogger) + } + + ol.Logger = nl.Logger + return nil +} + func getLogFields(keyVals ...interface{}) map[string]interface{} { if len(keyVals)%2 != 0 { return nil diff --git a/libs/log/default_test.go b/libs/log/default_test.go index c66508f048..6ea723c519 100644 --- a/libs/log/default_test.go +++ b/libs/log/default_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/libs/log" ) @@ -34,12 +35,9 @@ func TestNewDefaultLogger(t *testing.T) { tc := tc t.Run(name, func(t *testing.T) { - _, err := log.NewDefaultLogger(tc.format, tc.level, false) + _, err := log.NewDefaultLogger(tc.format, tc.level) if tc.expectErr { require.Error(t, err) - require.Panics(t, func() { - _ = log.MustNewDefaultLogger(tc.format, tc.level, false) - }) } else { require.NoError(t, err) } diff --git a/libs/log/nop.go b/libs/log/nop.go index 4b3bfb1967..3dc5ef411e 100644 --- a/libs/log/nop.go +++ b/libs/log/nop.go @@ -5,8 +5,7 @@ import ( ) func NewNopLogger() Logger { - return defaultLogger{ + return &defaultLogger{ Logger: zerolog.Nop(), - trace: false, } } diff --git a/libs/log/testing.go b/libs/log/testing.go index 9894f6a507..464a7036f5 100644 --- a/libs/log/testing.go +++ b/libs/log/testing.go @@ -1,41 +1,51 @@ package log import ( - "io" - "os" - "sync" "testing" -) -var ( - // reuse the same logger across all tests - testingLoggerMtx = sync.Mutex{} - testingLogger Logger + "github.com/rs/zerolog" ) -// TestingLogger returns a Logger which writes to STDOUT if test(s) are being -// run with the verbose (-v) flag, NopLogger otherwise. +// NewTestingLogger converts a testing.T into a logging interface to +// make test failures and verbose output provide better feedback associated +// with test failures. This logging instance is safe for use from +// multiple threads, but in general you should create one of these +// loggers ONCE for each *testing.T instance that you interact with. 
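(The NewTestingLogger documentation continues below.) As a usage sketch of the reworked default-logger API in the hunk above: a logger is now built once without the old trace flag, and can later be reconfigured in place via OverrideWithNewLogger, which only succeeds for the concrete *defaultLogger type. The format and level constants used here are the package's existing strings; error handling is illustrative only:

package main

import "github.com/tendermint/tendermint/libs/log"

func main() {
	// Build the logger once and hand it to components.
	logger, err := log.NewDefaultLogger(log.LogFormatText, log.LogLevelError)
	if err != nil {
		panic(err)
	}
	logger.Error("running with initial configuration")

	// Later (e.g. on config reload) the same instance is re-pointed at
	// a new format and level, with no re-plumbing through callers.
	if err := log.OverrideWithNewLogger(logger, log.LogFormatPlain, log.LogLevelDebug); err != nil {
		panic(err)
	}
	logger.Debug("running with overridden configuration")
}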
// -// NOTE: -// - A call to NewTestingLogger() must be made inside a test (not in the init func) -// because verbose flag only set at the time of testing. -func TestingLogger() Logger { - return TestingLoggerWithOutput(os.Stdout) -} +// By default it collects only ERROR messages, or DEBUG messages in +// verbose mode, and relies on the underlying behavior of +// testing.T.Log(). +// +// Users should be careful to ensure that no calls to this logger are +// made from goroutines that are still running after the test completes +// (which, by the rules of testing.TB, will panic.) +func NewTestingLogger(t testing.TB) Logger { + level := LogLevelError + if testing.Verbose() { + level = LogLevelDebug + } -func TestingLoggerWithOutput(w io.Writer) Logger { - testingLoggerMtx.Lock() - defer testingLoggerMtx.Unlock() + return NewTestingLoggerWithLevel(t, level) +} - if testingLogger != nil { - return testingLogger +// NewTestingLoggerWithLevel creates a testing logger instance at a +// specific level that wraps the behavior of testing.T.Log(). +func NewTestingLoggerWithLevel(t testing.TB, level string) Logger { + logLevel, err := zerolog.ParseLevel(level) + if err != nil { + t.Fatalf("failed to parse log level (%s): %v", level, err) } - if testing.Verbose() { - testingLogger = MustNewDefaultLogger(LogFormatText, LogLevelDebug, true) - } else { - testingLogger = NewNopLogger() + return defaultLogger{ + Logger: zerolog.New(newSyncWriter(testingWriter{t})).Level(logLevel), } +} + +type testingWriter struct { + t testing.TB } - return testingLogger +func (tw testingWriter) Write(in []byte) (int, error) { + tw.t.Log(string(in)) + return len(in), nil } diff --git a/libs/math/safemath.go b/libs/math/safemath.go index ff7f0908f9..af40548ea9 100644 --- a/libs/math/safemath.go +++ b/libs/math/safemath.go @@ -9,41 +9,37 @@ var ErrOverflowInt32 = errors.New("int32 overflow") var ErrOverflowUint8 = errors.New("uint8 overflow") var ErrOverflowInt8 = errors.New("int8 overflow") -// SafeAddInt32 adds two int32 integers -// If there is an overflow this will panic -func SafeAddInt32(a, b int32) int32 { +// SafeAddInt32 adds two int32 integers. +func SafeAddInt32(a, b int32) (int32, error) { if b > 0 && (a > math.MaxInt32-b) { - panic(ErrOverflowInt32) + return 0, ErrOverflowInt32 } else if b < 0 && (a < math.MinInt32-b) { - panic(ErrOverflowInt32) + return 0, ErrOverflowInt32 } - return a + b + return a + b, nil } -// SafeSubInt32 subtracts two int32 integers -// If there is an overflow this will panic -func SafeSubInt32(a, b int32) int32 { +// SafeSubInt32 subtracts two int32 integers. +func SafeSubInt32(a, b int32) (int32, error) { if b > 0 && (a < math.MinInt32+b) { - panic(ErrOverflowInt32) + return 0, ErrOverflowInt32 } else if b < 0 && (a > math.MaxInt32+b) { - panic(ErrOverflowInt32) + return 0, ErrOverflowInt32 } - return a - b + return a - b, nil } -// SafeConvertInt32 takes a int and checks if it overflows -// If there is an overflow this will panic -func SafeConvertInt32(a int64) int32 { +// SafeConvertInt32 takes an int64 and checks if it overflows. +func SafeConvertInt32(a int64) (int32, error) { if a > math.MaxInt32 { - panic(ErrOverflowInt32) + return 0, ErrOverflowInt32 } else if a < math.MinInt32 { - panic(ErrOverflowInt32) + return 0, ErrOverflowInt32 } - return int32(a) + return int32(a), nil } -// SafeConvertUint8 takes an int64 and checks if it overflows -// If there is an overflow it returns an error +// SafeConvertUint8 takes an int64 and checks if it overflows. 
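(The remaining SafeConvert hunks continue below; they all follow the same shape.) The panicking helpers become ordinary error-returning functions, so arithmetic on untrusted inputs can be validated inline rather than recovered. A short usage sketch against the rewritten signatures:

package main

import (
	"fmt"
	"math"

	tmmath "github.com/tendermint/tendermint/libs/math"
)

func main() {
	// Overflow is now an ordinary error instead of a panic.
	if _, err := tmmath.SafeAddInt32(math.MaxInt32, 1); err != nil {
		fmt.Println("add:", err) // add: int32 overflow
	}

	// Narrowing conversions report out-of-range input the same way.
	n, err := tmmath.SafeConvertInt32(int64(42))
	if err != nil {
		panic(err)
	}
	fmt.Println(n) // 42
}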
func SafeConvertUint8(a int64) (uint8, error) { if a > math.MaxUint8 { return 0, ErrOverflowUint8 @@ -53,8 +49,7 @@ func SafeConvertUint8(a int64) (uint8, error) { return uint8(a), nil } -// SafeConvertInt8 takes an int64 and checks if it overflows -// If there is an overflow it returns an error +// SafeConvertInt8 takes an int64 and checks if it overflows. func SafeConvertInt8(a int64) (int8, error) { if a > math.MaxInt8 { return 0, ErrOverflowInt8 diff --git a/libs/os/os.go b/libs/os/os.go index f4b0f1810a..3d74c22082 100644 --- a/libs/os/os.go +++ b/libs/os/os.go @@ -5,35 +5,8 @@ import ( "fmt" "io" "os" - "os/signal" - "syscall" ) -type logger interface { - Info(msg string, keyvals ...interface{}) -} - -// TrapSignal catches SIGTERM and SIGINT, executes the cleanup function, -// and exits with code 0. -func TrapSignal(logger logger, cb func()) { - c := make(chan os.Signal, 1) - signal.Notify(c, os.Interrupt, syscall.SIGTERM) - - go func() { - sig := <-c - logger.Info(fmt.Sprintf("captured %v, exiting...", sig)) - if cb != nil { - cb() - } - os.Exit(0) - }() -} - -func Exit(s string) { - fmt.Printf(s + "\n") - os.Exit(1) -} - // EnsureDir ensures the given directory exists, creating it if necessary. // Errors if the path already exists as a non-directory. func EnsureDir(dir string, mode os.FileMode) error { diff --git a/libs/os/os_test.go b/libs/os/os_test.go index 3a31de04a0..ca7050156d 100644 --- a/libs/os/os_test.go +++ b/libs/os/os_test.go @@ -3,20 +3,17 @@ package os_test import ( "bytes" "fmt" - "io/ioutil" "os" - "os/exec" "path/filepath" - "syscall" "testing" - "time" "github.com/stretchr/testify/require" + tmos "github.com/tendermint/tendermint/libs/os" ) func TestCopyFile(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "example") + tmpfile, err := os.CreateTemp(t.TempDir(), "example") if err != nil { t.Fatal(err) } @@ -33,7 +30,7 @@ func TestCopyFile(t *testing.T) { if _, err := os.Stat(copyfile); os.IsNotExist(err) { t.Fatal("copy should exist") } - data, err := ioutil.ReadFile(copyfile) + data, err := os.ReadFile(copyfile) if err != nil { t.Fatal(err) } @@ -43,39 +40,11 @@ func TestCopyFile(t *testing.T) { os.Remove(copyfile) } -func TestTrapSignal(t *testing.T) { - if os.Getenv("TM_TRAP_SIGNAL_TEST") == "1" { - t.Log("inside test process") - killer() - return - } - - cmd, _, mockStderr := newTestProgram(t, "TM_TRAP_SIGNAL_TEST") - - err := cmd.Run() - if err == nil { - wantStderr := "exiting" - if mockStderr.String() != wantStderr { - t.Fatalf("stderr: want %q, got %q", wantStderr, mockStderr.String()) - } - - return - } - - if e, ok := err.(*exec.ExitError); ok && !e.Success() { - t.Fatalf("wrong exit code, want 0, got %d", e.ExitCode()) - } - - t.Fatal("this error should not be triggered") -} - func TestEnsureDir(t *testing.T) { - tmp, err := ioutil.TempDir("", "ensure-dir") - require.NoError(t, err) - defer os.RemoveAll(tmp) + tmp := t.TempDir() // Should be possible to create a new directory. - err = tmos.EnsureDir(filepath.Join(tmp, "dir"), 0755) + err := tmos.EnsureDir(filepath.Join(tmp, "dir"), 0755) require.NoError(t, err) require.DirExists(t, filepath.Join(tmp, "dir")) @@ -84,7 +53,7 @@ func TestEnsureDir(t *testing.T) { require.NoError(t, err) // Should fail on file. 
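(TestEnsureDir continues below.) The test updates in this file all follow one migration: deprecated ioutil helpers are replaced by their os equivalents, and hand-managed temp directories by t.TempDir, which the testing package removes automatically. A condensed sketch of the before/after pattern; the test name is hypothetical:

package os_test

import (
	"os"
	"path/filepath"
	"testing"
)

func TestTempDirPattern(t *testing.T) {
	// Before: ioutil.TempDir(...) plus a deferred os.RemoveAll.
	// After: t.TempDir() ties cleanup to the test's lifetime.
	dir := t.TempDir()

	path := filepath.Join(dir, "file")
	if err := os.WriteFile(path, []byte("data"), 0644); err != nil { // was ioutil.WriteFile
		t.Fatal(err)
	}
	data, err := os.ReadFile(path) // was ioutil.ReadFile
	if err != nil {
		t.Fatal(err)
	}
	if string(data) != "data" {
		t.Fatalf("unexpected content: %q", data)
	}
}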
- err = ioutil.WriteFile(filepath.Join(tmp, "file"), []byte{}, 0644) + err = os.WriteFile(filepath.Join(tmp, "file"), []byte{}, 0644) require.NoError(t, err) err = tmos.EnsureDir(filepath.Join(tmp, "file"), 0755) require.Error(t, err) @@ -102,58 +71,20 @@ func TestEnsureDir(t *testing.T) { require.Error(t, err) } -type mockLogger struct{} - -func (ml mockLogger) Info(msg string, keyvals ...interface{}) {} - -func killer() { - logger := mockLogger{} - - tmos.TrapSignal(logger, func() { _, _ = fmt.Fprintf(os.Stderr, "exiting") }) - time.Sleep(1 * time.Second) - - p, err := os.FindProcess(os.Getpid()) - if err != nil { - panic(err) - } - - if err := p.Signal(syscall.SIGTERM); err != nil { - panic(err) - } - - time.Sleep(1 * time.Second) -} - -func newTestProgram(t *testing.T, environVar string) (cmd *exec.Cmd, stdout *bytes.Buffer, stderr *bytes.Buffer) { - t.Helper() - - cmd = exec.Command(os.Args[0], "-test.run="+t.Name()) - stdout, stderr = bytes.NewBufferString(""), bytes.NewBufferString("") - cmd.Env = append(os.Environ(), fmt.Sprintf("%s=1", environVar)) - cmd.Stdout = stdout - cmd.Stderr = stderr - - return -} - // Ensure that using CopyFile does not truncate the destination file before // the origin is positively a non-directory and that it is ready for copying. // See https://github.com/tendermint/tendermint/issues/6427 func TestTrickedTruncation(t *testing.T) { - tmpDir, err := ioutil.TempDir(os.TempDir(), "pwn_truncate") - if err != nil { - t.Fatal(err) - } - defer os.Remove(tmpDir) + tmpDir := t.TempDir() originalWALPath := filepath.Join(tmpDir, "wal") originalWALContent := []byte("I AM BECOME DEATH, DESTROYER OF ALL WORLDS!") - if err := ioutil.WriteFile(originalWALPath, originalWALContent, 0755); err != nil { + if err := os.WriteFile(originalWALPath, originalWALContent, 0755); err != nil { t.Fatal(err) } // 1. Sanity check. - readWAL, err := ioutil.ReadFile(originalWALPath) + readWAL, err := os.ReadFile(originalWALPath) if err != nil { t.Fatal(err) } @@ -168,7 +99,7 @@ func TestTrickedTruncation(t *testing.T) { } // 3. 
Check the WAL's content - reReadWAL, err := ioutil.ReadFile(originalWALPath) + reReadWAL, err := os.ReadFile(originalWALPath) if err != nil { t.Fatal(err) } diff --git a/libs/pubsub/example_test.go b/libs/pubsub/example_test.go deleted file mode 100644 index 4be1d97c4f..0000000000 --- a/libs/pubsub/example_test.go +++ /dev/null @@ -1,42 +0,0 @@ -package pubsub_test - -import ( - "context" - "testing" - - "github.com/stretchr/testify/require" - - abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/pubsub" - "github.com/tendermint/tendermint/libs/pubsub/query" -) - -func TestExample(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - - require.NoError(t, s.Start()) - - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) - - ctx := context.Background() - - subscription, err := s.Subscribe(ctx, "example-client", query.MustCompile("abci.account.name='John'")) - require.NoError(t, err) - - events := []abci.Event{ - { - Type: "abci.account", - Attributes: []abci.EventAttribute{{Key: "name", Value: "John"}}, - }, - } - err = s.PublishWithEvents(ctx, "Tombstone", events) - require.NoError(t, err) - - assertReceive(t, "Tombstone", subscription.Out()) -} diff --git a/libs/pubsub/pubsub.go b/libs/pubsub/pubsub.go deleted file mode 100644 index 6ecf5acbdb..0000000000 --- a/libs/pubsub/pubsub.go +++ /dev/null @@ -1,527 +0,0 @@ -// Package pubsub implements a pub-sub model with a single publisher (Server) -// and multiple subscribers (clients). -// -// Though you can have multiple publishers by sharing a pointer to a server or -// by giving the same channel to each publisher and publishing messages from -// that channel (fan-in). -// -// Clients subscribe for messages, which could be of any type, using a query. -// When some message is published, we match it with all queries. If there is a -// match, this message will be pushed to all clients, subscribed to that query. -// See query subpackage for our implementation. -// -// Example: -// -// q, err := query.New("account.name='John'") -// if err != nil { -// return err -// } -// ctx, cancel := context.WithTimeout(context.Background(), 1 * time.Second) -// defer cancel() -// subscription, err := pubsub.Subscribe(ctx, "johns-transactions", q) -// if err != nil { -// return err -// } -// -// for { -// select { -// case msg <- subscription.Out(): -// // handle msg.Data() and msg.Events() -// case <-subscription.Canceled(): -// return subscription.Err() -// } -// } -// -package pubsub - -import ( - "context" - "errors" - "fmt" - - "github.com/tendermint/tendermint/abci/types" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/libs/pubsub/query" - "github.com/tendermint/tendermint/libs/service" -) - -type operation int - -const ( - sub operation = iota - pub - unsub - shutdown -) - -var ( - // ErrSubscriptionNotFound is returned when a client tries to unsubscribe - // from not existing subscription. - ErrSubscriptionNotFound = errors.New("subscription not found") - - // ErrAlreadySubscribed is returned when a client tries to subscribe twice or - // more using the same query. - ErrAlreadySubscribed = errors.New("already subscribed") -) - -// Query defines an interface for a query to be used for subscribing. A query -// matches against a map of events. Each key in this map is a composite of the -// even type and an attribute key (e.g. 
"{eventType}.{eventAttrKey}") and the -// values are the event values that are contained under that relationship. This -// allows event types to repeat themselves with the same set of keys and -// different values. -type Query interface { - Matches(events []types.Event) (bool, error) - String() string -} - -type UnsubscribeArgs struct { - ID string - Subscriber string - Query Query -} - -func (args UnsubscribeArgs) Validate() error { - if args.Subscriber == "" { - return errors.New("must specify a subscriber") - } - - if args.ID == "" && args.Query == nil { - return fmt.Errorf("subscription is not fully defined [subscriber=%q]", args.Subscriber) - } - - return nil -} - -type cmd struct { - op operation - - // subscribe, unsubscribe - query Query - subscription *Subscription - clientID string - - // publish - msg interface{} - events []types.Event -} - -// Server allows clients to subscribe/unsubscribe for messages, publishing -// messages with or without events, and manages internal state. -type Server struct { - service.BaseService - - cmds chan cmd - cmdsCap int - - // check if we have subscription before - // subscribing or unsubscribing - mtx tmsync.RWMutex - - // subscriber -> [query->id (string) OR id->query (string))], - // track connections both by ID (new) and query (legacy) to - // avoid breaking the interface. - subscriptions map[string]map[string]string -} - -// Option sets a parameter for the server. -type Option func(*Server) - -// NewServer returns a new server. See the commentary on the Option functions -// for a detailed description of how to configure buffering. If no options are -// provided, the resulting server's queue is unbuffered. -func NewServer(options ...Option) *Server { - s := &Server{ - subscriptions: make(map[string]map[string]string), - } - s.BaseService = *service.NewBaseService(nil, "PubSub", s) - - for _, option := range options { - option(s) - } - - // if BufferCapacity option was not set, the channel is unbuffered - s.cmds = make(chan cmd, s.cmdsCap) - - return s -} - -// BufferCapacity allows you to specify capacity for the internal server's -// queue. Since the server, given Y subscribers, could only process X messages, -// this option could be used to survive spikes (e.g. high amount of -// transactions during peak hours). -func BufferCapacity(cap int) Option { - return func(s *Server) { - if cap > 0 { - s.cmdsCap = cap - } - } -} - -// BufferCapacity returns capacity of the internal server's queue. -func (s *Server) BufferCapacity() int { - return s.cmdsCap -} - -// Subscribe creates a subscription for the given client. -// -// An error will be returned to the caller if the context is canceled or if -// subscription already exist for pair clientID and query. -// -// outCapacity can be used to set a capacity for Subscription#Out channel (1 by -// default). Panics if outCapacity is less than or equal to zero. If you want -// an unbuffered channel, use SubscribeUnbuffered. -func (s *Server) Subscribe( - ctx context.Context, - clientID string, - query Query, - outCapacity ...int) (*Subscription, error) { - outCap := 1 - if len(outCapacity) > 0 { - if outCapacity[0] <= 0 { - panic("Negative or zero capacity. Use SubscribeUnbuffered if you want an unbuffered channel") - } - outCap = outCapacity[0] - } - - return s.subscribe(ctx, clientID, query, outCap) -} - -// SubscribeUnbuffered does the same as Subscribe, except it returns a -// subscription with unbuffered channel. Use with caution as it can freeze the -// server. 
-func (s *Server) SubscribeUnbuffered(ctx context.Context, clientID string, query Query) (*Subscription, error) { - return s.subscribe(ctx, clientID, query, 0) -} - -func (s *Server) subscribe(ctx context.Context, clientID string, query Query, outCapacity int) (*Subscription, error) { - s.mtx.RLock() - clientSubscriptions, ok := s.subscriptions[clientID] - if ok { - _, ok = clientSubscriptions[query.String()] - } - s.mtx.RUnlock() - if ok { - return nil, ErrAlreadySubscribed - } - - subscription := NewSubscription(outCapacity) - select { - case s.cmds <- cmd{op: sub, clientID: clientID, query: query, subscription: subscription}: - s.mtx.Lock() - if _, ok = s.subscriptions[clientID]; !ok { - s.subscriptions[clientID] = make(map[string]string) - } - s.subscriptions[clientID][query.String()] = subscription.id - s.subscriptions[clientID][subscription.id] = query.String() - s.mtx.Unlock() - return subscription, nil - case <-ctx.Done(): - return nil, ctx.Err() - case <-s.Quit(): - return nil, errors.New("service is shutting down") - } -} - -// Unsubscribe removes the subscription on the given query. An error will be -// returned to the caller if the context is canceled or if subscription does -// not exist. -func (s *Server) Unsubscribe(ctx context.Context, args UnsubscribeArgs) error { - if err := args.Validate(); err != nil { - return err - } - var qs string - - if args.Query != nil { - qs = args.Query.String() - } - - clientSubscriptions, err := func() (map[string]string, error) { - s.mtx.RLock() - defer s.mtx.RUnlock() - - clientSubscriptions, ok := s.subscriptions[args.Subscriber] - if args.ID != "" { - qs, ok = clientSubscriptions[args.ID] - - if ok && args.Query == nil { - var err error - args.Query, err = query.New(qs) - if err != nil { - return nil, err - } - } - } else if qs != "" { - args.ID, ok = clientSubscriptions[qs] - } - - if !ok { - return nil, ErrSubscriptionNotFound - } - - return clientSubscriptions, nil - }() - - if err != nil { - return err - } - - select { - case s.cmds <- cmd{op: unsub, clientID: args.Subscriber, query: args.Query, subscription: &Subscription{id: args.ID}}: - s.mtx.Lock() - defer s.mtx.Unlock() - - delete(clientSubscriptions, args.ID) - delete(clientSubscriptions, qs) - - if len(clientSubscriptions) == 0 { - delete(s.subscriptions, args.Subscriber) - } - return nil - case <-ctx.Done(): - return ctx.Err() - case <-s.Quit(): - return nil - } -} - -// UnsubscribeAll removes all client subscriptions. An error will be returned -// to the caller if the context is canceled or if subscription does not exist. -func (s *Server) UnsubscribeAll(ctx context.Context, clientID string) error { - s.mtx.RLock() - _, ok := s.subscriptions[clientID] - s.mtx.RUnlock() - if !ok { - return ErrSubscriptionNotFound - } - - select { - case s.cmds <- cmd{op: unsub, clientID: clientID}: - s.mtx.Lock() - defer s.mtx.Unlock() - - delete(s.subscriptions, clientID) - - return nil - case <-ctx.Done(): - return ctx.Err() - case <-s.Quit(): - return nil - } -} - -// NumClients returns the number of clients. -func (s *Server) NumClients() int { - s.mtx.RLock() - defer s.mtx.RUnlock() - return len(s.subscriptions) -} - -// NumClientSubscriptions returns the number of subscriptions the client has. -func (s *Server) NumClientSubscriptions(clientID string) int { - s.mtx.RLock() - defer s.mtx.RUnlock() - return len(s.subscriptions[clientID]) / 2 -} - -// Publish publishes the given message. An error will be returned to the caller -// if the context is canceled. 
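(Publish and PublishWithEvents follow below.) Matching is driven entirely by the event set supplied at publish time: every registered query is evaluated against the events, and only subscribers whose queries match receive the message. Reconstructed from the deleted example test, with s and ctx assumed in scope:

func publishJohn(ctx context.Context, s *pubsub.Server) error {
	events := []abci.Event{{
		Type:       "abci.account",
		Attributes: []abci.EventAttribute{{Key: "name", Value: "John"}},
	}}
	// Subscribers of "abci.account.name='John'" receive "Tombstone";
	// queries that do not match the event set see nothing.
	return s.PublishWithEvents(ctx, "Tombstone", events)
}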
-func (s *Server) Publish(ctx context.Context, msg interface{}) error { - return s.PublishWithEvents(ctx, msg, []types.Event{}) -} - -// PublishWithEvents publishes the given message with the set of events. The set -// is matched with clients queries. If there is a match, the message is sent to -// the client. -func (s *Server) PublishWithEvents(ctx context.Context, msg interface{}, events []types.Event) error { - select { - case s.cmds <- cmd{op: pub, msg: msg, events: events}: - return nil - case <-ctx.Done(): - return ctx.Err() - case <-s.Quit(): - return nil - } -} - -// OnStop implements Service.OnStop by shutting down the server. -func (s *Server) OnStop() { - s.cmds <- cmd{op: shutdown} -} - -// NOTE: not goroutine safe -type state struct { - // query string -> client -> subscription - subscriptions map[string]map[string]*Subscription - // query string -> queryPlusRefCount - queries map[string]*queryPlusRefCount -} - -// queryPlusRefCount holds a pointer to a query and reference counter. When -// refCount is zero, query will be removed. -type queryPlusRefCount struct { - q Query - refCount int -} - -// OnStart implements Service.OnStart by starting the server. -func (s *Server) OnStart() error { - go s.loop(state{ - subscriptions: make(map[string]map[string]*Subscription), - queries: make(map[string]*queryPlusRefCount), - }) - return nil -} - -// OnReset implements Service.OnReset -func (s *Server) OnReset() error { - return nil -} - -func (s *Server) loop(state state) { -loop: - for cmd := range s.cmds { - switch cmd.op { - case unsub: - if cmd.query != nil { - state.remove(cmd.clientID, cmd.query.String(), cmd.subscription.id, ErrUnsubscribed) - } else { - state.removeClient(cmd.clientID, ErrUnsubscribed) - } - case shutdown: - state.removeAll(nil) - break loop - case sub: - state.add(cmd.clientID, cmd.query, cmd.subscription) - case pub: - if err := state.send(cmd.msg, cmd.events); err != nil { - s.Logger.Error("Error querying for events", "err", err) - } - } - } -} - -func (state *state) add(clientID string, q Query, subscription *Subscription) { - qStr := q.String() - - // initialize subscription for this client per query if needed - if _, ok := state.subscriptions[qStr]; !ok { - state.subscriptions[qStr] = make(map[string]*Subscription) - } - - if _, ok := state.subscriptions[subscription.id]; !ok { - state.subscriptions[subscription.id] = make(map[string]*Subscription) - } - - // create subscription - state.subscriptions[qStr][clientID] = subscription - state.subscriptions[subscription.id][clientID] = subscription - - // initialize query if needed - if _, ok := state.queries[qStr]; !ok { - state.queries[qStr] = &queryPlusRefCount{q: q, refCount: 0} - } - // increment reference counter - state.queries[qStr].refCount++ -} - -func (state *state) remove(clientID string, qStr, id string, reason error) { - clientSubscriptions, ok := state.subscriptions[qStr] - if !ok { - return - } - - subscription, ok := clientSubscriptions[clientID] - if !ok { - return - } - - subscription.cancel(reason) - - // remove client from query map. - // if query has no other clients subscribed, remove it. 
- delete(state.subscriptions[qStr], clientID) - delete(state.subscriptions[id], clientID) - if len(state.subscriptions[qStr]) == 0 { - delete(state.subscriptions, qStr) - } - - // decrease ref counter in queries - if ref, ok := state.queries[qStr]; ok { - ref.refCount-- - if ref.refCount == 0 { - // remove the query if nobody else is using it - delete(state.queries, qStr) - } - } -} - -func (state *state) removeClient(clientID string, reason error) { - seen := map[string]struct{}{} - for qStr, clientSubscriptions := range state.subscriptions { - if sub, ok := clientSubscriptions[clientID]; ok { - if _, ok = seen[sub.id]; ok { - // all subscriptions are double indexed by ID and query, only - // process them once. - continue - } - state.remove(clientID, qStr, sub.id, reason) - seen[sub.id] = struct{}{} - } - } -} - -func (state *state) removeAll(reason error) { - for qStr, clientSubscriptions := range state.subscriptions { - sub, ok := clientSubscriptions[qStr] - if !ok || ok && sub.id == qStr { - // all subscriptions are double indexed by ID and query, only - // process them once. - continue - } - - for clientID := range clientSubscriptions { - state.remove(clientID, qStr, sub.id, reason) - } - } -} - -func (state *state) send(msg interface{}, events []types.Event) error { - for qStr, clientSubscriptions := range state.subscriptions { - if sub, ok := clientSubscriptions[qStr]; ok && sub.id == qStr { - continue - } - var q Query - if qi, ok := state.queries[qStr]; ok { - q = qi.q - } else { - continue - } - - match, err := q.Matches(events) - if err != nil { - return fmt.Errorf("failed to match against query %s: %w", q.String(), err) - } - - if match { - for clientID, subscription := range clientSubscriptions { - if cap(subscription.out) == 0 { - // block on unbuffered channel - select { - case subscription.out <- NewMessage(subscription.id, msg, events): - case <-subscription.canceled: - } - } else { - // don't block on buffered channels - select { - case subscription.out <- NewMessage(subscription.id, msg, events): - default: - state.remove(clientID, qStr, subscription.id, ErrOutOfCapacity) - } - } - } - } - } - - return nil -} diff --git a/libs/pubsub/pubsub_test.go b/libs/pubsub/pubsub_test.go deleted file mode 100644 index 36359a143a..0000000000 --- a/libs/pubsub/pubsub_test.go +++ /dev/null @@ -1,573 +0,0 @@ -package pubsub_test - -import ( - "context" - "fmt" - "runtime/debug" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/libs/log" - - "github.com/tendermint/tendermint/libs/pubsub" - "github.com/tendermint/tendermint/libs/pubsub/query" -) - -const ( - clientID = "test-client" -) - -func TestSubscribe(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - err := s.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) - - ctx := context.Background() - subscription, err := s.Subscribe(ctx, clientID, query.All) - require.NoError(t, err) - - require.Equal(t, 1, s.NumClients()) - require.Equal(t, 1, s.NumClientSubscriptions(clientID)) - - err = s.Publish(ctx, "Ka-Zar") - require.NoError(t, err) - assertReceive(t, "Ka-Zar", subscription.Out()) - - published := make(chan struct{}) - go func() { - defer close(published) - - err := s.Publish(ctx, "Quicksilver") - require.NoError(t, err) - - err = s.Publish(ctx, "Asylum") - require.NoError(t, err) - - err = 
s.Publish(ctx, "Ivan") - require.NoError(t, err) - }() - - select { - case <-published: - assertReceive(t, "Quicksilver", subscription.Out()) - assertCanceled(t, subscription, pubsub.ErrOutOfCapacity) - case <-time.After(3 * time.Second): - t.Fatal("Expected Publish(Asylum) not to block") - } -} - -func TestSubscribeWithCapacity(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - err := s.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) - - ctx := context.Background() - require.Panics(t, func() { - _, err = s.Subscribe(ctx, clientID, query.All, -1) - require.NoError(t, err) - }) - require.Panics(t, func() { - _, err = s.Subscribe(ctx, clientID, query.All, 0) - require.NoError(t, err) - }) - subscription, err := s.Subscribe(ctx, clientID, query.All, 1) - require.NoError(t, err) - err = s.Publish(ctx, "Aggamon") - require.NoError(t, err) - assertReceive(t, "Aggamon", subscription.Out()) -} - -func TestSubscribeUnbuffered(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - err := s.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) - - ctx := context.Background() - subscription, err := s.SubscribeUnbuffered(ctx, clientID, query.All) - require.NoError(t, err) - - published := make(chan struct{}) - go func() { - defer close(published) - - err := s.Publish(ctx, "Ultron") - require.NoError(t, err) - - err = s.Publish(ctx, "Darkhawk") - require.NoError(t, err) - }() - - select { - case <-published: - t.Fatal("Expected Publish(Darkhawk) to block") - case <-time.After(3 * time.Second): - assertReceive(t, "Ultron", subscription.Out()) - assertReceive(t, "Darkhawk", subscription.Out()) - } -} - -func TestSlowClientIsRemovedWithErrOutOfCapacity(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - err := s.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) - - ctx := context.Background() - subscription, err := s.Subscribe(ctx, clientID, query.All) - require.NoError(t, err) - err = s.Publish(ctx, "Fat Cobra") - require.NoError(t, err) - err = s.Publish(ctx, "Viper") - require.NoError(t, err) - - assertCanceled(t, subscription, pubsub.ErrOutOfCapacity) -} - -func TestDifferentClients(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - - require.NoError(t, s.Start()) - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) - - ctx := context.Background() - - subscription1, err := s.Subscribe(ctx, "client-1", query.MustCompile("tm.events.type='NewBlock'")) - require.NoError(t, err) - - events := []abci.Event{ - { - Type: "tm.events", - Attributes: []abci.EventAttribute{{Key: "type", Value: "NewBlock"}}, - }, - } - - require.NoError(t, s.PublishWithEvents(ctx, "Iceman", events)) - assertReceive(t, "Iceman", subscription1.Out()) - - subscription2, err := s.Subscribe( - ctx, - "client-2", - query.MustCompile("tm.events.type='NewBlock' AND abci.account.name='Igor'"), - ) - require.NoError(t, err) - - events = []abci.Event{ - { - Type: "tm.events", - Attributes: []abci.EventAttribute{{Key: "type", Value: "NewBlock"}}, - }, - { - Type: "abci.account", - Attributes: []abci.EventAttribute{{Key: "name", Value: "Igor"}}, - }, - } - - require.NoError(t, s.PublishWithEvents(ctx, "Ultimo", events)) - assertReceive(t, "Ultimo", subscription1.Out()) - assertReceive(t, "Ultimo", subscription2.Out()) - - 
subscription3, err := s.Subscribe( - ctx, - "client-3", - query.MustCompile("tm.events.type='NewRoundStep' AND abci.account.name='Igor' AND abci.invoice.number = 10"), - ) - require.NoError(t, err) - - events = []abci.Event{ - { - Type: "tm.events", - Attributes: []abci.EventAttribute{{Key: "type", Value: "NewRoundStep"}}, - }, - } - - require.NoError(t, s.PublishWithEvents(ctx, "Valeria Richards", events)) - require.Zero(t, len(subscription3.Out())) -} - -func TestSubscribeDuplicateKeys(t *testing.T) { - ctx := context.Background() - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - - require.NoError(t, s.Start()) - - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) - - testCases := []struct { - query string - expected interface{} - }{ - { - "withdraw.rewards='17'", - "Iceman", - }, - { - "withdraw.rewards='22'", - "Iceman", - }, - { - "withdraw.rewards='1' AND withdraw.rewards='22'", - "Iceman", - }, - { - "withdraw.rewards='100'", - nil, - }, - } - - for i, tc := range testCases { - sub, err := s.Subscribe(ctx, fmt.Sprintf("client-%d", i), query.MustCompile(tc.query)) - require.NoError(t, err) - - events := []abci.Event{ - { - Type: "transfer", - Attributes: []abci.EventAttribute{ - {Key: "sender", Value: "foo"}, - {Key: "sender", Value: "bar"}, - {Key: "sender", Value: "baz"}, - }, - }, - { - Type: "withdraw", - Attributes: []abci.EventAttribute{ - {Key: "rewards", Value: "1"}, - {Key: "rewards", Value: "17"}, - {Key: "rewards", Value: "22"}, - }, - }, - } - - require.NoError(t, s.PublishWithEvents(ctx, "Iceman", events)) - - if tc.expected != nil { - assertReceive(t, tc.expected, sub.Out()) - } else { - require.Zero(t, len(sub.Out())) - } - } -} - -func TestClientSubscribesTwice(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - err := s.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) - - ctx := context.Background() - q := query.MustCompile("tm.events.type='NewBlock'") - - subscription1, err := s.Subscribe(ctx, clientID, q) - require.NoError(t, err) - - events := []abci.Event{ - { - Type: "tm.events", - Attributes: []abci.EventAttribute{{Key: "type", Value: "NewBlock"}}, - }, - } - - require.NoError(t, s.PublishWithEvents(ctx, "Goblin Queen", events)) - assertReceive(t, "Goblin Queen", subscription1.Out()) - - subscription2, err := s.Subscribe(ctx, clientID, q) - require.Error(t, err) - require.Nil(t, subscription2) - - require.NoError(t, s.PublishWithEvents(ctx, "Spider-Man", events)) - assertReceive(t, "Spider-Man", subscription1.Out()) -} - -func TestUnsubscribe(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - err := s.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) - - ctx := context.Background() - subscription, err := s.Subscribe(ctx, clientID, query.MustCompile("tm.events.type='NewBlock'")) - require.NoError(t, err) - err = s.Unsubscribe(ctx, pubsub.UnsubscribeArgs{ - Subscriber: clientID, - Query: query.MustCompile("tm.events.type='NewBlock'")}) - require.NoError(t, err) - - err = s.Publish(ctx, "Nick Fury") - require.NoError(t, err) - require.Zero(t, len(subscription.Out()), "Should not receive anything after Unsubscribe") - - assertCanceled(t, subscription, pubsub.ErrUnsubscribed) -} - -func TestClientUnsubscribesTwice(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - err := s.Start() - require.NoError(t, err) - 
t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) - - ctx := context.Background() - _, err = s.Subscribe(ctx, clientID, query.MustCompile("tm.events.type='NewBlock'")) - require.NoError(t, err) - err = s.Unsubscribe(ctx, pubsub.UnsubscribeArgs{ - Subscriber: clientID, - Query: query.MustCompile("tm.events.type='NewBlock'")}) - require.NoError(t, err) - - err = s.Unsubscribe(ctx, pubsub.UnsubscribeArgs{ - Subscriber: clientID, - Query: query.MustCompile("tm.events.type='NewBlock'")}) - require.Equal(t, pubsub.ErrSubscriptionNotFound, err) - err = s.UnsubscribeAll(ctx, clientID) - require.Equal(t, pubsub.ErrSubscriptionNotFound, err) -} - -func TestResubscribe(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - err := s.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) - - ctx := context.Background() - _, err = s.Subscribe(ctx, clientID, query.All) - require.NoError(t, err) - err = s.Unsubscribe(ctx, pubsub.UnsubscribeArgs{Subscriber: clientID, Query: query.All}) - require.NoError(t, err) - subscription, err := s.Subscribe(ctx, clientID, query.All) - require.NoError(t, err) - - err = s.Publish(ctx, "Cable") - require.NoError(t, err) - assertReceive(t, "Cable", subscription.Out()) -} - -func TestUnsubscribeAll(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - err := s.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := s.Stop(); err != nil { - t.Error(err) - } - }) - - ctx := context.Background() - subscription1, err := s.Subscribe(ctx, clientID, query.MustCompile("tm.events.type='NewBlock'")) - require.NoError(t, err) - subscription2, err := s.Subscribe(ctx, clientID, query.MustCompile("tm.events.type='NewBlockHeader'")) - require.NoError(t, err) - - err = s.UnsubscribeAll(ctx, clientID) - require.NoError(t, err) - - err = s.Publish(ctx, "Nick Fury") - require.NoError(t, err) - require.Zero(t, len(subscription1.Out()), "Should not receive anything after UnsubscribeAll") - require.Zero(t, len(subscription2.Out()), "Should not receive anything after UnsubscribeAll") - - assertCanceled(t, subscription1, pubsub.ErrUnsubscribed) - assertCanceled(t, subscription2, pubsub.ErrUnsubscribed) -} - -func TestBufferCapacity(t *testing.T) { - s := pubsub.NewServer(pubsub.BufferCapacity(2)) - s.SetLogger(log.TestingLogger()) - - require.Equal(t, 2, s.BufferCapacity()) - - ctx := context.Background() - err := s.Publish(ctx, "Nighthawk") - require.NoError(t, err) - err = s.Publish(ctx, "Sage") - require.NoError(t, err) - - ctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond) - defer cancel() - - err = s.Publish(ctx, "Ironclad") - if assert.Error(t, err) { - require.Equal(t, context.DeadlineExceeded, err) - } -} - -func Benchmark10Clients(b *testing.B) { benchmarkNClients(10, b) } -func Benchmark100Clients(b *testing.B) { benchmarkNClients(100, b) } -func Benchmark1000Clients(b *testing.B) { benchmarkNClients(1000, b) } - -func Benchmark10ClientsOneQuery(b *testing.B) { benchmarkNClientsOneQuery(10, b) } -func Benchmark100ClientsOneQuery(b *testing.B) { benchmarkNClientsOneQuery(100, b) } -func Benchmark1000ClientsOneQuery(b *testing.B) { benchmarkNClientsOneQuery(1000, b) } - -func benchmarkNClients(n int, b *testing.B) { - s := pubsub.NewServer() - err := s.Start() - require.NoError(b, err) - - b.Cleanup(func() { - if err := s.Stop(); err != nil { - b.Error(err) - } - }) - - ctx := context.Background() - for i := 0; i < n; i++ { - 
subscription, err := s.Subscribe( - ctx, - clientID, - query.MustCompile(fmt.Sprintf("abci.Account.Owner = 'Ivan' AND abci.Invoices.Number = %d", i)), - ) - if err != nil { - b.Fatal(err) - } - go func() { - for { - select { - case <-subscription.Out(): - continue - case <-subscription.Canceled(): - return - } - } - }() - } - - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - events := []abci.Event{ - { - Type: "abci.Account", - Attributes: []abci.EventAttribute{{Key: "Owner", Value: "Ivan"}}, - }, - { - Type: "abci.Invoices", - Attributes: []abci.EventAttribute{{Key: "Number", Value: string(rune(i))}}, - }, - } - - require.NoError(b, s.PublishWithEvents(ctx, "Gamora", events)) - } -} - -func benchmarkNClientsOneQuery(n int, b *testing.B) { - s := pubsub.NewServer() - err := s.Start() - require.NoError(b, err) - b.Cleanup(func() { - if err := s.Stop(); err != nil { - b.Error(err) - } - }) - - ctx := context.Background() - q := query.MustCompile("abci.Account.Owner = 'Ivan' AND abci.Invoices.Number = 1") - for i := 0; i < n; i++ { - id := fmt.Sprintf("clientID-%d", i+1) - subscription, err := s.Subscribe(ctx, id, q) - if err != nil { - b.Fatal(err) - } - go func() { - for { - select { - case <-subscription.Out(): - continue - case <-subscription.Canceled(): - return - } - } - }() - } - - b.ReportAllocs() - b.ResetTimer() - - for i := 0; i < b.N; i++ { - events := []abci.Event{ - { - Type: "abci.Account", - Attributes: []abci.EventAttribute{{Key: "Owner", Value: "Ivan"}}, - }, - { - Type: "abci.Invoices", - Attributes: []abci.EventAttribute{{Key: "Number", Value: "1"}}, - }, - } - - require.NoError(b, s.PublishWithEvents(ctx, "Gamora", events)) - } -} - -// HELPERS - -func assertReceive(t *testing.T, expected interface{}, ch <-chan pubsub.Message, msgAndArgs ...interface{}) { - select { - case actual := <-ch: - require.Equal(t, expected, actual.Data(), msgAndArgs...) - case <-time.After(1 * time.Second): - t.Errorf("expected to receive %v from the channel, got nothing after 1s", expected) - debug.PrintStack() - } -} - -func assertCanceled(t *testing.T, subscription *pubsub.Subscription, err error) { - _, ok := <-subscription.Canceled() - require.False(t, ok) - require.Equal(t, err, subscription.Err()) -} diff --git a/libs/pubsub/subscription.go b/libs/pubsub/subscription.go deleted file mode 100644 index 16c50e4d64..0000000000 --- a/libs/pubsub/subscription.go +++ /dev/null @@ -1,112 +0,0 @@ -package pubsub - -import ( - "errors" - "fmt" - - "github.com/google/uuid" - "github.com/tendermint/tendermint/abci/types" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" -) - -var ( - // ErrUnsubscribed is returned by Err when a client unsubscribes. - ErrUnsubscribed = errors.New("client unsubscribed") - - // ErrOutOfCapacity is returned by Err when a client is not pulling messages - // fast enough. Note the client's subscription will be terminated. - ErrOutOfCapacity = errors.New("internal subscription event buffer is out of capacity") -) - -// A Subscription represents a client subscription for a particular query and -// consists of three things: -// 1) channel onto which messages and events are published -// 2) channel which is closed if a client is too slow or choose to unsubscribe -// 3) err indicating the reason for (2) -type Subscription struct { - id string - out chan Message - - canceled chan struct{} - mtx tmsync.RWMutex - err error -} - -// NewSubscription returns a new subscription with the given outCapacity. 
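(NewSubscription follows below.) The consumer side of a Subscription is a select over its two channels; note the receive is msg := <-subscription.Out(), correcting the msg <- typo in the deleted package doc's example:

func consume(subscription *pubsub.Subscription) error {
	for {
		select {
		case msg := <-subscription.Out():
			_ = msg // handle msg.Data() and msg.Events()
		case <-subscription.Canceled():
			// Err reports why: ErrUnsubscribed, or ErrOutOfCapacity
			// when the Out channel filled up.
			return subscription.Err()
		}
	}
}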
-func NewSubscription(outCapacity int) *Subscription { - return &Subscription{ - id: uuid.NewString(), - out: make(chan Message, outCapacity), - canceled: make(chan struct{}), - } -} - -// Out returns a channel onto which messages and events are published. -// Unsubscribe/UnsubscribeAll does not close the channel to avoid clients from -// receiving a nil message. -func (s *Subscription) Out() <-chan Message { - return s.out -} - -func (s *Subscription) ID() string { return s.id } - -// Canceled returns a channel that's closed when the subscription is -// terminated and supposed to be used in a select statement. -func (s *Subscription) Canceled() <-chan struct{} { - return s.canceled -} - -// Err returns nil if the channel returned by Canceled is not yet closed. -// If the channel is closed, Err returns a non-nil error explaining why: -// - ErrUnsubscribed if the subscriber choose to unsubscribe, -// - ErrOutOfCapacity if the subscriber is not pulling messages fast enough -// and the channel returned by Out became full, -// After Err returns a non-nil error, successive calls to Err return the same -// error. -func (s *Subscription) Err() error { - s.mtx.RLock() - defer s.mtx.RUnlock() - return s.err -} - -func (s *Subscription) cancel(err error) { - s.mtx.Lock() - defer s.mtx.Unlock() - defer func() { - perr := recover() - if err == nil && perr != nil { - err = fmt.Errorf("problem closing subscription: %v", perr) - } - }() - - if s.err == nil && err != nil { - s.err = err - } - - close(s.canceled) -} - -// Message glues data and events together. -type Message struct { - subID string - data interface{} - events []types.Event -} - -func NewMessage(subID string, data interface{}, events []types.Event) Message { - return Message{ - subID: subID, - data: data, - events: events, - } -} - -// SubscriptionID returns the unique identifier for the subscription -// that produced this message. -func (msg Message) SubscriptionID() string { return msg.subID } - -// Data returns an original data published. -func (msg Message) Data() interface{} { return msg.data } - -// Events returns events, which matched the client's query. -func (msg Message) Events() []types.Event { return msg.events } diff --git a/libs/rand/random.go b/libs/rand/random.go index b7c2dd0797..6b486a7fdf 100644 --- a/libs/rand/random.go +++ b/libs/rand/random.go @@ -1,9 +1,6 @@ package rand import ( - crand "crypto/rand" - "encoding/binary" - "fmt" mrand "math/rand" ) @@ -11,37 +8,6 @@ const ( strChars = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" // 62 characters ) -func init() { - Reseed() -} - -// NewRand returns a prng, that is seeded with OS randomness. -// The OS randomness is obtained from crypto/rand, however, like with any math/rand.Rand -// object none of the provided methods are suitable for cryptographic usage. -// -// Note that the returned instance of math/rand's Rand is not -// suitable for concurrent use by multiple goroutines. -// -// For concurrent use, call Reseed to reseed math/rand's default source and -// use math/rand's top-level convenience functions instead. -func NewRand() *mrand.Rand { - seed := crandSeed() - // nolint:gosec // G404: Use of weak random number generator - return mrand.New(mrand.NewSource(seed)) -} - -// Reseed conveniently re-seeds the default Source of math/rand with -// randomness obtained from crypto/rand. -// -// Note that this does not make math/rand suitable for cryptographic usage. 
-// -// Use math/rand's top-level convenience functions remain suitable -// for concurrent use by multiple goroutines. -func Reseed() { - seed := crandSeed() - mrand.Seed(seed) -} - // Str constructs a random alphanumeric string of given length // from math/rand's global default Source. func Str(length int) string { return buildString(length, mrand.Int63) } @@ -83,12 +49,3 @@ func Bytes(n int) []byte { } return bs } - -func crandSeed() int64 { - var seed int64 - err := binary.Read(crand.Reader, binary.BigEndian, &seed) - if err != nil { - panic(fmt.Sprintf("could nor read random seed from crypto/rand: %v", err)) - } - return seed -} diff --git a/libs/service/service.go b/libs/service/service.go index 0af2439950..3ce08e7dab 100644 --- a/libs/service/service.go +++ b/libs/service/service.go @@ -1,74 +1,55 @@ package service import ( + "context" "errors" - "fmt" - "sync/atomic" + "sync" "github.com/tendermint/tendermint/libs/log" ) var ( - // ErrAlreadyStarted is returned when somebody tries to start an already - // running service. - ErrAlreadyStarted = errors.New("already started") - // ErrAlreadyStopped is returned when somebody tries to stop an already + // errAlreadyStopped is returned when somebody tries to stop an already // stopped service (without resetting it). - ErrAlreadyStopped = errors.New("already stopped") - // ErrNotStarted is returned when somebody tries to stop a not running - // service. - ErrNotStarted = errors.New("not started") + errAlreadyStopped = errors.New("already stopped") + + _ Service = (*BaseService)(nil) ) // Service defines a service that can be started, stopped, and reset. type Service interface { - // Start the service. - // If it's already started or stopped, will return an error. - // If OnStart() returns an error, it's returned by Start() - Start() error - OnStart() error - - // Stop the service. - // If it's already stopped, will return an error. - // OnStop must never error. - Stop() error - OnStop() - - // Reset the service. - // Panics by default - must be overwritten to enable reset. - Reset() error - OnReset() error + // Start is called to start the service, which should run until + // the context terminates. If the service is already running, Start + // must report an error. + Start(context.Context) error // Return true if the service is running IsRunning() bool - // Quit returns a channel, which is closed once service is stopped. - Quit() <-chan struct{} - - // String representation of the service - String() string - - // SetLogger sets a logger. - SetLogger(log.Logger) - // Wait blocks until the service is stopped. Wait() } +// Implementation describes the concrete service implementation that a +// BaseService wraps. +type Implementation interface { + // Called by the service's Start method. + OnStart(context.Context) error + + // Called when the service's context is canceled. + OnStop() +} + /* Classical-inheritance-style service declarations. Services can be started, then -stopped, then optionally restarted. +stopped, but cannot be restarted. -Users can override the OnStart/OnStop methods. In the absence of errors, these +Users must implement OnStart/OnStop methods. In the absence of errors, these methods are guaranteed to be called at most once. If OnStart returns an error, service won't be marked as started, so the user can call Start again. -A call to Reset will panic, unless OnReset is overwritten, allowing -OnStart/OnStop to be called again. - -The caller must ensure that Start and Stop are not called concurrently. 
- -It is ok to call Stop without calling Start first. +The BaseService implementation ensures that the OnStop method is +called after the context passed to Start is canceled. Typical usage: @@ -85,141 +66,135 @@ Typical usage: return fs } - func (fs *FooService) OnStart() error { - fs.BaseService.OnStart() // Always call the overridden method. + func (fs *FooService) OnStart(ctx context.Context) error { // initialize private fields // start subroutines, etc. } - func (fs *FooService) OnStop() error { - fs.BaseService.OnStop() // Always call the overridden method. - // close/destroy private fields - // stop subroutines, etc. + func (fs *FooService) OnStop() { + // close/destroy private fields and releases resources } */ type BaseService struct { - Logger log.Logger - name string - started uint32 // atomic - stopped uint32 // atomic - quit chan struct{} + logger log.Logger + name string + mtx sync.Mutex + quit <-chan (struct{}) + cancel context.CancelFunc // The "subclass" of BaseService - impl Service + impl Implementation } // NewBaseService creates a new BaseService. -func NewBaseService(logger log.Logger, name string, impl Service) *BaseService { - if logger == nil { - logger = log.NewNopLogger() - } - +func NewBaseService(logger log.Logger, name string, impl Implementation) *BaseService { return &BaseService{ - Logger: logger, + logger: logger, name: name, - quit: make(chan struct{}), impl: impl, } } -// SetLogger implements Service by setting a logger. -func (bs *BaseService) SetLogger(l log.Logger) { - bs.Logger = l -} - -// Start implements Service by calling OnStart (if defined). An error will be -// returned if the service is already running or stopped. Not to start the -// stopped service, you need to call Reset. -func (bs *BaseService) Start() error { - if atomic.CompareAndSwapUint32(&bs.started, 0, 1) { - if atomic.LoadUint32(&bs.stopped) == 1 { - bs.Logger.Error("not starting service; already stopped", "service", bs.name, "impl", bs.impl.String()) - atomic.StoreUint32(&bs.started, 0) - return ErrAlreadyStopped - } - - bs.Logger.Info("starting service", "service", bs.name, "impl", bs.impl.String()) +// Start starts the Service and calls its OnStart method. An error +// will be returned if the service is stopped, but not if it is +// already running. +func (bs *BaseService) Start(ctx context.Context) error { + bs.mtx.Lock() + defer bs.mtx.Unlock() - if err := bs.impl.OnStart(); err != nil { - // revert flag - atomic.StoreUint32(&bs.started, 0) - return err - } + if bs.quit != nil { return nil } - bs.Logger.Debug("not starting service; already started", "service", bs.name, "impl", bs.impl.String()) - return ErrAlreadyStarted -} - -// OnStart implements Service by doing nothing. -// NOTE: Do not put anything in here, -// that way users don't need to call BaseService.OnStart() -func (bs *BaseService) OnStart() error { return nil } - -// Stop implements Service by calling OnStop (if defined) and closing quit -// channel. An error will be returned if the service is already stopped. 
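The new Start/Stop/IsRunning logic in this hunk encodes the whole lifecycle in the quit channel: nil means never started, open means running, closed means stopped. A tiny self-contained sketch of that tri-state check (not the package's actual code):

    package main

    import "fmt"

    // state reports the service phase from the quit channel alone, mirroring
    // the checks BaseService performs under its mutex.
    func state(quit <-chan struct{}) string {
    	if quit == nil {
    		return "not started"
    	}
    	select {
    	case <-quit:
    		return "stopped"
    	default:
    		return "running"
    	}
    }

    func main() {
    	var quit chan struct{}
    	fmt.Println(state(quit)) // not started
    	quit = make(chan struct{})
    	fmt.Println(state(quit)) // running
    	close(quit)
    	fmt.Println(state(quit)) // stopped
    }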
-func (bs *BaseService) Stop() error { - if atomic.CompareAndSwapUint32(&bs.stopped, 0, 1) { - if atomic.LoadUint32(&bs.started) == 0 { - bs.Logger.Error("not stopping service; not started yet", "service", bs.name, "impl", bs.impl.String()) - atomic.StoreUint32(&bs.stopped, 0) - return ErrNotStarted + select { + case <-bs.quit: + return errAlreadyStopped + default: + bs.logger.Info("starting service", "service", bs.name, "impl", bs.name) + if err := bs.impl.OnStart(ctx); err != nil { + return err } - bs.Logger.Info("stopping service", "service", bs.name, "impl", bs.impl.String()) - bs.impl.OnStop() - close(bs.quit) + // we need a separate context to ensure that we start + // a thread that will get cleaned up and that the + // Stop/Wait functions work as expected. + srvCtx, cancel := context.WithCancel(context.Background()) + bs.cancel = cancel + bs.quit = srvCtx.Done() + + go func(ctx context.Context) { + select { + case <-srvCtx.Done(): + // this means stop was called manually + return + case <-ctx.Done(): + bs.Stop() + } + + bs.logger.Info("stopped service", + "service", bs.name) + }(ctx) return nil } - - bs.Logger.Debug("not stopping service; already stopped", "service", bs.name, "impl", bs.impl.String()) - return ErrAlreadyStopped } -// OnStop implements Service by doing nothing. -// NOTE: Do not put anything in here, -// that way users don't need to call BaseService.OnStop() -func (bs *BaseService) OnStop() {} - -// Reset implements Service by calling OnReset callback (if defined). An error -// will be returned if the service is running. -func (bs *BaseService) Reset() error { - if !atomic.CompareAndSwapUint32(&bs.stopped, 1, 0) { - bs.Logger.Debug("cannot reset service; not stopped", "service", bs.name, "impl", bs.impl.String()) - return fmt.Errorf("can't reset running %s", bs.name) - } +// Stop manually terminates the service by calling OnStop method from +// the implementation and releases all resources related to the +// service. +func (bs *BaseService) Stop() { + bs.mtx.Lock() + defer bs.mtx.Unlock() - // whether or not we've started, we can reset - atomic.CompareAndSwapUint32(&bs.started, 1, 0) + if bs.quit == nil { + return + } - bs.quit = make(chan struct{}) - return bs.impl.OnReset() -} + select { + case <-bs.quit: + return + default: + bs.logger.Info("stopping service", "service", bs.name) + bs.impl.OnStop() + bs.cancel() -// OnReset implements Service by panicking. -func (bs *BaseService) OnReset() error { - panic("The service cannot be reset") + return + } } // IsRunning implements Service by returning true or false depending on the // service's state. func (bs *BaseService) IsRunning() bool { - return atomic.LoadUint32(&bs.started) == 1 && atomic.LoadUint32(&bs.stopped) == 0 -} + bs.mtx.Lock() + defer bs.mtx.Unlock() -// Wait blocks until the service is stopped. -func (bs *BaseService) Wait() { - <-bs.quit -} + if bs.quit == nil { + return false + } -// String implements Service by returning a string representation of the service. -func (bs *BaseService) String() string { - return bs.name + select { + case <-bs.quit: + return false + default: + return true + } } -// Quit Implements Service by returning a quit channel. -func (bs *BaseService) Quit() <-chan struct{} { +func (bs *BaseService) getWait() <-chan struct{} { + bs.mtx.Lock() + defer bs.mtx.Unlock() + + if bs.quit == nil { + out := make(chan struct{}) + close(out) + return out + } + return bs.quit } + +// Wait blocks until the service is stopped. 
+func (bs *BaseService) Wait() { <-bs.getWait() } + +// String provides a human-friendly representation of the service. +func (bs *BaseService) String() string { return bs.name } diff --git a/libs/service/service_test.go b/libs/service/service_test.go index 7abc6f4fba..d0b8ce57e3 100644 --- a/libs/service/service_test.go +++ b/libs/service/service_test.go @@ -1,57 +1,137 @@ package service import ( + "context" + "sync" "testing" "time" + "github.com/fortytw2/leaktest" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/libs/log" ) type testService struct { + started bool + stopped bool + multiStopped bool + mu sync.Mutex BaseService } -func (testService) OnReset() error { +func (t *testService) OnStop() { + t.mu.Lock() + defer t.mu.Unlock() + if t.stopped == true { + t.multiStopped = true + } + t.stopped = true +} +func (t *testService) OnStart(context.Context) error { + t.mu.Lock() + defer t.mu.Unlock() + + t.started = true return nil } -func TestBaseServiceWait(t *testing.T) { - ts := &testService{} - ts.BaseService = *NewBaseService(nil, "TestService", ts) - err := ts.Start() - require.NoError(t, err) - - waitFinished := make(chan struct{}) - go func() { - ts.Wait() - waitFinished <- struct{}{} - }() - - go ts.Stop() //nolint:errcheck // ignore for tests - - select { - case <-waitFinished: - // all good - case <-time.After(100 * time.Millisecond): - t.Fatal("expected Wait() to finish within 100 ms.") - } +func (t *testService) isStarted() bool { + t.mu.Lock() + defer t.mu.Unlock() + return t.started } -func TestBaseServiceReset(t *testing.T) { - ts := &testService{} - ts.BaseService = *NewBaseService(nil, "TestService", ts) - err := ts.Start() - require.NoError(t, err) +func (t *testService) isStopped() bool { + t.mu.Lock() + defer t.mu.Unlock() + return t.stopped +} + +func (t *testService) isMultiStopped() bool { + t.mu.Lock() + defer t.mu.Unlock() + return t.multiStopped +} + +func TestBaseService(t *testing.T) { + t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + + t.Run("Wait", func(t *testing.T) { + wctx, wcancel := context.WithCancel(ctx) + defer wcancel() + ts := &testService{} + ts.BaseService = *NewBaseService(logger, t.Name(), ts) + err := ts.Start(wctx) + require.NoError(t, err) + require.True(t, ts.isStarted()) + + waitFinished := make(chan struct{}) + wcancel() + go func() { + ts.Wait() + close(waitFinished) + }() + + select { + case <-waitFinished: + assert.True(t, ts.isStopped(), "failed to stop") + assert.False(t, ts.IsRunning(), "is not running") + + case <-time.After(100 * time.Millisecond): + t.Fatal("expected Wait() to finish within 100 ms.") + } + }) + t.Run("ManualStop", func(t *testing.T) { + ts := &testService{} + ts.BaseService = *NewBaseService(logger, t.Name(), ts) + require.False(t, ts.IsRunning()) + require.False(t, ts.isStarted()) + require.NoError(t, ts.Start(ctx)) + + require.True(t, ts.isStarted()) + + ts.Stop() + require.True(t, ts.isStopped()) + require.False(t, ts.IsRunning()) + }) + t.Run("MultiStop", func(t *testing.T) { + t.Run("SingleThreaded", func(t *testing.T) { + ts := &testService{} + ts.BaseService = *NewBaseService(logger, t.Name(), ts) + + require.NoError(t, ts.Start(ctx)) + require.True(t, ts.isStarted()) + ts.Stop() + require.True(t, ts.isStopped()) + require.False(t, ts.isMultiStopped()) + ts.Stop() + require.False(t, ts.isMultiStopped()) + }) + t.Run("MultiThreaded", func(t *testing.T) 
{ + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ts := &testService{} + ts.BaseService = *NewBaseService(logger, t.Name(), ts) + + require.NoError(t, ts.Start(ctx)) + require.True(t, ts.isStarted()) + + go ts.Stop() + go cancel() - err = ts.Reset() - require.Error(t, err, "expected cant reset service error") + ts.Wait() - err = ts.Stop() - require.NoError(t, err) + require.True(t, ts.isStopped()) + require.False(t, ts.isMultiStopped()) + }) - err = ts.Reset() - require.NoError(t, err) + }) - err = ts.Start() - require.NoError(t, err) } diff --git a/libs/strings/string.go b/libs/strings/string.go index b09c00063a..95ea03b5a6 100644 --- a/libs/strings/string.go +++ b/libs/strings/string.go @@ -28,50 +28,12 @@ func SplitAndTrimEmpty(s, sep, cutset string) []string { return nonEmptyStrings } -// StringInSlice returns true if a is found the list. -func StringInSlice(a string, list []string) bool { - for _, b := range list { - if b == a { - return true - } - } - return false -} - -// SplitAndTrim slices s into all subslices separated by sep and returns a -// slice of the string s with all leading and trailing Unicode code points -// contained in cutset removed. If sep is empty, SplitAndTrim splits after each -// UTF-8 sequence. First part is equivalent to strings.SplitN with a count of -// -1. -func SplitAndTrim(s, sep, cutset string) []string { - if s == "" { - return []string{} - } - - spl := strings.Split(s, sep) - for i := 0; i < len(spl); i++ { - spl[i] = strings.Trim(spl[i], cutset) - } - return spl } - -// Returns true if s is a non-empty printable non-tab ascii character. -func IsASCIIText(s string) bool { +// ASCIITrim removes spaces from an ASCII string, erroring if the +// sequence is not an ASCII string. +func ASCIITrim(s string) (string, error) { if len(s) == 0 { - return false - } - for _, b := range []byte(s) { - if 32 <= b && b <= 126 { - // good - } else { - return false - } + return "", nil } - return true -} - -// NOTE: Assumes that s is ASCII as per IsASCIIText(), otherwise panics.
-func ASCIITrim(s string) string { r := make([]byte, 0, len(s)) for _, b := range []byte(s) { switch { @@ -80,10 +42,10 @@ func ASCIITrim(s string) string { case 32 < b && b <= 126: r = append(r, b) default: - panic(fmt.Sprintf("non-ASCII (non-tab) char 0x%X", b)) + return "", fmt.Errorf("non-ASCII (non-tab) char 0x%X", b) } } - return string(r) + return string(r), nil } // StringSliceEqual checks if string slices a and b are equal diff --git a/libs/strings/string_test.go b/libs/strings/string_test.go index c561163934..79caf5901f 100644 --- a/libs/strings/string_test.go +++ b/libs/strings/string_test.go @@ -25,34 +25,48 @@ func TestSplitAndTrimEmpty(t *testing.T) { } } -func TestStringInSlice(t *testing.T) { - require.True(t, StringInSlice("a", []string{"a", "b", "c"})) - require.False(t, StringInSlice("d", []string{"a", "b", "c"})) - require.True(t, StringInSlice("", []string{""})) - require.False(t, StringInSlice("", []string{})) -} - -func TestIsASCIIText(t *testing.T) { - notASCIIText := []string{ - "", "\xC2", "\xC2\xA2", "\xFF", "\x80", "\xF0", "\n", "\t", - } - for _, v := range notASCIIText { - require.False(t, IsASCIIText(v), "%q is not ascii-text", v) - } - asciiText := []string{ - " ", ".", "x", "$", "_", "abcdefg;", "-", "0x00", "0", "123", - } - for _, v := range asciiText { - require.True(t, IsASCIIText(v), "%q is ascii-text", v) - } +func assertCorrectTrim(t *testing.T, input, expected string) { + t.Helper() + output, err := ASCIITrim(input) + require.NoError(t, err) + require.Equal(t, expected, output) } func TestASCIITrim(t *testing.T) { - require.Equal(t, ASCIITrim(" "), "") - require.Equal(t, ASCIITrim(" a"), "a") - require.Equal(t, ASCIITrim("a "), "a") - require.Equal(t, ASCIITrim(" a "), "a") - require.Panics(t, func() { ASCIITrim("\xC2\xA2") }) + t.Run("Validation", func(t *testing.T) { + t.Run("NonASCII", func(t *testing.T) { + notASCIIText := []string{ + "\xC2", "\xC2\xA2", "\xFF", "\x80", "\xF0", "\n", "\t", + } + for _, v := range notASCIIText { + _, err := ASCIITrim(v) + require.Error(t, err, "%q is not ascii-text", v) + } + }) + t.Run("EmptyString", func(t *testing.T) { + out, err := ASCIITrim("") + require.NoError(t, err) + require.Zero(t, out) + }) + t.Run("ASCIIText", func(t *testing.T) { + asciiText := []string{ + " ", ".", "x", "$", "_", "abcdefg;", "-", "0x00", "0", "123", + } + for _, v := range asciiText { + _, err := ASCIITrim(v) + require.NoError(t, err, "%q is ascii-text", v) + } + }) + _, err := ASCIITrim("\xC2\xA2") + require.Error(t, err) + }) + t.Run("Trimming", func(t *testing.T) { + assertCorrectTrim(t, " ", "") + assertCorrectTrim(t, " a", "a") + assertCorrectTrim(t, "a ", "a") + assertCorrectTrim(t, " a ", "a") + }) + } func TestStringSliceEqual(t *testing.T) { diff --git a/libs/sync/atomic_bool.go b/libs/sync/atomic_bool.go deleted file mode 100644 index 1a530b5968..0000000000 --- a/libs/sync/atomic_bool.go +++ /dev/null @@ -1,33 +0,0 @@ -package sync - -import "sync/atomic" - -// AtomicBool is an atomic Boolean. -// Its methods are all atomic, thus safe to be called by multiple goroutines simultaneously. -// Note: When embedding into a struct one should always use *AtomicBool to avoid copy. -// it's a simple implmentation from https://github.com/tevino/abool -type AtomicBool int32 - -// NewBool creates an AtomicBool with given default value. -func NewBool(ok bool) *AtomicBool { - ab := new(AtomicBool) - if ok { - ab.Set() - } - return ab -} - -// Set sets the Boolean to true. 
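Since ASCIITrim now returns (string, error) instead of panicking, call sites change shape. A hedged usage sketch, assuming the conventional tmstrings alias for libs/strings:

    package main

    import (
    	"fmt"

    	tmstrings "github.com/tendermint/tendermint/libs/strings"
    )

    func main() {
    	// The new signature surfaces invalid input as an error instead of a
    	// panic, so callers handle it explicitly. Note that all space bytes
    	// are dropped, not just leading and trailing ones.
    	out, err := tmstrings.ASCIITrim(" abc ")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Printf("%q\n", out) // "abc"

    	// Non-ASCII (and tab/newline) bytes now yield an error, not a panic.
    	if _, err := tmstrings.ASCIITrim("\xC2\xA2"); err != nil {
    		fmt.Println("rejected:", err)
    	}
    }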
-func (ab *AtomicBool) Set() { - atomic.StoreInt32((*int32)(ab), 1) -} - -// UnSet sets the Boolean to false. -func (ab *AtomicBool) UnSet() { - atomic.StoreInt32((*int32)(ab), 0) -} - -// IsSet returns whether the Boolean is true. -func (ab *AtomicBool) IsSet() bool { - return atomic.LoadInt32((*int32)(ab))&1 == 1 -} diff --git a/libs/sync/atomic_bool_test.go b/libs/sync/atomic_bool_test.go deleted file mode 100644 index 9531815e8e..0000000000 --- a/libs/sync/atomic_bool_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package sync - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestDefaultValue(t *testing.T) { - t.Parallel() - v := NewBool(false) - assert.False(t, v.IsSet()) - - v = NewBool(true) - assert.True(t, v.IsSet()) -} - -func TestSetUnSet(t *testing.T) { - t.Parallel() - v := NewBool(false) - - v.Set() - assert.True(t, v.IsSet()) - - v.UnSet() - assert.False(t, v.IsSet()) -} diff --git a/libs/time/mocks/source.go b/libs/time/mocks/source.go new file mode 100644 index 0000000000..7878d86f51 --- /dev/null +++ b/libs/time/mocks/source.go @@ -0,0 +1,40 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + testing "testing" + + mock "github.com/stretchr/testify/mock" + + time "time" +) + +// Source is an autogenerated mock type for the Source type +type Source struct { + mock.Mock +} + +// Now provides a mock function with given fields: +func (_m *Source) Now() time.Time { + ret := _m.Called() + + var r0 time.Time + if rf, ok := ret.Get(0).(func() time.Time); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(time.Time) + } + + return r0 +} + +// NewSource creates a new instance of Source. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. +func NewSource(t testing.TB) *Source { + mock := &Source{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/libs/time/time.go b/libs/time/time.go index 786f9bbb42..7ab45d8f14 100644 --- a/libs/time/time.go +++ b/libs/time/time.go @@ -15,3 +15,17 @@ func Now() time.Time { func Canonical(t time.Time) time.Time { return t.Round(0).UTC() } + +//go:generate ../../scripts/mockery_generate.sh Source + +// Source is an interface that defines a way to fetch the current time. +type Source interface { + Now() time.Time +} + +// DefaultSource implements the Source interface using the system clock provided by the standard library. +type DefaultSource struct{} + +func (DefaultSource) Now() time.Time { + return Now() +} diff --git a/light/client.go b/light/client.go index 456cbe1c8a..b729c8ccf8 100644 --- a/light/client.go +++ b/light/client.go @@ -10,9 +10,7 @@ import ( "time" "github.com/tendermint/tendermint/crypto" - dashcore "github.com/tendermint/tendermint/dashcore/rpc" - - tmsync "github.com/tendermint/tendermint/internal/libs/sync" + dashcore "github.com/tendermint/tendermint/dash/core" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/light/provider" "github.com/tendermint/tendermint/light/store" @@ -97,7 +95,7 @@ type Client struct { maxBlockLag time.Duration // Mutex for locking during changes of the light clients providers - providerMutex tmsync.Mutex + providerMutex sync.Mutex // Primary provider of new headers. primary provider.Provider // Providers used to "witness" new headers. 
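The Source interface added to libs/time above exists so that code under test can have the clock injected. A sketch with a hand-rolled fake (the generated mock in libs/time/mocks works too); nextDeadline is an invented helper for illustration:

    package main

    import (
    	"fmt"
    	"time"

    	tmtime "github.com/tendermint/tendermint/libs/time"
    )

    // fixedSource is a fake clock satisfying tmtime.Source.
    type fixedSource struct{ t time.Time }

    func (s fixedSource) Now() time.Time { return s.t }

    // nextDeadline shows why injecting a Source helps: the computation is
    // deterministic under test and uses the real clock in production.
    func nextDeadline(src tmtime.Source, d time.Duration) time.Time {
    	return src.Now().Add(d)
    }

    func main() {
    	frozen := fixedSource{t: time.Date(2006, 1, 2, 15, 4, 5, 0, time.UTC)}
    	fmt.Println(nextDeadline(frozen, time.Minute)) // 2006-01-02 15:05:05 +0000 UTC
    	fmt.Println(nextDeadline(tmtime.DefaultSource{}, time.Minute))
    }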
@@ -112,19 +110,34 @@ type Client struct { pruningSize uint16 // Rpc client connected to dashd - dashCoreRPCClient dashcore.Client + dashCoreRPCClient dashcore.QuorumVerifier logger log.Logger } +func validatePrimaryAndWitnesses(primary provider.Provider, witnesses []provider.Provider) error { + witnessMap := make(map[string]struct{}) + for _, w := range witnesses { + if w.ID() == primary.ID() { + return fmt.Errorf("primary (%s) cannot be also configured as witness", primary.ID()) + } + if _, duplicate := witnessMap[w.ID()]; duplicate { + return fmt.Errorf("witness list must not contain duplicates; duplicate found: %s", w.ID()) + } + witnessMap[w.ID()] = struct{}{} + } + return nil +} + // NewClient returns a new light client. It returns an error if it fails to -// obtain the light block from the primary or they are invalid (e.g. trust +// obtain the light block from the primary, or they are invalid (e.g. trust // hash does not match with the one from the headers). // // Witnesses are providers, which will be used for cross-checking the primary -// provider. At least one witness must be given when skipping verification is -// used (default). A witness can become a primary iff the current primary is -// unavailable. +// provider. At least one witness should be given when skipping verification is +// used (default). A verified header is compared with the headers at same height +// obtained from the specified witnesses. A witness can become a primary iff the +// current primary is unavailable. // // See all Option(s) for the additional configuration. func NewClient( @@ -133,7 +146,7 @@ func NewClient( primary provider.Provider, witnesses []provider.Provider, trustedStore store.Store, - dashCoreRPCClient dashcore.Client, + dashCoreRPCClient dashcore.QuorumVerifier, options ...Option) (*Client, error) { return NewClientAtHeight(ctx, 0, chainID, primary, witnesses, trustedStore, dashCoreRPCClient, options...) @@ -146,8 +159,13 @@ func NewClientAtHeight( primary provider.Provider, witnesses []provider.Provider, trustedStore store.Store, - dashCoreRPCClient dashcore.Client, - options ...Option) (*Client, error) { + dashCoreRPCClient dashcore.QuorumVerifier, + options ...Option, +) (*Client, error) { + // Check that the witness list does not include duplicates or the primary + if err := validatePrimaryAndWitnesses(primary, witnesses); err != nil { + return nil, err + } c, err := NewClientFromTrustedStore(chainID, primary, witnesses, trustedStore, dashCoreRPCClient, options...) if err != nil { @@ -171,13 +189,18 @@ func NewClientFromTrustedStore( primary provider.Provider, witnesses []provider.Provider, trustedStore store.Store, - dashCoreRPCClient dashcore.Client, + dashCoreRPCClient dashcore.QuorumVerifier, options ...Option) (*Client, error) { if dashCoreRPCClient == nil { return nil, ErrNoDashCoreClient } + // Check that the witness list does not include duplicates or the primary + if err := validatePrimaryAndWitnesses(primary, witnesses); err != nil { + return nil, err + } + c := &Client{ chainID: chainID, verificationMode: dashCoreVerification, @@ -196,11 +219,6 @@ func NewClientFromTrustedStore( o(c) } - // Validate the number of witnesses. 
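The duplicate-witness validation introduced above is a simple set-membership scan. Since validatePrimaryAndWitnesses is unexported, here is an equivalent standalone sketch (checkWitnesses is a hypothetical name):

    package main

    import "fmt"

    // checkWitnesses mirrors the validation added above: the primary must not
    // appear in the witness list, and witnesses must be unique by ID.
    func checkWitnesses(primaryID string, witnessIDs []string) error {
    	seen := make(map[string]struct{}, len(witnessIDs))
    	for _, id := range witnessIDs {
    		if id == primaryID {
    			return fmt.Errorf("primary (%s) cannot be also configured as witness", id)
    		}
    		if _, dup := seen[id]; dup {
    			return fmt.Errorf("witness list must not contain duplicates; duplicate found: %s", id)
    		}
    		seen[id] = struct{}{}
    	}
    	return nil
    }

    func main() {
    	fmt.Println(checkWitnesses("p", []string{"w1", "w2"})) // <nil>
    	fmt.Println(checkWitnesses("p", []string{"w1", "w1"})) // duplicate error
    	fmt.Println(checkWitnesses("p", []string{"p"}))        // primary-as-witness error
    }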
- if len(c.witnesses) == 0 { - return nil, ErrNoWitnesses - } - if err := c.restoreTrustedLightBlock(); err != nil { return nil, err } @@ -394,7 +412,7 @@ func (c *Client) VerifyLightBlockAtHeight(ctx context.Context, height int64, now // headers are not adjacent, verifySkipping is performed and necessary (not all) // intermediate headers will be requested. See the specification for details. // Intermediate headers are not saved to database. -// https://github.com/tendermint/spec/blob/master/spec/consensus/light-client.md +// https://github.com/tendermint/tendermint/blob/master/spec/light-client/README.md // // If the header, which is older than the currently trusted header, is // requested and the light client does not have it, VerifyHeader will perform: @@ -486,12 +504,7 @@ func (c *Client) verifyBlockWithDashCore(ctx context.Context, newLightBlock *typ if err := c.verifyBlockSignatureWithDashCore(ctx, newLightBlock); err != nil { return err } - - if err := c.verifyStateIDSignatureWithDashCore(ctx, newLightBlock); err != nil { - return err - } - - return nil + return c.verifyStateIDSignatureWithDashCore(ctx, newLightBlock) } func (c *Client) verifyBlockSignatureWithDashCore(ctx context.Context, newLightBlock *types.LightBlock) error { @@ -503,7 +516,7 @@ func (c *Client) verifyBlockSignatureWithDashCore(ctx context.Context, newLightB blockSignBytes := types.VoteBlockSignBytes(c.chainID, protoVote) - blockMessageHash := crypto.Sha256(blockSignBytes) + blockMessageHash := crypto.Checksum(blockSignBytes) blockRequestID := types.VoteBlockRequestIDProto(protoVote) blockSignatureIsValid, err := c.dashCoreRPCClient.QuorumVerify( @@ -535,7 +548,7 @@ func (c *Client) verifyStateIDSignatureWithDashCore(ctx context.Context, newLigh stateSignBytes := stateID.SignBytes(c.chainID) - stateMessageHash := crypto.Sha256(stateSignBytes) + stateMessageHash := crypto.Checksum(stateSignBytes) stateRequestID := stateID.SignRequestID() stateSignature := newLightBlock.Commit.ThresholdStateSignature @@ -709,7 +722,6 @@ func (c *Client) getLightBlock(ctx context.Context, p provider.Provider, height // NOTE: requires a providerMutex lock func (c *Client) removeWitnesses(indexes []int) error { - // check that we will still have witnesses remaining if len(c.witnesses) <= len(indexes) { return ErrNoWitnesses } @@ -831,8 +843,8 @@ func (c *Client) compareFirstHeaderWithWitnesses(ctx context.Context, h *types.S c.providerMutex.Lock() defer c.providerMutex.Unlock() - if len(c.witnesses) < 1 { - return ErrNoWitnesses + if len(c.witnesses) == 0 { + return nil } errc := make(chan error, len(c.witnesses)) @@ -853,7 +865,8 @@ func (c *Client) compareFirstHeaderWithWitnesses(ctx context.Context, h *types.S c.logger.Error(fmt.Sprintf("witness #%d has a different header. Please check primary is correct and"+ " remove witness. 
Otherwise, use the different primary", e.WitnessIndex), "witness", c.witnesses[e.WitnessIndex]) - return err + // if attempt to generate conflicting headers failed then remove witness + witnessesToRemove = append(witnessesToRemove, e.WitnessIndex) case errBadWitness: // If witness sent us an invalid header, then remove it c.logger.Info("witness sent an invalid light block, removing...", @@ -875,3 +888,30 @@ func (c *Client) compareFirstHeaderWithWitnesses(ctx context.Context, h *types.S // remove all witnesses that misbehaved return c.removeWitnesses(witnessesToRemove) } + +func (c *Client) Status(ctx context.Context) *types.LightClientInfo { + chunks := make([]string, len(c.witnesses)) + + // If primary is in witness list we do not want to count it twice in the number of peers + primaryNotInWitnessList := 1 + for i, val := range c.witnesses { + chunks[i] = val.ID() + if chunks[i] == c.primary.ID() { + primaryNotInWitnessList = 0 + } + } + + info := &types.LightClientInfo{ + PrimaryID: c.primary.ID(), + WitnessesID: chunks, + NumPeers: len(chunks) + primaryNotInWitnessList, + } + + if c.latestTrustedBlock != nil { + info.LastTrustedHeight = c.latestTrustedBlock.Height + info.LastTrustedHash = c.latestTrustedBlock.Hash() + info.LatestBlockTime = c.latestTrustedBlock.Time + } + + return info +} diff --git a/light/client_benchmark_test.go b/light/client_benchmark_test.go index 95d35b73e6..e05f943e37 100644 --- a/light/client_benchmark_test.go +++ b/light/client_benchmark_test.go @@ -2,12 +2,13 @@ package light_test import ( "context" + "errors" "testing" "time" dbm "github.com/tendermint/tm-db" - dashcore "github.com/tendermint/tendermint/dashcore/rpc" + dashcore "github.com/tendermint/tendermint/dash/core" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/light" "github.com/tendermint/tendermint/light/provider" @@ -56,33 +57,34 @@ func (impl *providerBenchmarkImpl) LightBlock(ctx context.Context, height int64) } func (impl *providerBenchmarkImpl) ReportEvidence(_ context.Context, _ types.Evidence) error { - panic("not implemented") + return errors.New("not implemented") } -func setupDashCoreRPCMockForBenchmark(b *testing.B, validator types.PrivValidator) { - dashCoreMockClient = dashcore.NewMockClient(chainID, 100, validator, true) +// provierBenchmarkImpl does not have an ID iteself. 
+// Thus we return a sample string +func (impl *providerBenchmarkImpl) ID() string { return "ip-not-defined.com" } - b.Cleanup(func() { - dashCoreMockClient = nil - }) -} +func BenchmarkSequence(b *testing.B) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() -func BenchmarkDashCore(b *testing.B) { - headers, vals, privvals := genLightBlocksWithValidatorsRotatingEveryBlock(chainID, 1000, 100, bTime) + headers, vals, privvals := genLightBlocksWithValidatorsRotatingEveryBlock(b, chainID, 1000, 100, bTime) benchmarkFullNode := newProviderBenchmarkImpl(headers, vals) + logger := log.NewTestingLogger(b) + privval := privvals[0] - setupDashCoreRPCMockForBenchmark(b, privval) + dashCoreMockClient := dashcore.NewMockClient(chainID, 100, privval, true) c, err := light.NewClient( - context.Background(), + ctx, chainID, benchmarkFullNode, - []provider.Provider{benchmarkFullNode}, + nil, dbs.New(dbm.NewMemDB()), dashCoreMockClient, - light.Logger(log.TestingLogger()), + light.Logger(logger), ) if err != nil { b.Fatal(err) @@ -90,7 +92,7 @@ func BenchmarkDashCore(b *testing.B) { b.ResetTimer() for n := 0; n < b.N; n++ { - _, err = c.VerifyLightBlockAtHeight(context.Background(), 1000, bTime.Add(1000*time.Minute)) + _, err = c.VerifyLightBlockAtHeight(ctx, 1000, bTime.Add(1000*time.Minute)) if err != nil { b.Fatal(err) } diff --git a/light/client_test.go b/light/client_test.go index 405661a1f5..0baedb1fd5 100644 --- a/light/client_test.go +++ b/light/client_test.go @@ -11,14 +11,15 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - dashcore "github.com/tendermint/tendermint/dashcore/rpc" + dbm "github.com/tendermint/tm-db" + + dashcore "github.com/tendermint/tendermint/dash/core" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/light" "github.com/tendermint/tendermint/light/provider" provider_mocks "github.com/tendermint/tendermint/light/provider/mocks" dbs "github.com/tendermint/tendermint/light/store/db" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) const ( @@ -26,569 +27,629 @@ const ( llmqType = 100 ) -var ( - vals, privVals = types.RandValidatorSet(4) - keys = exposeMockPVKeys(privVals, vals.QuorumHash) - ctx = context.Background() - bTime, _ = time.Parse(time.RFC3339, "2006-01-02T15:04:05Z") - h1 = keys.GenSignedHeader(chainID, 1, bTime, nil, vals, vals, - hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)) - // 3/3 signed - h2 = keys.GenSignedHeaderLastBlockID(chainID, 2, bTime.Add(30*time.Minute), nil, vals, vals, - hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys), types.BlockID{Hash: h1.Hash()}) - // 3/3 signed - h3 = keys.GenSignedHeaderLastBlockID(chainID, 3, bTime.Add(1*time.Hour), nil, vals, vals, - hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys), types.BlockID{Hash: h2.Hash()}) - valSet = map[int64]*types.ValidatorSet{ - 1: vals, - 2: vals, - 3: vals, - 4: vals, - } - headerSet = map[int64]*types.SignedHeader{ - 1: h1, - // interim header (3/3 signed) - 2: h2, - // last header (3/3 signed) - 3: h3, - } - l1 = &types.LightBlock{SignedHeader: h1, ValidatorSet: vals} - l2 = &types.LightBlock{SignedHeader: h2, ValidatorSet: vals} - l3 = &types.LightBlock{SignedHeader: h3, ValidatorSet: vals} - - dashCoreMockClient dashcore.Client -) +var bTime time.Time -func setupDashCoreMockClient(t *testing.T) { - dashCoreMockClient = dashcore.NewMockClient(chainID, llmqType, 
privVals[0], true) +func init() { + var err error + bTime, err = time.Parse(time.RFC3339, "2006-01-02T15:04:05Z") + if err != nil { + panic(err) + } } -// // start from a large light block to make sure that the pivot height doesn't select a height outside -// // the appropriate range -// func TestClientLargeBisectionVerification(t *testing.T) { -// veryLargeFullNode := mockp.New(genMockNode(chainID, 100, 3, 0, bTime)) -// trustedLightBlock, err := veryLargeFullNode.LightBlock(ctx, 5) -// require.NoError(t, err) -// c, err := light.NewClient( -// ctx, -// chainID, -// light.TrustOptions{ -// Period: 4 * time.Hour, -// Height: trustedLightBlock.Height, -// Hash: trustedLightBlock.Hash(), -// }, -// veryLargeFullNode, -// []provider.Provider{veryLargeFullNode}, -// dbs.New(dbm.NewMemDB(), chainID), -// light.SkippingVerification(light.DefaultTrustLevel), -// ) -// require.NoError(t, err) -// h, err := c.Update(ctx, bTime.Add(100*time.Minute)) -// assert.NoError(t, err) -// h2, err := veryLargeFullNode.LightBlock(ctx, 100) -// require.NoError(t, err) -// assert.Equal(t, h, h2) -// } - -func TestClientBisectionBetweenTrustedHeaders(t *testing.T) { - setupDashCoreMockClient(t) - mockFullNode := mockNodeFromHeadersAndVals(headerSet, valSet) - c, err := light.NewClient( - ctx, - chainID, - mockFullNode, - []provider.Provider{mockFullNode}, - dbs.New(dbm.NewMemDB()), - dashCoreMockClient, - ) - require.NoError(t, err) - - _, err = c.VerifyLightBlockAtHeight(ctx, 3, bTime.Add(2*time.Hour)) - require.NoError(t, err) - - // confirm that the client already doesn't have the light block - _, err = c.TrustedLightBlock(2) - require.Error(t, err) +func TestClient(t *testing.T) { + var ( + vals, privVals = types.RandValidatorSet(4) + keys = exposeMockPVKeys(privVals, vals.QuorumHash) - // verify using bisection the light block between the two trusted light blocks - _, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(1*time.Hour)) - assert.NoError(t, err) -} + valSet = map[int64]*types.ValidatorSet{ + 1: vals, + 2: vals, + 3: vals, + 4: vals, + } + + h1 = keys.GenSignedHeader(t, chainID, 1, bTime, nil, vals, vals, + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)) + // 3/3 signed + h2 = keys.GenSignedHeaderLastBlockID(t, chainID, 2, bTime.Add(30*time.Minute), nil, vals, vals, + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys), types.BlockID{Hash: h1.Hash()}) + // 3/3 signed + h3 = keys.GenSignedHeaderLastBlockID(t, chainID, 3, bTime.Add(1*time.Hour), nil, vals, vals, + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys), types.BlockID{Hash: h2.Hash()}) + + headerSet = map[int64]*types.SignedHeader{ + 1: h1, + // interim header (3/3 signed) + 2: h2, + // last header (3/3 signed) + 3: h3, + } + l1 = &types.LightBlock{SignedHeader: h1, ValidatorSet: vals} + l2 = &types.LightBlock{SignedHeader: h2, ValidatorSet: vals} + l3 = &types.LightBlock{SignedHeader: h3, ValidatorSet: vals} -func TestClient_Cleanup(t *testing.T) { - setupDashCoreMockClient(t) - mockFullNode := &provider_mocks.Provider{} - mockFullNode.On("LightBlock", mock.Anything, int64(1)).Return(l1, nil) - - c, err := light.NewClientAtHeight( - ctx, - 1, - chainID, - mockFullNode, - []provider.Provider{mockFullNode}, - dbs.New(dbm.NewMemDB()), - dashCoreMockClient, - light.Logger(log.TestingLogger()), + dashCoreMockClient = dashcore.NewMockClient(chainID, llmqType, privVals[0], true) ) - require.NoError(t, err) - _, err = c.TrustedLightBlock(1) - require.NoError(t, err) - err = 
c.Cleanup() - require.NoError(t, err) - - // Check no light blocks exist after Cleanup. - l, err := c.TrustedLightBlock(1) - assert.Error(t, err) - assert.Nil(t, l) -} - -func TestClientRestoresTrustedHeaderAfterStartup(t *testing.T) { - setupDashCoreMockClient(t) - // 1. options.Hash == trustedHeader.Hash - t.Run("hashes should match", func(t *testing.T) { - mockNode := &provider_mocks.Provider{} - trustedStore := dbs.New(dbm.NewMemDB()) - err := trustedStore.SaveLightBlock(l1) - require.NoError(t, err) - - c, err := light.NewClient( + //t.Run("LargeBisectionVerification", func(t *testing.T) { + // // start from a large light block to make sure that the pivot height doesn't select a height outside + // // the appropriate range + // + // numBlocks := int64(300) + // mockHeaders, mockVals, _ := genLightBlocksWithKeys(t, numBlocks, 101, 2, bTime) + // + // lastBlock := &types.LightBlock{SignedHeader: mockHeaders[numBlocks], ValidatorSet: mockVals[numBlocks]} + // mockNode := &provider_mocks.Provider{} + // mockNode.On("LightBlock", mock.Anything, numBlocks). + // Return(lastBlock, nil) + // + // mockNode.On("LightBlock", mock.Anything, int64(200)). + // Return(&types.LightBlock{SignedHeader: mockHeaders[200], ValidatorSet: mockVals[200]}, nil) + // + // mockNode.On("LightBlock", mock.Anything, int64(256)). + // Return(&types.LightBlock{SignedHeader: mockHeaders[256], ValidatorSet: mockVals[256]}, nil) + // + // mockNode.On("LightBlock", mock.Anything, int64(0)).Return(lastBlock, nil) + // + // ctx, cancel := context.WithCancel(context.Background()) + // defer cancel() + // + // trustedLightBlock, err := mockNode.LightBlock(ctx, int64(200)) + // require.NoError(t, err) + // c, err := light.NewClient( + // ctx, + // chainID, + // light.TrustOptions{ + // Period: 4 * time.Hour, + // Height: trustedLightBlock.Height, + // Hash: trustedLightBlock.Hash(), + // }, + // mockNode, + // nil, + // dbs.New(dbm.NewMemDB()), + // light.SkippingVerification(light.DefaultTrustLevel), + // ) + // require.NoError(t, err) + // h, err := c.Update(ctx, bTime.Add(300*time.Minute)) + // assert.NoError(t, err) + // height, err := c.LastTrustedHeight() + // require.NoError(t, err) + // require.Equal(t, numBlocks, height) + // h2, err := mockNode.LightBlock(ctx, numBlocks) + // require.NoError(t, err) + // assert.Equal(t, h, h2) + // mockNode.AssertExpectations(t) + //}) + t.Run("BisectionBetweenTrustedHeaders", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mockFullNode := mockNodeFromHeadersAndVals(headerSet, valSet) + c, err := light.NewClientAtHeight( ctx, + 1, chainID, - mockNode, - []provider.Provider{mockNode}, - trustedStore, + mockFullNode, + nil, + dbs.New(dbm.NewMemDB()), dashCoreMockClient, - light.Logger(log.TestingLogger()), ) require.NoError(t, err) - l, err := c.TrustedLightBlock(1) - assert.NoError(t, err) - assert.NotNil(t, l) - assert.Equal(t, l.Hash(), h1.Hash()) - assert.Equal(t, l.ValidatorSet.Hash(), h1.ValidatorsHash.Bytes()) - mockNode.AssertExpectations(t) - }) - - // load the first three headers into the trusted store - t.Run("hashes should not match", func(t *testing.T) { - trustedStore := dbs.New(dbm.NewMemDB()) - err := trustedStore.SaveLightBlock(l1) - require.NoError(t, err) - - err = trustedStore.SaveLightBlock(l2) + _, err = c.VerifyLightBlockAtHeight(ctx, 3, bTime.Add(2*time.Hour)) require.NoError(t, err) - mockNode := &provider_mocks.Provider{} + // confirm that the client already doesn't have the light block + _, err = 
c.TrustedLightBlock(2) + require.Error(t, err) - c, err := light.NewClient( + // verify using bisection the light block between the two trusted light blocks + _, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(1*time.Hour)) + assert.NoError(t, err) + mockFullNode.AssertExpectations(t) + }) + t.Run("Cleanup", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + logger := log.NewNopLogger() + + mockFullNode := &provider_mocks.Provider{} + mockFullNode.On("LightBlock", mock.Anything, int64(1)).Return(l1, nil) + c, err := light.NewClientAtHeight( ctx, + 1, chainID, - mockNode, - []provider.Provider{mockNode}, - trustedStore, + mockFullNode, + nil, + dbs.New(dbm.NewMemDB()), dashCoreMockClient, - light.Logger(log.TestingLogger()), + light.Logger(logger), ) require.NoError(t, err) + _, err = c.TrustedLightBlock(1) + require.NoError(t, err) - // Check we still have the 1st light block. - l, err := c.TrustedLightBlock(1) - assert.NoError(t, err) - assert.NotNil(t, l) - assert.Equal(t, l.Hash(), h1.Hash()) - assert.NoError(t, l.ValidateBasic(chainID)) + err = c.Cleanup() + require.NoError(t, err) - l, err = c.TrustedLightBlock(3) + // Check no light blocks exist after Cleanup. + l, err := c.TrustedLightBlock(1) assert.Error(t, err) assert.Nil(t, l) - mockNode.AssertExpectations(t) + mockFullNode.AssertExpectations(t) }) -} - -func TestClient_Update(t *testing.T) { - setupDashCoreMockClient(t) - mockFullNode := &provider_mocks.Provider{} - mockFullNode.On("LightBlock", mock.Anything, int64(0)).Return(l3, nil) - mockFullNode.On("LightBlock", mock.Anything, int64(1)).Return(l1, nil) - mockFullNode.On("LightBlock", mock.Anything, int64(3)).Return(l3, nil) - - c, err := light.NewClientAtHeight( - ctx, - 1, - chainID, - mockFullNode, - []provider.Provider{mockFullNode}, - dbs.New(dbm.NewMemDB()), - dashCoreMockClient, - light.Logger(log.TestingLogger()), - ) - require.NoError(t, err) - - // should result in downloading & verifying header #3 - l, err := c.Update(ctx, bTime.Add(2*time.Hour)) - assert.NoError(t, err) - if assert.NotNil(t, l) { - assert.EqualValues(t, 3, l.Height) - assert.NoError(t, l.ValidateBasic(chainID)) - } -} + t.Run("RestoresTrustedHeaderAfterStartup", func(t *testing.T) { + // trustedHeader.Height == options.Height -func TestClient_Concurrency(t *testing.T) { - setupDashCoreMockClient(t) - mockFullNode := &provider_mocks.Provider{} - mockFullNode.On("LightBlock", mock.Anything, int64(2)).Return(l2, nil) - mockFullNode.On("LightBlock", mock.Anything, int64(1)).Return(l1, nil) - - c, err := light.NewClientAtHeight( - ctx, - 1, - chainID, - mockFullNode, - []provider.Provider{mockFullNode}, - dbs.New(dbm.NewMemDB()), - dashCoreMockClient, - light.Logger(log.TestingLogger()), - ) - require.NoError(t, err) + bctx, bcancel := context.WithCancel(context.Background()) + defer bcancel() - _, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(2*time.Hour)) - require.NoError(t, err) + // 1. options.Hash == trustedHeader.Hash + t.Run("hashes should match", func(t *testing.T) { + ctx, cancel := context.WithCancel(bctx) + defer cancel() - var wg sync.WaitGroup - for i := 0; i < 100; i++ { - wg.Add(1) - go func() { - defer wg.Done() + logger := log.NewNopLogger() - // NOTE: Cleanup, Stop, VerifyLightBlockAtHeight and Verify are not supposed - // to be concurrently safe. 
+ mockNode := &provider_mocks.Provider{} + trustedStore := dbs.New(dbm.NewMemDB()) + err := trustedStore.SaveLightBlock(l1) + require.NoError(t, err) - assert.Equal(t, chainID, c.ChainID()) + c, err := light.NewClientAtHeight( + ctx, + 1, + chainID, + mockNode, + nil, + trustedStore, + dashCoreMockClient, + light.Logger(logger), + ) + require.NoError(t, err) - _, err := c.LastTrustedHeight() + l, err := c.TrustedLightBlock(1) assert.NoError(t, err) + assert.NotNil(t, l) + assert.Equal(t, l.Hash(), h1.Hash()) + assert.Equal(t, l.ValidatorSet.Hash(), h1.ValidatorsHash.Bytes()) + mockNode.AssertExpectations(t) + }) - _, err = c.FirstTrustedHeight() - assert.NoError(t, err) + // 2. options.Hash != trustedHeader.Hash + t.Run("hashes should not match", func(t *testing.T) { + ctx, cancel := context.WithCancel(bctx) + defer cancel() + + trustedStore := dbs.New(dbm.NewMemDB()) + err := trustedStore.SaveLightBlock(l1) + require.NoError(t, err) + + logger := log.NewNopLogger() + + mockNode := &provider_mocks.Provider{} + + c, err := light.NewClientAtHeight( + ctx, + 1, + chainID, + mockNode, + nil, + trustedStore, + dashCoreMockClient, + light.Logger(logger), + ) + require.NoError(t, err) l, err := c.TrustedLightBlock(1) assert.NoError(t, err) - assert.NotNil(t, l) - }() - } + if assert.NotNil(t, l) { + // client take the trusted store and ignores the trusted options + assert.Equal(t, l.Hash(), l1.Hash()) + assert.NoError(t, l.ValidateBasic(chainID)) + } - wg.Wait() -} + l, err = c.TrustedLightBlock(3) + assert.Error(t, err) + assert.Nil(t, l) -func TestClientReplacesPrimaryWithWitnessIfPrimaryIsUnavailable(t *testing.T) { - setupDashCoreMockClient(t) - mockFullNode := &provider_mocks.Provider{} - mockFullNode.On("LightBlock", mock.Anything, mock.Anything).Return(l1, nil) - - mockDeadNode := &provider_mocks.Provider{} - mockDeadNode.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrNoResponse) - c, err := light.NewClientAtHeight( - ctx, - 1, - chainID, - mockDeadNode, - []provider.Provider{mockDeadNode, mockFullNode}, - dbs.New(dbm.NewMemDB()), - dashCoreMockClient, - light.Logger(log.TestingLogger()), - ) + mockNode.AssertExpectations(t) + }) + }) + t.Run("Update", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - require.NoError(t, err) - _, err = c.Update(ctx, bTime.Add(2*time.Hour)) - require.NoError(t, err) + mockFullNode := &provider_mocks.Provider{} + mockFullNode.On("ID").Return("mockFullNode") + mockFullNode.On("LightBlock", mock.Anything, int64(0)).Return(l3, nil) + mockFullNode.On("LightBlock", mock.Anything, int64(1)).Return(l1, nil) - // the primary should no longer be the deadNode - assert.NotEqual(t, c.Primary(), mockDeadNode) + mockWitnessNode := &provider_mocks.Provider{} + mockWitnessNode.On("ID").Return("mockWitnessNode") + mockWitnessNode.On("LightBlock", mock.Anything, int64(1)).Return(l1, nil) + mockWitnessNode.On("LightBlock", mock.Anything, int64(3)).Return(l3, nil) - // we should still have the dead node as a witness because it - // hasn't repeatedly been unresponsive yet - assert.Equal(t, 2, len(c.Witnesses())) - mockDeadNode.AssertExpectations(t) - mockFullNode.AssertExpectations(t) -} + logger := log.NewNopLogger() -func TestClientReplacesPrimaryWithWitnessIfPrimaryDoesntHaveBlock(t *testing.T) { - setupDashCoreMockClient(t) - mockFullNode := &provider_mocks.Provider{} - mockFullNode.On("LightBlock", mock.Anything, mock.Anything).Return(l1, nil) - - mockDeadNode := &provider_mocks.Provider{} - 
mockDeadNode.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrLightBlockNotFound) - c, err := light.NewClientAtHeight( - ctx, - 1, - chainID, - mockDeadNode, - []provider.Provider{mockDeadNode, mockFullNode}, - dbs.New(dbm.NewMemDB()), - dashCoreMockClient, - light.Logger(log.TestingLogger()), - ) - require.NoError(t, err) - _, err = c.Update(ctx, bTime.Add(2*time.Hour)) - require.NoError(t, err) - - // we should still have the dead node as a witness because it - // hasn't repeatedly been unresponsive yet - assert.Equal(t, 2, len(c.Witnesses())) - mockDeadNode.AssertExpectations(t) - mockFullNode.AssertExpectations(t) -} + c, err := light.NewClientAtHeight( + ctx, + 1, + chainID, + mockFullNode, + []provider.Provider{mockWitnessNode}, + dbs.New(dbm.NewMemDB()), + dashCoreMockClient, + light.Logger(logger), + ) + require.NoError(t, err) -func TestClient_NewClientFromTrustedStore(t *testing.T) { - setupDashCoreMockClient(t) - - // 1) Initiate DB and fill with a "trusted" header - db := dbs.New(dbm.NewMemDB()) - err := db.SaveLightBlock(l1) - require.NoError(t, err) - mockNode := &provider_mocks.Provider{} - - c, err := light.NewClientFromTrustedStore( - chainID, - mockNode, - []provider.Provider{mockNode}, - db, - dashCoreMockClient, - ) - require.NoError(t, err) + // should result in downloading & verifying header #3 + l, err := c.Update(ctx, bTime.Add(2*time.Hour)) + assert.NoError(t, err) + if assert.NotNil(t, l) { + assert.EqualValues(t, 3, l.Height) + assert.NoError(t, l.ValidateBasic(chainID)) + } + mockFullNode.AssertExpectations(t) + mockWitnessNode.AssertExpectations(t) + }) - // 2) Check light block exists - h, err := c.TrustedLightBlock(1) - assert.NoError(t, err) - assert.EqualValues(t, l1.Height, h.Height) - mockNode.AssertExpectations(t) -} + t.Run("Concurrency", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + logger := log.NewNopLogger() -func TestClient_TrustedValidatorSet(t *testing.T) { - setupDashCoreMockClient(t) - differentVals, _ := types.RandValidatorSet(10) - mockBadValSetNode := &provider_mocks.Provider{} - mockBadValSetNode. - On("LightBlock", mock.Anything, int64(1)). - Return(&types.LightBlock{SignedHeader: h1, ValidatorSet: differentVals}, nil) - mockBadValSetNode. - On("LightBlock", mock.Anything, mock.Anything). 
- Return(nil, provider.ErrBadLightBlock{Reason: errors.New("error")}) - - mockFullNode := mockNodeFromHeadersAndVals( - map[int64]*types.SignedHeader{ - 1: h1, - 2: h2, - }, - map[int64]*types.ValidatorSet{ - 1: vals, - 2: vals, - }) - c, err := light.NewClientAtHeight( - ctx, - 1, - chainID, - mockFullNode, - []provider.Provider{mockBadValSetNode, mockFullNode}, - dbs.New(dbm.NewMemDB()), - dashCoreMockClient, - light.Logger(log.TestingLogger()), - ) - require.NoError(t, err) - assert.Equal(t, 2, len(c.Witnesses())) - - _, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(2*time.Hour).Add(1*time.Second)) - assert.NoError(t, err) - assert.Equal(t, 1, len(c.Witnesses())) - mockBadValSetNode.AssertExpectations(t) - mockFullNode.AssertExpectations(t) -} + mockFullNode := &provider_mocks.Provider{} + mockFullNode.On("LightBlock", mock.Anything, int64(2)).Return(l2, nil) + mockFullNode.On("LightBlock", mock.Anything, int64(1)).Return(l1, nil) + c, err := light.NewClientAtHeight( + ctx, + 1, + chainID, + mockFullNode, + nil, + dbs.New(dbm.NewMemDB()), + dashCoreMockClient, + light.Logger(logger), + ) + require.NoError(t, err) -func TestClientPrunesHeadersAndValidatorSets(t *testing.T) { - setupDashCoreMockClient(t) - mockFullNode := mockNodeFromHeadersAndVals( - map[int64]*types.SignedHeader{ - 1: h1, - 3: h3, - 0: h3, - }, - map[int64]*types.ValidatorSet{ - 1: vals, - 3: vals, - 0: vals, - }) + _, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(2*time.Hour)) + require.NoError(t, err) - c, err := light.NewClientAtHeight( - ctx, - 1, - chainID, - mockFullNode, - []provider.Provider{mockFullNode}, - dbs.New(dbm.NewMemDB()), - dashCoreMockClient, - light.Logger(log.TestingLogger()), - light.PruningSize(1), - ) - require.NoError(t, err) - _, err = c.TrustedLightBlock(1) - require.NoError(t, err) + var wg sync.WaitGroup + for i := 0; i < 100; i++ { + wg.Add(1) + go func() { + defer wg.Done() - h, err := c.Update(ctx, bTime.Add(2*time.Hour)) - require.NoError(t, err) - require.Equal(t, int64(3), h.Height) + // NOTE: Cleanup, Stop, VerifyLightBlockAtHeight and Verify are not supposed + // to be concurrently safe. 
- _, err = c.TrustedLightBlock(1) - assert.Error(t, err) - mockFullNode.AssertExpectations(t) -} + assert.Equal(t, chainID, c.ChainID()) + + _, err := c.LastTrustedHeight() + assert.NoError(t, err) -func TestClientEnsureValidHeadersAndValSets(t *testing.T) { - setupDashCoreMockClient(t) + _, err = c.FirstTrustedHeight() + assert.NoError(t, err) - emptyValSet := &types.ValidatorSet{ - Validators: nil, - Proposer: nil, - } + l, err := c.TrustedLightBlock(1) + assert.NoError(t, err) + assert.NotNil(t, l) + }() + } + + wg.Wait() + mockFullNode.AssertExpectations(t) + }) + t.Run("ReplacesPrimaryWithWitnessIfPrimaryIsUnavailable", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mockFullNode := &provider_mocks.Provider{} + mockFullNode.On("LightBlock", mock.Anything, mock.Anything).Return(l1, nil) + mockFullNode.On("ID").Return("mockFullNode") + mockDeadNode := &provider_mocks.Provider{} + mockDeadNode.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrNoResponse) + mockDeadNode.On("ID").Return("mockDeadNode") + + logger := log.NewNopLogger() + + c, err := light.NewClientAtHeight( + ctx, + 1, + chainID, + mockDeadNode, + []provider.Provider{mockFullNode}, + dbs.New(dbm.NewMemDB()), + dashCoreMockClient, + light.Logger(logger), + ) + + require.NoError(t, err) + _, err = c.Update(ctx, bTime.Add(2*time.Hour)) + require.NoError(t, err) + + // the primary should no longer be the deadNode + assert.NotEqual(t, c.Primary(), mockDeadNode) + + // we should still have the dead node as a witness because it + // hasn't repeatedly been unresponsive yet + assert.Equal(t, 1, len(c.Witnesses())) + mockDeadNode.AssertExpectations(t) + mockFullNode.AssertExpectations(t) + }) + t.Run("ReplacesPrimaryWithWitnessIfPrimaryDoesntHaveBlock", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mockFullNode := &provider_mocks.Provider{} + mockFullNode.On("LightBlock", mock.Anything, mock.Anything).Return(l1, nil) + mockFullNode.On("ID").Return("mockFullNode") + + logger := log.NewNopLogger() + + mockDeadNode1 := &provider_mocks.Provider{} + mockDeadNode1.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrLightBlockNotFound) + mockDeadNode1.On("ID").Return("mockDeadNode1") + + mockDeadNode2 := &provider_mocks.Provider{} + mockDeadNode2.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrLightBlockNotFound) + mockDeadNode2.On("ID").Return("mockDeadNode2") + + c, err := light.NewClientAtHeight( + ctx, + 1, + chainID, + mockDeadNode1, + []provider.Provider{mockFullNode, mockDeadNode2}, + dbs.New(dbm.NewMemDB()), + dashCoreMockClient, + light.Logger(logger), + ) + require.NoError(t, err) + _, err = c.Update(ctx, bTime.Add(2*time.Hour)) + require.NoError(t, err) - testCases := []struct { - headers map[int64]*types.SignedHeader - vals map[int64]*types.ValidatorSet + // we should still have the dead node as a witness because it + // hasn't repeatedly been unresponsive yet + assert.Equal(t, 2, len(c.Witnesses())) + mockDeadNode1.AssertExpectations(t) + mockFullNode.AssertExpectations(t) + }) + t.Run("NewClientFromTrustedStore", func(t *testing.T) { + // 1) Initiate DB and fill with a "trusted" header + db := dbs.New(dbm.NewMemDB()) + err := db.SaveLightBlock(l1) + require.NoError(t, err) + mockPrimary := &provider_mocks.Provider{} + mockPrimary.On("ID").Return("mockPrimary") + mockWitness := &provider_mocks.Provider{} + mockWitness.On("ID").Return("mockWitness") + c, err := 
light.NewClientFromTrustedStore( + chainID, + mockPrimary, + []provider.Provider{mockWitness}, + db, + dashCoreMockClient, + ) + require.NoError(t, err) - errorToThrow error - errorHeight int64 + // 2) Check light block exists + h, err := c.TrustedLightBlock(1) + assert.NoError(t, err) + assert.EqualValues(t, l1.Height, h.Height) + mockPrimary.AssertExpectations(t) + mockWitness.AssertExpectations(t) + }) + t.Run("TrustedValidatorSet", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() - err bool - }{ - { - headers: map[int64]*types.SignedHeader{ + differentVals, _ := types.RandValidatorSet(10) + mockBadValSetNode := mockNodeFromHeadersAndVals( + map[int64]*types.SignedHeader{ 1: h1, - 3: h3, + // 3/3 signed, but validator set at height 2 below is invalid -> witness + // should be removed. + 2: keys.GenSignedHeaderLastBlockID(t, chainID, 2, bTime.Add(30*time.Minute), nil, vals, vals, + hash("app_hash2"), hash("cons_hash"), hash("results_hash"), + 0, len(keys), types.BlockID{Hash: h1.Hash()}), }, - vals: map[int64]*types.ValidatorSet{ + map[int64]*types.ValidatorSet{ 1: vals, - 3: vals, - }, - err: false, - }, - { - headers: map[int64]*types.SignedHeader{ + 2: differentVals, + }) + mockBadValSetNode.On("ID").Return("mockBadValSetNode") + + mockFullNode := mockNodeFromHeadersAndVals( + map[int64]*types.SignedHeader{ 1: h1, + 2: h2, }, - vals: map[int64]*types.ValidatorSet{ + map[int64]*types.ValidatorSet{ 1: vals, + 2: vals, + }) + mockFullNode.On("ID").Return("mockFullNode") + + mockGoodWitness := mockNodeFromHeadersAndVals( + map[int64]*types.SignedHeader{ + 1: h1, + 2: h2, }, - errorToThrow: provider.ErrBadLightBlock{Reason: errors.New("nil header or vals")}, - errorHeight: 3, - err: true, - }, - { - headers: map[int64]*types.SignedHeader{ + map[int64]*types.ValidatorSet{ + 1: vals, + 2: vals, + }) + mockGoodWitness.On("ID").Return("mockGoodWitness") + + c, err := light.NewClientAtHeight( + ctx, + 1, + chainID, + mockFullNode, + []provider.Provider{mockBadValSetNode, mockGoodWitness}, + dbs.New(dbm.NewMemDB()), + dashCoreMockClient, + light.Logger(logger), + ) + require.NoError(t, err) + assert.Equal(t, 2, len(c.Witnesses())) + + _, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(2*time.Hour).Add(1*time.Second)) + assert.NoError(t, err) + assert.Equal(t, 1, len(c.Witnesses())) + mockBadValSetNode.AssertExpectations(t) + mockFullNode.AssertExpectations(t) + }) + t.Run("PrunesHeadersAndValidatorSets", func(t *testing.T) { + mockFullNode := mockNodeFromHeadersAndVals( + map[int64]*types.SignedHeader{ 1: h1, + 0: h3, }, - errorToThrow: provider.ErrBadLightBlock{Reason: errors.New("nil header or vals")}, - errorHeight: 3, - vals: valSet, - err: true, - }, - { - headers: map[int64]*types.SignedHeader{ + map[int64]*types.ValidatorSet{ + 1: vals, + 0: vals, + }) + + mockFullNode.On("ID").Return("mockFullNode") + + mockGoodWitness := mockNodeFromHeadersAndVals( + map[int64]*types.SignedHeader{ 1: h1, 3: h3, }, - vals: map[int64]*types.ValidatorSet{ + map[int64]*types.ValidatorSet{ 1: vals, - 3: emptyValSet, - }, - err: true, - }, - } + 3: vals, + }) + mockGoodWitness.On("ID").Return("mockGoodWitness") - //nolint:scopelint - for tcID, tc := range testCases { - testCase := tc - t.Run(fmt.Sprintf("tc_%d", tcID), func(t *testing.T) { - mockBadNode := mockNodeFromHeadersAndVals(testCase.headers, testCase.vals) - if testCase.errorToThrow != nil { - mockBadNode.On("LightBlock", mock.Anything, 
testCase.errorHeight).Return(nil, testCase.errorToThrow) - } - c, err := light.NewClient( - ctx, - chainID, - mockBadNode, - []provider.Provider{mockBadNode, mockBadNode}, - dbs.New(dbm.NewMemDB()), - dashCoreMockClient, - ) - require.NoError(t, err) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + logger := log.NewNopLogger() - _, err = c.VerifyLightBlockAtHeight(ctx, 3, bTime.Add(2*time.Hour)) - if tc.err { - assert.Error(t, err) - } else { - assert.NoError(t, err) - } - }) - } -} + c, err := light.NewClientAtHeight( + ctx, + 1, + chainID, + mockFullNode, + []provider.Provider{mockGoodWitness}, + dbs.New(dbm.NewMemDB()), + dashCoreMockClient, + light.Logger(logger), + light.PruningSize(1), + ) + require.NoError(t, err) + _, err = c.TrustedLightBlock(1) + require.NoError(t, err) -func TestClientHandlesContexts(t *testing.T) { - mockNode := &provider_mocks.Provider{} - mockNode.On("LightBlock", - mock.MatchedBy(func(ctx context.Context) bool { return ctx.Err() == nil }), - int64(1)).Return(l1, nil) - mockNode.On("LightBlock", - mock.MatchedBy(func(ctx context.Context) bool { return ctx.Err() == context.DeadlineExceeded }), - mock.Anything).Return(nil, context.DeadlineExceeded) - - mockNode.On("LightBlock", - mock.MatchedBy(func(ctx context.Context) bool { return ctx.Err() == context.Canceled }), - mock.Anything).Return(nil, context.Canceled) - - dashcoreClient := dashcore.NewMockClient(chainID, llmqType, privVals[0], true) - - // instantiate the light client with a timeout - ctxTimeOut, cancel := context.WithTimeout(ctx, 1*time.Nanosecond) - defer cancel() - _, err := light.NewClientAtHeight( - ctxTimeOut, - 1, - chainID, - mockNode, - []provider.Provider{mockNode, mockNode}, - dbs.New(dbm.NewMemDB()), - dashcoreClient, - ) - require.Error(t, ctxTimeOut.Err()) - require.Error(t, err) - require.True(t, errors.Is(err, context.DeadlineExceeded)) - - // instantiate the client for real - c, err := light.NewClient( - ctx, - chainID, - mockNode, - []provider.Provider{mockNode, mockNode}, - dbs.New(dbm.NewMemDB()), - dashcoreClient, - ) - require.NoError(t, err) - - // verify a block with a timeout - ctxTimeOutBlock, cancel := context.WithTimeout(ctx, 1*time.Nanosecond) - defer cancel() - _, err = c.VerifyLightBlockAtHeight(ctxTimeOutBlock, 100, bTime.Add(100*time.Minute)) - require.Error(t, ctxTimeOutBlock.Err()) - require.Error(t, err) - require.True(t, errors.Is(err, context.DeadlineExceeded)) - - // verify a block with a cancel - ctxCancel, cancel := context.WithCancel(ctx) - cancel() - _, err = c.VerifyLightBlockAtHeight(ctxCancel, 100, bTime.Add(100*time.Minute)) - require.Error(t, ctxCancel.Err()) - require.Error(t, err) - require.True(t, errors.Is(err, context.Canceled)) + h, err := c.Update(ctx, bTime.Add(2*time.Hour)) + require.NoError(t, err) + require.Equal(t, int64(3), h.Height) + + _, err = c.TrustedLightBlock(1) + assert.Error(t, err) + mockFullNode.AssertExpectations(t) + mockGoodWitness.AssertExpectations(t) + }) + t.Run("EnsureValidHeadersAndValSets", func(t *testing.T) { + emptyValSet := &types.ValidatorSet{ + Validators: nil, + Proposer: nil, + } + + testCases := []struct { + headers map[int64]*types.SignedHeader + vals map[int64]*types.ValidatorSet + + errorToThrow error + errorHeight int64 + + err bool + }{ + { + headers: map[int64]*types.SignedHeader{ + 1: h1, + 3: h3, + }, + vals: map[int64]*types.ValidatorSet{ + 1: vals, + 3: vals, + }, + err: false, + }, + { + headers: map[int64]*types.SignedHeader{ + 1: h1, + }, + vals: 
map[int64]*types.ValidatorSet{ + 1: vals, + }, + errorToThrow: provider.ErrBadLightBlock{Reason: errors.New("nil header or vals")}, + errorHeight: 3, + err: true, + }, + { + headers: map[int64]*types.SignedHeader{ + 1: h1, + }, + errorToThrow: provider.ErrBadLightBlock{Reason: errors.New("nil header or vals")}, + errorHeight: 3, + vals: valSet, + err: true, + }, + { + headers: map[int64]*types.SignedHeader{ + 1: h1, + 3: h3, + }, + vals: map[int64]*types.ValidatorSet{ + 1: vals, + 3: emptyValSet, + }, + err: true, + }, + } + + //nolint:scopelint + for i, tc := range testCases { + testCase := tc + t.Run(fmt.Sprintf("case: %d", i), func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + mockBadNode := mockNodeFromHeadersAndVals(testCase.headers, testCase.vals) + if testCase.errorToThrow != nil { + mockBadNode.On("LightBlock", mock.Anything, testCase.errorHeight).Return(nil, testCase.errorToThrow) + } + + c, err := light.NewClientAtHeight( + ctx, + 1, + chainID, + mockBadNode, + nil, + dbs.New(dbm.NewMemDB()), + dashCoreMockClient, + ) + require.NoError(t, err) + + _, err = c.VerifyLightBlockAtHeight(ctx, 3, bTime.Add(2*time.Hour)) + if testCase.err { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + mockBadNode.AssertExpectations(t) + }) + } + }) } diff --git a/light/doc.go b/light/doc.go index c30c68eb04..b05ffa8056 100644 --- a/light/doc.go +++ b/light/doc.go @@ -94,7 +94,7 @@ Check out other examples in example_test.go ## 2. Pure functions to verify a new header (see verifier.go) Verify function verifies a new header against some trusted header. See -https://github.com/tendermint/spec/blob/master/spec/light-client/verification/README.md +https://github.com/tendermint/tendermint/blob/master/spec/light-client/verification/README.md for details. There are two methods of verification: sequential and bisection @@ -118,7 +118,7 @@ as a wrapper, which verifies all the headers, using a light client connected to some other node. See -https://github.com/tendermint/spec/tree/master/spec/light-client +https://github.com/tendermint/tendermint/tree/master/spec/light-client for the light client specification. */ package light diff --git a/light/example_test.go b/light/example_test.go index 923acdabf0..cbfba354c0 100644 --- a/light/example_test.go +++ b/light/example_test.go @@ -2,54 +2,52 @@ package light_test import ( "context" - "io/ioutil" - stdlog "log" - "os" + "testing" "time" - dashcore "github.com/tendermint/tendermint/dashcore/rpc" - "github.com/tendermint/tendermint/light" + "github.com/dashevo/dashd-go/btcjson" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/privval" dbm "github.com/tendermint/tm-db" "github.com/tendermint/tendermint/abci/example/kvstore" + dashcore "github.com/tendermint/tendermint/dash/core" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/light/provider" + "github.com/tendermint/tendermint/light" httpp "github.com/tendermint/tendermint/light/provider/http" dbs "github.com/tendermint/tendermint/light/store/db" rpctest "github.com/tendermint/tendermint/rpc/test" ) // Manually getting light blocks and verifying them. 
-func ExampleClient() { +func TestExampleClient(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - conf, err := rpctest.CreateConfig("ExampleClient_VerifyLightBlockAtHeight") + conf, err := rpctest.CreateConfig(t, "ExampleClient_VerifyLightBlockAtHeight") if err != nil { - stdlog.Fatal(err) + t.Fatal(err) } - logger := log.TestingLogger() + logger, err := log.NewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo) + if err != nil { + t.Fatal(err) + } // Start a test application app := kvstore.NewApplication() _, closer, err := rpctest.StartTendermint(ctx, conf, app, rpctest.SuppressStdout) if err != nil { - stdlog.Fatal(err) + t.Fatal(err) } defer func() { _ = closer(ctx) }() - dbDir, err := ioutil.TempDir("", "light-client-example") - if err != nil { - stdlog.Fatal(err) - } - defer os.RemoveAll(dbDir) - + dbDir := t.TempDir() chainID := conf.ChainID() primary, err := httpp.New(chainID, conf.RPC.ListenAddress) if err != nil { - stdlog.Fatal(err) + t.Fatal(err) } // give Tendermint time to generate some blocks @@ -57,52 +55,55 @@ func ExampleClient() { _, err = primary.LightBlock(ctx, 2) if err != nil { - stdlog.Fatal(err) + t.Fatal(err) } db, err := dbm.NewGoLevelDB("light-client-db", dbDir) if err != nil { - stdlog.Fatal(err) + t.Fatal(err) } + pv, err := privval.LoadOrGenFilePV(conf.PrivValidator.KeyFile(), conf.PrivValidator.StateFile()) + require.NoError(t, err) + c, err := light.NewClient( - context.Background(), + ctx, chainID, primary, - []provider.Provider{primary}, // NOTE: primary should not be used here + nil, dbs.New(db), - dashcore.NewMockClient(chainID, 100, nil, false), - light.Logger(log.TestingLogger()), + dashcore.NewMockClient(chainID, btcjson.LLMQType_5_60, pv, false), + light.Logger(log.NewTestingLogger(t)), ) if err != nil { - stdlog.Fatal(err) + t.Fatal(err) } defer func() { if err := c.Cleanup(); err != nil { - stdlog.Fatal(err) + t.Fatal(err) } }() // wait for a few more blocks to be produced time.Sleep(2 * time.Second) - // veify the block at height 3 - _, err = c.VerifyLightBlockAtHeight(context.Background(), 3, time.Now()) + // verify the block at height 3 + _, err = c.VerifyLightBlockAtHeight(ctx, 3, time.Now()) if err != nil { - stdlog.Fatal(err) + t.Fatal(err) } // retrieve light block at height 3 _, err = c.TrustedLightBlock(3) if err != nil { - stdlog.Fatal(err) + t.Fatal(err) } // update to the latest height lb, err := c.Update(ctx, time.Now()) if err != nil { - stdlog.Fatal(err) + t.Fatal(err) } logger.Info("verified light block", "light-block", lb) diff --git a/light/helpers_test.go b/light/helpers_test.go index 41dd63dfb8..cfe5a8a1a7 100644 --- a/light/helpers_test.go +++ b/light/helpers_test.go @@ -1,6 +1,7 @@ package light_test import ( + "testing" "time" "github.com/dashevo/dashd-go/btcjson" @@ -8,7 +9,6 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/bls12381" - "github.com/tendermint/tendermint/crypto/tmhash" provider_mocks "github.com/tendermint/tendermint/light/provider/mocks" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" @@ -39,21 +39,25 @@ func exposeMockPVKeys(pvs []types.PrivValidator, quorumHash crypto.QuorumHash) p // (should be enough for testing). 
func (pkz privKeys) ToValidators(thresholdPublicKey crypto.PubKey) *types.ValidatorSet { res := make([]*types.Validator, len(pkz)) + for i, k := range pkz { - res[i] = types.NewValidatorDefaultVotingPower(k.PubKey(), crypto.Sha256(k.PubKey().Address())) + res[i] = types.NewValidatorDefaultVotingPower(k.PubKey(), crypto.Checksum(k.PubKey().Address())) } + // Quorum hash is pseudorandom return types.NewValidatorSet( res, thresholdPublicKey, btcjson.LLMQType_5_60, - crypto.Sha256(thresholdPublicKey.Bytes()), + crypto.Checksum(thresholdPublicKey.Bytes()), true, ) } // signHeader properly signs the header with all keys from first to last exclusive. -func (pkz privKeys) signHeader(header *types.Header, valSet *types.ValidatorSet, first, last int) *types.Commit { +func (pkz privKeys) signHeader(t testing.TB, header *types.Header, valSet *types.ValidatorSet, first, last int) *types.Commit { + t.Helper() + var blockSigs [][]byte var stateSigs [][]byte var blsIDs [][]byte @@ -82,7 +86,7 @@ func (pkz privKeys) signHeader(header *types.Header, valSet *types.ValidatorSet, if !privateKey.PubKey().Equals(val.PubKey) { panic("light client keys do not match") } - vote := makeVote(header, valSet, proTxHash, pkz[i], blockID, stateID) + vote := makeVote(t, header, valSet, proTxHash, pkz[i], blockID, stateID) blockSigs = append(blockSigs, vote.BlockSignature) stateSigs = append(stateSigs, vote.StateSignature) blsIDs = append(blsIDs, vote.ValidatorProTxHash) @@ -94,8 +98,9 @@ func (pkz privKeys) signHeader(header *types.Header, valSet *types.ValidatorSet, return types.NewCommit(header.Height, 1, blockID, stateID, valSet.QuorumHash, thresholdBlockSig, thresholdStateSig) } -func makeVote(header *types.Header, valset *types.ValidatorSet, proTxHash crypto.ProTxHash, +func makeVote(t testing.TB, header *types.Header, valset *types.ValidatorSet, proTxHash crypto.ProTxHash, key crypto.PrivKey, blockID types.BlockID, stateID types.StateID) *types.Vote { + t.Helper() idx, val := valset.GetByProTxHash(proTxHash) if val == nil { @@ -152,51 +157,38 @@ func genHeader(chainID string, height int64, bTime time.Time, txs types.Txs, } // GenSignedHeader calls genHeader and signHeader and combines them into a SignedHeader. -func (pkz privKeys) GenSignedHeader(chainID string, height int64, bTime time.Time, txs types.Txs, +func (pkz privKeys) GenSignedHeader(t testing.TB, chainID string, height int64, bTime time.Time, txs types.Txs, valset, nextValset *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int) *types.SignedHeader { + t.Helper() + header := genHeader(chainID, height, bTime, txs, valset, nextValset, appHash, consHash, resHash) return &types.SignedHeader{ Header: header, - Commit: pkz.signHeader(header, valset, first, last), + Commit: pkz.signHeader(t, header, valset, first, last), } } // GenSignedHeaderLastBlockID calls genHeader and signHeader and combines them into a SignedHeader. 
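// A sketch of a call site under the new testing.TB-threaded signature, using the
// same helpers as the tests above (illustrative values only):
//
//	sh := keys.GenSignedHeaderLastBlockID(t, chainID, 2, bTime.Add(30*time.Minute),
//		nil, vals, vals, hash("app_hash"), hash("cons_hash"), hash("results_hash"),
//		0, len(keys), types.BlockID{Hash: h1.Hash()})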
-func (pkz privKeys) GenSignedHeaderLastBlockID(chainID string, height int64, bTime time.Time, txs types.Txs, +func (pkz privKeys) GenSignedHeaderLastBlockID(t testing.TB, chainID string, height int64, bTime time.Time, txs types.Txs, valset, nextValset *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int, lastBlockID types.BlockID) *types.SignedHeader { + t.Helper() + header := genHeader(chainID, height, bTime, txs, valset, nextValset, appHash, consHash, resHash) header.LastBlockID = lastBlockID return &types.SignedHeader{ Header: header, - Commit: pkz.signHeader(header, valset, first, last), + Commit: pkz.signHeader(t, header, valset, first, last), } } -func hash(s string) []byte { - return tmhash.Sum([]byte(s)) -} - -func mockNodeFromHeadersAndVals( - headers map[int64]*types.SignedHeader, - vals map[int64]*types.ValidatorSet, -) *provider_mocks.Provider { - provider := &provider_mocks.Provider{} - for i, header := range headers { - lb := &types.LightBlock{SignedHeader: header, ValidatorSet: vals[i]} - provider. - On("LightBlock", mock.Anything, i). - Return(lb, nil) - } - return provider -} - // genLightBlocksWithValidatorsRotatingEveryBlock generates the header and validator set to create // blocks to height. BlockIntervals are in per minute. // NOTE: Expected to have a large validator set size ~ 100 validators. func genLightBlocksWithValidatorsRotatingEveryBlock( + t testing.TB, chainID string, numBlocks int64, valSize int, @@ -204,6 +196,7 @@ func genLightBlocksWithValidatorsRotatingEveryBlock( map[int64]*types.SignedHeader, map[int64]*types.ValidatorSet, []types.PrivValidator) { + t.Helper() var ( headers = make(map[int64]*types.SignedHeader, numBlocks) @@ -220,7 +213,7 @@ func genLightBlocksWithValidatorsRotatingEveryBlock( newKeys := exposeMockPVKeys(newPrivVals, newVals.QuorumHash) // genesis header and vals - lastHeader := keys.GenSignedHeader(chainID, 1, bTime.Add(1*time.Minute), nil, + lastHeader := keys.GenSignedHeader(t, chainID, 1, bTime.Add(1*time.Minute), nil, vals, newVals, hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)) currentHeader := lastHeader @@ -235,7 +228,7 @@ func genLightBlocksWithValidatorsRotatingEveryBlock( ) newKeys = exposeMockPVKeys(newPrivVals, newVals.QuorumHash) - currentHeader = keys.GenSignedHeaderLastBlockID(chainID, height, bTime.Add(time.Duration(height)*time.Minute), + currentHeader = keys.GenSignedHeaderLastBlockID(t, chainID, height, bTime.Add(time.Duration(height)*time.Minute), nil, vals, newVals, hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys), types.BlockID{Hash: lastHeader.Hash()}) @@ -247,3 +240,17 @@ func genLightBlocksWithValidatorsRotatingEveryBlock( return headers, valset, newPrivVals } + +func mockNodeFromHeadersAndVals(headers map[int64]*types.SignedHeader, + vals map[int64]*types.ValidatorSet) *provider_mocks.Provider { + mockNode := &provider_mocks.Provider{} + for i, header := range headers { + lb := &types.LightBlock{SignedHeader: header, ValidatorSet: vals[i]} + mockNode.On("LightBlock", mock.Anything, i).Return(lb, nil) + } + return mockNode +} + +func hash(s string) []byte { + return crypto.Checksum([]byte(s)) +} diff --git a/light/light_test.go b/light/light_test.go index 5887e5bcf2..c035d102c8 100644 --- a/light/light_test.go +++ b/light/light_test.go @@ -2,8 +2,6 @@ package light_test import ( "context" - "io/ioutil" - "os" "testing" "time" @@ -12,7 +10,7 @@ import ( "github.com/dashevo/dashd-go/btcjson" 
"github.com/tendermint/tendermint/abci/example/kvstore" - dashcore "github.com/tendermint/tendermint/dashcore/rpc" + dashcore "github.com/tendermint/tendermint/dash/core" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/light" "github.com/tendermint/tendermint/light/provider" @@ -32,9 +30,11 @@ func TestClientIntegration_Update(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - conf, err := rpctest.CreateConfig(t.Name()) + conf, err := rpctest.CreateConfig(t, t.Name()) require.NoError(t, err) + logger := log.NewNopLogger() + // Start a test application app := kvstore.NewApplication() @@ -48,10 +48,7 @@ func TestClientIntegration_Update(t *testing.T) { // give Tendermint time to generate some blocks time.Sleep(5 * time.Second) - dbDir, err := ioutil.TempDir("", "light-client-test-update-example") - require.NoError(t, err) - defer os.RemoveAll(dbDir) - + dbDir := t.TempDir() chainID := conf.ChainID() primary, err := httpp.New(chainID, conf.RPC.ListenAddress) @@ -69,10 +66,10 @@ func TestClientIntegration_Update(t *testing.T) { 1, chainID, primary, - []provider.Provider{primary}, // NOTE: primary should not be used here + nil, dbs.New(db), dashcore.NewMockClient(chainID, btcjson.LLMQType_5_60, filePV, true), - light.Logger(log.TestingLogger()), + light.Logger(logger), ) require.NoError(t, err) @@ -94,9 +91,11 @@ func TestClientIntegration_VerifyLightBlockAtHeight(t *testing.T) { t.Parallel() ctx, cancel := context.WithCancel(context.Background()) defer cancel() - conf, err := rpctest.CreateConfig(t.Name()) + conf, err := rpctest.CreateConfig(t, t.Name()) require.NoError(t, err) + logger := log.NewNopLogger() + // Start a test application app := kvstore.NewApplication() @@ -107,10 +106,7 @@ func TestClientIntegration_VerifyLightBlockAtHeight(t *testing.T) { require.NoError(t, err) defer func() { require.NoError(t, closer(ctx)) }() - dbDir, err := ioutil.TempDir("", "light-client-test-verify-example") - require.NoError(t, err) - defer os.RemoveAll(dbDir) - + dbDir := t.TempDir() chainID := conf.ChainID() primary, err := httpp.New(chainID, conf.RPC.ListenAddress) @@ -126,10 +122,10 @@ func TestClientIntegration_VerifyLightBlockAtHeight(t *testing.T) { c, err := light.NewClient(ctx, chainID, primary, - []provider.Provider{primary}, // NOTE: primary should not be used here + nil, dbs.New(db), dashcore.NewMockClient(chainID, btcjson.LLMQType_5_60, filePV, true), - light.Logger(log.TestingLogger()), + light.Logger(logger), ) require.NoError(t, err) @@ -167,3 +163,77 @@ func waitForBlock(ctx context.Context, p provider.Provider, height int64) (*type } } } + +func TestClientStatusRPC(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + conf, err := rpctest.CreateConfig(t, t.Name()) + require.NoError(t, err) + + // Start a test application + app := kvstore.NewApplication() + + _, closer, err := rpctest.StartTendermint(ctx, conf, app, rpctest.SuppressStdout) + require.NoError(t, err) + defer func() { require.NoError(t, closer(ctx)) }() + + dbDir := t.TempDir() + chainID := conf.ChainID() + + primary, err := httpp.New(chainID, conf.RPC.ListenAddress) + require.NoError(t, err) + + // give Tendermint time to generate some blocks + _, err = waitForBlock(ctx, primary, 2) + require.NoError(t, err) + + filePV, err := privval.LoadOrGenFilePV(conf.PrivValidator.KeyFile(), conf.PrivValidator.StateFile()) + require.NoError(t, err) + + db, err := dbm.NewGoLevelDB("light-client-db", dbDir) + require.NoError(t, 
err) + + // To avoid creating a full testnet, we create the light client with no witnesses + // and verify against the primary only. + witnesses := []provider.Provider{} + + c, err := light.NewClientAtHeight(ctx, + 2, + chainID, + primary, + witnesses, + dbs.New(db), + dashcore.NewMockClient(chainID, btcjson.LLMQType_5_60, filePV, true), + light.Logger(log.NewNopLogger()), + ) + require.NoError(t, err) + + defer func() { require.NoError(t, c.Cleanup()) }() + + lightStatus := c.Status(ctx) + + // Verify primary IP + require.True(t, lightStatus.PrimaryID == primary.ID()) + + // Verify that the number of peers equals the number of witnesses (+1 if the primary is not a witness) + require.Equal(t, len(witnesses)+primaryNotInWitnessList(witnesses, primary), lightStatus.NumPeers) + + // Verify that the last trusted hash returned matches the stored hash of the trusted + // block at the last trusted height. + blockAtTrustedHeight, err := c.TrustedLightBlock(lightStatus.LastTrustedHeight) + require.NoError(t, err) + + require.EqualValues(t, lightStatus.LastTrustedHash, blockAtTrustedHeight.Hash()) + +} + +// primaryNotInWitnessList returns 1 if the primary is not in the witness list, +// and 0 otherwise. +func primaryNotInWitnessList(witnesses []provider.Provider, primary provider.Provider) int { + for _, el := range witnesses { + if el == primary { + return 0 + } + } + return 1 +} diff --git a/light/provider/errors.go b/light/provider/errors.go index f632608c85..628e8c67be 100644 --- a/light/provider/errors.go +++ b/light/provider/errors.go @@ -31,16 +31,20 @@ type ErrBadLightBlock struct { } func (e ErrBadLightBlock) Error() string { - return fmt.Sprintf("client provided bad signed header: %s", e.Reason.Error()) + return fmt.Sprintf("client provided bad signed header: %v", e.Reason) } +func (e ErrBadLightBlock) Unwrap() error { return e.Reason } + // ErrUnreliableProvider is a generic error that indicates that the provider isn't // behaving in a reliable manner to the light client. The light client will // remove the provider type ErrUnreliableProvider struct { - Reason string + Reason error } func (e ErrUnreliableProvider) Error() string { - return fmt.Sprintf("client deemed unreliable: %s", e.Reason) + return fmt.Sprintf("client deemed unreliable: %v", e.Reason) } + +func (e ErrUnreliableProvider) Unwrap() error { return e.Reason } diff --git a/light/provider/http/http.go b/light/provider/http/http.go index c8b6275e98..933e21485b 100644 --- a/light/provider/http/http.go +++ b/light/provider/http/http.go @@ -104,7 +104,8 @@ func NewWithClientAndOptions(chainID string, client rpcclient.RemoteClient, opti } } -func (p *http) String() string { +// ID identifies the provider with an IP address in string format. +func (p *http) ID() string { return fmt.Sprintf("http{%s}", p.client.Remote()) } @@ -224,7 +225,7 @@ func (p *http) validatorSet(ctx context.Context, height *int64) (*types.Validato // If we don't know the error then by default we return an unreliable provider error and // terminate the connection with the peer. 
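// Because ErrUnreliableProvider (and ErrBadLightBlock) now wrap an error and
// define Unwrap, callers can match the underlying cause with the standard
// errors package, e.g. (a minimal sketch):
//
//	err := provider.ErrUnreliableProvider{Reason: context.DeadlineExceeded}
//	errors.Is(err, context.DeadlineExceeded) // true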
- return nil, provider.ErrUnreliableProvider{Reason: e.Error()} + return nil, provider.ErrUnreliableProvider{Reason: e} } } return nil, p.noResponse() @@ -294,7 +295,7 @@ func (p *http) noResponse() error { p.noResponseCount++ if p.noResponseCount > p.noResponseThreshold { return provider.ErrUnreliableProvider{ - Reason: fmt.Sprintf("failed to respond after %d attempts", p.noResponseCount), + Reason: fmt.Errorf("failed to respond after %d attempts", p.noResponseCount), } } return provider.ErrNoResponse @@ -304,7 +305,7 @@ func (p *http) noBlock(e error) error { p.noBlockCount++ if p.noBlockCount > p.noBlockThreshold { return provider.ErrUnreliableProvider{ - Reason: fmt.Sprintf("failed to provide a block after %d attempts", p.noBlockCount), + Reason: fmt.Errorf("failed to provide a block after %d attempts", p.noBlockCount), } } return e diff --git a/light/provider/http/http_test.go b/light/provider/http/http_test.go index 00552fbd2e..fd9371c32a 100644 --- a/light/provider/http/http_test.go +++ b/light/provider/http/http_test.go @@ -2,7 +2,7 @@ package http_test import ( "context" - "fmt" + "errors" "testing" "time" @@ -23,21 +23,21 @@ import ( func TestNewProvider(t *testing.T) { c, err := lighthttp.New("chain-test", "192.168.0.1:26657") require.NoError(t, err) - require.Equal(t, fmt.Sprintf("%s", c), "http{http://192.168.0.1:26657}") + require.Equal(t, c.ID(), "http{http://192.168.0.1:26657}") c, err = lighthttp.New("chain-test", "http://153.200.0.1:26657") require.NoError(t, err) - require.Equal(t, fmt.Sprintf("%s", c), "http{http://153.200.0.1:26657}") + require.Equal(t, c.ID(), "http{http://153.200.0.1:26657}") c, err = lighthttp.New("chain-test", "153.200.0.1") require.NoError(t, err) - require.Equal(t, fmt.Sprintf("%s", c), "http{http://153.200.0.1}") + require.Equal(t, c.ID(), "http{http://153.200.0.1}") } func TestProvider(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - cfg, err := rpctest.CreateConfig(t.Name()) + cfg, err := rpctest.CreateConfig(t, t.Name()) require.NoError(t, err) // start a tendermint node in the background to test against @@ -53,18 +53,18 @@ func TestProvider(t *testing.T) { chainID := genDoc.ChainID c, err := rpchttp.New(rpcAddr) - require.Nil(t, err) + require.NoError(t, err) p := lighthttp.NewWithClient(chainID, c) require.NoError(t, err) require.NotNil(t, p) // let it produce some blocks - err = rpcclient.WaitForHeight(c, 10, nil) + err = rpcclient.WaitForHeight(ctx, c, 10, nil) require.NoError(t, err) // let's get the highest block - lb, err := p.LightBlock(context.Background(), 0) + lb, err := p.LightBlock(ctx, 0) require.NoError(t, err) assert.True(t, lb.Height < 9001, "height=%d", lb.Height) @@ -73,25 +73,25 @@ func TestProvider(t *testing.T) { // historical queries now work :) lower := lb.Height - 3 - lb, err = p.LightBlock(context.Background(), lower) + lb, err = p.LightBlock(ctx, lower) require.NoError(t, err) assert.Equal(t, lower, lb.Height) // fetching missing heights (both future and pruned) should return appropriate errors - lb, err = p.LightBlock(context.Background(), 9001) + lb, err = p.LightBlock(ctx, 9001) require.Error(t, err) require.Nil(t, lb) - assert.Equal(t, provider.ErrHeightTooHigh, err) + assert.ErrorIs(t, err, provider.ErrHeightTooHigh) - lb, err = p.LightBlock(context.Background(), 1) + lb, err = p.LightBlock(ctx, 1) require.Error(t, err) require.Nil(t, lb) - assert.Equal(t, provider.ErrLightBlockNotFound, err) + assert.ErrorIs(t, err, provider.ErrLightBlockNotFound) // if the provider is 
unable to provide four more blocks then we should return // an unreliable peer error for i := 0; i < 4; i++ { - _, err = p.LightBlock(context.Background(), 1) + _, err = p.LightBlock(ctx, 1) } assert.IsType(t, provider.ErrUnreliableProvider{}, err) @@ -100,11 +100,13 @@ func TestProvider(t *testing.T) { cancel() time.Sleep(10 * time.Second) - lb, err = p.LightBlock(context.Background(), lower+2) - // we should see a connection refused + lb, err = p.LightBlock(ctx, lower+2) + // Either the connection should be refused, or the context canceled. require.Error(t, err) require.Nil(t, lb) - assert.Equal(t, provider.ErrConnectionClosed, err) + if !errors.Is(err, provider.ErrConnectionClosed) && !errors.Is(err, context.Canceled) { + assert.Fail(t, "Incorrect error", "wanted connection closed or context canceled, got %v", err) + } } // TestLightClient_NilCommit ensures correct handling of a case where commit returned by http client is nil diff --git a/light/provider/mocks/provider.go b/light/provider/mocks/provider.go index aa36fa2d34..e136046f9d 100644 --- a/light/provider/mocks/provider.go +++ b/light/provider/mocks/provider.go @@ -7,6 +7,8 @@ import ( mock "github.com/stretchr/testify/mock" + testing "testing" + types "github.com/tendermint/tendermint/types" ) @@ -15,6 +17,20 @@ type Provider struct { mock.Mock } +// ID provides a mock function with given fields: +func (_m *Provider) ID() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + // LightBlock provides a mock function with given fields: ctx, height func (_m *Provider) LightBlock(ctx context.Context, height int64) (*types.LightBlock, error) { ret := _m.Called(ctx, height) @@ -51,3 +67,13 @@ func (_m *Provider) ReportEvidence(_a0 context.Context, _a1 types.Evidence) erro return r0 } + +// NewProvider creates a new instance of Provider. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. +func NewProvider(t testing.TB) *Provider { + mock := &Provider{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/light/provider/provider.go b/light/provider/provider.go index 7f15d5c752..d1b3304daa 100644 --- a/light/provider/provider.go +++ b/light/provider/provider.go @@ -25,4 +25,8 @@ type Provider interface { // ReportEvidence reports an evidence of misbehavior. ReportEvidence(context.Context, types.Evidence) error + + // Returns the ID of a provider. 
For RPC providers it returns the IP address of the client. + // For p2p providers it returns a combination of NodeID and IP address. + ID() string } diff --git a/light/proxy/proxy.go b/light/proxy/proxy.go index 6f26225887..6e7a5ff2a6 100644 --- a/light/proxy/proxy.go +++ b/light/proxy/proxy.go @@ -6,8 +6,9 @@ import ( "net" "net/http" + tmpubsub "github.com/tendermint/tendermint/internal/pubsub" + rpccore "github.com/tendermint/tendermint/internal/rpc/core" "github.com/tendermint/tendermint/libs/log" - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" "github.com/tendermint/tendermint/light" lrpc "github.com/tendermint/tendermint/light/rpc" rpchttp "github.com/tendermint/tendermint/rpc/client/http" @@ -40,7 +41,7 @@ func NewProxy( return &Proxy{ Addr: listenAddr, Config: config, - Client: lrpc.NewClient(rpcClient, lightClient, opts...), + Client: lrpc.NewClient(logger, rpcClient, lightClient, opts...), Logger: logger, }, nil } @@ -49,14 +50,15 @@ func NewProxy( // routes to proxy via Client, and starts up an HTTP server on the TCP network // address p.Addr. // See http#Server#ListenAndServe. -func (p *Proxy) ListenAndServe() error { - listener, mux, err := p.listen() +func (p *Proxy) ListenAndServe(ctx context.Context) error { + listener, mux, err := p.listen(ctx) if err != nil { return err } p.Listener = listener return rpcserver.Serve( + ctx, listener, mux, p.Logger, @@ -67,14 +69,15 @@ func (p *Proxy) ListenAndServe() error { // ListenAndServeTLS acts identically to ListenAndServe, except that it expects // HTTPS connections. // See http#Server#ListenAndServeTLS. -func (p *Proxy) ListenAndServeTLS(certFile, keyFile string) error { - listener, mux, err := p.listen() +func (p *Proxy) ListenAndServeTLS(ctx context.Context, certFile, keyFile string) error { + listener, mux, err := p.listen(ctx) if err != nil { return err } p.Listener = listener return rpcserver.ServeTLS( + ctx, listener, mux, certFile, @@ -84,16 +87,16 @@ func (p *Proxy) ListenAndServeTLS(certFile, keyFile string) error { ) } -func (p *Proxy) listen() (net.Listener, *http.ServeMux, error) { +func (p *Proxy) listen(ctx context.Context) (net.Listener, *http.ServeMux, error) { mux := http.NewServeMux() // 1) Register regular routes. - r := RPCRoutes(p.Client) + r := rpccore.NewRoutesMap(proxyService{Client: p.Client}, nil) rpcserver.RegisterRPCFuncs(mux, r, p.Logger) // 2) Allow websocket connections. wmLogger := p.Logger.With("protocol", "websocket") - wm := rpcserver.NewWebsocketManager(r, + wm := rpcserver.NewWebsocketManager(wmLogger, r, rpcserver.OnDisconnect(func(remoteAddr string) { err := p.Client.UnsubscribeAll(context.Background(), remoteAddr) if err != nil && err != tmpubsub.ErrSubscriptionNotFound { @@ -102,12 +105,12 @@ func (p *Proxy) listen() (net.Listener, *http.ServeMux, error) { }), rpcserver.ReadLimit(p.Config.MaxBodyBytes), ) - wm.SetLogger(wmLogger) + mux.HandleFunc("/websocket", wm.WebsocketHandler) // 3) Start a client. 
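// The client's lifecycle is now tied to the context passed into listen: a
// caller stops the proxy by canceling the context it gave ListenAndServe.
// A minimal sketch of the caller side, assuming a configured *Proxy p and
// the context-aware service lifecycle:
//
//	ctx, cancel := context.WithCancel(context.Background())
//	defer cancel()
//	if err := p.ListenAndServe(ctx); err != nil {
//		log.Fatal(err)
//	}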
if !p.Client.IsRunning() { - if err := p.Client.Start(); err != nil { + if err := p.Client.Start(ctx); err != nil { return nil, mux, fmt.Errorf("can't start client: %w", err) } } diff --git a/light/proxy/routes.go b/light/proxy/routes.go index be57df6bfe..2df52b921a 100644 --- a/light/proxy/routes.go +++ b/light/proxy/routes.go @@ -1,305 +1,149 @@ package proxy import ( - "github.com/tendermint/tendermint/libs/bytes" + "context" + lrpc "github.com/tendermint/tendermint/light/rpc" rpcclient "github.com/tendermint/tendermint/rpc/client" "github.com/tendermint/tendermint/rpc/coretypes" - rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server" - rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" - "github.com/tendermint/tendermint/types" ) -func RPCRoutes(c *lrpc.Client) map[string]*rpcserver.RPCFunc { - return map[string]*rpcserver.RPCFunc{ - // Subscribe/unsubscribe are reserved for websocket events. - "subscribe": rpcserver.NewWSRPCFunc(c.SubscribeWS, "query"), - "unsubscribe": rpcserver.NewWSRPCFunc(c.UnsubscribeWS, "query"), - "unsubscribe_all": rpcserver.NewWSRPCFunc(c.UnsubscribeAllWS, ""), - - // info API - "health": rpcserver.NewRPCFunc(makeHealthFunc(c), "", false), - "status": rpcserver.NewRPCFunc(makeStatusFunc(c), "", false), - "net_info": rpcserver.NewRPCFunc(makeNetInfoFunc(c), "", false), - "blockchain": rpcserver.NewRPCFunc(makeBlockchainInfoFunc(c), "minHeight,maxHeight", true), - "genesis": rpcserver.NewRPCFunc(makeGenesisFunc(c), "", true), - "genesis_chunked": rpcserver.NewRPCFunc(makeGenesisChunkedFunc(c), "", true), - "header": rpcserver.NewRPCFunc(makeHeaderFunc(c), "height", true), - "header_by_hash": rpcserver.NewRPCFunc(makeHeaderByHashFunc(c), "hash", true), - "block": rpcserver.NewRPCFunc(makeBlockFunc(c), "height", true), - "block_by_hash": rpcserver.NewRPCFunc(makeBlockByHashFunc(c), "hash", true), - "block_results": rpcserver.NewRPCFunc(makeBlockResultsFunc(c), "height", true), - "commit": rpcserver.NewRPCFunc(makeCommitFunc(c), "height", true), - "tx": rpcserver.NewRPCFunc(makeTxFunc(c), "hash,prove", true), - "tx_search": rpcserver.NewRPCFunc(makeTxSearchFunc(c), "query,prove,page,per_page,order_by", false), - "block_search": rpcserver.NewRPCFunc(makeBlockSearchFunc(c), "query,page,per_page,order_by", false), - "validators": rpcserver.NewRPCFunc(makeValidatorsFunc(c), "height,page,per_page,request_quorum_info", true), - "dump_consensus_state": rpcserver.NewRPCFunc(makeDumpConsensusStateFunc(c), "", false), - "consensus_state": rpcserver.NewRPCFunc(makeConsensusStateFunc(c), "", false), - "consensus_params": rpcserver.NewRPCFunc(makeConsensusParamsFunc(c), "height", true), - "unconfirmed_txs": rpcserver.NewRPCFunc(makeUnconfirmedTxsFunc(c), "limit", false), - "num_unconfirmed_txs": rpcserver.NewRPCFunc(makeNumUnconfirmedTxsFunc(c), "", false), - - // tx broadcast API - "broadcast_tx_commit": rpcserver.NewRPCFunc(makeBroadcastTxCommitFunc(c), "tx", false), - "broadcast_tx_sync": rpcserver.NewRPCFunc(makeBroadcastTxSyncFunc(c), "tx", false), - "broadcast_tx_async": rpcserver.NewRPCFunc(makeBroadcastTxAsyncFunc(c), "tx", false), - - // abci API - "abci_query": rpcserver.NewRPCFunc(makeABCIQueryFunc(c), "path,data,height,prove", false), - "abci_info": rpcserver.NewRPCFunc(makeABCIInfoFunc(c), "", true), - - // evidence API - "broadcast_evidence": rpcserver.NewRPCFunc(makeBroadcastEvidenceFunc(c), "evidence", false), - } +// proxyService wraps a light RPC client to export the RPC service interfaces. 
+// The interfaces are implemented by delegating to the underlying node via the +// specified client. +type proxyService struct { + Client *lrpc.Client } -type rpcHealthFunc func(ctx *rpctypes.Context) (*coretypes.ResultHealth, error) +func (p proxyService) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) { return p.Client.ABCIInfo(ctx) } -func makeHealthFunc(c *lrpc.Client) rpcHealthFunc { - return func(ctx *rpctypes.Context) (*coretypes.ResultHealth, error) { - return c.Health(ctx.Context()) - } +func (p proxyService) ABCIQuery(ctx context.Context, req *coretypes.RequestABCIQuery) (*coretypes.ResultABCIQuery, error) { + return p.Client.ABCIQueryWithOptions(ctx, req.Path, req.Data, rpcclient.ABCIQueryOptions{ + Height: int64(req.Height), + Prove: req.Prove, + }) } -type rpcStatusFunc func(ctx *rpctypes.Context) (*coretypes.ResultStatus, error) - -// nolint: interfacer -func makeStatusFunc(c *lrpc.Client) rpcStatusFunc { - return func(ctx *rpctypes.Context) (*coretypes.ResultStatus, error) { - return c.Status(ctx.Context()) - } +func (p proxyService) Block(ctx context.Context, req *coretypes.RequestBlockInfo) (*coretypes.ResultBlock, error) { + return p.Client.Block(ctx, (*int64)(req.Height)) } -type rpcNetInfoFunc func(ctx *rpctypes.Context) (*coretypes.ResultNetInfo, error) - -func makeNetInfoFunc(c *lrpc.Client) rpcNetInfoFunc { - return func(ctx *rpctypes.Context) (*coretypes.ResultNetInfo, error) { - return c.NetInfo(ctx.Context()) - } +func (p proxyService) BlockByHash(ctx context.Context, req *coretypes.RequestBlockByHash) (*coretypes.ResultBlock, error) { + return p.Client.BlockByHash(ctx, req.Hash) } -type rpcBlockchainInfoFunc func(ctx *rpctypes.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) - -func makeBlockchainInfoFunc(c *lrpc.Client) rpcBlockchainInfoFunc { - return func(ctx *rpctypes.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { - return c.BlockchainInfo(ctx.Context(), minHeight, maxHeight) - } +func (p proxyService) BlockResults(ctx context.Context, req *coretypes.RequestBlockInfo) (*coretypes.ResultBlockResults, error) { + return p.Client.BlockResults(ctx, (*int64)(req.Height)) } -type rpcGenesisFunc func(ctx *rpctypes.Context) (*coretypes.ResultGenesis, error) - -func makeGenesisFunc(c *lrpc.Client) rpcGenesisFunc { - return func(ctx *rpctypes.Context) (*coretypes.ResultGenesis, error) { - return c.Genesis(ctx.Context()) - } +func (p proxyService) BlockSearch(ctx context.Context, req *coretypes.RequestBlockSearch) (*coretypes.ResultBlockSearch, error) { + return p.Client.BlockSearch(ctx, req.Query, req.Page.IntPtr(), req.PerPage.IntPtr(), req.OrderBy) } -type rpcGenesisChunkedFunc func(ctx *rpctypes.Context, chunk uint) (*coretypes.ResultGenesisChunk, error) - -func makeGenesisChunkedFunc(c *lrpc.Client) rpcGenesisChunkedFunc { - return func(ctx *rpctypes.Context, chunk uint) (*coretypes.ResultGenesisChunk, error) { - return c.GenesisChunked(ctx.Context(), chunk) - } +func (p proxyService) BlockchainInfo(ctx context.Context, req *coretypes.RequestBlockchainInfo) (*coretypes.ResultBlockchainInfo, error) { + return p.Client.BlockchainInfo(ctx, int64(req.MinHeight), int64(req.MaxHeight)) } -type rpcHeaderFunc func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultHeader, error) - -func makeHeaderFunc(c *lrpc.Client) rpcHeaderFunc { - return func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultHeader, error) { - return c.Header(ctx.Context(), height) - } +func (p proxyService) BroadcastEvidence(ctx 
context.Context, req *coretypes.RequestBroadcastEvidence) (*coretypes.ResultBroadcastEvidence, error) { + return p.Client.BroadcastEvidence(ctx, req.Evidence) } -type rpcHeaderByHashFunc func(ctx *rpctypes.Context, hash []byte) (*coretypes.ResultHeader, error) - -func makeHeaderByHashFunc(c *lrpc.Client) rpcHeaderByHashFunc { - return func(ctx *rpctypes.Context, hash []byte) (*coretypes.ResultHeader, error) { - return c.HeaderByHash(ctx.Context(), hash) - } +func (p proxyService) BroadcastTxAsync(ctx context.Context, req *coretypes.RequestBroadcastTx) (*coretypes.ResultBroadcastTx, error) { + return p.Client.BroadcastTxAsync(ctx, req.Tx) } -type rpcBlockFunc func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultBlock, error) - -func makeBlockFunc(c *lrpc.Client) rpcBlockFunc { - return func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultBlock, error) { - return c.Block(ctx.Context(), height) - } +func (p proxyService) BroadcastTxCommit(ctx context.Context, req *coretypes.RequestBroadcastTx) (*coretypes.ResultBroadcastTxCommit, error) { + return p.Client.BroadcastTxCommit(ctx, req.Tx) } -type rpcBlockByHashFunc func(ctx *rpctypes.Context, hash []byte) (*coretypes.ResultBlock, error) - -func makeBlockByHashFunc(c *lrpc.Client) rpcBlockByHashFunc { - return func(ctx *rpctypes.Context, hash []byte) (*coretypes.ResultBlock, error) { - return c.BlockByHash(ctx.Context(), hash) - } +func (p proxyService) BroadcastTxSync(ctx context.Context, req *coretypes.RequestBroadcastTx) (*coretypes.ResultBroadcastTx, error) { + return p.Client.BroadcastTxSync(ctx, req.Tx) } -type rpcBlockResultsFunc func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultBlockResults, error) - -func makeBlockResultsFunc(c *lrpc.Client) rpcBlockResultsFunc { - return func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultBlockResults, error) { - return c.BlockResults(ctx.Context(), height) - } +func (p proxyService) CheckTx(ctx context.Context, req *coretypes.RequestCheckTx) (*coretypes.ResultCheckTx, error) { + return p.Client.CheckTx(ctx, req.Tx) } -type rpcCommitFunc func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultCommit, error) - -func makeCommitFunc(c *lrpc.Client) rpcCommitFunc { - return func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultCommit, error) { - return c.Commit(ctx.Context(), height) - } +func (p proxyService) Commit(ctx context.Context, req *coretypes.RequestBlockInfo) (*coretypes.ResultCommit, error) { + return p.Client.Commit(ctx, (*int64)(req.Height)) } -type rpcTxFunc func(ctx *rpctypes.Context, hash []byte, prove bool) (*coretypes.ResultTx, error) - -func makeTxFunc(c *lrpc.Client) rpcTxFunc { - return func(ctx *rpctypes.Context, hash []byte, prove bool) (*coretypes.ResultTx, error) { - return c.Tx(ctx.Context(), hash, prove) - } +func (p proxyService) ConsensusParams(ctx context.Context, req *coretypes.RequestConsensusParams) (*coretypes.ResultConsensusParams, error) { + return p.Client.ConsensusParams(ctx, (*int64)(req.Height)) } -type rpcTxSearchFunc func( - ctx *rpctypes.Context, - query string, - prove bool, - page, perPage *int, - orderBy string, -) (*coretypes.ResultTxSearch, error) - -func makeTxSearchFunc(c *lrpc.Client) rpcTxSearchFunc { - return func( - ctx *rpctypes.Context, - query string, - prove bool, - page, perPage *int, - orderBy string, - ) (*coretypes.ResultTxSearch, error) { - return c.TxSearch(ctx.Context(), query, prove, page, perPage, orderBy) - } +func (p proxyService) DumpConsensusState(ctx context.Context) 
(*coretypes.ResultDumpConsensusState, error) { + return p.Client.DumpConsensusState(ctx) } -type rpcBlockSearchFunc func( - ctx *rpctypes.Context, - query string, - prove bool, - page, perPage *int, - orderBy string, -) (*coretypes.ResultBlockSearch, error) - -func makeBlockSearchFunc(c *lrpc.Client) rpcBlockSearchFunc { - return func( - ctx *rpctypes.Context, - query string, - prove bool, - page, perPage *int, - orderBy string, - ) (*coretypes.ResultBlockSearch, error) { - return c.BlockSearch(ctx.Context(), query, page, perPage, orderBy) - } +func (p proxyService) Events(ctx context.Context, req *coretypes.RequestEvents) (*coretypes.ResultEvents, error) { + return p.Client.Events(ctx, req) } -type rpcValidatorsFunc func(ctx *rpctypes.Context, height *int64, - page, perPage *int, requestThresholdPublicKey *bool) (*coretypes.ResultValidators, error) - -func makeValidatorsFunc(c *lrpc.Client) rpcValidatorsFunc { - return func(ctx *rpctypes.Context, height *int64, page, perPage *int, - requestQuorumInfo *bool) (*coretypes.ResultValidators, error) { - return c.Validators(ctx.Context(), height, page, perPage, requestQuorumInfo) - } +func (p proxyService) Genesis(ctx context.Context) (*coretypes.ResultGenesis, error) { + return p.Client.Genesis(ctx) } -type rpcDumpConsensusStateFunc func(ctx *rpctypes.Context) (*coretypes.ResultDumpConsensusState, error) - -func makeDumpConsensusStateFunc(c *lrpc.Client) rpcDumpConsensusStateFunc { - return func(ctx *rpctypes.Context) (*coretypes.ResultDumpConsensusState, error) { - return c.DumpConsensusState(ctx.Context()) - } +func (p proxyService) GenesisChunked(ctx context.Context, req *coretypes.RequestGenesisChunked) (*coretypes.ResultGenesisChunk, error) { + return p.Client.GenesisChunked(ctx, uint(req.Chunk)) } -type rpcConsensusStateFunc func(ctx *rpctypes.Context) (*coretypes.ResultConsensusState, error) - -func makeConsensusStateFunc(c *lrpc.Client) rpcConsensusStateFunc { - return func(ctx *rpctypes.Context) (*coretypes.ResultConsensusState, error) { - return c.ConsensusState(ctx.Context()) - } +func (p proxyService) GetConsensusState(ctx context.Context) (*coretypes.ResultConsensusState, error) { + return p.Client.ConsensusState(ctx) } -type rpcConsensusParamsFunc func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultConsensusParams, error) - -func makeConsensusParamsFunc(c *lrpc.Client) rpcConsensusParamsFunc { - return func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultConsensusParams, error) { - return c.ConsensusParams(ctx.Context(), height) - } +func (p proxyService) Header(ctx context.Context, req *coretypes.RequestBlockInfo) (*coretypes.ResultHeader, error) { + return p.Client.Header(ctx, (*int64)(req.Height)) } -type rpcUnconfirmedTxsFunc func(ctx *rpctypes.Context, limit *int) (*coretypes.ResultUnconfirmedTxs, error) - -func makeUnconfirmedTxsFunc(c *lrpc.Client) rpcUnconfirmedTxsFunc { - return func(ctx *rpctypes.Context, limit *int) (*coretypes.ResultUnconfirmedTxs, error) { - return c.UnconfirmedTxs(ctx.Context(), limit) - } +func (p proxyService) HeaderByHash(ctx context.Context, req *coretypes.RequestBlockByHash) (*coretypes.ResultHeader, error) { + return p.Client.HeaderByHash(ctx, req.Hash) } -type rpcNumUnconfirmedTxsFunc func(ctx *rpctypes.Context) (*coretypes.ResultUnconfirmedTxs, error) - -func makeNumUnconfirmedTxsFunc(c *lrpc.Client) rpcNumUnconfirmedTxsFunc { - return func(ctx *rpctypes.Context) (*coretypes.ResultUnconfirmedTxs, error) { - return c.NumUnconfirmedTxs(ctx.Context()) - } +func (p proxyService) 
Health(ctx context.Context) (*coretypes.ResultHealth, error) { + return p.Client.Health(ctx) } -type rpcBroadcastTxCommitFunc func(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) - -func makeBroadcastTxCommitFunc(c *lrpc.Client) rpcBroadcastTxCommitFunc { - return func(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) { - return c.BroadcastTxCommit(ctx.Context(), tx) - } +func (p proxyService) NetInfo(ctx context.Context) (*coretypes.ResultNetInfo, error) { + return p.Client.NetInfo(ctx) } -type rpcBroadcastTxSyncFunc func(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) - -func makeBroadcastTxSyncFunc(c *lrpc.Client) rpcBroadcastTxSyncFunc { - return func(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { - return c.BroadcastTxSync(ctx.Context(), tx) - } +func (p proxyService) NumUnconfirmedTxs(ctx context.Context) (*coretypes.ResultUnconfirmedTxs, error) { + return p.Client.NumUnconfirmedTxs(ctx) } -type rpcBroadcastTxAsyncFunc func(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) +func (p proxyService) RemoveTx(ctx context.Context, req *coretypes.RequestRemoveTx) error { + return p.Client.RemoveTx(ctx, req.TxKey) +} -func makeBroadcastTxAsyncFunc(c *lrpc.Client) rpcBroadcastTxAsyncFunc { - return func(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { - return c.BroadcastTxAsync(ctx.Context(), tx) - } +func (p proxyService) Status(ctx context.Context) (*coretypes.ResultStatus, error) { + return p.Client.Status(ctx) } -type rpcABCIQueryFunc func(ctx *rpctypes.Context, path string, - data bytes.HexBytes, height int64, prove bool) (*coretypes.ResultABCIQuery, error) +func (p proxyService) Subscribe(ctx context.Context, req *coretypes.RequestSubscribe) (*coretypes.ResultSubscribe, error) { + return p.Client.SubscribeWS(ctx, req.Query) +} -func makeABCIQueryFunc(c *lrpc.Client) rpcABCIQueryFunc { - return func(ctx *rpctypes.Context, path string, data bytes.HexBytes, - height int64, prove bool) (*coretypes.ResultABCIQuery, error) { +func (p proxyService) Tx(ctx context.Context, req *coretypes.RequestTx) (*coretypes.ResultTx, error) { + return p.Client.Tx(ctx, req.Hash, req.Prove) +} - return c.ABCIQueryWithOptions(ctx.Context(), path, data, rpcclient.ABCIQueryOptions{ - Height: height, - Prove: prove, - }) - } +func (p proxyService) TxSearch(ctx context.Context, req *coretypes.RequestTxSearch) (*coretypes.ResultTxSearch, error) { + return p.Client.TxSearch(ctx, req.Query, req.Prove, req.Page.IntPtr(), req.PerPage.IntPtr(), req.OrderBy) } -type rpcABCIInfoFunc func(ctx *rpctypes.Context) (*coretypes.ResultABCIInfo, error) +func (p proxyService) UnconfirmedTxs(ctx context.Context, req *coretypes.RequestUnconfirmedTxs) (*coretypes.ResultUnconfirmedTxs, error) { + return p.Client.UnconfirmedTxs(ctx, req.Page.IntPtr(), req.PerPage.IntPtr()) +} -func makeABCIInfoFunc(c *lrpc.Client) rpcABCIInfoFunc { - return func(ctx *rpctypes.Context) (*coretypes.ResultABCIInfo, error) { - return c.ABCIInfo(ctx.Context()) - } +func (p proxyService) Unsubscribe(ctx context.Context, req *coretypes.RequestUnsubscribe) (*coretypes.ResultUnsubscribe, error) { + return p.Client.UnsubscribeWS(ctx, req.Query) } -type rpcBroadcastEvidenceFunc func(ctx *rpctypes.Context, ev types.Evidence) (*coretypes.ResultBroadcastEvidence, error) +func (p proxyService) UnsubscribeAll(ctx context.Context) (*coretypes.ResultUnsubscribe, error) { + return 
p.Client.UnsubscribeAllWS(ctx) +} -// nolint: interfacer -func makeBroadcastEvidenceFunc(c *lrpc.Client) rpcBroadcastEvidenceFunc { - return func(ctx *rpctypes.Context, ev types.Evidence) (*coretypes.ResultBroadcastEvidence, error) { - return c.BroadcastEvidence(ctx.Context(), ev) - } +func (p proxyService) Validators(ctx context.Context, req *coretypes.RequestValidators) (*coretypes.ResultValidators, error) { + return p.Client.Validators(ctx, (*int64)(req.Height), req.Page.IntPtr(), req.PerPage.IntPtr(), req.RequestQuorumInfo) } diff --git a/light/rpc/client.go b/light/rpc/client.go index 192f89b9fc..94856e462a 100644 --- a/light/rpc/client.go +++ b/light/rpc/client.go @@ -11,10 +11,13 @@ import ( "github.com/dashevo/dashd-go/btcjson" "github.com/gogo/protobuf/proto" + "github.com/tendermint/tendermint/libs" + abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/merkle" tmbytes "github.com/tendermint/tendermint/libs/bytes" + "github.com/tendermint/tendermint/libs/log" tmmath "github.com/tendermint/tendermint/libs/math" service "github.com/tendermint/tendermint/libs/service" rpcclient "github.com/tendermint/tendermint/rpc/client" @@ -33,6 +36,7 @@ type LightClient interface { Update(ctx context.Context, now time.Time) (*types.LightBlock, error) VerifyLightBlockAtHeight(ctx context.Context, height int64, now time.Time) (*types.LightBlock, error) TrustedLightBlock(height int64) (*types.LightBlock, error) + Status(ctx context.Context) *types.LightClientInfo } var _ rpcclient.Client = (*Client)(nil) @@ -49,6 +53,8 @@ type Client struct { // proof runtime used to verify values returned by ABCIQuery prt *merkle.ProofRuntime keyPathFn KeyPathFunc + + closers []func() } var _ rpcclient.Client = (*Client)(nil) @@ -87,36 +93,48 @@ func DefaultMerkleKeyPathFn() KeyPathFunc { } // NewClient returns a new client. -func NewClient(next rpcclient.Client, lc LightClient, opts ...Option) *Client { +func NewClient(logger log.Logger, next rpcclient.Client, lc LightClient, opts ...Option) *Client { c := &Client{ next: next, lc: lc, prt: merkle.DefaultProofRuntime(), } - c.BaseService = *service.NewBaseService(nil, "Client", c) + c.BaseService = *service.NewBaseService(logger, "Client", c) for _, o := range opts { o(c) } return c } -func (c *Client) OnStart() error { - if !c.next.IsRunning() { - return c.next.Start() +func (c *Client) OnStart(ctx context.Context) error { + nctx, ncancel := context.WithCancel(ctx) + if err := c.next.Start(nctx); err != nil { + ncancel() + return err } + c.closers = append(c.closers, ncancel) + return nil } func (c *Client) OnStop() { - if c.next.IsRunning() { - if err := c.next.Stop(); err != nil { - c.Logger.Error("Error stopping on next", "err", err) - } + for _, closer := range c.closers { + closer() } } +// Status returns the status of the light client. Previously this was querying the primary connected to the client. +// As a consequence of this change, running /status on the light client will return empty SyncInfo, NodeInfo +// and ValidatorInfo. 
func (c *Client) Status(ctx context.Context) (*coretypes.ResultStatus, error) { - return c.next.Status(ctx) + lightClientInfo := c.lc.Status(ctx) + + return &coretypes.ResultStatus{ + NodeInfo: types.NodeInfo{}, + SyncInfo: coretypes.SyncInfo{}, + ValidatorInfo: coretypes.ValidatorInfo{}, + LightClientInfo: *lightClientInfo, + }, nil } func (c *Client) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) { @@ -215,8 +233,8 @@ func (c *Client) BroadcastTxSync(ctx context.Context, tx types.Tx) (*coretypes.R return c.next.BroadcastTxSync(ctx, tx) } -func (c *Client) UnconfirmedTxs(ctx context.Context, limit *int) (*coretypes.ResultUnconfirmedTxs, error) { - return c.next.UnconfirmedTxs(ctx, limit) +func (c *Client) UnconfirmedTxs(ctx context.Context, page, perPage *int) (*coretypes.ResultUnconfirmedTxs, error) { + return c.next.UnconfirmedTxs(ctx, page, perPage) } func (c *Client) NumUnconfirmedTxs(ctx context.Context) (*coretypes.ResultUnconfirmedTxs, error) { @@ -272,6 +290,10 @@ func (c *Client) ConsensusParams(ctx context.Context, height *int64) (*coretypes return res, nil } +func (c *Client) Events(ctx context.Context, req *coretypes.RequestEvents) (*coretypes.ResultEvents, error) { + return c.next.Events(ctx, req) +} + func (c *Client) Health(ctx context.Context) (*coretypes.ResultHealth, error) { return c.next.Health(ctx) } @@ -426,32 +448,25 @@ func (c *Client) BlockResults(ctx context.Context, height *int64) (*coretypes.Re return nil, err } - // proto-encode BeginBlock events - bbeBytes, err := proto.Marshal(&abci.ResponseBeginBlock{ - Events: res.BeginBlockEvents, + // proto-encode FinalizeBlock events + bbeBytes, err := proto.Marshal(&abci.ResponseFinalizeBlock{ + Events: res.FinalizeBlockEvents, }) if err != nil { return nil, err } - // Build a Merkle tree of proto-encoded DeliverTx results and get a hash. - results := types.NewResults(res.TxsResults) - - // proto-encode EndBlock events. - ebeBytes, err := proto.Marshal(&abci.ResponseEndBlock{ - Events: res.EndBlockEvents, - }) + // Build a Merkle tree out of the slice. + rs, err := abci.MarshalTxResults(res.TxsResults) if err != nil { return nil, err } - - // Build a Merkle tree out of the above 3 binary slices. - rH := merkle.HashFromByteSlices([][]byte{bbeBytes, results.Hash(), ebeBytes}) + mh := merkle.HashFromByteSlices(append([][]byte{bbeBytes}, rs...)) // Verify block results. 
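// The comparison below checks that the trusted header's LastResultsHash equals
// the Merkle root computed above from the proto-encoded FinalizeBlock events
// followed by the marshaled per-tx results:
//
//	mh := merkle.HashFromByteSlices(append([][]byte{bbeBytes}, rs...))
//
// A mismatch means the node served block results inconsistent with the
// verified header.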
- if !bytes.Equal(rH, trustedBlock.LastResultsHash) { + if !bytes.Equal(mh, trustedBlock.LastResultsHash) { return nil, fmt.Errorf("last results %X does not match with trusted last results %X", - rH, trustedBlock.LastResultsHash) + mh, trustedBlock.LastResultsHash) } return res, nil @@ -570,13 +585,13 @@ func (c *Client) Validators( } skipCount := validateSkipCount(page, perPage) - v := l.ValidatorSet.Validators[skipCount : skipCount+tmmath.MinInt(perPage, totalCount-skipCount)] + v := l.ValidatorSet.Validators[skipCount : skipCount+tmmath.MinInt(int(perPage), totalCount-skipCount)] var ( thresholdPublicKey crypto.PubKey quorumHash crypto.QuorumHash quorumType btcjson.LLMQType ) - if *requestQuorumInfo { + if libs.BoolValue(requestQuorumInfo) { thresholdPublicKey = l.ValidatorSet.ThresholdPublicKey quorumHash = l.ValidatorSet.QuorumHash quorumType = l.ValidatorSet.QuorumType @@ -600,15 +615,15 @@ func (c *Client) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*cor func (c *Client) Subscribe(ctx context.Context, subscriber, query string, outCapacity ...int) (out <-chan coretypes.ResultEvent, err error) { - return c.next.Subscribe(ctx, subscriber, query, outCapacity...) + return c.next.Subscribe(ctx, subscriber, query, outCapacity...) //nolint:staticcheck } func (c *Client) Unsubscribe(ctx context.Context, subscriber, query string) error { - return c.next.Unsubscribe(ctx, subscriber, query) + return c.next.Unsubscribe(ctx, subscriber, query) //nolint:staticcheck } func (c *Client) UnsubscribeAll(ctx context.Context, subscriber string) error { - return c.next.UnsubscribeAll(ctx, subscriber) + return c.next.UnsubscribeAll(ctx, subscriber) //nolint:staticcheck } func (c *Client) updateLightClientIfNeededTo(ctx context.Context, height *int64) (*types.LightBlock, error) { @@ -634,8 +649,12 @@ func (c *Client) RegisterOpDecoder(typ string, dec merkle.OpDecoder) { // SubscribeWS subscribes for events using the given query and remote address as // a subscriber, but does not verify responses (UNSAFE)! // TODO: verify data -func (c *Client) SubscribeWS(ctx *rpctypes.Context, query string) (*coretypes.ResultSubscribe, error) { - out, err := c.next.Subscribe(context.Background(), ctx.RemoteAddr(), query) +func (c *Client) SubscribeWS(ctx context.Context, query string) (*coretypes.ResultSubscribe, error) { + bctx, bcancel := context.WithCancel(context.Background()) + c.closers = append(c.closers, bcancel) + + callInfo := rpctypes.GetCallInfo(ctx) + out, err := c.next.Subscribe(bctx, callInfo.RemoteAddr(), query) //nolint:staticcheck if err != nil { return nil, err } @@ -646,12 +665,8 @@ func (c *Client) SubscribeWS(ctx *rpctypes.Context, query string) (*coretypes.Re case resultEvent := <-out: // We should have a switch here that performs a validation // depending on the event's type. - ctx.WSConn.TryWriteRPCResponse( - rpctypes.NewRPCSuccessResponse( - rpctypes.JSONRPCStringID(fmt.Sprintf("%v#event", ctx.JSONReq.ID)), - resultEvent, - )) - case <-c.Quit(): + callInfo.WSConn.TryWriteRPCResponse(bctx, callInfo.RPCRequest.MakeResponse(resultEvent)) + case <-bctx.Done(): return } } @@ -662,8 +677,8 @@ func (c *Client) SubscribeWS(ctx *rpctypes.Context, query string) (*coretypes.Re // UnsubscribeWS calls original client's Unsubscribe using remote address as a // subscriber. 
-func (c *Client) UnsubscribeWS(ctx *rpctypes.Context, query string) (*coretypes.ResultUnsubscribe, error) { - err := c.next.Unsubscribe(context.Background(), ctx.RemoteAddr(), query) +func (c *Client) UnsubscribeWS(ctx context.Context, query string) (*coretypes.ResultUnsubscribe, error) { + err := c.next.Unsubscribe(context.Background(), rpctypes.GetCallInfo(ctx).RemoteAddr(), query) //nolint:staticcheck if err != nil { return nil, err } @@ -672,8 +687,8 @@ func (c *Client) UnsubscribeWS(ctx *rpctypes.Context, query string) (*coretypes. // UnsubscribeAllWS calls original client's UnsubscribeAll using remote address // as a subscriber. -func (c *Client) UnsubscribeAllWS(ctx *rpctypes.Context) (*coretypes.ResultUnsubscribe, error) { - err := c.next.UnsubscribeAll(context.Background(), ctx.RemoteAddr()) +func (c *Client) UnsubscribeAllWS(ctx context.Context) (*coretypes.ResultUnsubscribe, error) { + err := c.next.UnsubscribeAll(context.Background(), rpctypes.GetCallInfo(ctx).RemoteAddr()) //nolint:staticcheck if err != nil { return nil, err } @@ -687,16 +702,13 @@ const ( maxPerPage = 100 ) -func validatePage(pagePtr *int, perPage, totalCount int) (int, error) { - if perPage < 1 { - panic(fmt.Errorf("%w (%d)", coretypes.ErrZeroOrNegativePerPage, perPage)) - } +func validatePage(pagePtr *int, perPage uint, totalCount int) (int, error) { if pagePtr == nil { // no page parameter return 1, nil } - pages := ((totalCount - 1) / perPage) + 1 + pages := ((totalCount - 1) / int(perPage)) + 1 if pages == 0 { pages = 1 // one page (even if it's empty) } @@ -708,7 +720,7 @@ func validatePage(pagePtr *int, perPage, totalCount int) (int, error) { return page, nil } -func validatePerPage(perPagePtr *int) int { +func validatePerPage(perPagePtr *int) uint { if perPagePtr == nil { // no per_page parameter return defaultPerPage } @@ -719,11 +731,11 @@ func validatePerPage(perPagePtr *int) int { } else if perPage > maxPerPage { return maxPerPage } - return perPage + return uint(perPage) } -func validateSkipCount(page, perPage int) int { - skipCount := (page - 1) * perPage +func validateSkipCount(page int, perPage uint) int { + skipCount := (page - 1) * int(perPage) if skipCount < 0 { return 0 } diff --git a/light/rpc/mocks/light_client.go b/light/rpc/mocks/light_client.go index cc32cf6494..ea6d6a2d44 100644 --- a/light/rpc/mocks/light_client.go +++ b/light/rpc/mocks/light_client.go @@ -7,6 +7,8 @@ import ( mock "github.com/stretchr/testify/mock" + testing "testing" + time "time" types "github.com/tendermint/tendermint/types" @@ -31,6 +33,22 @@ func (_m *LightClient) ChainID() string { return r0 } +// Status provides a mock function with given fields: ctx +func (_m *LightClient) Status(ctx context.Context) *types.LightClientInfo { + ret := _m.Called(ctx) + + var r0 *types.LightClientInfo + if rf, ok := ret.Get(0).(func(context.Context) *types.LightClientInfo); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.LightClientInfo) + } + } + + return r0 +} + // TrustedLightBlock provides a mock function with given fields: height func (_m *LightClient) TrustedLightBlock(height int64) (*types.LightBlock, error) { ret := _m.Called(height) @@ -99,3 +117,13 @@ func (_m *LightClient) VerifyLightBlockAtHeight(ctx context.Context, height int6 return r0, r1 } + +// NewLightClient creates a new instance of LightClient. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. 
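+// A hypothetical test would use it as:
+//
+//	lc := mocks.NewLightClient(t)
+//	lc.On("Status", mock.Anything).Return(&types.LightClientInfo{})
+//	// ... exercise the code under test; expectations are asserted via t.Cleanup.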
+func NewLightClient(t testing.TB) *LightClient { + mock := &LightClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/light/setup.go b/light/setup.go index c23068d08f..a6c3e6703b 100644 --- a/light/setup.go +++ b/light/setup.go @@ -3,7 +3,7 @@ package light import ( "context" - dashcore "github.com/tendermint/tendermint/dashcore/rpc" + dashcore "github.com/tendermint/tendermint/dash/core" "github.com/tendermint/tendermint/light/provider" "github.com/tendermint/tendermint/light/provider/http" "github.com/tendermint/tendermint/light/store" @@ -21,7 +21,7 @@ func NewHTTPClient( primaryAddress string, witnessesAddresses []string, trustedStore store.Store, - dashCoreRPCClient dashcore.Client, + dashCoreRPCClient dashcore.QuorumVerifier, options ...Option) (*Client, error) { providers, err := providersFromAddresses(append(witnessesAddresses, primaryAddress), chainID) diff --git a/light/store/db/db.go b/light/store/db/db.go index acfda1f796..c364e17092 100644 --- a/light/store/db/db.go +++ b/light/store/db/db.go @@ -3,11 +3,11 @@ package db import ( "encoding/binary" "fmt" + "sync" "github.com/google/orderedcode" dbm "github.com/tendermint/tm-db" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/light/store" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" @@ -21,7 +21,7 @@ const ( type dbs struct { db dbm.DB - mtx tmsync.RWMutex + mtx sync.RWMutex size uint16 } diff --git a/light/store/db/db_test.go b/light/store/db/db_test.go index 2ed90223d7..be30f26676 100644 --- a/light/store/db/db_test.go +++ b/light/store/db/db_test.go @@ -1,6 +1,7 @@ package db import ( + "context" "sync" "testing" "time" @@ -10,7 +11,6 @@ import ( dbm "github.com/tendermint/tm-db" "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/crypto/tmhash" tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/version" @@ -18,6 +18,8 @@ import ( func TestLast_FirstLightBlockHeight(t *testing.T) { dbStore := New(dbm.NewMemDB()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // Empty store height, err := dbStore.LastLightBlockHeight() @@ -29,7 +31,7 @@ func TestLast_FirstLightBlockHeight(t *testing.T) { assert.EqualValues(t, -1, height) // 1 key - err = dbStore.SaveLightBlock(randLightBlock(int64(1))) + err = dbStore.SaveLightBlock(randLightBlock(ctx, t, int64(1))) require.NoError(t, err) height, err = dbStore.LastLightBlockHeight() @@ -43,6 +45,8 @@ func TestLast_FirstLightBlockHeight(t *testing.T) { func Test_SaveLightBlock(t *testing.T) { dbStore := New(dbm.NewMemDB()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // Empty store h, err := dbStore.LightBlock(1) @@ -50,7 +54,7 @@ func Test_SaveLightBlock(t *testing.T) { assert.Nil(t, h) // 1 key - err = dbStore.SaveLightBlock(randLightBlock(1)) + err = dbStore.SaveLightBlock(randLightBlock(ctx, t, 1)) require.NoError(t, err) size := dbStore.Size() @@ -73,13 +77,15 @@ func Test_SaveLightBlock(t *testing.T) { func Test_LightBlockBefore(t *testing.T) { dbStore := New(dbm.NewMemDB()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() assert.Panics(t, func() { _, _ = dbStore.LightBlockBefore(0) _, _ = dbStore.LightBlockBefore(100) }) - err := dbStore.SaveLightBlock(randLightBlock(int64(2))) + err := dbStore.SaveLightBlock(randLightBlock(ctx, t, 
int64(2))) require.NoError(t, err) h, err := dbStore.LightBlockBefore(3) @@ -94,6 +100,8 @@ func Test_LightBlockBefore(t *testing.T) { func Test_Prune(t *testing.T) { dbStore := New(dbm.NewMemDB()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // Empty store assert.EqualValues(t, 0, dbStore.Size()) @@ -101,7 +109,7 @@ func Test_Prune(t *testing.T) { require.NoError(t, err) // One header - err = dbStore.SaveLightBlock(randLightBlock(2)) + err = dbStore.SaveLightBlock(randLightBlock(ctx, t, 2)) require.NoError(t, err) assert.EqualValues(t, 1, dbStore.Size()) @@ -116,7 +124,7 @@ func Test_Prune(t *testing.T) { // Multiple headers for i := 1; i <= 10; i++ { - err = dbStore.SaveLightBlock(randLightBlock(int64(i))) + err = dbStore.SaveLightBlock(randLightBlock(ctx, t, int64(i))) require.NoError(t, err) } @@ -132,13 +140,16 @@ func Test_Prune(t *testing.T) { func Test_Concurrency(t *testing.T) { dbStore := New(dbm.NewMemDB()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + var wg sync.WaitGroup for i := 1; i <= 100; i++ { wg.Add(1) go func(i int64) { defer wg.Done() - err := dbStore.SaveLightBlock(randLightBlock(i)) + err := dbStore.SaveLightBlock(randLightBlock(ctx, t, i)) require.NoError(t, err) _, err = dbStore.LightBlock(i) @@ -181,7 +192,8 @@ func Test_Concurrency(t *testing.T) { wg.Wait() } -func randLightBlock(height int64) *types.LightBlock { +func randLightBlock(ctx context.Context, t *testing.T, height int64) *types.LightBlock { + t.Helper() vals, _ := types.RandValidatorSet(2) return &types.LightBlock{ SignedHeader: &types.SignedHeader{ @@ -191,14 +203,14 @@ func randLightBlock(height int64) *types.LightBlock { Height: height, Time: time.Now(), LastBlockID: types.BlockID{}, - LastCommitHash: crypto.CRandBytes(tmhash.Size), - DataHash: crypto.CRandBytes(tmhash.Size), - ValidatorsHash: crypto.CRandBytes(tmhash.Size), - NextValidatorsHash: crypto.CRandBytes(tmhash.Size), - ConsensusHash: crypto.CRandBytes(tmhash.Size), - AppHash: crypto.CRandBytes(tmhash.Size), - LastResultsHash: crypto.CRandBytes(tmhash.Size), - EvidenceHash: crypto.CRandBytes(tmhash.Size), + LastCommitHash: crypto.CRandBytes(crypto.HashSize), + DataHash: crypto.CRandBytes(crypto.HashSize), + ValidatorsHash: crypto.CRandBytes(crypto.HashSize), + NextValidatorsHash: crypto.CRandBytes(crypto.HashSize), + ConsensusHash: crypto.CRandBytes(crypto.HashSize), + AppHash: crypto.CRandBytes(crypto.HashSize), + LastResultsHash: crypto.CRandBytes(crypto.HashSize), + EvidenceHash: crypto.CRandBytes(crypto.HashSize), ProposerProTxHash: crypto.CRandBytes(crypto.DefaultHashSize), }, Commit: &types.Commit{}, diff --git a/networks/local/README.md b/networks/local/README.md index dcb31ae713..10fc19932c 100644 --- a/networks/local/README.md +++ b/networks/local/README.md @@ -1,3 +1,3 @@ # Local Cluster with Docker Compose -See the [docs](https://docs.tendermint.com/master/networks/docker-compose.html). +See the [docs](https://docs.tendermint.com/master/tools/docker-compose.html). 
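// [editorial sketch] A compact illustration of how the reworked pagination
// helpers from light/rpc/client.go above compose now that perPage is a uint
// (the old panic on a zero or negative per_page disappears because
// validatePerPage clamps input into [1, maxPerPage]). paginateValidators is a
// hypothetical wrapper, not part of this diff; validatePerPage, validatePage,
// validateSkipCount and tmmath.MinInt are used with the signatures shown in
// the hunks above.
func paginateValidators(pagePtr *int, totalCount int) (start, end int, err error) {
	perPage := validatePerPage(nil) // nil per_page -> server default, as uint
	page, err := validatePage(pagePtr, perPage, totalCount)
	if err != nil {
		return 0, 0, err // page outside [1, pages]
	}
	start = validateSkipCount(page, perPage) // (page-1)*int(perPage), floored at 0
	end = start + tmmath.MinInt(int(perPage), totalCount-start)
	return start, end, nil
}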
diff --git a/node/node.go b/node/node.go index b71548a4cb..b58fc506ae 100644 --- a/node/node.go +++ b/node/node.go @@ -2,25 +2,28 @@ package node import ( "context" - "errors" "fmt" "net" "net/http" "strconv" + "strings" "time" "github.com/dashevo/dashd-go/btcjson" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" - "github.com/rs/cors" abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/dash/core" dashquorum "github.com/tendermint/tendermint/dash/quorum" - dashcore "github.com/tendermint/tendermint/dashcore/rpc" + "github.com/tendermint/tendermint/internal/blocksync" "github.com/tendermint/tendermint/internal/consensus" + "github.com/tendermint/tendermint/internal/eventbus" + "github.com/tendermint/tendermint/internal/eventlog" + "github.com/tendermint/tendermint/internal/evidence" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/pex" @@ -28,18 +31,14 @@ import ( rpccore "github.com/tendermint/tendermint/internal/rpc/core" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/state/indexer" + "github.com/tendermint/tendermint/internal/state/indexer/sink" "github.com/tendermint/tendermint/internal/statesync" "github.com/tendermint/tendermint/internal/store" + tmbytes "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/libs/log" - tmnet "github.com/tendermint/tendermint/libs/net" - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/libs/strings" tmtime "github.com/tendermint/tendermint/libs/time" "github.com/tendermint/tendermint/privval" - tmgrpc "github.com/tendermint/tendermint/privval/grpc" - grpccore "github.com/tendermint/tendermint/rpc/grpc" - rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server" "github.com/tendermint/tendermint/types" _ "net/http/pprof" // nolint: gosec // securely exposed on separate, optional port @@ -51,6 +50,7 @@ import ( // It includes all configuration information and running services. 
type nodeImpl struct { service.BaseService + logger log.Logger // config config *config.Config @@ -58,336 +58,334 @@ type nodeImpl struct { privValidator types.PrivValidator // local node's validator key // network - transport *p2p.MConnTransport - sw *p2p.Switch // p2p connections peerManager *p2p.PeerManager router *p2p.Router - addrBook pex.AddrBook // known peers nodeInfo types.NodeInfo nodeKey types.NodeKey // our node privkey - isListening bool // services - eventBus *types.EventBus // pub/sub for services - stateStore sm.Store - blockStore *store.BlockStore // store the blockchain to disk - bcReactor service.Service // for block-syncing - mempoolReactor service.Service // for gossipping transactions - mempool mempool.Mempool - stateSync bool // whether the node should state sync on startup - stateSyncReactor *statesync.Reactor // for hosting and restoring state sync snapshots - consensusReactor *consensus.Reactor // for participating in the consensus - pexReactor service.Service // for exchanging peer addresses - evidenceReactor service.Service - rpcListeners []net.Listener // rpc servers - indexerService service.Service - rpcEnv *rpccore.Environment - prometheusSrv *http.Server + eventSinks []indexer.EventSink + initialState sm.State + stateStore sm.Store + blockStore *store.BlockStore // store the blockchain to disk + evPool *evidence.Pool + indexerService *indexer.Service + services []service.Service + rpcListeners []net.Listener // rpc servers + shutdownOps closer + rpcEnv *rpccore.Environment + prometheusSrv *http.Server // Dash validatorConnExecutor *dashquorum.ValidatorConnExecutor - dashCoreRPCClient dashcore.Client } // newDefaultNode returns a Tendermint node with default settings for the // PrivValidator, ClientCreator, GenesisDoc, and DBProvider. // It implements NodeProvider. -func newDefaultNode(cfg *config.Config, logger log.Logger) (service.Service, error) { +func newDefaultNode( + ctx context.Context, + cfg *config.Config, + logger log.Logger, +) (service.Service, error) { nodeKey, err := types.LoadOrGenNodeKey(cfg.NodeKeyFile()) if err != nil { return nil, fmt.Errorf("failed to load or gen node key %s: %w", cfg.NodeKeyFile(), err) } if cfg.Mode == config.ModeSeed { - return makeSeedNode(cfg, + return makeSeedNode( + logger, + cfg, config.DefaultDBProvider, nodeKey, defaultGenesisDocProviderFunc(cfg), - logger, ) } - var pval *privval.FilePV - if cfg.Mode == config.ModeValidator { - pval, err = privval.LoadOrGenFilePV(cfg.PrivValidator.KeyFile(), cfg.PrivValidator.StateFile()) - if err != nil { - return nil, err - } - } else { - pval = nil + appClient, _, err := proxy.ClientFactory(logger, cfg.ProxyApp, cfg.ABCI, cfg.DBDir()) + if err != nil { + return nil, err } - appClient, _ := proxy.DefaultClientCreator(cfg.ProxyApp, cfg.ABCI, cfg.DBDir()) - return makeNode(cfg, - pval, + return makeNode( + ctx, + cfg, nodeKey, appClient, defaultGenesisDocProviderFunc(cfg), config.DefaultDBProvider, - nil, logger, ) } // makeNode returns a new, ready to go, Tendermint Node. 
-func makeNode(cfg *config.Config, - privValidator types.PrivValidator, +func makeNode( + ctx context.Context, + cfg *config.Config, nodeKey types.NodeKey, - clientCreator abciclient.Creator, + client abciclient.Client, genesisDocProvider genesisDocProvider, dbProvider config.DBProvider, - dashCoreRPCClient dashcore.Client, - logger log.Logger) (service.Service, error) { + logger log.Logger, +) (service.Service, error) { + var cancel context.CancelFunc + ctx, cancel = context.WithCancel(ctx) - blockStore, stateDB, err := initDBs(cfg, dbProvider) + closers := []closer{convertCancelCloser(cancel)} + + blockStore, stateDB, dbCloser, err := initDBs(cfg, dbProvider) if err != nil { - return nil, err + return nil, combineCloseError(err, dbCloser) } + closers = append(closers, dbCloser) + stateStore := sm.NewStore(stateDB) genDoc, err := genesisDocProvider() if err != nil { - return nil, err + return nil, combineCloseError(err, makeCloser(closers)) } - err = genDoc.ValidateAndComplete() - if err != nil { - return nil, fmt.Errorf("error in genesis doc: %w", err) + if err = genDoc.ValidateAndComplete(); err != nil { + return nil, combineCloseError(fmt.Errorf("error in genesis doc: %w", err), makeCloser(closers)) } state, err := loadStateFromDBOrGenesisDocProvider(stateStore, genDoc) if err != nil { - return nil, err + return nil, combineCloseError(err, makeCloser(closers)) } nodeMetrics := defaultMetricsProvider(cfg.Instrumentation)(genDoc.ChainID) - // Create the proxyApp and establish connections to the ABCI app (consensus, mempool, query). - proxyApp, err := createAndStartProxyAppConns(clientCreator, logger, nodeMetrics.proxy) - if err != nil { - return nil, err + proxyApp := proxy.New(client, logger.With("module", "proxy"), nodeMetrics.proxy) + eventBus := eventbus.NewDefault(logger.With("module", "events")) + + var eventLog *eventlog.Log + if w := cfg.RPC.EventLogWindowSize; w > 0 { + var err error + eventLog, err = eventlog.New(eventlog.LogSettings{ + WindowSize: w, + MaxItems: cfg.RPC.EventLogMaxItems, + Metrics: nodeMetrics.eventlog, + }) + if err != nil { + return nil, combineCloseError(fmt.Errorf("initializing event log: %w", err), makeCloser(closers)) + } } - - // EventBus and IndexerService must be started before the handshake because - // we might need to index the txs of the replayed block as this might not have happened - // when the node stopped last time (i.e. 
the node stopped after it saved the block - // but before it indexed the txs, or, endblocker panicked) - eventBus, err := createAndStartEventBus(logger) + eventSinks, err := sink.EventSinksFromConfig(cfg, dbProvider, genDoc.ChainID) if err != nil { - return nil, err + return nil, combineCloseError(err, makeCloser(closers)) } + indexerService := indexer.NewService(indexer.ServiceArgs{ + Sinks: eventSinks, + EventBus: eventBus, + Logger: logger.With("module", "txindex"), + Metrics: nodeMetrics.indexer, + }) - indexerService, eventSinks, err := createAndStartIndexerService(cfg, dbProvider, - eventBus, logger, genDoc.ChainID, nodeMetrics.indexer) - if err != nil { - return nil, err - } + var ( + proTxHash crypto.ProTxHash + privValidator types.PrivValidator + dashCoreRPCClient core.Client + ) + switch cfg.Mode { + case config.ModeValidator: + privValidator, err = createPrivval(ctx, logger, cfg, genDoc) + if err != nil { + return nil, combineCloseError(err, makeCloser(closers)) + } + if dashPrivval, ok := privValidator.(privval.DashPrivValidator); ok { + dashCoreRPCClient = dashPrivval.DashRPCClient() + } - var proTxHash crypto.ProTxHash - switch { - case cfg.PrivValidator.CoreRPCHost != "": - logger.Info( - "Initializing Dash Core Signing", - "quorum hash", - state.Validators.QuorumHash.String(), - ) - /* - llmqType := config.Consensus.QuorumType - if llmqType == 0 { - llmqType = btcjson.LLMQType_100_67 - }*/ - if dashCoreRPCClient == nil { - rpcClient, err := DefaultDashCoreRPCClient(cfg, logger.With("module", dashcore.ModuleName)) - if err != nil { - return nil, fmt.Errorf("failed to create Dash Core RPC client %w", err) - } - dashCoreRPCClient = rpcClient + proTxHash, err = privValidator.GetProTxHash(ctx) + if err != nil { + return nil, err } - if cfg.Mode == config.ModeValidator { - // If a local port is provided for Dash Core rpc into the service to sign. - privValidator, err = createAndStartPrivValidatorRPCClient( - cfg.Consensus.QuorumType, - dashCoreRPCClient, - logger, - ) - if err != nil { - return nil, fmt.Errorf("error with private validator RPC client: %w", err) - } - proTxHash, err = privValidator.GetProTxHash(context.TODO()) + case config.ModeFull: + // Special handling on non-Validator nodes + logger.Info("this node is NOT a validator") + + if cfg.PrivValidator.CoreRPCHost != "" { + dashCoreRPCClient, err = DefaultDashCoreRPCClient(cfg, logger.With("module", core.ModuleName)) if err != nil { - return nil, fmt.Errorf("can't get proTxHash using dash core signing: %w", err) + return nil, fmt.Errorf("failed to create Dash Core RPC client: %w", err) } - logger.Info("Connected to Core RPC Masternode", "proTxHash", proTxHash.String()) } else { - logger.Info("Connected to Core RPC FullNode") - } - case cfg.PrivValidator.ListenAddr != "": - // If an address is provided, listen on the socket for a connection from an - // external signing process. 
- // FIXME: we should start services inside OnStart - protocol, _ := tmnet.ProtocolAndAddress(cfg.PrivValidator.ListenAddr) - // FIXME: we should start services inside OnStart - switch protocol { - case "grpc": - privValidator, err = createAndStartPrivValidatorGRPCClient(cfg, genDoc.ChainID, genDoc.QuorumHash, logger) - if err != nil { - return nil, fmt.Errorf("error with private validator grpc client: %w", err) - } - default: - privValidator, err = createAndStartPrivValidatorSocketClient(cfg.PrivValidator.ListenAddr, genDoc.ChainID, genDoc.QuorumHash, logger) - if err != nil { - return nil, fmt.Errorf("error with private validator socket client: %w", err) - } - } - if cfg.Mode == config.ModeValidator { - proTxHash, err = privValidator.GetProTxHash(context.TODO()) - if err != nil { - return nil, fmt.Errorf("can't get proTxHash using dash core signing: %w", err) + llmqType := cfg.Consensus.QuorumType + if llmqType == 0 { + llmqType = btcjson.LLMQType_100_67 } - logger.Info( - "Connected to Private Validator through listen address", - "proTxHash", - proTxHash.String(), - ) - } else { - logger.Info("Connected to Private Validator through listen address") - } - default: - privValidator, err = privval.LoadOrGenFilePV( - cfg.PrivValidator.KeyFile(), - cfg.PrivValidator.StateFile(), - ) - if err != nil { - return nil, fmt.Errorf("error with private validator loaded: %w", err) - } - proTxHash, err = privValidator.GetProTxHash(context.TODO()) - if err != nil { - return nil, fmt.Errorf("can't get proTxHash through file: %w", err) - } - logger.Info("Private Validator using local file", "proTxHash", proTxHash.String()) - } - - if dashCoreRPCClient == nil { - llmqType := cfg.Consensus.QuorumType - if llmqType == 0 { - llmqType = btcjson.LLMQType_100_67 + // This is used for light client verification only + dashCoreRPCClient = core.NewMockClient(cfg.ChainID(), llmqType, privValidator, false) } - // This is used for light client verification only - dashCoreRPCClient = dashcore.NewMockClient(cfg.ChainID(), llmqType, privValidator, false) } weAreOnlyValidator := onlyValidatorIsUs(state, proTxHash) - // Determine whether we should attempt state sync. - stateSync := cfg.StateSync.Enable && !weAreOnlyValidator - if stateSync && state.LastBlockHeight > 0 { - logger.Info("Found local state with non-zero height, skipping state sync") - stateSync = false + peerManager, peerCloser, err := createPeerManager(cfg, dbProvider, nodeKey.ID) + closers = append(closers, peerCloser) + if err != nil { + return nil, combineCloseError( + fmt.Errorf("failed to create peer manager: %w", err), + makeCloser(closers)) } - // Create the handshaker, which calls RequestInfo, sets the AppVersion on the state, - // and replays any blocks as necessary to sync tendermint with the app. - consensusLogger := logger.With("module", "consensus") + // Start Dash connection executor + var validatorConnExecutor *dashquorum.ValidatorConnExecutor if len(proTxHash) > 0 { - consensusLogger = consensusLogger.With("node_proTxHash", proTxHash.ShortString()) - } - proposedAppVersion := uint64(0) - if !stateSync { - if proposedAppVersion, err = doHandshake(stateStore, state, blockStore, genDoc, proTxHash, cfg.Consensus.AppHashSize, eventBus, proxyApp, consensusLogger); err != nil { - return nil, err - } - - // Reload the state. It will have the Version.Consensus.App set by the - // Handshake, and may have other modifications as well (ie. depending on - // what happened during block replay). 
- state, err = stateStore.Load() + vcLogger := logger.With("node_proTxHash", proTxHash.ShortString(), "module", "ValidatorConnExecutor") + dcm := p2p.NewRouterDashDialer(peerManager, vcLogger) + validatorConnExecutor, err = dashquorum.NewValidatorConnExecutor( + proTxHash, + eventBus, + dcm, + dashquorum.WithLogger(vcLogger), + dashquorum.WithValidatorsSet(state.Validators), + ) if err != nil { - return nil, fmt.Errorf("cannot load state: %w", err) + return nil, combineCloseError(err, makeCloser(closers)) } } - // Determine whether we should do block sync. This must happen after the handshake, since the - // app may modify the validator set, specifying ourself as the only validator. - blockSync := cfg.BlockSync.Enable && !weAreOnlyValidator + // TODO construct node here: + node := &nodeImpl{ + config: cfg, + logger: logger, + genesisDoc: genDoc, + privValidator: privValidator, - logNodeStartupInfo(state, proTxHash, logger, consensusLogger, cfg.Mode) + peerManager: peerManager, + nodeKey: nodeKey, - // TODO: Fetch and provide real options and do proper p2p bootstrapping. - // TODO: Use a persistent peer database. - nodeInfo, err := makeNodeInfo(cfg, nodeKey, proTxHash, eventSinks, genDoc, state) - if err != nil { - return nil, err - } + eventSinks: eventSinks, + indexerService: indexerService, + services: []service.Service{eventBus}, - p2pLogger := logger.With("module", "p2p") - transport := createTransport(p2pLogger, cfg) + initialState: state, + stateStore: stateStore, + blockStore: blockStore, - peerManager, err := createPeerManager(cfg, dbProvider, p2pLogger, nodeKey.ID) - if err != nil { - return nil, fmt.Errorf("failed to create peer manager: %w", err) - } + shutdownOps: makeCloser(closers), - router, err := createRouter(p2pLogger, nodeMetrics.p2p, nodeInfo, nodeKey.PrivKey, - peerManager, transport, getRouterConfig(cfg, proxyApp)) - if err != nil { - return nil, fmt.Errorf("failed to create router: %w", err) + validatorConnExecutor: validatorConnExecutor, + + rpcEnv: &rpccore.Environment{ + ProxyApp: proxyApp, + + StateStore: stateStore, + BlockStore: blockStore, + + PeerManager: peerManager, + + GenDoc: genDoc, + EventSinks: eventSinks, + EventBus: eventBus, + EventLog: eventLog, + Logger: logger.With("module", "rpc"), + Config: *cfg.RPC, + }, } - mpReactorShim, mpReactor, mp, err := createMempoolReactor( - cfg, proxyApp, state, nodeMetrics.mempool, peerManager, router, logger, - ) + node.router, err = createRouter(logger, nodeMetrics.p2p, node.NodeInfo, nodeKey, peerManager, cfg, proxyApp) if err != nil { - return nil, err + return nil, combineCloseError( + fmt.Errorf("failed to create router: %w", err), + makeCloser(closers)) } - evReactorShim, evReactor, evPool, err := createEvidenceReactor( - cfg, dbProvider, stateDB, blockStore, peerManager, router, logger, - ) + evReactor, evPool, edbCloser, err := createEvidenceReactor(logger, cfg, dbProvider, + stateStore, blockStore, peerManager.Subscribe, node.router.OpenChannel, nodeMetrics.evidence, eventBus) + closers = append(closers, edbCloser) if err != nil { - return nil, err + return nil, combineCloseError(err, makeCloser(closers)) } + node.services = append(node.services, evReactor) + node.rpcEnv.EvidencePool = evPool + node.evPool = evPool + + mpReactor, mp := createMempoolReactor(logger, cfg, proxyApp, stateStore, nodeMetrics.mempool, + peerManager.Subscribe, node.router.OpenChannel, peerManager.GetHeight) + node.rpcEnv.Mempool = mp + node.services = append(node.services, mpReactor) nextCoreChainLock, err := 
types.CoreChainLockFromProto(genDoc.InitialProposalCoreChainLock) if err != nil { - return nil, err + return nil, combineCloseError(err, makeCloser(closers)) } // make block executor for consensus and blockchain reactors to execute blocks blockExec := sm.NewBlockExecutor( stateStore, logger.With("module", "state"), - proxyApp.Consensus(), - proxyApp.Query(), + proxyApp, mp, evPool, blockStore, - nextCoreChainLock, - sm.BlockExecutorWithMetrics(nodeMetrics.state), + eventBus, + nodeMetrics.state, ) + blockExec.SetNextCoreChainLock(nextCoreChainLock) - csReactorShim, csReactor, csState := createConsensusReactor( - cfg, state, blockExec, blockStore, mp, evPool, - privValidator, nodeMetrics.consensus, stateSync || blockSync, eventBus, - peerManager, router, proposedAppVersion, consensusLogger, - ) + // Determine whether we should attempt state sync. + stateSync := cfg.StateSync.Enable && !weAreOnlyValidator + if stateSync && state.LastBlockHeight > 0 { + logger.Info("Found local state with non-zero height, skipping state sync") + stateSync = false + } - // Create the blockchain reactor. Note, we do not start block sync if we're - // doing a state sync first. - bcReactorShim, bcReactor, err := createBlockchainReactor( - logger, cfg, state, blockExec, blockStore, proTxHash, csReactor, - peerManager, router, blockSync && !stateSync, nodeMetrics.consensus, + // Determine whether we should do block sync. This must happen after the handshake, since the + // app may modify the validator set, specifying ourself as the only validator. + blockSync := !onlyValidatorIsUs(state, proTxHash) // TODO compare with this: blockSync := cfg.BlockSync.Enable && !weAreOnlyValidator + waitSync := stateSync || blockSync + + csState, err := consensus.NewState(logger.With("module", "consensus"), + cfg.Consensus, + stateStore, + blockExec, + blockStore, + mp, + evPool, + eventBus, + consensus.StateMetrics(nodeMetrics.consensus), + consensus.SkipStateStoreBootstrap, ) + if err != nil { - return nil, fmt.Errorf("could not create blockchain reactor: %w", err) + return nil, combineCloseError(err, makeCloser(closers)) } + node.rpcEnv.ConsensusState = csState - // TODO: Remove this once the switch is removed. - var bcReactorForSwitch p2p.Reactor - if bcReactorShim != nil { - bcReactorForSwitch = bcReactorShim - } else { - bcReactorForSwitch = bcReactor.(p2p.Reactor) - } + csReactor := consensus.NewReactor( + logger, + csState, + node.router.OpenChannel, + peerManager.Subscribe, + eventBus, + waitSync, + nodeMetrics.consensus, + ) + node.services = append(node.services, csReactor) + node.rpcEnv.ConsensusReactor = csReactor + + // Create the blockchain reactor. Note, we do not start block sync if we're + // doing a state sync first. + bcReactor := blocksync.NewReactor( + logger.With("module", "blockchain"), + stateStore, + blockExec, + blockStore, + proTxHash, + csReactor, + node.router.OpenChannel, + peerManager.Subscribe, + blockSync && !stateSync, + nodeMetrics.consensus, + eventBus, + ) + node.services = append(node.services, bcReactor) + node.rpcEnv.BlockSyncReactor = bcReactor // Make ConsensusReactor. Don't enable fully if doing a state sync and/or block sync first. // FIXME We need to update metrics here, since other reactors don't have access to them. 
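// [editorial sketch] The construction-error idiom the rewritten makeNode above
// applies at every step: record a closer as soon as each resource is opened,
// and fold the accumulated closers into any early-return error, so a failed
// construction cannot leak databases or goroutines. openNodeStores is a
// hypothetical condensation of that pattern; closer, convertCancelCloser,
// initDBs, makeCloser and combineCloseError are used exactly as in this hunk
// (their definitions live elsewhere in the node package).
func openNodeStores(ctx context.Context, cfg *config.Config, dbProvider config.DBProvider) (context.Context, sm.Store, closer, error) {
	ctx, cancel := context.WithCancel(ctx)
	closers := []closer{convertCancelCloser(cancel)}

	_, stateDB, dbCloser, err := initDBs(cfg, dbProvider)
	closers = append(closers, dbCloser) // record cleanup even when err != nil
	if err != nil {
		// one combined error: the failure itself plus any close failures
		return nil, nil, nil, combineCloseError(err, makeCloser(closers))
	}
	return ctx, sm.NewStore(stateDB), makeCloser(closers), nil
}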
@@ -397,719 +395,249 @@ func makeNode(cfg *config.Config, nodeMetrics.consensus.BlockSyncing.Set(1) } + if cfg.P2P.PexReactor { + node.services = append(node.services, pex.NewReactor(logger, peerManager, node.router.OpenChannel, peerManager.Subscribe)) + } + // Set up state sync reactor, and schedule a sync if requested. // FIXME The way we do phased startups (e.g. replay -> block sync -> consensus) is very messy, // we should clean this whole thing up. See: // https://github.com/tendermint/tendermint/issues/4644 - var ( - stateSyncReactor *statesync.Reactor - stateSyncReactorShim *p2p.ReactorShim - - channels map[p2p.ChannelID]*p2p.Channel - peerUpdates *p2p.PeerUpdates - ) - - stateSyncReactorShim = p2p.NewReactorShim(logger.With("module", "statesync"), "StateSyncShim", statesync.ChannelShims) - - if cfg.P2P.UseLegacy { - channels = getChannelsFromShim(stateSyncReactorShim) - peerUpdates = stateSyncReactorShim.PeerUpdates - } else { - channels = makeChannelsFromShims(router, statesync.ChannelShims) - peerUpdates = peerManager.Subscribe() - } - - stateSyncReactor = statesync.NewReactor( + node.services = append(node.services, statesync.NewReactor( genDoc.ChainID, genDoc.InitialHeight, *cfg.StateSync, - stateSyncReactorShim.Logger, - proxyApp.Snapshot(), - proxyApp.Query(), - channels[statesync.SnapshotChannel], - channels[statesync.ChunkChannel], - channels[statesync.LightBlockChannel], - channels[statesync.ParamsChannel], - peerUpdates, + logger.With("module", "statesync"), + proxyApp, + node.router.OpenChannel, + peerManager.Subscribe, stateStore, blockStore, cfg.StateSync.TempDir, nodeMetrics.statesync, - dashCoreRPCClient, - csState, - ) - - // add the channel descriptors to both the transports - // FIXME: This should be removed when the legacy p2p stack is removed and - // transports can either be agnostic to channel descriptors or can be - // declared in the constructor. - transport.AddChannelDescriptors(mpReactorShim.GetChannels()) - transport.AddChannelDescriptors(bcReactorForSwitch.GetChannels()) - transport.AddChannelDescriptors(csReactorShim.GetChannels()) - transport.AddChannelDescriptors(evReactorShim.GetChannels()) - transport.AddChannelDescriptors(stateSyncReactorShim.GetChannels()) - - // Optionally, start the pex reactor - // - // TODO: - // - // We need to set Seeds and PersistentPeers on the switch, - // since it needs to be able to use these (and their DNS names) - // even if the PEX is off. We can include the DNS name in the NetAddress, - // but it would still be nice to have a clear list of the current "PersistentPeers" - // somewhere that we can return with net_info. - // - // If PEX is on, it should handle dialing the seeds. Otherwise the switch does it. 
- // Note we currently use the addrBook regardless at least for AddOurAddress - - var ( - pexReactor service.Service - sw *p2p.Switch - addrBook pex.AddrBook - ) - - pexCh := pex.ChannelDescriptor() - transport.AddChannelDescriptors([]*p2p.ChannelDescriptor{&pexCh}) - - if cfg.P2P.UseLegacy { - // setup Transport and Switch - sw = createSwitch( - cfg, transport, nodeMetrics.p2p, mpReactorShim, bcReactorForSwitch, - stateSyncReactorShim, csReactorShim, evReactorShim, proxyApp, nodeInfo, nodeKey, p2pLogger, - ) - - err = sw.AddPersistentPeers(strings.SplitAndTrimEmpty(cfg.P2P.PersistentPeers, ",", " ")) - if err != nil { - return nil, fmt.Errorf("could not add peers from persistent-peers field: %w", err) - } + eventBus, + // the post-sync operation + func(ctx context.Context, state sm.State) error { + csReactor.SetStateSyncingMetrics(0) - err = sw.AddUnconditionalPeerIDs(strings.SplitAndTrimEmpty(cfg.P2P.UnconditionalPeerIDs, ",", " ")) - if err != nil { - return nil, fmt.Errorf("could not add peer ids from unconditional_peer_ids field: %w", err) - } - - addrBook, err = createAddrBookAndSetOnSwitch(cfg, sw, p2pLogger, nodeKey) - if err != nil { - return nil, fmt.Errorf("could not create addrbook: %w", err) - } - - if cfg.P2P.PexReactor { - pexReactor = createPEXReactorAndAddToSwitch(addrBook, cfg, sw, logger.With("module", "pex")) - } - } else { - addrBook = nil - if cfg.P2P.PexReactor { - pexReactor, err = createPEXReactorV2(cfg, logger.With("module", "pex"), peerManager, router) - if err != nil { - return nil, err + // TODO: Some form of orchestrator is needed here between the state + // advancing reactors to be able to control which one of the three + // is running + // FIXME Very ugly to have these metrics bleed through here. + csReactor.SetBlockSyncingMetrics(1) + if err := bcReactor.SwitchToBlockSync(ctx, state); err != nil { + logger.Error("failed to switch to block sync", "err", err) + return err } - } - } - if cfg.RPC.PprofListenAddress != "" { - go func() { - logger.Info("Starting pprof server", "laddr", cfg.RPC.PprofListenAddress) - logger.Error("pprof server error", "err", http.ListenAndServe(cfg.RPC.PprofListenAddress, nil)) - }() - } - - // Start Dash connection executor - var validatorConnExecutor *dashquorum.ValidatorConnExecutor - if len(proTxHash) > 0 { - vcLogger := logger.With("node_proTxHash", proTxHash.ShortString(), "module", "ValidatorConnExecutor") - dcm := p2p.NewRouterDashDialer(peerManager, vcLogger) - validatorConnExecutor, err = dashquorum.NewValidatorConnExecutor( - proTxHash, - eventBus, - dcm, - dashquorum.WithLogger(vcLogger), - dashquorum.WithValidatorsSet(state.Validators), - ) - if err != nil { - return nil, err - } - } - - node := &nodeImpl{ - config: cfg, - genesisDoc: genDoc, - privValidator: privValidator, - - transport: transport, - sw: sw, - peerManager: peerManager, - router: router, - addrBook: addrBook, - nodeInfo: nodeInfo, - nodeKey: nodeKey, - - stateStore: stateStore, - blockStore: blockStore, - bcReactor: bcReactor, - mempoolReactor: mpReactor, - mempool: mp, - consensusReactor: csReactor, - stateSyncReactor: stateSyncReactor, - stateSync: stateSync, - pexReactor: pexReactor, - evidenceReactor: evReactor, - indexerService: indexerService, - eventBus: eventBus, - - validatorConnExecutor: validatorConnExecutor, - dashCoreRPCClient: dashCoreRPCClient, - - rpcEnv: &rpccore.Environment{ - ProxyAppQuery: proxyApp.Query(), - ProxyAppMempool: proxyApp.Mempool(), - - StateStore: stateStore, - BlockStore: blockStore, - EvidencePool: evPool, - 
ConsensusState: csState, - - ConsensusReactor: csReactor, - BlockSyncReactor: bcReactor.(consensus.BlockSyncReactor), - - P2PPeers: sw, - PeerManager: peerManager, - GenDoc: genDoc, - EventSinks: eventSinks, - EventBus: eventBus, - Mempool: mp, - Logger: logger.With("module", "rpc"), - Config: *cfg.RPC, + return nil }, - } + stateSync, + dashCoreRPCClient, + csState, + )) - // this is a terrible, because typed nil interfaces are not == - // nil, so this is just cleanup to avoid having a non-nil - // value in the RPC environment that has the semantic - // properties of nil. - if sw == nil { - node.rpcEnv.P2PPeers = nil - } else if peerManager == nil { - node.rpcEnv.PeerManager = nil + if cfg.Mode == config.ModeValidator { + if privValidator != nil { + csState.SetPrivValidator(ctx, privValidator) + } + node.rpcEnv.ProTxHash = proTxHash } - // end hack - - node.rpcEnv.P2PTransport = node node.BaseService = *service.NewBaseService(logger, "Node", node) return node, nil } -// DefaultDashCoreRPCClient returns RPC client for the Dash Core node -func DefaultDashCoreRPCClient(cfg *config.Config, logger log.Logger) (dashcore.Client, error) { - return dashcore.NewRPCClient( - cfg.PrivValidator.CoreRPCHost, - cfg.PrivValidator.CoreRPCUsername, - cfg.PrivValidator.CoreRPCPassword, - logger, - ) -} - -// makeSeedNode returns a new seed node, containing only p2p, pex reactor -func makeSeedNode(cfg *config.Config, - dbProvider config.DBProvider, - nodeKey types.NodeKey, - genesisDocProvider genesisDocProvider, - logger log.Logger, -) (service.Service, error) { - if !cfg.P2P.PexReactor { - return nil, errors.New("cannot run seed nodes with PEX disabled") +// OnStart starts the Node. It implements service.Service. +func (n *nodeImpl) OnStart(ctx context.Context) error { + if err := n.rpcEnv.ProxyApp.Start(ctx); err != nil { + return fmt.Errorf("error starting proxy app connections: %w", err) } - genDoc, err := genesisDocProvider() - if err != nil { - return nil, err + var proTxHash tmbytes.HexBytes + if n.privValidator != nil { + var err error + if proTxHash, err = n.privValidator.GetProTxHash(ctx); err != nil { + return err + } } - state, err := sm.MakeGenesisState(genDoc) - if err != nil { - return nil, err + // EventBus and IndexerService must be started before the handshake because + // we might need to index the txs of the replayed block as this might not have happened + // when the node stopped last time (i.e. the node stopped or crashed after it saved the block + // but before it indexed the txs) + if err := n.rpcEnv.EventBus.Start(ctx); err != nil { + return err } - nodeInfo, err := makeSeedNodeInfo(cfg, nodeKey, genDoc, state) - if err != nil { - return nil, err + if err := n.indexerService.Start(ctx); err != nil { + return err } - // Setup Transport and Switch. - p2pMetrics := p2p.PrometheusMetrics(cfg.Instrumentation.Namespace, "chain_id", genDoc.ChainID) - p2pLogger := logger.With("module", "p2p") - transport := createTransport(p2pLogger, cfg) - - peerManager, err := createPeerManager(cfg, dbProvider, p2pLogger, nodeKey.ID) + // Create the handshaker, which calls RequestInfo, sets the AppVersion on the state, + // and replays any blocks as necessary to sync tendermint with the app. 
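// [editorial note, inserted at this step] The handshake below is where the
// node issues ABCI RequestInfo to the app and replays any missing blocks, so
// it must run after the event bus and indexer are started (see the comment
// above) and before any reactor starts. In outline, using the same names as
// the code that follows:
//
//	proposedVersion, err := handshaker.Handshake(ctx, n.rpcEnv.ProxyApp) // RequestInfo + block replay
//	cs.SetProposedAppVersion(proposedVersion)                           // consensus will propose the app's version
//	state, err := n.stateStore.Load()                                   // reload: replay may have changed state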
+ handshaker := consensus.NewHandshaker(n.logger.With("module", "handshaker"), + n.stateStore, n.initialState, n.blockStore, n.rpcEnv.EventBus, n.genesisDoc, + proTxHash, n.config.Consensus.AppHashSize, + ) + proposedVersion, err := handshaker.Handshake(ctx, n.rpcEnv.ProxyApp) if err != nil { - return nil, fmt.Errorf("failed to create peer manager: %w", err) + return err } + cs := n.rpcEnv.ConsensusReactor.GetConsensusState() + cs.SetProposedAppVersion(proposedVersion) - router, err := createRouter(p2pLogger, p2pMetrics, nodeInfo, nodeKey.PrivKey, - peerManager, transport, getRouterConfig(cfg, nil)) + // Reload the state. It will have the Version.Consensus.App set by the + // Handshake, and may have other modifications as well (ie. depending on + // what happened during block replay). + state, err := n.stateStore.Load() if err != nil { - return nil, fmt.Errorf("failed to create router: %w", err) + return fmt.Errorf("cannot load state: %w", err) } - var ( - pexReactor service.Service - sw *p2p.Switch - addrBook pex.AddrBook - ) - - // add the pex reactor - // FIXME: we add channel descriptors to both the router and the transport but only the router - // should be aware of channel info. We should remove this from transport once the legacy - // p2p stack is removed. - pexCh := pex.ChannelDescriptor() - transport.AddChannelDescriptors([]*p2p.ChannelDescriptor{&pexCh}) - - if cfg.P2P.UseLegacy { - sw = createSwitch( - cfg, transport, p2pMetrics, nil, nil, - nil, nil, nil, nil, nodeInfo, nodeKey, p2pLogger, - ) - - err = sw.AddPersistentPeers(strings.SplitAndTrimEmpty(cfg.P2P.PersistentPeers, ",", " ")) - if err != nil { - return nil, fmt.Errorf("could not add peers from persistent_peers field: %w", err) - } - - err = sw.AddUnconditionalPeerIDs(strings.SplitAndTrimEmpty(cfg.P2P.UnconditionalPeerIDs, ",", " ")) - if err != nil { - return nil, fmt.Errorf("could not add peer ids from unconditional_peer_ids field: %w", err) - } - - addrBook, err = createAddrBookAndSetOnSwitch(cfg, sw, p2pLogger, nodeKey) - if err != nil { - return nil, fmt.Errorf("could not create addrbook: %w", err) - } + logNodeStartupInfo(state, proTxHash, n.logger, n.config.Mode) - if cfg.P2P.PexReactor { - pexReactor = createPEXReactorAndAddToSwitch(addrBook, cfg, sw, logger) - } - } else { - if cfg.P2P.PexReactor { - pexReactor, err = createPEXReactorV2(cfg, logger, peerManager, router) - if err != nil { - return nil, err - } - } + // TODO: Fetch and provide real options and do proper p2p bootstrapping. + // TODO: Use a persistent peer database. 
+ n.nodeInfo, err = makeNodeInfo(n.config, n.nodeKey, proTxHash, n.eventSinks, n.genesisDoc, state.Version.Consensus) + if err != nil { + return err } + // Start Internal Services - if cfg.RPC.PprofListenAddress != "" { + if n.config.RPC.PprofListenAddress != "" { + signal := make(chan struct{}) + srv := &http.Server{Addr: n.config.RPC.PprofListenAddress, Handler: nil} go func() { - logger.Info("Starting pprof server", "laddr", cfg.RPC.PprofListenAddress) - logger.Error("pprof server error", "err", http.ListenAndServe(cfg.RPC.PprofListenAddress, nil)) + select { + case <-ctx.Done(): + sctx, scancel := context.WithTimeout(context.Background(), time.Second) + defer scancel() + _ = srv.Shutdown(sctx) + case <-signal: + } }() - } - - node := &nodeImpl{ - config: cfg, - genesisDoc: genDoc, - transport: transport, - sw: sw, - addrBook: addrBook, - nodeInfo: nodeInfo, - nodeKey: nodeKey, - peerManager: peerManager, - router: router, + go func() { + n.logger.Info("Starting pprof server", "laddr", n.config.RPC.PprofListenAddress) - pexReactor: pexReactor, + if err := srv.ListenAndServe(); err != nil { + n.logger.Error("pprof server error", "err", err) + close(signal) + } + }() } - node.BaseService = *service.NewBaseService(logger, "SeedNode", node) - - return node, nil -} -// OnStart starts the Node. It implements service.Service. -func (n *nodeImpl) OnStart() error { now := tmtime.Now() genTime := n.genesisDoc.GenesisTime if genTime.After(now) { - n.Logger.Info("Genesis time is in the future. Sleeping until then...", "genTime", genTime) - time.Sleep(genTime.Sub(now)) - } + n.logger.Info("Genesis time is in the future. Sleeping until then...", "genTime", genTime) - // Start the RPC server before the P2P server - // so we can eg. receive txs for the first block - if n.config.RPC.ListenAddress != "" && n.config.Mode != config.ModeSeed { - listeners, err := n.startRPC() - if err != nil { - return err + timer := time.NewTimer(genTime.Sub(now)) + defer timer.Stop() + select { + case <-ctx.Done(): + return ctx.Err() + case <-timer.C: } - n.rpcListeners = listeners - } - - if n.config.Instrumentation.Prometheus && - n.config.Instrumentation.PrometheusListenAddr != "" { - n.prometheusSrv = n.startPrometheusServer(n.config.Instrumentation.PrometheusListenAddr) } - // Start the transport. - addr, err := types.NewNetAddressString(n.nodeKey.ID.AddressString(n.config.P2P.ListenAddress)) + state, err = n.stateStore.Load() if err != nil { return err } - if err := n.transport.Listen(p2p.NewEndpoint(addr)); err != nil { + if err := n.evPool.Start(state); err != nil { return err } - n.isListening = true - n.Logger.Info("p2p service", "legacy_enabled", n.config.P2P.UseLegacy) + if n.config.Instrumentation.Prometheus && n.config.Instrumentation.PrometheusListenAddr != "" { + n.prometheusSrv = n.startPrometheusServer(ctx, n.config.Instrumentation.PrometheusListenAddr) + } - if n.config.P2P.UseLegacy { - // Add private IDs to addrbook to block those peers being added - n.addrBook.AddPrivateIDs(strings.SplitAndTrimEmpty(n.config.P2P.PrivatePeerIDs, ",", " ")) - if err = n.sw.Start(); err != nil { - return err - } - } else if err = n.router.Start(); err != nil { + // Start the transport. + if err := n.router.Start(ctx); err != nil { return err } + n.rpcEnv.IsListening = true - if n.config.Mode != config.ModeSeed { - if n.config.BlockSync.Version == config.BlockSyncV0 { - if err := n.bcReactor.Start(); err != nil { - return err - } - } - - // Start the real consensus reactor separately since the switch uses the shim. 
- if err := n.consensusReactor.Start(); err != nil { - return err - } - - // Start the real state sync reactor separately since the switch uses the shim. - if err := n.stateSyncReactor.Start(); err != nil { - return err - } - - // Start the real mempool reactor separately since the switch uses the shim. - if err := n.mempoolReactor.Start(); err != nil { - return err - } - - // Start the real evidence reactor separately since the switch uses the shim. - if err := n.evidenceReactor.Start(); err != nil { - return err + for _, reactor := range n.services { + if err := reactor.Start(ctx); err != nil { + return fmt.Errorf("problem starting service '%T': %w ", reactor, err) } } - if n.config.P2P.UseLegacy { - // Always connect to persistent peers - err = n.sw.DialPeersAsync(strings.SplitAndTrimEmpty(n.config.P2P.PersistentPeers, ",", " ")) + n.rpcEnv.NodeInfo = n.nodeInfo + // Start the RPC server before the P2P server + // so we can eg. receive txs for the first block + if n.config.RPC.ListenAddress != "" { + var err error + n.rpcListeners, err = n.rpcEnv.StartService(ctx, n.config) if err != nil { - return fmt.Errorf("could not dial peers from persistent-peers field: %w", err) - } - } else if n.config.P2P.PexReactor { - if err := n.pexReactor.Start(); err != nil { return err } } - // Initialize ValidatorConnExecutor (only on Validators) - if n.validatorConnExecutor != nil { - if err := n.validatorConnExecutor.Start(); err != nil { - return fmt.Errorf("cannot start ValidatorConnExecutor: %w", err) - } - } - - // Run state sync - // TODO: We shouldn't run state sync if we already have state that has a - // LastBlockHeight that is not InitialHeight - if n.stateSync { - bcR, ok := n.bcReactor.(consensus.BlockSyncReactor) - if !ok { - return fmt.Errorf("this blockchain reactor does not support switching from state sync") - } - - // we need to get the genesis state to get parameters such as - state, err := sm.MakeGenesisState(n.genesisDoc) - if err != nil { - return fmt.Errorf("unable to derive state: %w", err) - } - - // TODO: we may want to move these events within the respective - // reactors. - // At the beginning of the statesync start, we use the initialHeight as the event height - // because of the statesync doesn't have the concreate state height before fetched the snapshot. 
- d := types.EventDataStateSyncStatus{Complete: false, Height: state.InitialHeight} - if err := n.eventBus.PublishEventStateSyncStatus(d); err != nil { - n.eventBus.Logger.Error("failed to emit the statesync start event", "err", err) - } - - // FIXME: We shouldn't allow state sync to silently error out without - // bubbling up the error and gracefully shutting down the rest of the node - go func() { - n.Logger.Info("starting state sync") - state, err := n.stateSyncReactor.Sync(context.TODO()) - if err != nil { - n.Logger.Error("state sync failed; shutting down this node", "err", err) - // stop the node - if err := n.Stop(); err != nil { - n.Logger.Error("failed to shut down node", "err", err) - } - return - } - - n.consensusReactor.SetStateSyncingMetrics(0) - - d := types.EventDataStateSyncStatus{Complete: true, Height: state.LastBlockHeight} - if err := n.eventBus.PublishEventStateSyncStatus(d); err != nil { - n.eventBus.Logger.Error("failed to emit the statesync start event", "err", err) - } - - // TODO: Some form of orchestrator is needed here between the state - // advancing reactors to be able to control which one of the three - // is running - if n.config.BlockSync.Enable { - // FIXME Very ugly to have these metrics bleed through here. - n.consensusReactor.SetBlockSyncingMetrics(1) - if err := bcR.SwitchToBlockSync(state); err != nil { - n.Logger.Error("failed to switch to block sync", "err", err) - return - } - - d := types.EventDataBlockSyncStatus{Complete: false, Height: state.LastBlockHeight} - if err := n.eventBus.PublishEventBlockSyncStatus(d); err != nil { - n.eventBus.Logger.Error("failed to emit the block sync starting event", "err", err) - } - - } else { - n.consensusReactor.SwitchToConsensus(state, true) - } - }() - } return nil } // OnStop stops the Node. It implements service.Service. func (n *nodeImpl) OnStop() { - - n.Logger.Info("Stopping Node") - - // first stop the non-reactor services - if err := n.eventBus.Stop(); err != nil { - n.Logger.Error("Error closing eventBus", "err", err) - } - if err := n.indexerService.Stop(); err != nil { - n.Logger.Error("Error closing indexerService", "err", err) - } - - if n.config.Mode != config.ModeSeed { - // now stop the reactors - if n.config.BlockSync.Version == config.BlockSyncV0 { - // Stop the real blockchain reactor separately since the switch uses the shim. - if err := n.bcReactor.Stop(); err != nil { - n.Logger.Error("failed to stop the blockchain reactor", "err", err) - } - } - - // Stop the real consensus reactor separately since the switch uses the shim. - if err := n.consensusReactor.Stop(); err != nil { - n.Logger.Error("failed to stop the consensus reactor", "err", err) - } - - // Stop the real state sync reactor separately since the switch uses the shim. - if err := n.stateSyncReactor.Stop(); err != nil { - n.Logger.Error("failed to stop the state sync reactor", "err", err) - } - - // Stop the real mempool reactor separately since the switch uses the shim. - if err := n.mempoolReactor.Stop(); err != nil { - n.Logger.Error("failed to stop the mempool reactor", "err", err) - } - - // Stop the real evidence reactor separately since the switch uses the shim. 
- if err := n.evidenceReactor.Stop(); err != nil { - n.Logger.Error("failed to stop the evidence reactor", "err", err) + n.logger.Info("Stopping Node") + // stop the listeners / external services first + for _, l := range n.rpcListeners { + n.logger.Info("Closing rpc listener", "listener", l) + if err := l.Close(); err != nil { + n.logger.Error("error closing listener", "listener", l, "err", err) } } - if err := n.pexReactor.Stop(); err != nil { - n.Logger.Error("failed to stop the PEX v2 reactor", "err", err) - } - - if n.config.P2P.UseLegacy { - if err := n.sw.Stop(); err != nil { - n.Logger.Error("failed to stop switch", "err", err) - } - } else { - if err := n.router.Stop(); err != nil { - n.Logger.Error("failed to stop router", "err", err) + for _, es := range n.eventSinks { + if err := es.Stop(); err != nil { + n.logger.Error("failed to stop event sink", "err", err) } } - if err := n.transport.Close(); err != nil { - n.Logger.Error("Error closing transport", "err", err) + for _, reactor := range n.services { + reactor.Wait() } - n.isListening = false - - // finally stop the listeners / external services - for _, l := range n.rpcListeners { - n.Logger.Info("Closing rpc listener", "listener", l) - if err := l.Close(); err != nil { - n.Logger.Error("Error closing listener", "listener", l, "err", err) - } - } + n.router.Wait() + n.rpcEnv.IsListening = false - if pvsc, ok := n.privValidator.(service.Service); ok { - if err := pvsc.Stop(); err != nil { - n.Logger.Error("Error closing private validator", "err", err) + if n.privValidator != nil { + if pvsc, ok := n.privValidator.(service.Service); ok { + pvsc.Wait() } } if n.prometheusSrv != nil { if err := n.prometheusSrv.Shutdown(context.Background()); err != nil { // Error from closing listeners, or context timeout: - n.Logger.Error("Prometheus HTTP server Shutdown", "err", err) + n.logger.Error("Prometheus HTTP server Shutdown", "err", err) + } + + } + if err := n.shutdownOps(); err != nil { + if strings.TrimSpace(err.Error()) != "" { + n.logger.Error("problem shutting down additional services", "err", err) } } if n.blockStore != nil { if err := n.blockStore.Close(); err != nil { - n.Logger.Error("problem closing blockstore", "err", err) + n.logger.Error("problem closing blockstore", "err", err) } } if n.stateStore != nil { if err := n.stateStore.Close(); err != nil { - n.Logger.Error("problem closing statestore", "err", err) - } - } -} - -func (n *nodeImpl) startRPC() ([]net.Listener, error) { - if n.config.Mode == config.ModeValidator { - proTxHash, err := n.privValidator.GetProTxHash(context.TODO()) - if proTxHash == nil || err != nil { - return nil, fmt.Errorf("can't get proTxHash: %w", err) - } - n.rpcEnv.ProTxHash = proTxHash - } - if err := n.rpcEnv.InitGenesisChunks(); err != nil { - return nil, err - } - - listenAddrs := strings.SplitAndTrimEmpty(n.config.RPC.ListenAddress, ",", " ") - routes := n.rpcEnv.GetRoutes() - - if n.config.RPC.Unsafe { - n.rpcEnv.AddUnsafe(routes) - } - - cfg := rpcserver.DefaultConfig() - cfg.MaxBodyBytes = n.config.RPC.MaxBodyBytes - cfg.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes - cfg.MaxOpenConnections = n.config.RPC.MaxOpenConnections - // If necessary adjust global WriteTimeout to ensure it's greater than - // TimeoutBroadcastTxCommit. 
- // See https://github.com/tendermint/tendermint/issues/3435 - if cfg.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit { - cfg.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second - } - - // we may expose the rpc over both a unix and tcp socket - listeners := make([]net.Listener, len(listenAddrs)) - for i, listenAddr := range listenAddrs { - mux := http.NewServeMux() - rpcLogger := n.Logger.With("module", "rpc-server") - wmLogger := rpcLogger.With("protocol", "websocket") - wm := rpcserver.NewWebsocketManager(routes, - rpcserver.OnDisconnect(func(remoteAddr string) { - err := n.eventBus.UnsubscribeAll(context.Background(), remoteAddr) - if err != nil && err != tmpubsub.ErrSubscriptionNotFound { - wmLogger.Error("Failed to unsubscribe addr from events", "addr", remoteAddr, "err", err) - } - }), - rpcserver.ReadLimit(cfg.MaxBodyBytes), - rpcserver.WriteChanCapacity(n.config.RPC.WebSocketWriteBufferSize), - ) - wm.SetLogger(wmLogger) - mux.HandleFunc("/websocket", wm.WebsocketHandler) - rpcserver.RegisterRPCFuncs(mux, routes, rpcLogger) - listener, err := rpcserver.Listen( - listenAddr, - cfg.MaxOpenConnections, - ) - if err != nil { - return nil, err - } - - var rootHandler http.Handler = mux - if n.config.RPC.IsCorsEnabled() { - corsMiddleware := cors.New(cors.Options{ - AllowedOrigins: n.config.RPC.CORSAllowedOrigins, - AllowedMethods: n.config.RPC.CORSAllowedMethods, - AllowedHeaders: n.config.RPC.CORSAllowedHeaders, - }) - rootHandler = corsMiddleware.Handler(mux) - } - if n.config.RPC.IsTLSEnabled() { - go func() { - if err := rpcserver.ServeTLS( - listener, - rootHandler, - n.config.RPC.CertFile(), - n.config.RPC.KeyFile(), - rpcLogger, - cfg, - ); err != nil { - n.Logger.Error("Error serving server with TLS", "err", err) - } - }() - } else { - go func() { - if err := rpcserver.Serve( - listener, - rootHandler, - rpcLogger, - cfg, - ); err != nil { - n.Logger.Error("Error serving server", "err", err) - } - }() - } - - listeners[i] = listener - } - - // we expose a simplified api over grpc for convenience to app devs - grpcListenAddr := n.config.RPC.GRPCListenAddress - if grpcListenAddr != "" { - cfg := rpcserver.DefaultConfig() - cfg.MaxBodyBytes = n.config.RPC.MaxBodyBytes - cfg.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes - // NOTE: GRPCMaxOpenConnections is used, not MaxOpenConnections - cfg.MaxOpenConnections = n.config.RPC.GRPCMaxOpenConnections - // If necessary adjust global WriteTimeout to ensure it's greater than - // TimeoutBroadcastTxCommit. - // See https://github.com/tendermint/tendermint/issues/3435 - if cfg.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit { - cfg.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second - } - listener, err := rpcserver.Listen(grpcListenAddr, cfg.MaxOpenConnections) - if err != nil { - return nil, err + n.logger.Error("problem closing statestore", "err", err) } - go func() { - if err := grpccore.StartGRPCServer(n.rpcEnv, listener); err != nil { - n.Logger.Error("Error starting gRPC server", "err", err) - } - }() - listeners = append(listeners, listener) - } - - return listeners, nil - } // startPrometheusServer starts a Prometheus HTTP server, listening for metrics // collectors on addr. 
-func (n *nodeImpl) startPrometheusServer(addr string) *http.Server { +func (n *nodeImpl) startPrometheusServer(ctx context.Context, addr string) *http.Server { srv := &http.Server{ Addr: addr, Handler: promhttp.InstrumentMetricHandler( @@ -1119,34 +647,35 @@ func (n *nodeImpl) startPrometheusServer(addr string) *http.Server { ), ), } + + signal := make(chan struct{}) go func() { - if err := srv.ListenAndServe(); err != http.ErrServerClosed { - // Error starting or closing listener: - n.Logger.Error("Prometheus HTTP server ListenAndServe", "err", err) + select { + case <-ctx.Done(): + sctx, scancel := context.WithTimeout(context.Background(), time.Second) + defer scancel() + _ = srv.Shutdown(sctx) + case <-signal: } }() - return srv -} -// ConsensusReactor returns the Node's ConsensusReactor. -func (n *nodeImpl) ConsensusReactor() *consensus.Reactor { - return n.consensusReactor -} + go func() { + if err := srv.ListenAndServe(); err != nil { + n.logger.Error("Prometheus HTTP server ListenAndServe", "err", err) + close(signal) + } + }() -// Mempool returns the Node's mempool. -func (n *nodeImpl) Mempool() mempool.Mempool { - return n.mempool + return srv } -// EventBus returns the Node's EventBus. -func (n *nodeImpl) EventBus() *types.EventBus { - return n.eventBus +func (n *nodeImpl) NodeInfo() *types.NodeInfo { + return &n.nodeInfo } -// PrivValidator returns the Node's PrivValidator. -// XXX: for convenience only! -func (n *nodeImpl) PrivValidator() types.PrivValidator { - return n.privValidator +// EventBus returns the Node's EventBus. +func (n *nodeImpl) EventBus() *eventbus.EventBus { + return n.rpcEnv.EventBus } // GenesisDoc returns the Node's GenesisDoc. @@ -1161,21 +690,6 @@ func (n *nodeImpl) RPCEnvironment() *rpccore.Environment { //------------------------------------------------------------------------------ -func (n *nodeImpl) Listeners() []string { - return []string{ - fmt.Sprintf("Listener(@%v)", n.config.P2P.ExternalAddress), - } -} - -func (n *nodeImpl) IsListening() bool { - return n.isListening -} - -// NodeInfo returns the Node's Info from the Switch. -func (n *nodeImpl) NodeInfo() types.NodeInfo { - return n.nodeInfo -} - // genesisDocProvider returns a GenesisDoc. // It allows the GenesisDoc to be pulled from sources other than the // filesystem, for instance from a distributed key-value store cluster. @@ -1191,12 +705,14 @@ func defaultGenesisDocProviderFunc(cfg *config.Config) genesisDocProvider { type nodeMetrics struct { consensus *consensus.Metrics + eventlog *eventlog.Metrics indexer *indexer.Metrics mempool *mempool.Metrics p2p *p2p.Metrics + proxy *proxy.Metrics state *sm.Metrics statesync *statesync.Metrics - proxy *proxy.Metrics + evidence *evidence.Metrics } // metricsProvider returns consensus, p2p, mempool, state, statesync Metrics. 
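// [editorial sketch] The pprof server (in OnStart above) and the Prometheus
// server below now share one lifecycle idiom: a watchdog goroutine shuts the
// server down when the node context ends, and is itself released if
// ListenAndServe returns first. serveUntilCanceled is a hypothetical
// extraction of that shared shape, not a helper this diff defines:
func serveUntilCanceled(ctx context.Context, srv *http.Server, logger log.Logger) {
	done := make(chan struct{})
	go func() {
		select {
		case <-ctx.Done():
			// bounded shutdown so a wedged handler cannot block node exit
			sctx, scancel := context.WithTimeout(context.Background(), time.Second)
			defer scancel()
			_ = srv.Shutdown(sctx)
		case <-done:
			// the server already exited on its own; nothing to shut down
		}
	}()
	go func() {
		if err := srv.ListenAndServe(); err != nil {
			logger.Error("http server error", "err", err)
			close(done)
		}
	}()
}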
@@ -1209,12 +725,14 @@ func defaultMetricsProvider(cfg *config.InstrumentationConfig) metricsProvider { if cfg.Prometheus { return &nodeMetrics{ consensus: consensus.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), + eventlog: eventlog.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), indexer: indexer.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), mempool: mempool.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), p2p: p2p.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), + proxy: proxy.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), state: sm.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), statesync: statesync.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), - proxy: proxy.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), + evidence: evidence.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), } } return &nodeMetrics{ @@ -1222,9 +740,10 @@ func defaultMetricsProvider(cfg *config.InstrumentationConfig) metricsProvider { indexer: indexer.NopMetrics(), mempool: mempool.NopMetrics(), p2p: p2p.NopMetrics(), + proxy: proxy.NopMetrics(), state: sm.NopMetrics(), statesync: statesync.NopMetrics(), - proxy: proxy.NopMetrics(), + evidence: evidence.NopMetrics(), } } } @@ -1234,10 +753,7 @@ func defaultMetricsProvider(cfg *config.InstrumentationConfig) metricsProvider { // loadStateFromDBOrGenesisDocProvider attempts to load the state from the // database, or creates one using the given genesisDocProvider. On success this also // returns the genesis doc loaded through the given provider. -func loadStateFromDBOrGenesisDocProvider( - stateStore sm.Store, - genDoc *types.GenesisDoc, -) (sm.State, error) { +func loadStateFromDBOrGenesisDocProvider(stateStore sm.Store, genDoc *types.GenesisDoc) (sm.State, error) { // 1. 
Attempt to load state from the database state, err := stateStore.Load() @@ -1251,102 +767,25 @@ func loadStateFromDBOrGenesisDocProvider( if err != nil { return sm.State{}, err } - } - - return state, nil -} - -func createAndStartPrivValidatorSocketClient( - listenAddr, - chainID string, - quorumHash crypto.QuorumHash, - logger log.Logger, -) (types.PrivValidator, error) { - - pve, err := privval.NewSignerListener(listenAddr, logger) - if err != nil { - return nil, fmt.Errorf("failed to start private validator: %w", err) - } - - pvsc, err := privval.NewSignerClient(pve, chainID) - if err != nil { - return nil, fmt.Errorf("failed to start private validator: %w", err) - } - - // try to get a pubkey from private validate first time - _, err = pvsc.GetPubKey(context.TODO(), quorumHash) - if err != nil { - return nil, fmt.Errorf("can't get pubkey: %w", err) - } - - const ( - retries = 50 // 50 * 100ms = 5s total - timeout = 100 * time.Millisecond - ) - pvscWithRetries := privval.NewRetrySignerClient(pvsc, retries, timeout) - - return pvscWithRetries, nil -} - -func createAndStartPrivValidatorRPCClient( - defaultQuorumType btcjson.LLMQType, - dashCoreRPCClient dashcore.Client, - logger log.Logger, -) (types.PrivValidator, error) { - pvsc, err := privval.NewDashCoreSignerClient(dashCoreRPCClient, defaultQuorumType) - if err != nil { - return nil, fmt.Errorf("failed to start private validator: %w", err) - } - - // try to ping Core from private validator first time to make sure connection works - err = pvsc.Ping() - if err != nil { - return nil, fmt.Errorf( - "can't ping core server when starting private validator rpc client: %w", - err, - ) - } - return pvsc, nil -} - -func createAndStartPrivValidatorGRPCClient( - cfg *config.Config, - chainID string, - quorumHash crypto.QuorumHash, - logger log.Logger, -) (types.PrivValidator, error) { - pvsc, err := tmgrpc.DialRemoteSigner( - cfg.PrivValidator, - chainID, - logger, - cfg.Instrumentation.Prometheus, - ) - if err != nil { - return nil, fmt.Errorf("failed to start private validator: %w", err) - } - - // try to get a pubkey from private validate first time - _, err = pvsc.GetPubKey(context.TODO(), quorumHash) - if err != nil { - return nil, fmt.Errorf("can't get pubkey: %w", err) + // 3. save the genesis document to the state store so + it's fetchable by other callers.
+ if err := stateStore.Save(state); err != nil { + return sm.State{}, err + } } - return pvsc, nil + return state, nil } -func getRouterConfig(conf *config.Config, proxyApp proxy.AppConns) p2p.RouterOptions { +func getRouterConfig(conf *config.Config, appClient abciclient.Client) p2p.RouterOptions { opts := p2p.RouterOptions{ QueueType: conf.P2P.QueueType, } - if conf.P2P.MaxNumInboundPeers > 0 { - opts.MaxIncomingConnectionAttempts = conf.P2P.MaxIncomingConnectionAttempts - } - - if conf.FilterPeers && proxyApp != nil { + if conf.FilterPeers && appClient != nil { opts.FilterPeerByID = func(ctx context.Context, id types.NodeID) error { - res, err := proxyApp.Query().QuerySync(context.Background(), abci.RequestQuery{ + res, err := appClient.Query(ctx, &abci.RequestQuery{ Path: fmt.Sprintf("/p2p/filter/id/%s", id), }) if err != nil { @@ -1360,7 +799,7 @@ func getRouterConfig(conf *config.Config, proxyApp proxy.AppConns) p2p.RouterOpt } opts.FilterPeerByIP = func(ctx context.Context, ip net.IP, port uint16) error { - res, err := proxyApp.Query().QuerySync(ctx, abci.RequestQuery{ + res, err := appClient.Query(ctx, &abci.RequestQuery{ Path: fmt.Sprintf("/p2p/filter/addr/%s", net.JoinHostPort(ip.String(), strconv.Itoa(int(port)))), }) if err != nil { @@ -1378,30 +817,12 @@ func getRouterConfig(conf *config.Config, proxyApp proxy.AppConns) p2p.RouterOpt return opts } -// FIXME: Temporary helper function, shims should be removed. -func makeChannelsFromShims( - router *p2p.Router, - chShims map[p2p.ChannelID]*p2p.ChannelDescriptorShim, -) map[p2p.ChannelID]*p2p.Channel { - - channels := map[p2p.ChannelID]*p2p.Channel{} - for chID, chShim := range chShims { - ch, err := router.OpenChannel(*chShim.Descriptor, chShim.MsgType, chShim.Descriptor.RecvBufferCapacity) - if err != nil { - panic(fmt.Sprintf("failed to open channel %v: %v", chID, err)) - } - - channels[chID] = ch - } - - return channels -} - -func getChannelsFromShim(reactorShim *p2p.ReactorShim) map[p2p.ChannelID]*p2p.Channel { - channels := map[p2p.ChannelID]*p2p.Channel{} - for chID := range reactorShim.Channels { - channels[chID] = reactorShim.GetChannel(chID) - } - - return channels +// DefaultDashCoreRPCClient returns an RPC client for the Dash Core node +func DefaultDashCoreRPCClient(cfg *config.Config, logger log.Logger) (core.Client, error) { + return core.NewRPCClient( + cfg.PrivValidator.CoreRPCHost, + cfg.PrivValidator.CoreRPCUsername, + cfg.PrivValidator.CoreRPCPassword, + logger, + ) } diff --git a/node/node_test.go b/node/node_test.go index 8b01ce6748..23a8729e80 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -7,61 +7,74 @@ import ( "math" "net" "os" - "syscall" "testing" "time" + "github.com/fortytw2/leaktest" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + dbm "github.com/tendermint/tm-db" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/bls12381" "github.com/tendermint/tendermint/crypto/ed25519" - "github.com/tendermint/tendermint/crypto/tmhash" "github.com/tendermint/tendermint/dash/quorum" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/evidence" "github.com/tendermint/tendermint/internal/mempool" - mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0" "github.com/tendermint/tendermint/internal/proxy" +
"github.com/tendermint/tendermint/internal/pubsub" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/state/indexer" + "github.com/tendermint/tendermint/internal/state/indexer/sink" "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" + "github.com/tendermint/tendermint/libs/service" tmtime "github.com/tendermint/tendermint/libs/time" "github.com/tendermint/tendermint/privval" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) func TestNodeStartStop(t *testing.T) { - cfg, err := config.ResetTestRoot("node_node_test") + cfg, err := config.ResetTestRoot(t.TempDir(), "node_node_test") require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) + ctx, bcancel := context.WithCancel(context.Background()) + defer bcancel() + + logger := log.NewTestingLogger(t) // create & start node - ns, err := newDefaultNode(cfg, log.TestingLogger()) + ns, err := newDefaultNode(ctx, cfg, logger) require.NoError(t, err) - require.NoError(t, ns.Start()) n, ok := ns.(*nodeImpl) require.True(t, ok) + t.Cleanup(func() { + bcancel() + n.Wait() + }) + t.Cleanup(leaktest.CheckTimeout(t, time.Second)) + require.NoError(t, n.Start(ctx)) // wait for the node to produce a block - blocksSub, err := n.EventBus(). - Subscribe(context.Background(), "node_test", types.EventQueryNewBlock) - require.NoError(t, err) - select { - case <-blocksSub.Out(): - case <-blocksSub.Canceled(): - t.Fatal("blocksSub was canceled") - case <-time.After(10 * time.Second): - t.Fatal("timed out waiting for the node to produce a block") - } + tctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + blocksSub, err := n.EventBus().SubscribeWithArgs(tctx, pubsub.SubscribeArgs{ + ClientID: "node_test", + Query: types.EventQueryNewBlock, + Limit: 1000, + }) + require.NoError(t, err) + _, err = blocksSub.Next(tctx) + require.NoError(t, err, "waiting for event") // check if we can read node ID of this node va, err := types.ParseValidatorAddress(cfg.P2P.ListenAddress) @@ -70,64 +83,75 @@ func TestNodeStartStop(t *testing.T) { assert.Equal(t, n.nodeInfo.ID(), nodeAddress.NodeID) assert.NoError(t, err) - // stop the node - go func() { - err = n.Stop() - require.NoError(t, err) - }() + cancel() // stop the subscription context + bcancel() // stop the base context + n.Wait() - select { - case <-n.Quit(): - case <-time.After(5 * time.Second): - pid := os.Getpid() - p, err := os.FindProcess(pid) - if err != nil { - panic(err) - } - err = p.Signal(syscall.SIGABRT) - fmt.Println(err) - t.Fatal("timed out waiting for shutdown") - } + require.False(t, n.IsRunning(), "node must shut down") } -func getTestNode(t *testing.T, conf *config.Config, logger log.Logger) *nodeImpl { +func getTestNode(ctx context.Context, t *testing.T, conf *config.Config, logger log.Logger) *nodeImpl { t.Helper() - ns, err := newDefaultNode(conf, logger) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + ns, err := newDefaultNode(ctx, conf, logger) require.NoError(t, err) n, ok := ns.(*nodeImpl) require.True(t, ok) + + t.Cleanup(func() { + cancel() + if n.IsRunning() { + ns.Wait() + } + }) + + t.Cleanup(leaktest.CheckTimeout(t, time.Second)) + return n } func TestNodeDelayedStart(t *testing.T) { - cfg, err := config.ResetTestRoot("node_delayed_start_test") + cfg, err := config.ResetTestRoot(t.TempDir(), "node_delayed_start_test") 
require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) now := tmtime.Now() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + // create & start node - n := getTestNode(t, cfg, log.TestingLogger()) + n := getTestNode(ctx, t, cfg, logger) n.GenesisDoc().GenesisTime = now.Add(2 * time.Second) - require.NoError(t, n.Start()) - defer n.Stop() //nolint:errcheck // ignore for tests + require.NoError(t, n.Start(ctx)) startTime := tmtime.Now() assert.Equal(t, true, startTime.After(n.GenesisDoc().GenesisTime)) } func TestNodeSetAppVersion(t *testing.T) { - cfg, err := config.ResetTestRoot("node_app_version_test") + cfg, err := config.ResetTestRoot(t.TempDir(), "node_app_version_test") require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + // create node - n := getTestNode(t, cfg, log.TestingLogger()) + n := getTestNode(ctx, t, cfg, logger) + + require.NoError(t, n.Start(ctx)) // default config uses the kvstore app - var appVersion uint64 = kvstore.ProtocolVersion + appVersion := kvstore.ProtocolVersion // check version is set in state state, err := n.stateStore.Load() @@ -141,16 +165,19 @@ func TestNodeSetAppVersion(t *testing.T) { func TestNodeSetPrivValTCP(t *testing.T) { addr := "tcp://" + testFreeAddr(t) - cfg, err := config.ResetTestRoot("node_priv_val_tcp_test") + t.Cleanup(leaktest.Check(t)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + + cfg, err := config.ResetTestRoot(t.TempDir(), "node_priv_val_tcp_test") require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) cfg.PrivValidator.ListenAddr = addr dialer := privval.DialTCPFn(addr, 100*time.Millisecond, ed25519.GenPrivKey()) - dialerEndpoint := privval.NewSignerDialerEndpoint( - log.TestingLogger(), - dialer, - ) + dialerEndpoint := privval.NewSignerDialerEndpoint(logger, dialer) privval.SignerDialerEndpointTimeoutReadWrite(100 * time.Millisecond)(dialerEndpoint) // We need to get the quorum hash used in config to set up the node @@ -166,44 +193,61 @@ func TestNodeSetPrivValTCP(t *testing.T) { ) go func() { - err := signerServer.Start() - if err != nil { - panic(err) - } + err := signerServer.Start(ctx) + require.NoError(t, err) }() - defer signerServer.Stop() //nolint:errcheck // ignore for tests + defer signerServer.Stop() - n := getTestNode(t, cfg, log.TestingLogger()) - assert.IsType(t, &privval.RetrySignerClient{}, n.PrivValidator()) + genDoc, err := defaultGenesisDocProviderFunc(cfg)() + require.NoError(t, err) + + pval, err := createPrivval(ctx, logger, cfg, genDoc) + require.NoError(t, err) + + assert.IsType(t, &privval.RetrySignerClient{}, pval) } // address without a protocol must result in error func TestPrivValidatorListenAddrNoProtocol(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + addrNoPrefix := testFreeAddr(t) - cfg, err := config.ResetTestRoot("node_priv_val_tcp_test") + cfg, err := config.ResetTestRoot(t.TempDir(), "node_priv_val_tcp_test") require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) cfg.PrivValidator.ListenAddr = addrNoPrefix - _, err = newDefaultNode(cfg, log.TestingLogger()) + logger := log.NewNopLogger() + + n, err := newDefaultNode(ctx, cfg, logger) + assert.Error(t, err) + + if n != nil && n.IsRunning() { + cancel() + n.Wait() + } } func TestNodeSetPrivValIPC(t *testing.T) { tmpfile := "/tmp/kms." 
+ tmrand.Str(6) + ".sock" defer os.Remove(tmpfile) // clean up - cfg, err := config.ResetTestRoot("node_priv_val_tcp_test") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cfg, err := config.ResetTestRoot(t.TempDir(), "node_priv_val_tcp_test") require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) cfg.PrivValidator.ListenAddr = "unix://" + tmpfile + logger := log.NewNopLogger() + dialer := privval.DialUnixFn(tmpfile) - dialerEndpoint := privval.NewSignerDialerEndpoint( - log.TestingLogger(), - dialer, - ) + dialerEndpoint := privval.NewSignerDialerEndpoint(logger, dialer) + privval.SignerDialerEndpointTimeoutReadWrite(100 * time.Millisecond)(dialerEndpoint) // We need to get the quorum hash used in config to set up the node @@ -219,12 +263,17 @@ func TestNodeSetPrivValIPC(t *testing.T) { ) go func() { - err := pvsc.Start() + err := pvsc.Start(ctx) require.NoError(t, err) }() - defer pvsc.Stop() //nolint:errcheck // ignore for tests - n := getTestNode(t, cfg, log.TestingLogger()) - assert.IsType(t, &privval.RetrySignerClient{}, n.PrivValidator()) + defer pvsc.Stop() + genDoc, err := defaultGenesisDocProviderFunc(cfg)() + require.NoError(t, err) + + pval, err := createPrivval(ctx, logger, cfg, genDoc) + require.NoError(t, err) + + assert.IsType(t, &privval.RetrySignerClient{}, pval) } // testFreeAddr claims a free port so we don't block on listener being ready. @@ -239,19 +288,22 @@ func testFreeAddr(t *testing.T) string { // create a proposal block using real and full // mempool and evidence pool and validate it. func TestCreateProposalBlock(t *testing.T) { - cfg, err := config.ResetTestRoot("node_create_proposal") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cfg, err := config.ResetTestRoot(t.TempDir(), "node_create_proposal") require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) - cc := abciclient.NewLocalCreator(kvstore.NewApplication()) - proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) - err = proxyApp.Start() - require.Nil(t, err) - defer proxyApp.Stop() //nolint:errcheck // ignore for tests - logger := log.TestingLogger() + logger := log.NewNopLogger() + + cc := abciclient.NewLocalClient(logger, kvstore.NewApplication()) + proxyApp := proxy.New(cc, logger, proxy.NopMetrics()) + err = proxyApp.Start(ctx) + require.NoError(t, err) const height int64 = 1 - state, stateDB, privVals := state(1, height) + state, stateDB, privVals := state(t, 1, height) stateStore := sm.NewStore(stateDB) maxBytes := 16568 var partSize uint32 = 256 @@ -260,27 +312,22 @@ func TestCreateProposalBlock(t *testing.T) { state.ConsensusParams.Evidence.MaxBytes = maxEvidenceBytes proposerProTxHash, _ := state.Validators.GetByIndex(0) - // Make Mempool - mp := mempoolv0.NewCListMempool( + mp := mempool.NewTxMempool( + logger.With("module", "mempool"), cfg.Mempool, - proxyApp.Mempool(), - state.LastBlockHeight, - mempoolv0.WithMetrics(mempool.NopMetrics()), - mempoolv0.WithPreCheck(sm.TxPreCheck(state)), - mempoolv0.WithPostCheck(sm.TxPostCheck(state)), + proxyApp, ) - mp.SetLogger(logger) // Make EvidencePool evidenceDB := dbm.NewMemDB() blockStore := store.NewBlockStore(dbm.NewMemDB()) - evidencePool, err := evidence.NewPool(logger, evidenceDB, stateStore, blockStore) - require.NoError(t, err) + evidencePool := evidence.NewPool(logger, evidenceDB, stateStore, blockStore, evidence.NopMetrics(), nil) // fill the evidence pool with more evidence // than can fit in a block - for currentBytes := 0; int64(currentBytes) <= maxEvidenceBytes; { - ev, 
err := types.NewMockDuplicateVoteEvidenceWithValidator( + var currentBytes int64 + for currentBytes <= maxEvidenceBytes { + ev, err := types.NewMockDuplicateVoteEvidenceWithValidator(ctx, height, time.Now(), privVals[0], @@ -289,13 +336,13 @@ func TestCreateProposalBlock(t *testing.T) { state.Validators.QuorumHash, ) require.NoError(t, err) - currentBytes += len(ev.Bytes()) + currentBytes += int64(len(ev.Bytes())) evidencePool.ReportConflictingVotes(ev.VoteA, ev.VoteB) } evList, size := evidencePool.PendingEvidence(state.ConsensusParams.Evidence.MaxBytes) require.Less(t, size, state.ConsensusParams.Evidence.MaxBytes+1) - evData := &types.EvidenceData{Evidence: evList} + evData := types.EvidenceList(evList) require.EqualValues(t, size, evData.ByteSize()) // fill the mempool with more txs @@ -303,35 +350,38 @@ func TestCreateProposalBlock(t *testing.T) { txLength := 100 for i := 0; i <= maxBytes/txLength; i++ { tx := tmrand.Bytes(txLength) - err := mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}) + err := mp.CheckTx(ctx, tx, nil, mempool.TxInfo{}) assert.NoError(t, err) } + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) blockExec := sm.NewBlockExecutor( stateStore, logger, - proxyApp.Consensus(), - proxyApp.Query(), + proxyApp, mp, evidencePool, blockStore, - nil, + eventBus, + sm.NopMetrics(), ) - commit := types.NewCommit(height-1, 0, types.BlockID{}, types.StateID{}, nil, nil, nil) - proposedAppVersion := uint64(1) - - block, _ := blockExec.CreateProposalBlock( + commit := types.NewCommit(height-1, 0, types.BlockID{}, types.StateID{}, nil, nil, nil) + block, err := blockExec.CreateProposalBlock( + ctx, height, - state, - commit, + state, commit, proposerProTxHash, proposedAppVersion, + nil, ) + require.NoError(t, err) // check that the part set does not exceed the maximum block size - partSet := block.MakePartSet(partSize) + partSet, err := block.MakePartSet(partSize) + require.NoError(t, err) assert.Less(t, partSet.ByteSize(), int64(maxBytes)) partSetFromHeader := types.NewPartSetFromHeader(partSet.Header()) @@ -342,26 +392,30 @@ func TestCreateProposalBlock(t *testing.T) { } assert.EqualValues(t, partSetFromHeader.ByteSize(), partSet.ByteSize()) - err = blockExec.ValidateBlock(state, block) + err = blockExec.ValidateBlock(ctx, state, block) assert.NoError(t, err) assert.EqualValues(t, block.Header.ProposedAppVersion, proposedAppVersion) } func TestMaxTxsProposalBlockSize(t *testing.T) { - cfg, err := config.ResetTestRoot("node_create_proposal") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cfg, err := config.ResetTestRoot(t.TempDir(), "node_create_proposal") require.NoError(t, err) + defer os.RemoveAll(cfg.RootDir) - cc := abciclient.NewLocalCreator(kvstore.NewApplication()) - proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) - err = proxyApp.Start() - require.Nil(t, err) - defer proxyApp.Stop() //nolint:errcheck // ignore for tests - logger := log.TestingLogger() + logger := log.NewNopLogger() + + cc := abciclient.NewLocalClient(logger, kvstore.NewApplication()) + proxyApp := proxy.New(cc, logger, proxy.NopMetrics()) + err = proxyApp.Start(ctx) + require.NoError(t, err) const height int64 = 1 - state, stateDB, _ := state(1, height) + state, stateDB, _ := state(t, 1, height) stateStore := sm.NewStore(stateDB) blockStore := store.NewBlockStore(dbm.NewMemDB()) const maxBytes int64 = 16384 @@ -370,64 +424,71 @@ func TestMaxTxsProposalBlockSize(t *testing.T) { proposerProTxHash, _ := 
state.Validators.GetByIndex(0) // Make Mempool - mp := mempoolv0.NewCListMempool( + + mp := mempool.NewTxMempool( + logger.With("module", "mempool"), cfg.Mempool, - proxyApp.Mempool(), - state.LastBlockHeight, - mempoolv0.WithMetrics(mempool.NopMetrics()), - mempoolv0.WithPreCheck(sm.TxPreCheck(state)), - mempoolv0.WithPostCheck(sm.TxPostCheck(state)), + proxyApp, ) - mp.SetLogger(logger) // fill the mempool with one tx just below the maximum size txLength := int(types.MaxDataBytesNoEvidence(maxBytes)) tx := tmrand.Bytes(txLength - 4) // to account for the varint - err = mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}) + err = mp.CheckTx(ctx, tx, nil, mempool.TxInfo{}) assert.NoError(t, err) + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) + blockExec := sm.NewBlockExecutor( stateStore, logger, - proxyApp.Consensus(), - proxyApp.Query(), + proxyApp, mp, sm.EmptyEvidencePool{}, blockStore, - nil, + eventBus, + sm.NopMetrics(), ) commit := types.NewCommit(height-1, 0, types.BlockID{}, types.StateID{}, nil, nil, nil) - block, _ := blockExec.CreateProposalBlock( + block, err := blockExec.CreateProposalBlock( + ctx, height, - state, - commit, + state, commit, proposerProTxHash, 0, + nil, ) + require.NoError(t, err) pb, err := block.ToProto() require.NoError(t, err) assert.Less(t, int64(pb.Size()), maxBytes) // check that the part set does not exceed the maximum block size - partSet := block.MakePartSet(partSize) + partSet, err := block.MakePartSet(partSize) + require.NoError(t, err) assert.EqualValues(t, partSet.ByteSize(), int64(pb.Size())) } func TestMaxProposalBlockSize(t *testing.T) { - cfg, err := config.ResetTestRoot("node_create_proposal") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cfg, err := config.ResetTestRoot(t.TempDir(), "node_create_proposal") require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) - cc := abciclient.NewLocalCreator(kvstore.NewApplication()) - proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) - err = proxyApp.Start() - require.Nil(t, err) - defer proxyApp.Stop() //nolint:errcheck // ignore for tests - logger := log.TestingLogger() + logger := log.NewNopLogger() + + cc := abciclient.NewLocalClient(logger, kvstore.NewApplication()) + proxyApp := proxy.New(cc, logger, proxy.NopMetrics()) + err = proxyApp.Start(ctx) + require.NoError(t, err) + + state, stateDB, _ := state(t, 100, int64(1)) - state, stateDB, _ := state(100, int64(1)) stateStore := sm.NewStore(stateDB) blockStore := store.NewBlockStore(dbm.NewMemDB()) const maxBytes int64 = 1024 * 1024 * 2 @@ -436,26 +497,22 @@ func TestMaxProposalBlockSize(t *testing.T) { proposerProTxHash, _ := state.Validators.GetByIndex(0) // Make Mempool - mp := mempoolv0.NewCListMempool( + mp := mempool.NewTxMempool( + logger.With("module", "mempool"), cfg.Mempool, - proxyApp.Mempool(), - state.LastBlockHeight, - mempoolv0.WithMetrics(mempool.NopMetrics()), - mempoolv0.WithPreCheck(sm.TxPreCheck(state)), - mempoolv0.WithPostCheck(sm.TxPostCheck(state)), + proxyApp, ) - mp.SetLogger(logger) - //// fill the mempool with one txs just below the maximum size + // fill the mempool with one tx just below the maximum size txLength := cfg.Mempool.MaxTxBytes - 6 tx := tmrand.Bytes(txLength) // to account for the varint - err = mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}) + err = mp.CheckTx(ctx, tx, nil, mempool.TxInfo{}) assert.NoError(t, err) // now produce more txs than what a normal block can hold with 10 smaller txs // At the end of
the test, only the single big tx should be added for i := 0; i < 10; i++ { tx := tmrand.Bytes(10) - err = mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}) + err = mp.CheckTx(ctx, tx, nil, mempool.TxInfo{}) assert.NoError(t, err) } @@ -465,37 +522,47 @@ func TestMaxProposalBlockSize(t *testing.T) { Signature: crypto.CRandBytes(96), } + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) + blockExec := sm.NewBlockExecutor( stateStore, logger, - proxyApp.Consensus(), - proxyApp.Query(), + proxyApp, mp, sm.EmptyEvidencePool{}, blockStore, - &coreChainLock, + eventBus, + sm.NopMetrics(), ) + blockExec.SetNextCoreChainLock(&coreChainLock) blockID := types.BlockID{ - Hash: tmhash.Sum([]byte("blockID_hash")), + Hash: crypto.Checksum([]byte("blockID_hash")), PartSetHeader: types.PartSetHeader{ Total: math.MaxInt32, - Hash: tmhash.Sum([]byte("blockID_part_set_header_hash")), + Hash: crypto.Checksum([]byte("blockID_part_set_header_hash")), }, } + // save the updated validator set for use by the block executor. + state.LastBlockHeight = math.MaxInt64 - 3 + state.LastHeightValidatorsChanged = math.MaxInt64 - 1 + state.NextValidators = state.Validators.Copy() + require.NoError(t, stateStore.Save(state)) + stateID := types.StateID{ Height: math.MaxInt64 - 1, - LastAppHash: tmhash.Sum([]byte("app_hash")), + LastAppHash: crypto.Checksum([]byte("app_hash")), } timestamp := time.Date(math.MaxInt64, 0, 0, 0, 0, 0, math.MaxInt64, time.UTC) // change state in order to produce the largest accepted header state.LastBlockID = blockID - state.LastBlockHeight = math.MaxInt64 - 1 + state.LastBlockHeight = math.MaxInt64 - 2 state.LastBlockTime = timestamp - state.LastResultsHash = tmhash.Sum([]byte("last_results_hash")) - state.AppHash = tmhash.Sum([]byte("app_hash")) + state.LastResultsHash = crypto.Checksum([]byte("last_results_hash")) + state.AppHash = crypto.Checksum([]byte("app_hash")) state.Version.Consensus.Block = math.MaxInt64 state.Version.Consensus.App = math.MaxInt64 maxChainID := "" @@ -514,11 +581,17 @@ func TestMaxProposalBlockSize(t *testing.T) { ThresholdStateSignature: crypto.CRandBytes(bls12381.SignatureSize), } - block, partSet := blockExec.CreateProposalBlock( + block, err := blockExec.CreateProposalBlock( + ctx, math.MaxInt64, state, commit, - proposerProTxHash, 0, + proposerProTxHash, + 0, + nil, ) + require.NoError(t, err) + partSet, err := block.MakePartSet(types.BlockPartSizeBytes) + require.NoError(t, err) // this ensures that the header is at max size block.Header.Time = timestamp @@ -538,51 +611,83 @@ func TestMaxProposalBlockSize(t *testing.T) { } func TestNodeNewSeedNode(t *testing.T) { - cfg, err := config.ResetTestRoot("node_new_node_custom_reactors_test") + cfg, err := config.ResetTestRoot(t.TempDir(), "node_new_node_custom_reactors_test") require.NoError(t, err) cfg.Mode = config.ModeSeed defer os.RemoveAll(cfg.RootDir) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + nodeKey, err := types.LoadOrGenNodeKey(cfg.NodeKeyFile()) require.NoError(t, err) - ns, err := makeSeedNode(cfg, + logger := log.NewNopLogger() + + ns, err := makeSeedNode( + logger, + cfg, config.DefaultDBProvider, nodeKey, defaultGenesisDocProviderFunc(cfg), - log.TestingLogger(), ) + t.Cleanup(ns.Wait) + t.Cleanup(leaktest.CheckTimeout(t, time.Second)) + require.NoError(t, err) - n, ok := ns.(*nodeImpl) + n, ok := ns.(*seedNodeImpl) require.True(t, ok) - err = n.Start() + err = n.Start(ctx) require.NoError(t, err) - assert.True(t, 
n.pexReactor.IsRunning()) + + cancel() + n.Wait() + + assert.False(t, n.pexReactor.IsRunning()) } func TestNodeSetEventSink(t *testing.T) { - cfg, err := config.ResetTestRoot("node_app_version_test") + cfg, err := config.ResetTestRoot(t.TempDir(), "node_app_version_test") require.NoError(t, err) defer os.RemoveAll(cfg.RootDir) - logger := log.TestingLogger() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + setupTest := func(t *testing.T, conf *config.Config) []indexer.EventSink { - eventBus, err := createAndStartEventBus(logger) - require.NoError(t, err) + eventBus := eventbus.NewDefault(logger.With("module", "events")) + require.NoError(t, eventBus.Start(ctx)) + t.Cleanup(eventBus.Wait) genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile()) require.NoError(t, err) - indexService, eventSinks, err := createAndStartIndexerService(cfg, - config.DefaultDBProvider, eventBus, logger, genDoc.ChainID, - indexer.NopMetrics()) + eventSinks, err := sink.EventSinksFromConfig(cfg, config.DefaultDBProvider, genDoc.ChainID) require.NoError(t, err) - t.Cleanup(func() { require.NoError(t, indexService.Stop()) }) + return eventSinks } + cleanup := func(ns service.Service) func() { + return func() { + n, ok := ns.(*nodeImpl) + if !ok { + return + } + if n == nil { + return + } + if !n.IsRunning() { + return + } + cancel() + n.Wait() + } + } eventSinks := setupTest(t, cfg) assert.Equal(t, 1, len(eventSinks)) @@ -601,9 +706,10 @@ func TestNodeSetEventSink(t *testing.T) { assert.Equal(t, indexer.NULL, eventSinks[0].Type()) cfg.TxIndex.Indexer = []string{"kvv"} - ns, err := newDefaultNode(cfg, logger) + ns, err := newDefaultNode(ctx, cfg, logger) assert.Nil(t, ns) - assert.Equal(t, errors.New("unsupported event sink type"), err) + assert.Contains(t, err.Error(), "unsupported event sink type") + t.Cleanup(cleanup(ns)) cfg.TxIndex.Indexer = []string{} eventSinks = setupTest(t, cfg) @@ -612,31 +718,32 @@ func TestNodeSetEventSink(t *testing.T) { assert.Equal(t, indexer.NULL, eventSinks[0].Type()) cfg.TxIndex.Indexer = []string{"psql"} - ns, err = newDefaultNode(cfg, logger) + ns, err = newDefaultNode(ctx, cfg, logger) assert.Nil(t, ns) - assert.Equal(t, errors.New("the psql connection settings cannot be empty"), err) + assert.Contains(t, err.Error(), "the psql connection settings cannot be empty") + t.Cleanup(cleanup(ns)) // N.B. We can't create a PSQL event sink without starting a postgres // instance for it to talk to. The indexer service tests exercise that case. 
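Aside: the assertions that follow exercise the duplicate-sink check, and the mixed-case inputs ("kv" vs "Kv") show that sink names are compared case-insensitively. A sketch of that rule as a hypothetical standalone helper (validateSinkNames is not the node's actual function):

    package main

    import (
    	"errors"
    	"strings"
    )

    // validateSinkNames rejects a configuration that names the same event
    // sink twice, ignoring case, mirroring the behavior asserted below.
    func validateSinkNames(names []string) error {
    	seen := make(map[string]struct{}, len(names))
    	for _, name := range names {
    		key := strings.ToLower(name)
    		if _, ok := seen[key]; ok {
    			return errors.New("found duplicated sinks, please check the tx-index section in the config.toml")
    		}
    		seen[key] = struct{}{}
    	}
    	return nil
    }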
var e = errors.New("found duplicated sinks, please check the tx-index section in the config.toml") cfg.TxIndex.Indexer = []string{"null", "kv", "Kv"} - _, err = newDefaultNode(cfg, logger) + ns, err = newDefaultNode(ctx, cfg, logger) require.Error(t, err) - assert.Equal(t, e, err) + assert.Contains(t, err.Error(), e.Error()) + t.Cleanup(cleanup(ns)) cfg.TxIndex.Indexer = []string{"Null", "kV", "kv", "nUlL"} - _, err = newDefaultNode(cfg, logger) + ns, err = newDefaultNode(ctx, cfg, logger) require.Error(t, err) - assert.Equal(t, e, err) + assert.Contains(t, err.Error(), e.Error()) + t.Cleanup(cleanup(ns)) } -func state(nVals int, height int64) (sm.State, dbm.DB, []types.PrivValidator) { +func state(t *testing.T, nVals int, height int64) (sm.State, dbm.DB, []types.PrivValidator) { + t.Helper() vals, privVals := types.RandValidatorSet(nVals) genVals := types.MakeGenesisValsFromValidatorSet(vals) - for i := 0; i < nVals; i++ { - genVals[i].Name = fmt.Sprintf("test%d", i) - } s, _ := sm.MakeGenesisState(&types.GenesisDoc{ ChainID: "test-chain", Validators: genVals, @@ -647,38 +754,39 @@ func state(nVals int, height int64) (sm.State, dbm.DB, []types.PrivValidator) { // save validators to db for 2 heights stateDB := dbm.NewMemDB() + t.Cleanup(func() { require.NoError(t, stateDB.Close()) }) + stateStore := sm.NewStore(stateDB) - if err := stateStore.Save(s); err != nil { - panic(err) - } + require.NoError(t, stateStore.Save(s)) for i := 1; i < int(height); i++ { s.LastBlockHeight++ s.LastValidators = s.Validators.Copy() - if err := stateStore.Save(s); err != nil { - panic(err) - } + require.NoError(t, stateStore.Save(s)) } return s, stateDB, privVals } func TestLoadStateFromGenesis(t *testing.T) { - _ = loadStatefromGenesis(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + _ = loadStatefromGenesis(ctx, t) } -func loadStatefromGenesis(t *testing.T) sm.State { +func loadStatefromGenesis(ctx context.Context, t *testing.T) sm.State { t.Helper() stateDB := dbm.NewMemDB() stateStore := sm.NewStore(stateDB) - cfg, err := config.ResetTestRoot("load_state_from_genesis") + cfg, err := config.ResetTestRoot(t.TempDir(), "load_state_from_genesis") require.NoError(t, err) loadedState, err := stateStore.Load() require.NoError(t, err) require.True(t, loadedState.IsEmpty()) - genDoc, _ := factory.RandGenesisDoc(cfg, 10, 0) + genDoc, _ := factory.RandGenesisDoc(cfg, 10, 0, factory.ConsensusParams()) state, err := loadStateFromDBOrGenesisDocProvider( stateStore, diff --git a/node/public.go b/node/public.go index af8d475751..f4c52157d4 100644 --- a/node/public.go +++ b/node/public.go @@ -2,14 +2,13 @@ package node import ( + "context" "fmt" abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/config" - dashcore "github.com/tendermint/tendermint/dashcore/rpc" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/privval" "github.com/tendermint/tendermint/types" ) @@ -17,8 +16,12 @@ import ( // process that host their own process-local tendermint node. This is // equivalent to running tendermint in its own process communicating // with an external ABCI application.
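Aside: the node/public.go hunk below threads a context.Context through NewDefault and New, making cancellation the shutdown path. A caller-side sketch of the resulting lifecycle, assuming conf and logger are prepared by the caller and that the returned service exposes Start and Wait as the tests above use them:

    package main

    import (
    	"context"

    	"github.com/tendermint/tendermint/config"
    	"github.com/tendermint/tendermint/libs/log"
    	"github.com/tendermint/tendermint/node"
    )

    // runNode is a hypothetical caller, not part of the diff.
    func runNode(conf *config.Config, logger log.Logger) error {
    	ctx, cancel := context.WithCancel(context.Background())
    	defer cancel()

    	n, err := node.NewDefault(ctx, conf, logger)
    	if err != nil {
    		return err
    	}
    	if err := n.Start(ctx); err != nil {
    		return err
    	}

    	// ... run until done ...

    	cancel() // canceling the context requests shutdown
    	n.Wait() // blocks until all services have stopped
    	return nil
    }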
-func NewDefault(conf *config.Config, logger log.Logger) (service.Service, error) { - return newDefaultNode(conf, logger) +func NewDefault( + ctx context.Context, + conf *config.Config, + logger log.Logger, +) (service.Service, error) { + return newDefaultNode(ctx, conf, logger) } // New constructs a tendermint node. The ClientCreator makes it @@ -27,11 +30,12 @@ func NewDefault(conf *config.Config, logger log.Logger) (service.Service, error) // Genesis document: if the value is nil, the genesis document is read // from the file specified in the config, and otherwise the node uses // the value of the final argument. -func New(conf *config.Config, +func New( + ctx context.Context, + conf *config.Config, logger log.Logger, - cf abciclient.Creator, + cf abciclient.Client, gen *types.GenesisDoc, - dashCoreRPCClient dashcore.Client, ) (service.Service, error) { nodeKey, err := types.LoadOrGenNodeKey(conf.NodeKeyFile()) if err != nil { @@ -48,21 +52,16 @@ func New(conf *config.Config, switch conf.Mode { case config.ModeFull, config.ModeValidator: - pval, err := privval.LoadOrGenFilePV(conf.PrivValidator.KeyFile(), conf.PrivValidator.StateFile()) - if err != nil { - return nil, err - } - - return makeNode(conf, - pval, + return makeNode( + ctx, + conf, nodeKey, cf, genProvider, config.DefaultDBProvider, - dashCoreRPCClient, logger) case config.ModeSeed: - return makeSeedNode(conf, config.DefaultDBProvider, nodeKey, genProvider, logger) + return makeSeedNode(logger, conf, config.DefaultDBProvider, nodeKey, genProvider) default: return nil, fmt.Errorf("%q is not a valid mode", conf.Mode) } } diff --git a/node/seed.go b/node/seed.go new file mode 100644 index 0000000000..a0b71e411f --- /dev/null +++ b/node/seed.go @@ -0,0 +1,162 @@ +package node + +import ( + "context" + "errors" + "fmt" + "net/http" + "strings" + "time" + + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/internal/p2p" + "github.com/tendermint/tendermint/internal/p2p/pex" + sm "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/libs/service" + tmtime "github.com/tendermint/tendermint/libs/time" + "github.com/tendermint/tendermint/types" +) + +type seedNodeImpl struct { + service.BaseService + logger log.Logger + + // config + config *config.Config + genesisDoc *types.GenesisDoc // initial validator set + + // network + peerManager *p2p.PeerManager + router *p2p.Router + nodeKey types.NodeKey // our node privkey + isListening bool + + // services + pexReactor service.Service // for exchanging peer addresses + shutdownOps closer +} + +// makeSeedNode returns a new seed node, containing only the p2p and PEX reactors +func makeSeedNode( + logger log.Logger, + cfg *config.Config, + dbProvider config.DBProvider, + nodeKey types.NodeKey, + genesisDocProvider genesisDocProvider, +) (service.Service, error) { + if !cfg.P2P.PexReactor { + return nil, errors.New("cannot run seed nodes with PEX disabled") + } + + genDoc, err := genesisDocProvider() + if err != nil { + return nil, err + } + + state, err := sm.MakeGenesisState(genDoc) + if err != nil { + return nil, err + } + + nodeInfo, err := makeSeedNodeInfo(cfg, nodeKey, genDoc, state) + if err != nil { + return nil, err + } + + // Set up the peer manager and router.
+ p2pMetrics := p2p.PrometheusMetrics(cfg.Instrumentation.Namespace, "chain_id", genDoc.ChainID) + + peerManager, closer, err := createPeerManager(cfg, dbProvider, nodeKey.ID) + if err != nil { + return nil, combineCloseError( + fmt.Errorf("failed to create peer manager: %w", err), + closer) + } + + router, err := createRouter(logger, p2pMetrics, func() *types.NodeInfo { return &nodeInfo }, nodeKey, peerManager, cfg, nil) + if err != nil { + return nil, combineCloseError( + fmt.Errorf("failed to create router: %w", err), + closer) + } + + node := &seedNodeImpl{ + config: cfg, + logger: logger, + genesisDoc: genDoc, + + nodeKey: nodeKey, + peerManager: peerManager, + router: router, + + shutdownOps: closer, + + pexReactor: pex.NewReactor(logger, peerManager, router.OpenChannel, peerManager.Subscribe), + } + node.BaseService = *service.NewBaseService(logger, "SeedNode", node) + + return node, nil +} + +// OnStart starts the Seed Node. It implements service.Service. +func (n *seedNodeImpl) OnStart(ctx context.Context) error { + if n.config.RPC.PprofListenAddress != "" { + rpcCtx, rpcCancel := context.WithCancel(ctx) + srv := &http.Server{Addr: n.config.RPC.PprofListenAddress, Handler: nil} + go func() { + select { + case <-ctx.Done(): + sctx, scancel := context.WithTimeout(context.Background(), time.Second) + defer scancel() + _ = srv.Shutdown(sctx) + case <-rpcCtx.Done(): + } + }() + + go func() { + n.logger.Info("Starting pprof server", "laddr", n.config.RPC.PprofListenAddress) + + if err := srv.ListenAndServe(); err != nil { + n.logger.Error("pprof server error", "err", err) + rpcCancel() + } + }() + } + + now := tmtime.Now() + genTime := n.genesisDoc.GenesisTime + if genTime.After(now) { + n.logger.Info("Genesis time is in the future. Sleeping until then...", "genTime", genTime) + time.Sleep(genTime.Sub(now)) + } + + // Start the transport. + if err := n.router.Start(ctx); err != nil { + return err + } + n.isListening = true + + if n.config.P2P.PexReactor { + if err := n.pexReactor.Start(ctx); err != nil { + return err + } + } + + return nil +} + +// OnStop stops the Seed Node. It implements service.Service. 
+func (n *seedNodeImpl) OnStop() { + n.logger.Info("Stopping Node") + + n.pexReactor.Wait() + n.router.Wait() + n.isListening = false + + if err := n.shutdownOps(); err != nil { + if strings.TrimSpace(err.Error()) != "" { + n.logger.Error("problem shutting down additional services", "err", err) + } + } +} diff --git a/node/setup.go b/node/setup.go index 9b63968da6..b14fc2ea96 100644 --- a/node/setup.go +++ b/node/setup.go @@ -5,126 +5,98 @@ import ( "context" "errors" "fmt" - "math" - "net" + "strings" "time" + "github.com/dashevo/dashd-go/btcjson" dbm "github.com/tendermint/tm-db" abciclient "github.com/tendermint/tendermint/abci/client" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" - bcv0 "github.com/tendermint/tendermint/internal/blocksync/v0" - bcv2 "github.com/tendermint/tendermint/internal/blocksync/v2" + dashcore "github.com/tendermint/tendermint/dash/core" + "github.com/tendermint/tendermint/internal/blocksync" "github.com/tendermint/tendermint/internal/consensus" + "github.com/tendermint/tendermint/internal/eventbus" "github.com/tendermint/tendermint/internal/evidence" "github.com/tendermint/tendermint/internal/mempool" - mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0" - mempoolv1 "github.com/tendermint/tendermint/internal/mempool/v1" "github.com/tendermint/tendermint/internal/p2p" + "github.com/tendermint/tendermint/internal/p2p/conn" "github.com/tendermint/tendermint/internal/p2p/pex" - "github.com/tendermint/tendermint/internal/proxy" sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/state/indexer" - "github.com/tendermint/tendermint/internal/state/indexer/sink" "github.com/tendermint/tendermint/internal/statesync" "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/libs/log" + tmnet "github.com/tendermint/tendermint/libs/net" "github.com/tendermint/tendermint/libs/service" tmstrings "github.com/tendermint/tendermint/libs/strings" - protop2p "github.com/tendermint/tendermint/proto/tendermint/p2p" + "github.com/tendermint/tendermint/privval" + tmgrpc "github.com/tendermint/tendermint/privval/grpc" "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/version" _ "net/http/pprof" // nolint: gosec // securely exposed on separate, optional port ) -func initDBs(cfg *config.Config, dbProvider config.DBProvider) (blockStore *store.BlockStore, stateDB dbm.DB, err error) { //nolint:lll - var blockStoreDB dbm.DB - blockStoreDB, err = dbProvider(&config.DBContext{ID: "blockstore", Config: cfg}) - if err != nil { - return nil, nil, fmt.Errorf("unable to initialize blockstore: %w", err) - } - blockStore = store.NewBlockStore(blockStoreDB) +type closer func() error - stateDB, err = dbProvider(&config.DBContext{ID: "state", Config: cfg}) - if err != nil { - return nil, nil, fmt.Errorf("unable to initialize statestore: %w", err +func makeCloser(cs []closer) closer { + return func() error { + errs := make([]string, 0, len(cs)) + for _, cl := range cs { + if err := cl(); err != nil { + errs = append(errs, err.Error()) + } + } + if len(errs) > 0 { + return errors.New(strings.Join(errs, "; ")) + } + return nil } +} - return blockStore, stateDB, nil +func convertCancelCloser(cancel context.CancelFunc) closer { + return func() error { cancel(); return nil } } -// nolint:lll -func createAndStartProxyAppConns(clientCreator abciclient.Creator, logger log.Logger, metrics *proxy.Metrics)
(proxy.AppConns, error) { - proxyApp := proxy.NewAppConns(clientCreator, metrics) - proxyApp.SetLogger(logger.With("module", "proxy")) - if err := proxyApp.Start(); err != nil { - return nil, fmt.Errorf("error starting proxy app connections: %v", err) +func combineCloseError(err error, cl closer) error { + if err == nil { + return cl() } - return proxyApp, nil -} -func createAndStartEventBus(logger log.Logger) (*types.EventBus, error) { - eventBus := types.NewEventBus() - eventBus.SetLogger(logger.With("module", "events")) - if err := eventBus.Start(); err != nil { - return nil, err + clerr := cl() + if clerr == nil { + return err } - return eventBus, nil + + return fmt.Errorf("error=%q closerError=%q", err.Error(), clerr.Error()) } -func createAndStartIndexerService( +func initDBs( cfg *config.Config, dbProvider config.DBProvider, - eventBus *types.EventBus, - logger log.Logger, - chainID string, - metrics *indexer.Metrics, -) (*indexer.Service, []indexer.EventSink, error) { - eventSinks, err := sink.EventSinksFromConfig(cfg, dbProvider, chainID) +) (*store.BlockStore, dbm.DB, closer, error) { + + blockStoreDB, err := dbProvider(&config.DBContext{ID: "blockstore", Config: cfg}) if err != nil { - return nil, nil, err + return nil, nil, func() error { return nil }, fmt.Errorf("unable to initialize blockstore: %w", err) } + closers := []closer{} + blockStore := store.NewBlockStore(blockStoreDB) + closers = append(closers, blockStoreDB.Close) - indexerService := indexer.NewService(indexer.ServiceArgs{ - Sinks: eventSinks, - EventBus: eventBus, - Logger: logger.With("module", "txindex"), - Metrics: metrics, - }) - - if err := indexerService.Start(); err != nil { - return nil, nil, err + stateDB, err := dbProvider(&config.DBContext{ID: "state", Config: cfg}) + if err != nil { + return nil, nil, makeCloser(closers), fmt.Errorf("unable to initialize statestore: %w", err) } - return indexerService, eventSinks, nil -} + closers = append(closers, stateDB.Close) -func doHandshake( - stateStore sm.Store, - state sm.State, - blockStore sm.BlockStore, - genDoc *types.GenesisDoc, - nodeProTxHash crypto.ProTxHash, - appHashSize int, - eventBus types.BlockEventPublisher, - proxyApp proxy.AppConns, - consensusLogger log.Logger) (uint64, error) { - - handshaker := consensus.NewHandshaker(stateStore, state, blockStore, genDoc, nodeProTxHash, - appHashSize) - handshaker.SetLogger(consensusLogger) - handshaker.SetEventBus(eventBus) - appVersion, err := handshaker.Handshake(proxyApp) - if err != nil { - return appVersion, fmt.Errorf("error during handshake: %v", err) - } - return appVersion, nil + return blockStore, stateDB, makeCloser(closers), nil } -func logNodeStartupInfo(state sm.State, proTxHash crypto.ProTxHash, logger, consensusLogger log.Logger, mode string) { +func logNodeStartupInfo(state sm.State, proTxHash crypto.ProTxHash, logger log.Logger, mode string) { // Log the version info. 
logger.Info("Version info", "tmVersion", version.TMCoreSemVer, @@ -140,15 +112,20 @@ func logNodeStartupInfo(state sm.State, proTxHash crypto.ProTxHash, logger, cons "state", state.Version.Consensus.Block, ) } - switch { - case mode == config.ModeFull: - consensusLogger.Info("This node is a fullnode") - case mode == config.ModeValidator: + + switch mode { + case config.ModeFull: + logger.Info("This node is a fullnode") + case config.ModeValidator: // Log whether this node is a validator or an observer - if proTxHash != nil && state.Validators.HasProTxHash(proTxHash) { - consensusLogger.Info("This node is a validator", "proTxHash", proTxHash) + if state.Validators.HasProTxHash(proTxHash) { + logger.Info("This node is a validator", + "proTxHash", proTxHash.ShortString(), + ) } else { - consensusLogger.Info("This node is not a validator", "proTxHash", proTxHash) + logger.Info("This node is a validator (NOT in the active validator set)", + "proTxHash", proTxHash.ShortString(), + ) } } } @@ -165,275 +142,80 @@ func onlyValidatorIsUs(state sm.State, proTxHash types.ProTxHash) bool { } func createMempoolReactor( + logger log.Logger, cfg *config.Config, - proxyApp proxy.AppConns, - state sm.State, + appClient abciclient.Client, + store sm.Store, memplMetrics *mempool.Metrics, - peerManager *p2p.PeerManager, - router *p2p.Router, - logger log.Logger, -) (*p2p.ReactorShim, service.Service, mempool.Mempool, error) { + peerEvents p2p.PeerEventSubscriber, + chCreator p2p.ChannelCreator, + peerHeight func(types.NodeID) int64, +) (service.Service, mempool.Mempool) { + logger = logger.With("module", "mempool") - logger = logger.With("module", "mempool", "version", cfg.Mempool.Version) - channelShims := mempoolv0.GetChannelShims(cfg.Mempool) - reactorShim := p2p.NewReactorShim(logger, "MempoolShim", channelShims) - - var ( - channels map[p2p.ChannelID]*p2p.Channel - peerUpdates *p2p.PeerUpdates + mp := mempool.NewTxMempool( + logger, + cfg.Mempool, + appClient, + mempool.WithMetrics(memplMetrics), + mempool.WithPreCheck(sm.TxPreCheckFromStore(store)), + mempool.WithPostCheck(sm.TxPostCheckFromStore(store)), ) - if cfg.P2P.UseLegacy { - channels = getChannelsFromShim(reactorShim) - peerUpdates = reactorShim.PeerUpdates - } else { - channels = makeChannelsFromShims(router, channelShims) - peerUpdates = peerManager.Subscribe() - } - - switch cfg.Mempool.Version { - case config.MempoolV0: - mp := mempoolv0.NewCListMempool( - cfg.Mempool, - proxyApp.Mempool(), - state.LastBlockHeight, - mempoolv0.WithMetrics(memplMetrics), - mempoolv0.WithPreCheck(sm.TxPreCheck(state)), - mempoolv0.WithPostCheck(sm.TxPostCheck(state)), - ) - - mp.SetLogger(logger) - - reactor := mempoolv0.NewReactor( - logger, - cfg.Mempool, - peerManager, - mp, - channels[mempool.MempoolChannel], - peerUpdates, - ) - - if cfg.Consensus.WaitForTxs() { - mp.EnableTxsAvailable() - } - - return reactorShim, reactor, mp, nil - - case config.MempoolV1: - mp := mempoolv1.NewTxMempool( - logger, - cfg.Mempool, - proxyApp.Mempool(), - state.LastBlockHeight, - mempoolv1.WithMetrics(memplMetrics), - mempoolv1.WithPreCheck(sm.TxPreCheck(state)), - mempoolv1.WithPostCheck(sm.TxPostCheck(state)), - ) - - reactor := mempoolv1.NewReactor( - logger, - cfg.Mempool, - peerManager, - mp, - channels[mempool.MempoolChannel], - peerUpdates, - ) - - if cfg.Consensus.WaitForTxs() { - mp.EnableTxsAvailable() - } - - return reactorShim, reactor, mp, nil + reactor := mempool.NewReactor( + logger, + cfg.Mempool, + mp, + chCreator, + peerEvents, + peerHeight, + ) - 
default: - return nil, nil, nil, fmt.Errorf("unknown mempool version: %s", cfg.Mempool.Version) + if cfg.Consensus.WaitForTxs() { + mp.EnableTxsAvailable() } + + return reactor, mp } func createEvidenceReactor( + logger log.Logger, cfg *config.Config, dbProvider config.DBProvider, - stateDB dbm.DB, + store sm.Store, blockStore *store.BlockStore, - peerManager *p2p.PeerManager, - router *p2p.Router, - logger log.Logger, -) (*p2p.ReactorShim, *evidence.Reactor, *evidence.Pool, error) { + peerEvents p2p.PeerEventSubscriber, + chCreator p2p.ChannelCreator, + metrics *evidence.Metrics, + eventBus *eventbus.EventBus, +) (*evidence.Reactor, *evidence.Pool, closer, error) { evidenceDB, err := dbProvider(&config.DBContext{ID: "evidence", Config: cfg}) if err != nil { - return nil, nil, nil, fmt.Errorf("unable to initialize evidence db: %w", err) + return nil, nil, func() error { return nil }, fmt.Errorf("unable to initialize evidence db: %w", err) } logger = logger.With("module", "evidence") - reactorShim := p2p.NewReactorShim(logger, "EvidenceShim", evidence.ChannelShims) - - evidencePool, err := evidence.NewPool(logger, evidenceDB, sm.NewStore(stateDB), blockStore) - if err != nil { - return nil, nil, nil, fmt.Errorf("creating evidence pool: %w", err) - } - - var ( - channels map[p2p.ChannelID]*p2p.Channel - peerUpdates *p2p.PeerUpdates - ) - - if cfg.P2P.UseLegacy { - channels = getChannelsFromShim(reactorShim) - peerUpdates = reactorShim.PeerUpdates - } else { - channels = makeChannelsFromShims(router, evidence.ChannelShims) - peerUpdates = peerManager.Subscribe() - } - - evidenceReactor := evidence.NewReactor( - logger, - channels[evidence.EvidenceChannel], - peerUpdates, - evidencePool, - ) - - return reactorShim, evidenceReactor, evidencePool, nil -} -func createBlockchainReactor( - logger log.Logger, - cfg *config.Config, - state sm.State, - blockExec *sm.BlockExecutor, - blockStore *store.BlockStore, - nodeProTxHash crypto.ProTxHash, - csReactor *consensus.Reactor, - peerManager *p2p.PeerManager, - router *p2p.Router, - blockSync bool, - metrics *consensus.Metrics, -) (*p2p.ReactorShim, service.Service, error) { - - logger = logger.With("module", "blockchain") - - switch cfg.BlockSync.Version { - case config.BlockSyncV0: - reactorShim := p2p.NewReactorShim(logger, "BlockchainShim", bcv0.ChannelShims) - - var ( - channels map[p2p.ChannelID]*p2p.Channel - peerUpdates *p2p.PeerUpdates - ) - - if cfg.P2P.UseLegacy { - channels = getChannelsFromShim(reactorShim) - peerUpdates = reactorShim.PeerUpdates - } else { - channels = makeChannelsFromShims(router, bcv0.ChannelShims) - peerUpdates = peerManager.Subscribe() - } - - reactor, err := bcv0.NewReactor( - logger, state.Copy(), blockExec, blockStore, nodeProTxHash, csReactor, - channels[bcv0.BlockSyncChannel], peerUpdates, blockSync, - metrics, - ) - if err != nil { - return nil, nil, err - } - - return reactorShim, reactor, nil - - case config.BlockSyncV2: - return nil, nil, errors.New("block sync version v2 is no longer supported. 
Please use v0") - - default: - return nil, nil, fmt.Errorf("unknown block sync version %s", cfg.BlockSync.Version) - } -} - -func createConsensusReactor( - cfg *config.Config, - state sm.State, - blockExec *sm.BlockExecutor, - blockStore sm.BlockStore, - mp mempool.Mempool, - evidencePool *evidence.Pool, - privValidator types.PrivValidator, - csMetrics *consensus.Metrics, - waitSync bool, - eventBus *types.EventBus, - peerManager *p2p.PeerManager, - router *p2p.Router, - proposedAppVersion uint64, - logger log.Logger, -) (*p2p.ReactorShim, *consensus.Reactor, *consensus.State) { - - consensusState := consensus.NewStateWithLogger( - cfg.Consensus, - state.Copy(), - blockExec, - blockStore, - mp, - evidencePool, - logger, - proposedAppVersion, - consensus.StateMetrics(csMetrics), - ) - if privValidator != nil && cfg.Mode == config.ModeValidator { - consensusState.SetPrivValidator(privValidator) - } - - reactorShim := p2p.NewReactorShim(logger, "ConsensusShim", consensus.ChannelShims) + evidencePool := evidence.NewPool(logger, evidenceDB, store, blockStore, metrics, eventBus) + evidenceReactor := evidence.NewReactor(logger, chCreator, peerEvents, evidencePool) - var ( - channels map[p2p.ChannelID]*p2p.Channel - peerUpdates *p2p.PeerUpdates - ) - - if cfg.P2P.UseLegacy { - channels = getChannelsFromShim(reactorShim) - peerUpdates = reactorShim.PeerUpdates - } else { - channels = makeChannelsFromShims(router, consensus.ChannelShims) - peerUpdates = peerManager.Subscribe() - } - - reactor := consensus.NewReactor( - logger, - consensusState, - channels[consensus.StateChannel], - channels[consensus.DataChannel], - channels[consensus.VoteChannel], - channels[consensus.VoteSetBitsChannel], - peerUpdates, - waitSync, - consensus.ReactorMetrics(csMetrics), - ) - - // Services which will be publishing and/or subscribing for messages (events) - // consensusReactor will set it on consensusState and blockExecutor. 
- reactor.SetEventBus(eventBus) - - return reactorShim, reactor, consensusState -} - -func createTransport(logger log.Logger, cfg *config.Config) *p2p.MConnTransport { - return p2p.NewMConnTransport( - logger, p2p.MConnConfig(cfg.P2P), []*p2p.ChannelDescriptor{}, - p2p.MConnTransportOptions{ - MaxAcceptedConnections: uint32(cfg.P2P.MaxNumInboundPeers + - len(tmstrings.SplitAndTrimEmpty(cfg.P2P.UnconditionalPeerIDs, ",", " ")), - ), - }, - ) + return evidenceReactor, evidencePool, evidenceDB.Close, nil } func createPeerManager( cfg *config.Config, dbProvider config.DBProvider, - p2pLogger log.Logger, nodeID types.NodeID, -) (*p2p.PeerManager, error) { +) (*p2p.PeerManager, closer, error) { selfAddr, err := p2p.ParseNodeAddress(nodeID.AddressString(cfg.P2P.ExternalAddress)) if err != nil { - return nil, fmt.Errorf("couldn't parse ExternalAddress %q: %w", cfg.P2P.ExternalAddress, err) + return nil, func() error { return nil }, fmt.Errorf("couldn't parse ExternalAddress %q: %w", cfg.P2P.ExternalAddress, err) + } + + privatePeerIDs := make(map[types.NodeID]struct{}) + for _, id := range tmstrings.SplitAndTrimEmpty(cfg.P2P.PrivatePeerIDs, ",", " ") { + privatePeerIDs[types.NodeID(id)] = struct{}{} } var maxConns uint16 @@ -441,29 +223,10 @@ func createPeerManager( switch { case cfg.P2P.MaxConnections > 0: maxConns = cfg.P2P.MaxConnections - - case cfg.P2P.MaxNumInboundPeers > 0 && cfg.P2P.MaxNumOutboundPeers > 0: - x := cfg.P2P.MaxNumInboundPeers + cfg.P2P.MaxNumOutboundPeers - if x > math.MaxUint16 { - return nil, fmt.Errorf( - "max inbound peers (%d) + max outbound peers (%d) exceeds maximum (%d)", - cfg.P2P.MaxNumInboundPeers, - cfg.P2P.MaxNumOutboundPeers, - math.MaxUint16, - ) - } - - maxConns = uint16(x) - default: maxConns = 64 } - privatePeerIDs := make(map[types.NodeID]struct{}) - for _, id := range tmstrings.SplitAndTrimEmpty(cfg.P2P.PrivatePeerIDs, ",", " ") { - privatePeerIDs[types.NodeID(id)] = struct{}{} - } - options := p2p.PeerManagerOptions{ SelfAddress: selfAddr, MaxConnected: maxConns, @@ -480,7 +243,7 @@ func createPeerManager( for _, p := range tmstrings.SplitAndTrimEmpty(cfg.P2P.PersistentPeers, ",", " ") { address, err := p2p.ParseNodeAddress(p) if err != nil { - return nil, fmt.Errorf("invalid peer address %q: %w", p, err) + return nil, func() error { return nil }, fmt.Errorf("invalid peer address %q: %w", p, err) } peers = append(peers, address) @@ -490,201 +253,69 @@ func createPeerManager( for _, p := range tmstrings.SplitAndTrimEmpty(cfg.P2P.BootstrapPeers, ",", " ") { address, err := p2p.ParseNodeAddress(p) if err != nil { - return nil, fmt.Errorf("invalid peer address %q: %w", p, err) + return nil, func() error { return nil }, fmt.Errorf("invalid peer address %q: %w", p, err) } peers = append(peers, address) } peerDB, err := dbProvider(&config.DBContext{ID: "peerstore", Config: cfg}) if err != nil { - return nil, fmt.Errorf("unable to initialize peer store: %w", err) + return nil, func() error { return nil }, fmt.Errorf("unable to initialize peer store: %w", err) } peerManager, err := p2p.NewPeerManager(nodeID, peerDB, options) if err != nil { - return nil, fmt.Errorf("failed to create peer manager: %w", err) + return nil, peerDB.Close, fmt.Errorf("failed to create peer manager: %w", err) } for _, peer := range peers { if _, err := peerManager.Add(peer); err != nil { - return nil, fmt.Errorf("failed to add peer %q: %w", peer, err) + return nil, peerDB.Close, fmt.Errorf("failed to add peer %q: %w", peer, err) } } - return peerManager, nil + return peerManager, 
peerDB.Close, nil } func createRouter( - p2pLogger log.Logger, + logger log.Logger, p2pMetrics *p2p.Metrics, - nodeInfo types.NodeInfo, - privKey crypto.PrivKey, + nodeInfoProducer func() *types.NodeInfo, + nodeKey types.NodeKey, peerManager *p2p.PeerManager, - transport p2p.Transport, - options p2p.RouterOptions, -) (*p2p.Router, error) { - - return p2p.NewRouter( - p2pLogger, - p2pMetrics, - nodeInfo, - privKey, - peerManager, - []p2p.Transport{transport}, - options, - ) -} - -func createSwitch( cfg *config.Config, - transport p2p.Transport, - p2pMetrics *p2p.Metrics, - mempoolReactor *p2p.ReactorShim, - bcReactor p2p.Reactor, - stateSyncReactor *p2p.ReactorShim, - consensusReactor *p2p.ReactorShim, - evidenceReactor *p2p.ReactorShim, - proxyApp proxy.AppConns, - nodeInfo types.NodeInfo, - nodeKey types.NodeKey, - p2pLogger log.Logger, -) *p2p.Switch { - - var ( - connFilters = []p2p.ConnFilterFunc{} - peerFilters = []p2p.PeerFilterFunc{} - ) - - if !cfg.P2P.AllowDuplicateIP { - connFilters = append(connFilters, p2p.ConnDuplicateIPFilter) - } - - // Filter peers by addr or pubkey with an ABCI query. - // If the query return code is OK, add peer. - if cfg.FilterPeers { - connFilters = append( - connFilters, - // ABCI query for address filtering. - func(_ p2p.ConnSet, c net.Conn, _ []net.IP) error { - res, err := proxyApp.Query().QuerySync(context.Background(), abci.RequestQuery{ - Path: fmt.Sprintf("/p2p/filter/addr/%s", c.RemoteAddr().String()), - }) - if err != nil { - return err - } - if res.IsErr() { - return fmt.Errorf("error querying abci app: %v", res) - } - - return nil - }, - ) + appClient abciclient.Client, +) (*p2p.Router, error) { - peerFilters = append( - peerFilters, - // ABCI query for ID filtering. - func(_ p2p.IPeerSet, p p2p.Peer) error { - res, err := proxyApp.Query().QuerySync(context.Background(), abci.RequestQuery{ - Path: fmt.Sprintf("/p2p/filter/id/%s", p.ID()), - }) - if err != nil { - return err - } - if res.IsErr() { - return fmt.Errorf("error querying abci app: %v", res) - } - - return nil - }, - ) - } + p2pLogger := logger.With("module", "p2p") - sw := p2p.NewSwitch( - cfg.P2P, - transport, - p2p.WithMetrics(p2pMetrics), - p2p.SwitchPeerFilters(peerFilters...), - p2p.SwitchConnFilters(connFilters...), + transportConf := conn.DefaultMConnConfig() + transportConf.FlushThrottle = cfg.P2P.FlushThrottleTimeout + transportConf.SendRate = cfg.P2P.SendRate + transportConf.RecvRate = cfg.P2P.RecvRate + transportConf.MaxPacketMsgPayloadSize = cfg.P2P.MaxPacketMsgPayloadSize + transport := p2p.NewMConnTransport( + p2pLogger, transportConf, []*p2p.ChannelDescriptor{}, + p2p.MConnTransportOptions{ + MaxAcceptedConnections: uint32(cfg.P2P.MaxConnections), + }, ) - sw.SetLogger(p2pLogger) - if cfg.Mode != config.ModeSeed { - sw.AddReactor("MEMPOOL", mempoolReactor) - sw.AddReactor("BLOCKCHAIN", bcReactor) - sw.AddReactor("CONSENSUS", consensusReactor) - sw.AddReactor("EVIDENCE", evidenceReactor) - sw.AddReactor("STATESYNC", stateSyncReactor) - } - - sw.SetNodeInfo(nodeInfo) - sw.SetNodeKey(nodeKey) - - p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID, "file", cfg.NodeKeyFile()) - return sw -} - -func createAddrBookAndSetOnSwitch(cfg *config.Config, sw *p2p.Switch, - p2pLogger log.Logger, nodeKey types.NodeKey) (pex.AddrBook, error) { - - addrBook := pex.NewAddrBook(cfg.P2P.AddrBookFile(), cfg.P2P.AddrBookStrict) - addrBook.SetLogger(p2pLogger.With("book", cfg.P2P.AddrBookFile())) - - // Add ourselves to addrbook to prevent dialing ourselves - if cfg.P2P.ExternalAddress != "" { 
- addr, err := types.NewNetAddressString(nodeKey.ID.AddressString(cfg.P2P.ExternalAddress)) - if err != nil { - return nil, fmt.Errorf("p2p.external_address is incorrect: %w", err) - } - addrBook.AddOurAddress(addr) - } - if cfg.P2P.ListenAddress != "" { - addr, err := types.NewNetAddressString(nodeKey.ID.AddressString(cfg.P2P.ListenAddress)) - if err != nil { - return nil, fmt.Errorf("p2p.laddr is incorrect: %w", err) - } - addrBook.AddOurAddress(addr) - } - - sw.SetAddrBook(addrBook) - - return addrBook, nil -} - -func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, cfg *config.Config, - sw *p2p.Switch, logger log.Logger) *pex.Reactor { - - reactorConfig := &pex.ReactorConfig{ - Seeds: tmstrings.SplitAndTrimEmpty(cfg.P2P.Seeds, ",", " "), //nolint: staticcheck - SeedMode: cfg.Mode == config.ModeSeed, - // See consensus/reactor.go: blocksToContributeToBecomeGoodPeer 10000 - // blocks assuming 10s blocks ~ 28 hours. - // TODO (melekes): make it dynamic based on the actual block latencies - // from the live network. - // https://github.com/tendermint/tendermint/issues/3523 - SeedDisconnectWaitPeriod: 28 * time.Hour, - PersistentPeersMaxDialPeriod: cfg.P2P.PersistentPeersMaxDialPeriod, - } - // TODO persistent peers ? so we can have their DNS addrs saved - pexReactor := pex.NewReactor(addrBook, reactorConfig) - pexReactor.SetLogger(logger.With("module", "pex")) - sw.AddReactor("PEX", pexReactor) - return pexReactor -} - -func createPEXReactorV2( - cfg *config.Config, - logger log.Logger, - peerManager *p2p.PeerManager, - router *p2p.Router, -) (service.Service, error) { - - channel, err := router.OpenChannel(pex.ChannelDescriptor(), &protop2p.PexMessage{}, 128) + ep, err := p2p.NewEndpoint(nodeKey.ID.AddressString(cfg.P2P.ListenAddress)) if err != nil { return nil, err } - peerUpdates := peerManager.Subscribe() - return pex.NewReactorV2(logger, peerManager, channel, peerUpdates), nil + return p2p.NewRouter( + p2pLogger, + p2pMetrics, + nodeKey.PrivKey, + peerManager, + nodeInfoProducer, + transport, + ep, + getRouterConfig(cfg, appClient), + ) } func makeNodeInfo( @@ -693,37 +324,26 @@ func makeNodeInfo( proTxHash crypto.ProTxHash, eventSinks []indexer.EventSink, genDoc *types.GenesisDoc, - state sm.State, + versionInfo version.Consensus, ) (types.NodeInfo, error) { + txIndexerStatus := "off" if indexer.IndexingEnabled(eventSinks) { txIndexerStatus = "on" } - var bcChannel byte - switch cfg.BlockSync.Version { - case config.BlockSyncV0: - bcChannel = byte(bcv0.BlockSyncChannel) - - case config.BlockSyncV2: - bcChannel = bcv2.BlockchainChannel - - default: - return types.NodeInfo{}, fmt.Errorf("unknown blocksync version %s", cfg.BlockSync.Version) - } - nodeInfo := types.NodeInfo{ ProtocolVersion: types.ProtocolVersion{ P2P: version.P2PProtocol, // global - Block: state.Version.Consensus.Block, - App: state.Version.Consensus.App, + Block: versionInfo.Block, + App: versionInfo.App, }, NodeID: nodeKey.ID, Network: genDoc.ChainID, Version: version.TMCoreSemVer, Channels: []byte{ - bcChannel, + byte(blocksync.BlockSyncChannel), byte(consensus.StateChannel), byte(consensus.DataChannel), byte(consensus.VoteChannel), @@ -747,16 +367,12 @@ func makeNodeInfo( nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel) } - lAddr := cfg.P2P.ExternalAddress - - if lAddr == "" { - lAddr = cfg.P2P.ListenAddress + nodeInfo.ListenAddr = cfg.P2P.ExternalAddress + if nodeInfo.ListenAddr == "" { + nodeInfo.ListenAddr = cfg.P2P.ListenAddress } - nodeInfo.ListenAddr = lAddr - - err := nodeInfo.Validate() 
-	return nodeInfo, err
+	return nodeInfo, nodeInfo.Validate()
 }
 
 func makeSeedNodeInfo(
@@ -771,29 +387,177 @@ func makeSeedNodeInfo(
 			Block: state.Version.Consensus.Block,
 			App:   state.Version.Consensus.App,
 		},
-		NodeID:  nodeKey.ID,
-		Network: genDoc.ChainID,
-		Version: version.TMCoreSemVer,
-		Channels: []byte{},
-		Moniker: cfg.Moniker,
+		NodeID:  nodeKey.ID,
+		Network: genDoc.ChainID,
+		Version: version.TMCoreSemVer,
+		Channels: []byte{
+			pex.PexChannel,
+		},
+		Moniker: cfg.Moniker,
 		Other: types.NodeInfoOther{
 			TxIndex:    "off",
 			RPCAddress: cfg.RPC.ListenAddress,
 		},
 	}
-	if cfg.P2P.PexReactor {
-		nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel)
+	nodeInfo.ListenAddr = cfg.P2P.ExternalAddress
+	if nodeInfo.ListenAddr == "" {
+		nodeInfo.ListenAddr = cfg.P2P.ListenAddress
 	}
-	lAddr := cfg.P2P.ExternalAddress
+	return nodeInfo, nodeInfo.Validate()
+}
+
+func createAndStartPrivValidatorSocketClient(
+	ctx context.Context,
+	listenAddr, chainID string,
+	quorumHash crypto.QuorumHash,
+	logger log.Logger,
+) (types.PrivValidator, error) {
-	if lAddr == "" {
-		lAddr = cfg.P2P.ListenAddress
+	pve, err := privval.NewSignerListener(listenAddr, logger)
+	if err != nil {
+		return nil, fmt.Errorf("starting validator listener: %w", err)
 	}
-	nodeInfo.ListenAddr = lAddr
+	pvsc, err := privval.NewSignerClient(ctx, pve, chainID)
+	if err != nil {
+		return nil, fmt.Errorf("starting validator client: %w", err)
+	}
+
+	// try to get a pubkey from the private validator the first time
+	_, err = pvsc.GetPubKey(ctx, quorumHash)
+	if err != nil {
+		return nil, fmt.Errorf("can't get pubkey: %w", err)
+	}
+
+	const (
+		timeout = 100 * time.Millisecond
+		maxTime = 5 * time.Second
+		retries = int(maxTime / timeout)
+	)
+	pvscWithRetries := privval.NewRetrySignerClient(pvsc, retries, timeout)
+
+	return pvscWithRetries, nil
+}
+
+func createAndStartPrivValidatorGRPCClient(
+	ctx context.Context,
+	cfg *config.Config,
+	chainID string,
+	quorumHash crypto.QuorumHash,
+	logger log.Logger,
+) (types.PrivValidator, error) {
+	pvsc, err := tmgrpc.DialRemoteSigner(
+		ctx,
+		cfg.PrivValidator,
+		chainID,
+		logger,
+		cfg.Instrumentation.Prometheus,
+	)
+	if err != nil {
+		return nil, fmt.Errorf("failed to start private validator: %w", err)
+	}
+
+	// try to get a pubkey from the private validator the first time
+	_, err = pvsc.GetPubKey(ctx, quorumHash)
+	if err != nil {
+		return nil, fmt.Errorf("can't get pubkey: %w", err)
+	}
+
+	return pvsc, nil
+}
+
+func makeDefaultPrivval(conf *config.Config) (*privval.FilePV, error) {
+	if conf.Mode == config.ModeValidator {
+		pval, err := privval.LoadOrGenFilePV(conf.PrivValidator.KeyFile(), conf.PrivValidator.StateFile())
+		if err != nil {
+			return nil, err
+		}
+		return pval, nil
+	}
+
+	return nil, nil
+}
+
+// createPrivval creates and returns a new PrivValidator based on the provided config.
+func createPrivval(ctx context.Context, logger log.Logger, conf *config.Config, genDoc *types.GenesisDoc) (types.PrivValidator, error) {
+	switch {
+	case conf.PrivValidator.ListenAddr != "": // Generic tendermint privval
+		protocol, _ := tmnet.ProtocolAndAddress(conf.PrivValidator.ListenAddr)
+		// FIXME: we should return un-started services and
+		// then start them later.
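// Editor's aside: createPrivval dispatches on the scheme of
// PrivValidator.ListenAddr. tmnet.ProtocolAndAddress splits an address of the
// form "protocol://host:port" and falls back to "tcp" when no scheme is
// present, which is what steers the branch just below (sketch; the addresses
// are illustrative):
//
//	protocol, addr := tmnet.ProtocolAndAddress("grpc://127.0.0.1:26659")
//	// protocol == "grpc", addr == "127.0.0.1:26659" -> gRPC remote signer
//
//	protocol, addr = tmnet.ProtocolAndAddress("127.0.0.1:26659")
//	// protocol == "tcp" (the default) -> socket-based signer client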
+		if protocol == "grpc" {
+			privValidator, err := createAndStartPrivValidatorGRPCClient(ctx, conf, genDoc.ChainID, genDoc.QuorumHash, logger)
+			if err != nil {
+				return nil, fmt.Errorf("error with private validator grpc client: %w", err)
+			}
+			return privValidator, nil
+		}
+
+		privValidator, err := createAndStartPrivValidatorSocketClient(
+			ctx,
+			conf.PrivValidator.ListenAddr,
+			genDoc.ChainID,
+			genDoc.QuorumHash,
+			logger,
+		)
+		if err != nil {
+			return nil, fmt.Errorf("error with private validator socket client: %w", err)
+		}
+		return privValidator, nil
+
+	case conf.PrivValidator.CoreRPCHost != "": // DASH Core Privval
+		if conf.Mode != config.ModeValidator {
+			return nil, fmt.Errorf("cannot initialize PrivValidator: this node is NOT a validator")
+		}
+
+		logger.Info("Initializing Dash Core PrivValidator")
+
+		dashCoreRPCClient, err := DefaultDashCoreRPCClient(conf, logger.With("module", dashcore.ModuleName))
+		if err != nil {
+			return nil, fmt.Errorf("failed to create Dash Core RPC client: %w", err)
+		}
+
+		// Wire the local Dash Core RPC client into the service that performs signing.
+		privValidator, err := createAndStartPrivValidatorDashCoreClient(
+			conf.Consensus.QuorumType,
+			dashCoreRPCClient,
+			logger,
+		)
+		if err != nil {
+			return nil, fmt.Errorf("error with private validator RPC client: %w", err)
+		}
+		proTxHash, err := privValidator.GetProTxHash(ctx)
+		if err != nil {
+			return nil, fmt.Errorf("can't get proTxHash using dash core signing: %w", err)
+		}
+		logger.Info("Connected to Core RPC Masternode", "proTxHash", proTxHash.String())
+
+		return privValidator, nil
+	default:
+		return makeDefaultPrivval(conf)
+	}
+}
+
+func createAndStartPrivValidatorDashCoreClient(
+	defaultQuorumType btcjson.LLMQType,
+	dashCoreRPCClient dashcore.Client,
+	logger log.Logger,
+) (types.PrivValidator, error) {
+	pvsc, err := privval.NewDashCoreSignerClient(dashCoreRPCClient, defaultQuorumType)
+	if err != nil {
+		return nil, fmt.Errorf("failed to start private validator: %w", err)
+	}
+
+	// ping Core from the private validator once at startup to make sure the connection works
+	err = pvsc.Ping()
+	if err != nil {
+		return nil, fmt.Errorf(
+			"can't ping core server when starting private validator rpc client: %w",
+			err,
+		)
+	}
-	err := nodeInfo.Validate()
-	return nodeInfo, err
+	return pvsc, nil
 }
diff --git a/privval/dash_core_mock_signer_server.go b/privval/dash_core_mock_signer_server.go
index b60bc962f3..4ba7c51d0f 100644
--- a/privval/dash_core_mock_signer_server.go
+++ b/privval/dash_core_mock_signer_server.go
@@ -2,8 +2,8 @@ package privval
 
 import (
 	"github.com/go-pkgz/jrpc"
-	"github.com/tendermint/tendermint/crypto"
+
+	"github.com/tendermint/tendermint/crypto"
 	"github.com/tendermint/tendermint/types"
 )
diff --git a/privval/dash_core_signer_client.go b/privval/dash_core_signer_client.go
index 29cf5ea993..72ffeca37f 100644
--- a/privval/dash_core_signer_client.go
+++ b/privval/dash_core_signer_client.go
@@ -11,14 +11,21 @@ import (
 	"github.com/tendermint/tendermint/crypto"
 	"github.com/tendermint/tendermint/crypto/bls12381"
-	dashcore "github.com/tendermint/tendermint/dashcore/rpc"
+	dashcore "github.com/tendermint/tendermint/dash/core"
 	tmbytes "github.com/tendermint/tendermint/libs/bytes"
 	"github.com/tendermint/tendermint/libs/log"
 	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
 	"github.com/tendermint/tendermint/types"
 )
 
-// DashCoreSignerClient implements PrivValidator.
+// DashPrivValidator is a PrivValidator that uses Dash-specific logic +type DashPrivValidator interface { + types.PrivValidator + dashcore.QuorumVerifier + DashRPCClient() dashcore.Client +} + +// DashCoreSignerClient implements DashPrivValidator. // Handles remote validator connections that provide signing services type DashCoreSignerClient struct { dashCoreRPCClient dashcore.Client @@ -26,7 +33,7 @@ type DashCoreSignerClient struct { defaultQuorumType btcjson.LLMQType } -var _ types.PrivValidator = (*DashCoreSignerClient)(nil) +var _ DashPrivValidator = (*DashCoreSignerClient)(nil) // NewDashCoreSignerClient returns an instance of SignerClient. // it will start the endpoint (if not already started) @@ -232,7 +239,7 @@ func (sc *DashCoreSignerClient) SignVote( } blockSignBytes := types.VoteBlockSignBytes(chainID, protoVote) - blockMessageHash := crypto.Sha256(blockSignBytes) + blockMessageHash := crypto.Checksum(blockSignBytes) blockRequestID := types.VoteBlockRequestIDProto(protoVote) blockResponse, err := sc.dashCoreRPCClient.QuorumSign(quorumType, blockRequestID, blockMessageHash, quorumHash) @@ -261,7 +268,7 @@ func (sc *DashCoreSignerClient) SignVote( quorumType, tmbytes.Reverse(quorumHash), tmbytes.Reverse(blockRequestID), - tmbytes.Reverse(blockMessageHash), + tmbytes.Reverse(blockMessageHash[:]), ) coreSignID, err := hex.DecodeString(blockResponse.SignHash) @@ -290,7 +297,7 @@ func (sc *DashCoreSignerClient) SignVote( // Only sign the state when voting for the block if protoVote.BlockID.Hash != nil { stateSignBytes := stateID.SignBytes(chainID) - stateMessageHash := crypto.Sha256(stateSignBytes) + stateMessageHash := crypto.Checksum(stateSignBytes) stateRequestID := stateID.SignRequestID() stateResponse, err := sc.dashCoreRPCClient.QuorumSign( @@ -314,6 +321,26 @@ func (sc *DashCoreSignerClient) SignVote( protoVote.StateSignature = stateDecodedSignature } + if protoVote.Type == tmproto.PrecommitType { + if len(protoVote.Extension) > 0 { + extSignBytes := types.VoteExtensionSignBytes(chainID, protoVote) + extMsgHash := crypto.Checksum(extSignBytes) + extReqID := types.VoteExtensionRequestID(protoVote) + + extResp, err := sc.dashCoreRPCClient.QuorumSign(quorumType, extReqID, extMsgHash, quorumHash) + if err != nil { + return err + } + + protoVote.ExtensionSignature, err = hex.DecodeString(extResp.Signature) + if err != nil { + return err + } + } + } else if len(protoVote.Extension) > 0 { + return errors.New("unexpected vote extension - extensions are only allowed in precommits") + } + // fmt.Printf("Signed Vote proTxHash %s stateSignBytes %s block signature %s \n", // proTxHash, hex.EncodeToString(stateSignBytes), // hex.EncodeToString(stateDecodedSignature)) @@ -340,10 +367,10 @@ func (sc *DashCoreSignerClient) SignVote( // SignProposal requests a remote signer to sign a proposal func (sc *DashCoreSignerClient) SignProposal( ctx context.Context, chainID string, quorumType btcjson.LLMQType, quorumHash crypto.QuorumHash, proposalProto *tmproto.Proposal, -) ([]byte, error) { +) (tmbytes.HexBytes, error) { messageBytes := types.ProposalBlockSignBytes(chainID, proposalProto) - messageHash := crypto.Sha256(messageBytes) + messageHash := crypto.Checksum(messageBytes) requestIDHash := types.ProposalRequestIDProto(proposalProto) @@ -413,3 +440,22 @@ func (sc *DashCoreSignerClient) UpdatePrivateKey( func (sc *DashCoreSignerClient) GetPrivateKey(ctx context.Context, quorumHash crypto.QuorumHash) (crypto.PrivKey, error) { return nil, nil } + +// QuorumVerify implements dashcore.QuorumVerifier 
+func (sc *DashCoreSignerClient) QuorumVerify( + quorumType btcjson.LLMQType, + requestID tmbytes.HexBytes, + messageHash tmbytes.HexBytes, + signature tmbytes.HexBytes, + quorumHash tmbytes.HexBytes, +) (bool, error) { + return sc.dashCoreRPCClient.QuorumVerify(quorumType, requestID, messageHash, signature, quorumHash) +} + +// DashRPCClient implements DashPrivValidator +func (sc *DashCoreSignerClient) DashRPCClient() dashcore.Client { + if sc == nil { + return nil + } + return sc.dashCoreRPCClient +} diff --git a/privval/file.go b/privval/file.go index 448ed36289..cc31679a5d 100644 --- a/privval/file.go +++ b/privval/file.go @@ -4,28 +4,21 @@ import ( "bytes" "context" "encoding/hex" + "encoding/json" "errors" "fmt" - "io/ioutil" + "os" "strconv" - "time" - - "github.com/tendermint/tendermint/libs/log" + "sync" "github.com/dashevo/dashd-go/btcjson" - "github.com/tendermint/tendermint/crypto/bls12381" - - "github.com/gogo/protobuf/proto" - "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/internal/libs/protoio" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/crypto/bls12381" "github.com/tendermint/tendermint/internal/libs/tempfile" tmbytes "github.com/tendermint/tendermint/libs/bytes" - tmjson "github.com/tendermint/tendermint/libs/json" + "github.com/tendermint/tendermint/libs/log" tmos "github.com/tendermint/tendermint/libs/os" - tmtime "github.com/tendermint/tendermint/libs/time" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" ) @@ -39,14 +32,14 @@ const ( ) // A vote is either stepPrevote or stepPrecommit. -func voteToStep(vote *tmproto.Vote) int8 { +func voteToStep(vote *tmproto.Vote) (int8, error) { switch vote.Type { case tmproto.PrevoteType: - return stepPrevote + return stepPrevote, nil case tmproto.PrecommitType: - return stepPrecommit + return stepPrecommit, nil default: - panic(fmt.Sprintf("Unknown vote type: %v", vote.Type)) + return 0, fmt.Errorf("unknown vote type: %v", vote.Type) } } @@ -54,32 +47,58 @@ func voteToStep(vote *tmproto.Vote) int8 { // FilePVKey stores the immutable part of PrivValidator. type FilePVKey struct { + PrivateKeys map[string]crypto.QuorumKeys + // heightString -> quorumHash + UpdateHeights map[string]crypto.QuorumHash + // quorumHash -> heightString + FirstHeightOfQuorums map[string]string + ProTxHash crypto.ProTxHash + + filePath string +} + +type filePVKeyJSON struct { PrivateKeys map[string]crypto.QuorumKeys `json:"private_keys"` // heightString -> quorumHash UpdateHeights map[string]crypto.QuorumHash `json:"update_heights"` // quorumHash -> heightString FirstHeightOfQuorums map[string]string `json:"first_height_of_quorums"` ProTxHash crypto.ProTxHash `json:"pro_tx_hash"` +} - filePath string +func (pvKey FilePVKey) MarshalJSON() ([]byte, error) { + return json.Marshal(filePVKeyJSON{ + PrivateKeys: pvKey.PrivateKeys, + UpdateHeights: pvKey.UpdateHeights, + FirstHeightOfQuorums: pvKey.FirstHeightOfQuorums, + ProTxHash: pvKey.ProTxHash, + }) +} + +func (pvKey *FilePVKey) UnmarshalJSON(data []byte) error { + var key filePVKeyJSON + if err := json.Unmarshal(data, &key); err != nil { + return err + } + pvKey.PrivateKeys = key.PrivateKeys + pvKey.UpdateHeights = key.UpdateHeights + pvKey.FirstHeightOfQuorums = key.FirstHeightOfQuorums + pvKey.ProTxHash = key.ProTxHash + return nil } // Save persists the FilePVKey to its filePath. 
-func (pvKey FilePVKey) Save() { +func (pvKey FilePVKey) Save() error { outFile := pvKey.filePath if outFile == "" { - panic("cannot save PrivValidator key: filePath not set") + return errors.New("cannot save PrivValidator key: filePath not set") } - jsonBytes, err := tmjson.MarshalIndent(pvKey, "", " ") - if err != nil { - panic(err) - } - err = tempfile.WriteFileAtomic(outFile, jsonBytes, 0600) + data, err := json.MarshalIndent(pvKey, "", " ") if err != nil { - panic(err) + return err } - + return tempfile.WriteFileAtomic(outFile, data, 0600) } func (pvKey FilePVKey) ThresholdPublicKeyForQuorumHash(quorumHash crypto.QuorumHash) (crypto.PubKey, error) { @@ -93,7 +112,7 @@ func (pvKey FilePVKey) ThresholdPublicKeyForQuorumHash(quorumHash crypto.QuorumH // FilePVLastSignState stores the mutable part of PrivValidator. type FilePVLastSignState struct { - Height int64 `json:"height"` + Height int64 `json:"height,string"` Round int32 `json:"round"` Step int8 `json:"step"` BlockSignature []byte `json:"block_signature,omitempty"` @@ -104,14 +123,24 @@ type FilePVLastSignState struct { filePath string } -// CheckHRS checks the given height, round, step (HRS) against that of the +func (lss *FilePVLastSignState) reset() { + lss.Height = 0 + lss.Round = 0 + lss.Step = 0 + lss.BlockSignature = nil + lss.BlockSignBytes = nil + lss.StateSignature = nil + lss.StateSignBytes = nil +} + +// checkHRS checks the given height, round, step (HRS) against that of the // FilePVLastSignState. It returns an error if the arguments constitute a regression, // or if they match but the SignBytes are empty. // The returned boolean indicates whether the last Signature should be reused - // it returns true if the HRS matches the arguments and the SignBytes are not empty (indicating // we have already signed for this HRS, and can reuse the existing signature). // It panics if the HRS matches the arguments, there's a SignBytes, but no Signature. -func (lss *FilePVLastSignState) CheckHRS(height int64, round int32, step int8) (bool, error) { +func (lss *FilePVLastSignState) checkHRS(height int64, round int32, step int8) (bool, error) { if lss.Height > height { return false, fmt.Errorf("height regression. Got %v, last height %v", height, lss.Height) @@ -152,19 +181,16 @@ func (lss *FilePVLastSignState) CheckHRS(height int64, round int32, step int8) ( } // Save persists the FilePvLastSignState to its filePath. -func (lss *FilePVLastSignState) Save() { +func (lss *FilePVLastSignState) Save() error { outFile := lss.filePath if outFile == "" { - panic("cannot save FilePVLastSignState: filePath not set") - } - jsonBytes, err := tmjson.MarshalIndent(lss, "", " ") - if err != nil { - panic(err) + return errors.New("cannot save FilePVLastSignState: filePath not set") } - err = tempfile.WriteFileAtomic(outFile, jsonBytes, 0600) + jsonBytes, err := json.MarshalIndent(lss, "", " ") if err != nil { - panic(err) + return err } + return tempfile.WriteFileAtomic(outFile, jsonBytes, 0600) } //------------------------------------------------------------------------------- @@ -177,7 +203,7 @@ func (lss *FilePVLastSignState) Save() { type FilePV struct { Key FilePVKey LastSignState FilePVLastSignState - mtx tmsync.RWMutex + mtx sync.RWMutex } // FilePVOption ... 
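// Editor's aside: FilePVKey (above) keeps its unexported filePath out of the
// serialized form by marshaling through the filePVKeyJSON shadow struct. The
// same technique in miniature (illustrative sketch; Key and keyJSON are
// made-up names, not part of this change):
//
//	type Key struct {
//		Name     string
//		filePath string // runtime-only, never serialized
//	}
//
//	type keyJSON struct {
//		Name string `json:"name"`
//	}
//
//	func (k Key) MarshalJSON() ([]byte, error) {
//		return json.Marshal(keyJSON{Name: k.Name})
//	}
//
//	func (k *Key) UnmarshalJSON(data []byte) error {
//		var kj keyJSON
//		if err := json.Unmarshal(data, &kj); err != nil {
//			return err
//		}
//		k.Name = kj.Name
//		return nil
//	}
//
// Relatedly, FilePVLastSignState now tags Height with `json:"height,string"`,
// so int64 heights round-trip as JSON strings ("height": "1"), matching the
// fixtures in file_test.go below.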
@@ -192,6 +218,10 @@ func NewFilePVOneKey( panic("error setting incorrect proTxHash size in NewFilePV") } + if thresholdPublicKey == nil { + thresholdPublicKey = privKey.PubKey() + } + quorumKeys := crypto.QuorumKeys{ PrivKey: privKey, PubKey: privKey.PubKey(), @@ -335,18 +365,18 @@ func LoadFilePVEmptyState(keyFilePath, stateFilePath string) (*FilePV, error) { // If loadState is true, we load from the stateFilePath. Otherwise, we use an empty LastSignState. func loadFilePV(keyFilePath, stateFilePath string, loadState bool) (*FilePV, error) { - keyJSONBytes, err := ioutil.ReadFile(keyFilePath) + keyJSONBytes, err := os.ReadFile(keyFilePath) if err != nil { return nil, err } pvKey := FilePVKey{} - err = tmjson.Unmarshal(keyJSONBytes, &pvKey) + err = json.Unmarshal(keyJSONBytes, &pvKey) if err != nil { return nil, fmt.Errorf("error reading PrivValidator key from %v: %w", keyFilePath, err) } // verify proTxHash is 32 bytes if it exists if pvKey.ProTxHash != nil && len(pvKey.ProTxHash) != crypto.ProTxHashSize { - tmos.Exit(fmt.Sprintf("loadFilePV proTxHash must be 32 bytes in key file path %s", keyFilePath)) + return nil, fmt.Errorf("loadFilePV proTxHash must be 32 bytes in key file path %s", keyFilePath) } pvKey.filePath = keyFilePath @@ -354,11 +384,11 @@ func loadFilePV(keyFilePath, stateFilePath string, loadState bool) (*FilePV, err pvState := FilePVLastSignState{} if loadState { - stateJSONBytes, err := ioutil.ReadFile(stateFilePath) + stateJSONBytes, err := os.ReadFile(stateFilePath) if err != nil { return nil, err } - err = tmjson.Unmarshal(stateJSONBytes, &pvState) + err = json.Unmarshal(stateJSONBytes, &pvState) if err != nil { return nil, fmt.Errorf("error reading PrivValidator state from %v: %w", stateFilePath, err) } @@ -375,17 +405,19 @@ func loadFilePV(keyFilePath, stateFilePath string, loadState bool) (*FilePV, err // LoadOrGenFilePV loads a FilePV from the given filePaths // or else generates a new one and saves it to the filePaths. func LoadOrGenFilePV(keyFilePath, stateFilePath string) (*FilePV, error) { - var ( - pv *FilePV - err error - ) if tmos.FileExists(keyFilePath) { - pv, err = LoadFilePV(keyFilePath, stateFilePath) - } else { - pv = GenFilePV(keyFilePath, stateFilePath) - pv.Save() + pv, err := LoadFilePV(keyFilePath, stateFilePath) + if err != nil { + return nil, err + } + return pv, nil } - return pv, err + pv := GenFilePV(keyFilePath, stateFilePath) + if err := pv.Save(); err != nil { + return nil, err + } + + return pv, nil } // GetPubKey returns the public key of the validator. @@ -523,12 +555,18 @@ func (pv *FilePV) GetProTxHash(context context.Context) (crypto.ProTxHash, error // SignVote signs a canonical representation of the vote, along with the // chainID. Implements PrivValidator. 
func (pv *FilePV) SignVote( - ctx context.Context, chainID string, quorumType btcjson.LLMQType, quorumHash crypto.QuorumHash, - vote *tmproto.Vote, stateID types.StateID, logger log.Logger) error { + ctx context.Context, + chainID string, + quorumType btcjson.LLMQType, + quorumHash crypto.QuorumHash, + vote *tmproto.Vote, + stateID types.StateID, + logger log.Logger, +) error { pv.mtx.RLock() defer pv.mtx.RUnlock() - if err := pv.signVote(chainID, quorumType, quorumHash, vote, stateID); err != nil { + if err := pv.signVote(ctx, chainID, quorumType, quorumHash, vote, stateID); err != nil { return fmt.Errorf("error signing vote: %v", err) } return nil @@ -537,12 +575,16 @@ func (pv *FilePV) SignVote( // SignProposal signs a canonical representation of the proposal, along with // the chainID. Implements PrivValidator. func (pv *FilePV) SignProposal( - ctx context.Context, chainID string, quorumType btcjson.LLMQType, quorumHash crypto.QuorumHash, proposal *tmproto.Proposal, -) ([]byte, error) { + ctx context.Context, + chainID string, + quorumType btcjson.LLMQType, + quorumHash crypto.QuorumHash, + proposal *tmproto.Proposal, +) (tmbytes.HexBytes, error) { pv.mtx.RLock() defer pv.mtx.RUnlock() - signID, err := pv.signProposal(chainID, quorumType, quorumHash, proposal) + signID, err := pv.signProposal(ctx, chainID, quorumType, quorumHash, proposal) if err != nil { return signID, fmt.Errorf("error signing proposal: %v", err) } @@ -550,29 +592,23 @@ func (pv *FilePV) SignProposal( } // Save persists the FilePV to disk. -func (pv *FilePV) Save() { +func (pv *FilePV) Save() error { pv.mtx.Lock() defer pv.mtx.Unlock() - pv.Key.Save() - pv.LastSignState.Save() + if err := pv.Key.Save(); err != nil { + return err + } + return pv.LastSignState.Save() } // Reset resets all fields in the FilePV. // NOTE: Unsafe! -func (pv *FilePV) Reset() { +func (pv *FilePV) Reset() error { pv.mtx.Lock() - var blockSig []byte - var stateSig []byte - pv.LastSignState.Height = 0 - pv.LastSignState.Round = 0 - pv.LastSignState.Step = 0 - pv.LastSignState.BlockSignature = blockSig - pv.LastSignState.StateSignature = stateSig - pv.LastSignState.BlockSignBytes = nil - pv.LastSignState.StateSignBytes = nil + pv.LastSignState.reset() pv.mtx.Unlock() - pv.Save() + return pv.Save() } // String returns a string representation of the FilePV. @@ -616,14 +652,23 @@ func (pv *FilePV) UpdatePrivateKey( // It may need to set the timestamp as well if the vote is otherwise the same as // a previously signed vote (ie. we crashed after signing but before the vote hit the WAL). func (pv *FilePV) signVote( - chainID string, quorumType btcjson.LLMQType, quorumHash crypto.QuorumHash, - vote *tmproto.Vote, stateID types.StateID, + ctx context.Context, + chainID string, + quorumType btcjson.LLMQType, + quorumHash crypto.QuorumHash, + vote *tmproto.Vote, + stateID types.StateID, ) error { - height, round, step := vote.Height, vote.Round, voteToStep(vote) + step, err := voteToStep(vote) + if err != nil { + return err + } + height := vote.Height + round := vote.Round lss := pv.LastSignState - sameHRS, err := lss.CheckHRS(height, round, step) + sameHRS, err := lss.checkHRS(height, round, step) if err != nil { return err } @@ -641,25 +686,40 @@ func (pv *FilePV) signVote( stateSignBytes := stateID.SignBytes(chainID) + privKey, err := pv.getPrivateKey(ctx, quorumHash) + if err != nil { + return err + } + + // Vote extensions are non-deterministic, so it is possible that an + // application may have created a different extension. 
We therefore always + // re-sign the vote extensions of precommits. For prevotes, the extension + // signature will always be empty. + var extSig []byte + if vote.Type == tmproto.PrecommitType { + extSignID := types.VoteExtensionSignID(chainID, vote, quorumType, quorumHash) + extSig, err = privKey.SignDigest(extSignID) + if err != nil { + return err + } + } else if len(vote.Extension) > 0 { + return errors.New("unexpected vote extension - extensions are only allowed in precommits") + } + // We might crash before writing to the wal, // causing us to try to re-sign for the same HRS. // If signbytes are the same, use the last signature. // If they only differ by timestamp, use last timestamp and signature // Otherwise, return error if sameHRS { - if bytes.Equal(blockSignBytes, lss.BlockSignBytes) && bytes.Equal(stateSignBytes, lss.StateSignBytes) { vote.BlockSignature = lss.BlockSignature vote.StateSignature = lss.StateSignature } else { - err = fmt.Errorf("conflicting data") + return errors.New("conflicting data") } - return err - } - - privKey, err := pv.getPrivateKey(context.TODO(), quorumHash) - if err != nil { - return err + vote.ExtensionSignature = extSig + return nil } sigBlock, err := privKey.SignDigest(blockSignID) @@ -683,10 +743,14 @@ func (pv *FilePV) signVote( // sigBlock, vote) // } - pv.saveSigned(height, round, step, blockSignBytes, sigBlock, stateSignBytes, sigState) + err = pv.saveSigned(height, round, step, blockSignBytes, sigBlock, stateSignBytes, sigState) + if err != nil { + return err + } vote.BlockSignature = sigBlock vote.StateSignature = sigState + vote.ExtensionSignature = extSig return nil } @@ -695,13 +759,17 @@ func (pv *FilePV) signVote( // It may need to set the timestamp as well if the proposal is otherwise the same as // a previously signed proposal ie. we crashed after signing but before the proposal hit the WAL). func (pv *FilePV) signProposal( - chainID string, quorumType btcjson.LLMQType, quorumHash crypto.QuorumHash, proposal *tmproto.Proposal, + ctx context.Context, + chainID string, + quorumType btcjson.LLMQType, + quorumHash crypto.QuorumHash, + proposal *tmproto.Proposal, ) ([]byte, error) { height, round, step := proposal.Height, proposal.Round, stepPropose lss := pv.LastSignState - sameHRS, err := lss.CheckHRS(height, round, step) + sameHRS, err := lss.checkHRS(height, round, step) if err != nil { return nil, err } @@ -713,21 +781,15 @@ func (pv *FilePV) signProposal( // We might crash before writing to the wal, // causing us to try to re-sign for the same HRS. // If signbytes are the same, use the last signature. 
- // If they only differ by timestamp, use last timestamp and signature - // Otherwise, return error if sameHRS { - if bytes.Equal(blockSignBytes, lss.BlockSignBytes) { - proposal.Signature = lss.BlockSignBytes - } else if timestamp, ok := checkProposalsOnlyDifferByTimestamp(lss.BlockSignBytes, blockSignBytes); ok { - proposal.Timestamp = timestamp - proposal.Signature = lss.BlockSignBytes - } else { - err = fmt.Errorf("conflicting data") + if !bytes.Equal(blockSignBytes, lss.BlockSignBytes) { + return nil, errors.New("conflicting data") } + proposal.Signature = lss.BlockSignBytes return blockSignID, err } - privKey, err := pv.getPrivateKey(context.TODO(), quorumHash) + privKey, err := pv.getPrivateKey(ctx, quorumHash) if err != nil { return blockSignID, err } @@ -742,15 +804,24 @@ func (pv *FilePV) signProposal( // pv.Key.ProTxHash, // proposal.Height, pv.Key.PrivKey.PubKey().Bytes(), blockSignID, blockSig) - pv.saveSigned(height, round, step, blockSignBytes, blockSig, nil, nil) + err = pv.saveSigned(height, round, step, blockSignBytes, blockSig, nil, nil) + if err != nil { + return nil, err + } proposal.Signature = blockSig return blockSignID, nil } // Persist height/round/step and signature -func (pv *FilePV) saveSigned(height int64, round int32, step int8, - blockSignBytes []byte, blockSig []byte, stateSignBytes []byte, stateSig []byte) { - +func (pv *FilePV) saveSigned( + height int64, + round int32, + step int8, + blockSignBytes []byte, + blockSig []byte, + stateSignBytes []byte, + stateSig []byte, +) error { pv.LastSignState.Height = height pv.LastSignState.Round = round pv.LastSignState.Step = step @@ -758,27 +829,5 @@ func (pv *FilePV) saveSigned(height int64, round int32, step int8, pv.LastSignState.BlockSignBytes = blockSignBytes pv.LastSignState.StateSignature = stateSig pv.LastSignState.StateSignBytes = stateSignBytes - pv.LastSignState.Save() -} - -//----------------------------------------------------------------------------------------- - -// returns the timestamp from the lastSignBytes. 
-// returns true if the only difference in the proposals is their timestamp -func checkProposalsOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.Time, bool) { - var lastProposal, newProposal tmproto.CanonicalProposal - if err := protoio.UnmarshalDelimited(lastSignBytes, &lastProposal); err != nil { - panic(fmt.Sprintf("LastSignBytes cannot be unmarshalled into proposal: %v", err)) - } - if err := protoio.UnmarshalDelimited(newSignBytes, &newProposal); err != nil { - panic(fmt.Sprintf("signBytes cannot be unmarshalled into proposal: %v", err)) - } - - lastTime := lastProposal.Timestamp - // set the times to the same value and check equality - now := tmtime.Now() - lastProposal.Timestamp = now - newProposal.Timestamp = now - - return lastTime, proto.Equal(&newProposal, &lastProposal) + return pv.LastSignState.Save() } diff --git a/privval/file_test.go b/privval/file_test.go index 40f0670373..9b41bf2740 100644 --- a/privval/file_test.go +++ b/privval/file_test.go @@ -4,64 +4,55 @@ import ( "context" "encoding/base64" "encoding/hex" + "encoding/json" "fmt" - "io/ioutil" "os" "testing" "time" - "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/crypto/bls12381" - + "github.com/dashevo/dashd-go/btcjson" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/crypto/tmhash" - tmjson "github.com/tendermint/tendermint/libs/json" + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto/bls12381" + "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" - tmtime "github.com/tendermint/tendermint/libs/time" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" ) func TestGenLoadValidator(t *testing.T) { - assert := assert.New(t) - - tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_") - require.Nil(t, err) - tempStateFile, err := ioutil.TempFile("", "priv_validator_state_") - require.Nil(t, err) - - privVal := GenFilePV(tempKeyFile.Name(), tempStateFile.Name()) - require.NoError(t, err) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + privVal, tempKeyFileName, tempStateFileName := newTestFilePV(t) height := int64(100) privVal.LastSignState.Height = height - privVal.Save() - proTxHash, err := privVal.GetProTxHash(context.Background()) - assert.NoError(err) - publicKey, err := privVal.GetFirstPubKey(context.Background()) - assert.NoError(err) - privVal, err = LoadFilePV(tempKeyFile.Name(), tempStateFile.Name()) - assert.NoError(err) - proTxHash2, err := privVal.GetProTxHash(context.Background()) - assert.NoError(err) - publicKey2, err := privVal.GetFirstPubKey(context.Background()) - assert.NoError(err) - assert.Equal(proTxHash, proTxHash2, "expected privval proTxHashes to be the same") - assert.Equal(publicKey, publicKey2, "expected privval public keys to be the same") - assert.Equal(height, privVal.LastSignState.Height, "expected privval.LastHeight to have been saved") + require.NoError(t, privVal.Save()) + proTxHash, err := privVal.GetProTxHash(ctx) + require.NoError(t, err) + publicKey, err := privVal.GetFirstPubKey(ctx) + require.NoError(t, err) + privVal, err = LoadFilePV(tempKeyFileName, tempStateFileName) + require.NoError(t, err) + proTxHash2, err := privVal.GetProTxHash(ctx) + require.NoError(t, err) + publicKey2, err := privVal.GetFirstPubKey(ctx) + require.NoError(t, err) + require.Equal(t, proTxHash, proTxHash2, "expected privval 
proTxHashes to be the same") + require.Equal(t, publicKey, publicKey2, "expected privval public keys to be the same") + require.Equal(t, height, privVal.LastSignState.Height, "expected privval.LastHeight to have been saved") } func TestResetValidator(t *testing.T) { - tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_") - require.Nil(t, err) - tempStateFile, err := ioutil.TempFile("", "priv_validator_state_") - require.Nil(t, err) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - privVal := GenFilePV(tempKeyFile.Name(), tempStateFile.Name()) - require.NoError(t, err) - emptyState := FilePVLastSignState{filePath: tempStateFile.Name()} + privVal, _, tempStateFileName := newTestFilePV(t) + emptyState := FilePVLastSignState{filePath: tempStateFileName} + quorumHash, err := privVal.GetFirstQuorumHash(ctx) + assert.NoError(t, err) // new priv val has empty state assert.Equal(t, privVal.LastSignState, emptyState) @@ -69,32 +60,31 @@ func TestResetValidator(t *testing.T) { // test vote height, round := int64(10), int32(1) voteType := tmproto.PrevoteType - randBytes := tmrand.Bytes(tmhash.Size) + randBytes := tmrand.Bytes(crypto.HashSize) blockID := types.BlockID{Hash: randBytes, PartSetHeader: types.PartSetHeader{}} stateID := types.RandStateID().WithHeight(height - 1) - vote := newVote(privVal.Key.ProTxHash, 0, height, round, voteType, blockID, stateID) - quorumHash, err := privVal.GetFirstQuorumHash(context.Background()) - assert.NoError(t, err) - err = privVal.SignVote(context.Background(), "mychainid", 0, quorumHash, vote.ToProto(), stateID, nil) + vote := newVote(privVal.Key.ProTxHash, 0, height, round, voteType, blockID, nil) + err = privVal.SignVote(ctx, "mychainid", 0, quorumHash, vote.ToProto(), stateID, nil) assert.NoError(t, err, "expected no error signing vote") // priv val after signing is not same as empty assert.NotEqual(t, privVal.LastSignState, emptyState) // priv val after AcceptNewConnection is same as empty - privVal.Reset() + require.NoError(t, privVal.Reset()) assert.Equal(t, privVal.LastSignState, emptyState) } func TestLoadOrGenValidator(t *testing.T) { - assert := assert.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_") - require.Nil(t, err) - tempStateFile, err := ioutil.TempFile("", "priv_validator_state_") - require.Nil(t, err) + tempKeyFile, err := os.CreateTemp(t.TempDir(), "priv_validator_key_") + require.NoError(t, err) + tempStateFile, err := os.CreateTemp(t.TempDir(), "priv_validator_state_") + require.NoError(t, err) tempKeyFilePath := tempKeyFile.Name() if err := os.Remove(tempKeyFilePath); err != nil { @@ -106,24 +96,22 @@ func TestLoadOrGenValidator(t *testing.T) { } privVal, err := LoadOrGenFilePV(tempKeyFilePath, tempStateFilePath) - assert.NoError(err) - proTxHash, err := privVal.GetProTxHash(context.Background()) - assert.NoError(err) - publicKey, err := privVal.GetFirstPubKey(context.Background()) - assert.NoError(err) + require.NoError(t, err) + proTxHash, err := privVal.GetProTxHash(ctx) + require.NoError(t, err) + publicKey, err := privVal.GetFirstPubKey(ctx) + require.NoError(t, err) privVal, err = LoadOrGenFilePV(tempKeyFilePath, tempStateFilePath) - assert.NoError(err) - proTxHash2, err := privVal.GetProTxHash(context.Background()) - assert.NoError(err) - publicKey2, err := privVal.GetFirstPubKey(context.Background()) - assert.NoError(err) - assert.Equal(proTxHash, proTxHash2, "expected privval proTxHashes to be the 
same") - assert.Equal(publicKey, publicKey2, "expected privval public keys to be the same") + require.NoError(t, err) + proTxHash2, err := privVal.GetProTxHash(ctx) + require.NoError(t, err) + publicKey2, err := privVal.GetFirstPubKey(ctx) + require.NoError(t, err) + require.Equal(t, proTxHash, proTxHash2, "expected privval proTxHashes to be the same") + require.Equal(t, publicKey, publicKey2, "expected privval public keys to be the same") } func TestUnmarshalValidatorState(t *testing.T) { - assert, require := assert.New(t), require.New(t) - // create some fixed values serialized := `{ "height": "1", @@ -132,23 +120,21 @@ func TestUnmarshalValidatorState(t *testing.T) { }` val := FilePVLastSignState{} - err := tmjson.Unmarshal([]byte(serialized), &val) - require.Nil(err, "%+v", err) + err := json.Unmarshal([]byte(serialized), &val) + require.NoError(t, err) // make sure the values match - assert.EqualValues(val.Height, 1) - assert.EqualValues(val.Round, 1) - assert.EqualValues(val.Step, 1) + assert.EqualValues(t, val.Height, 1) + assert.EqualValues(t, val.Round, 1) + assert.EqualValues(t, val.Step, 1) // export it and make sure it is the same - out, err := tmjson.Marshal(val) - require.Nil(err, "%+v", err) - assert.JSONEq(serialized, string(out)) + out, err := json.Marshal(val) + require.NoError(t, err) + assert.JSONEq(t, serialized, string(out)) } func TestUnmarshalValidatorKey(t *testing.T) { - assert, require := assert.New(t), require.New(t) - // create some fixed values privKey := bls12381.GenPrivKey() quorumHash := crypto.RandQuorumHash() @@ -183,39 +169,34 @@ func TestUnmarshalValidatorKey(t *testing.T) { }`, quorumHash, pubB64, privB64, pubB64, proTxHash) val := FilePVKey{} - err := tmjson.Unmarshal([]byte(serialized), &val) - require.Nil(err, "%+v", err) + err := json.Unmarshal([]byte(serialized), &val) + require.NoError(t, err) // make sure the values match - assert.EqualValues(proTxHash, val.ProTxHash) - assert.Len(val.PrivateKeys, 1) + assert.EqualValues(t, proTxHash, val.ProTxHash) + assert.Len(t, val.PrivateKeys, 1) for quorumHashString, quorumKeys := range val.PrivateKeys { quorumHash2, err := hex.DecodeString(quorumHashString) - assert.NoError(err) - assert.EqualValues(quorumHash, quorumHash2) - assert.EqualValues(pubKey, quorumKeys.PubKey) - assert.EqualValues(privKey, quorumKeys.PrivKey) - assert.EqualValues(pubKey, quorumKeys.ThresholdPublicKey) + require.NoError(t, err) + require.EqualValues(t, quorumHash, quorumHash2) + require.EqualValues(t, pubKey, quorumKeys.PubKey) + require.EqualValues(t, privKey, quorumKeys.PrivKey) + require.EqualValues(t, pubKey, quorumKeys.ThresholdPublicKey) } // export it and make sure it is the same - out, err := tmjson.Marshal(val) - require.Nil(err, "%+v", err) - assert.JSONEq(serialized, string(out)) + out, err := json.Marshal(val) + require.Nil(t, err, "%+v", err) + assert.JSONEq(t, serialized, string(out)) } func TestSignVote(t *testing.T) { - assert := assert.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_") - require.Nil(t, err) - tempStateFile, err := ioutil.TempFile("", "priv_validator_state_") - require.Nil(t, err) + privVal, _, _ := newTestFilePV(t) - privVal := GenFilePV(tempKeyFile.Name(), tempStateFile.Name()) - require.NoError(t, err) - - randbytes := tmrand.Bytes(tmhash.Size) - randbytes2 := tmrand.Bytes(tmhash.Size) + randbytes := tmrand.Bytes(crypto.HashSize) + randbytes2 := tmrand.Bytes(crypto.HashSize) block1 := 
types.BlockID{Hash: randbytes, PartSetHeader: types.PartSetHeader{Total: 5, Hash: randbytes}} @@ -228,54 +209,50 @@ func TestSignVote(t *testing.T) { stateID := types.RandStateID().WithHeight(height - 1) // sign a vote for first time - vote := newVote(privVal.Key.ProTxHash, 0, height, round, voteType, block1, stateID) + vote := newVote(privVal.Key.ProTxHash, 0, height, round, voteType, block1, nil) v := vote.ToProto() - quorumHash, err := privVal.GetFirstQuorumHash(context.Background()) - assert.NoError(err) - err = privVal.SignVote(context.Background(), "mychainid", 0, quorumHash, v, stateID, nil) - assert.NoError(err, "expected no error signing vote") + + quorumHash, err := privVal.GetFirstQuorumHash(ctx) + assert.NoError(t, err) + + err = privVal.SignVote(ctx, "mychainid", 0, quorumHash, v, stateID, nil) + assert.NoError(t, err, "expected no error signing vote") // try to sign the same vote again; should be fine - err = privVal.SignVote(context.Background(), "mychainid", 0, quorumHash, v, stateID, nil) - assert.NoError(err, "expected no error on signing same vote") + err = privVal.SignVote(ctx, "mychainid", 0, quorumHash, v, stateID, nil) + assert.NoError(t, err, "expected no error on signing same vote") // now try some bad votes cases := []*types.Vote{ - newVote(privVal.Key.ProTxHash, 0, height, round-1, voteType, block1, stateID), // round regression - newVote(privVal.Key.ProTxHash, 0, height-1, round, voteType, block1, stateID), // height regression - newVote(privVal.Key.ProTxHash, 0, height-2, round+4, voteType, block1, stateID), // height reg and diff round - newVote(privVal.Key.ProTxHash, 0, height, round, voteType, block2, stateID), // different block + newVote(privVal.Key.ProTxHash, 0, height, round-1, voteType, block1, nil), // round regression + newVote(privVal.Key.ProTxHash, 0, height-1, round, voteType, block1, nil), // height regression + newVote(privVal.Key.ProTxHash, 0, height-2, round+4, voteType, block1, nil), // height regression and different round + newVote(privVal.Key.ProTxHash, 0, height, round, voteType, block2, nil), // different block } for _, c := range cases { - cpb := c.ToProto() - err = privVal.SignVote(context.Background(), "mychainid", 0, crypto.QuorumHash{}, cpb, stateID, nil) - assert.Error(err, "expected error on signing conflicting vote") + assert.Error(t, privVal.SignVote(ctx, "mychainid", 0, crypto.QuorumHash{}, c.ToProto(), stateID, nil), + "expected error on signing conflicting vote") } // try signing a vote with a different time stamp blockSignature := vote.BlockSignature stateSignature := vote.StateSignature - err = privVal.SignVote(context.Background(), "mychainid", 0, crypto.QuorumHash{}, v, stateID, nil) - assert.NoError(err) - assert.Equal(blockSignature, vote.BlockSignature) - assert.Equal(stateSignature, vote.StateSignature) + err = privVal.SignVote(ctx, "mychainid", 0, quorumHash, v, stateID, nil) + assert.NoError(t, err) + assert.Equal(t, blockSignature, vote.BlockSignature) + assert.Equal(t, stateSignature, vote.StateSignature) } func TestSignProposal(t *testing.T) { - assert := assert.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_") - require.Nil(t, err) - tempStateFile, err := ioutil.TempFile("", "priv_validator_state_") - require.Nil(t, err) + privVal, _, _ := newTestFilePV(t) - privVal := GenFilePV(tempKeyFile.Name(), tempStateFile.Name()) - require.NoError(t, err) - - randbytes := tmrand.Bytes(tmhash.Size) - randbytes2 := 
tmrand.Bytes(tmhash.Size) + randbytes := tmrand.Bytes(crypto.HashSize) + randbytes2 := tmrand.Bytes(crypto.HashSize) block1 := types.BlockID{Hash: randbytes, PartSetHeader: types.PartSetHeader{Total: 5, Hash: randbytes}} @@ -283,18 +260,19 @@ func TestSignProposal(t *testing.T) { PartSetHeader: types.PartSetHeader{Total: 10, Hash: randbytes2}} height, round := int64(10), int32(1) - quorumHash, err := privVal.GetFirstQuorumHash(context.Background()) - assert.NoError(err) + quorumHash, err := privVal.GetFirstQuorumHash(ctx) + assert.NoError(t, err) // sign a proposal for first time proposal := newProposal(height, 1, round, block1) pbp := proposal.ToProto() - _, err = privVal.SignProposal(context.Background(), "mychainid", 0, quorumHash, pbp) - assert.NoError(err, "expected no error signing proposal") + + _, err = privVal.SignProposal(ctx, "mychainid", 0, quorumHash, pbp) + assert.NoError(t, err, "expected no error signing proposal") // try to sign the same proposal again; should be fine - _, err = privVal.SignProposal(context.Background(), "mychainid", 0, quorumHash, pbp) - assert.NoError(err, "expected no error on signing same proposal") + _, err = privVal.SignProposal(ctx, "mychainid", 0, quorumHash, pbp) + assert.NoError(t, err, "expected no error on signing same proposal") // now try some bad Proposals cases := []*types.Proposal{ @@ -302,63 +280,120 @@ func TestSignProposal(t *testing.T) { newProposal(height-1, 1, round, block1), // height regression newProposal(height-2, 1, round+4, block1), // height regression and different round newProposal(height, 1, round, block2), // different block + newProposal(height, 1, round, block1), // different timestamp } for _, c := range cases { - _, err = privVal.SignProposal(context.Background(), "mychainid", 0, crypto.QuorumHash{}, c.ToProto()) - assert.Error(err, "expected error on signing conflicting proposal") + _, err = privVal.SignProposal(ctx, "mychainid", 0, crypto.QuorumHash{}, c.ToProto()) + assert.Error(t, err, "expected error on signing conflicting proposal") } - - // try signing a proposal with a different time stamp - sig := proposal.Signature - proposal.Timestamp = proposal.Timestamp.Add(time.Duration(1000)) - _, err = privVal.SignProposal(context.Background(), "mychainid", 0, crypto.QuorumHash{}, pbp) - assert.NoError(err) - assert.Equal(sig, proposal.Signature) } func TestDifferByTimestamp(t *testing.T) { - tempKeyFile, err := ioutil.TempFile("", "priv_validator_key_") - require.Nil(t, err) - tempStateFile, err := ioutil.TempFile("", "priv_validator_state_") - require.Nil(t, err) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + tempKeyFile, err := os.CreateTemp(t.TempDir(), "priv_validator_key_") + require.NoError(t, err) + tempStateFile, err := os.CreateTemp(t.TempDir(), "priv_validator_state_") + require.NoError(t, err) privVal := GenFilePV(tempKeyFile.Name(), tempStateFile.Name()) require.NoError(t, err) - randbytes := tmrand.Bytes(tmhash.Size) + randbytes := tmrand.Bytes(crypto.HashSize) block1 := types.BlockID{Hash: randbytes, PartSetHeader: types.PartSetHeader{Total: 5, Hash: randbytes}} height, round := int64(10), int32(1) chainID := "mychainid" - quorumHash, err := privVal.GetFirstQuorumHash(context.Background()) + quorumHash, err := privVal.GetFirstQuorumHash(ctx) assert.NoError(t, err) // test proposal { proposal := newProposal(height, 1, round, block1) pb := proposal.ToProto() - _, err := privVal.SignProposal(context.Background(), chainID, 0, quorumHash, pb) + _, err := 
privVal.SignProposal(ctx, chainID, 0, quorumHash, pb) assert.NoError(t, err, "expected no error signing proposal") - signBytes := types.ProposalBlockSignBytes(chainID, pb) - sig := proposal.Signature - timeStamp := proposal.Timestamp - - // manipulate the timestamp. should get changed back + // manipulate the timestamp pb.Timestamp = pb.Timestamp.Add(time.Millisecond) var emptySig []byte proposal.Signature = emptySig - _, err = privVal.SignProposal(context.Background(), "mychainid", 0, quorumHash, pb) - assert.NoError(t, err, "expected no error on signing same proposal") + _, err = privVal.SignProposal(ctx, "mychainid", 0, quorumHash, pb) + require.Error(t, err) + } +} + +func TestVoteExtensionsAreAlwaysSigned(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + const ( + chainID = "mychainid" + quorumType = btcjson.LLMQType_5_60 + ) + + logger := log.NewTestingLogger(t) - assert.Equal(t, timeStamp, pb.Timestamp) - assert.Equal(t, signBytes, types.ProposalBlockSignBytes(chainID, pb)) - assert.Equal(t, sig, proposal.Signature) + privVal, _, _ := newTestFilePV(t) + proTxHash, err := privVal.GetProTxHash(ctx) + require.NoError(t, err) + quorumHash, err := privVal.GetFirstQuorumHash(ctx) + require.NoError(t, err) + pubKey, err := privVal.GetPubKey(ctx, quorumHash) + require.NoError(t, err) + + blockID := types.BlockID{ + Hash: tmrand.Bytes(crypto.HashSize), + PartSetHeader: types.PartSetHeader{Total: 5, Hash: tmrand.Bytes(crypto.HashSize)}, } + + height, round := int64(10), int32(1) + voteType := tmproto.PrecommitType + stateID := types.RandStateID().WithHeight(height - 1) + ext := []byte("extension") + // We initially sign this vote without an extension + vote1 := newVote(proTxHash, 0, height, round, voteType, blockID, ext) + vpb1 := vote1.ToProto() + + err = privVal.SignVote(ctx, chainID, quorumType, quorumHash, vpb1, stateID, logger) + assert.NoError(t, err, "expected no error signing vote") + assert.NotNil(t, vpb1.ExtensionSignature) + + extSignID1 := types.VoteExtensionSignID(chainID, vpb1, quorumType, quorumHash) + assert.True(t, pubKey.VerifySignatureDigest(extSignID1, vpb1.ExtensionSignature)) + + // We duplicate this vote precisely, including its timestamp, but change + // its extension + vote2 := vote1.Copy() + vote2.Extension = []byte("new extension") + vpb2 := vote2.ToProto() + + err = privVal.SignVote(ctx, chainID, quorumType, quorumHash, vpb2, stateID, logger) + assert.NoError(t, err, "expected no error signing same vote with manipulated vote extension") + + // We need to ensure that a valid new extension signature has been created + // that validates against the vote extension sign bytes with the new + // extension, and does not validate against the vote extension sign bytes + // with the old extension. 
+ extSignID2 := types.VoteExtensionSignID(chainID, vpb2, quorumType, quorumHash) + assert.True(t, pubKey.VerifySignatureDigest(extSignID2, vpb2.ExtensionSignature)) + assert.False(t, pubKey.VerifySignatureDigest(extSignID1, vpb2.ExtensionSignature)) + + vpb2.BlockSignature = nil + vpb2.StateSignature = nil + vpb2.ExtensionSignature = nil + + err = privVal.SignVote(ctx, chainID, quorumType, quorumHash, vpb2, stateID, logger) + assert.NoError(t, err, "expected no error signing same vote with manipulated timestamp and vote extension") + + extSignID3 := types.VoteExtensionSignID(chainID, vpb2, quorumType, quorumHash) + assert.True(t, pubKey.VerifySignatureDigest(extSignID3, vpb2.ExtensionSignature)) + assert.False(t, pubKey.VerifySignatureDigest(extSignID1, vpb2.ExtensionSignature)) } func newVote(proTxHash types.ProTxHash, idx int32, height int64, round int32, - typ tmproto.SignedMsgType, blockID types.BlockID, stateID types.StateID) *types.Vote { + typ tmproto.SignedMsgType, blockID types.BlockID, ext []byte) *types.Vote { return &types.Vote{ ValidatorProTxHash: proTxHash, ValidatorIndex: idx, @@ -366,6 +401,7 @@ func newVote(proTxHash types.ProTxHash, idx int32, height int64, round int32, Round: round, Type: typ, BlockID: blockID, + Extension: ext, } } @@ -375,6 +411,18 @@ func newProposal(height int64, coreChainLockedHeight uint32, round int32, blockI CoreChainLockedHeight: coreChainLockedHeight, Round: round, BlockID: blockID, - Timestamp: tmtime.Now(), + Timestamp: time.Now(), } } + +func newTestFilePV(t *testing.T) (*FilePV, string, string) { + tempKeyFile, err := os.CreateTemp(t.TempDir(), "priv_validator_key_") + require.NoError(t, err) + tempStateFile, err := os.CreateTemp(t.TempDir(), "priv_validator_state_") + require.NoError(t, err) + + privVal := GenFilePV(tempKeyFile.Name(), tempStateFile.Name()) + require.NoError(t, err) + + return privVal, tempKeyFile.Name(), tempStateFile.Name() +} diff --git a/privval/grpc/client.go b/privval/grpc/client.go index 46182651ef..11f478ab11 100644 --- a/privval/grpc/client.go +++ b/privval/grpc/client.go @@ -11,6 +11,7 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/encoding" + tmbytes "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/libs/log" privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" @@ -153,7 +154,7 @@ func (sc *SignerClient) SignVote( // SignProposal requests a remote signer to sign a proposal func (sc *SignerClient) SignProposal( ctx context.Context, chainID string, quorumType btcjson.LLMQType, quorumHash crypto.QuorumHash, proposal *tmproto.Proposal, -) ([]byte, error) { +) (tmbytes.HexBytes, error) { resp, err := sc.client.SignProposal( ctx, &privvalproto.SignProposalRequest{ChainId: chainID, Proposal: proposal, QuorumType: int32(quorumType), QuorumHash: quorumHash}) diff --git a/privval/grpc/client_test.go b/privval/grpc/client_test.go index c48d8fc394..664d52cb77 100644 --- a/privval/grpc/client_test.go +++ b/privval/grpc/client_test.go @@ -6,16 +6,14 @@ import ( "testing" "time" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/test/bufconn" - "github.com/dashevo/dashd-go/btcjson" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/test/bufconn" "github.com/tendermint/tendermint/crypto" - 
"github.com/tendermint/tendermint/crypto/tmhash" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" @@ -27,20 +25,16 @@ import ( const chainID = "chain-id" -func dialer(pv types.PrivValidator, logger log.Logger) (*grpc.Server, func(context.Context, string) (net.Conn, error)) { +func dialer(t *testing.T, pv types.PrivValidator, logger log.Logger) (*grpc.Server, func(context.Context, string) (net.Conn, error)) { listener := bufconn.Listen(1024 * 1024) server := grpc.NewServer() - s := tmgrpc.NewSignerServer(chainID, pv, logger) + s := tmgrpc.NewSignerServer(logger, chainID, pv) privvalproto.RegisterPrivValidatorAPIServer(server, s) - go func() { - if err := server.Serve(listener); err != nil { - panic(err) - } - }() + go func() { require.NoError(t, server.Serve(listener)) }() return server, func(context.Context, string) (net.Conn, error) { return listener.Dial() @@ -49,19 +43,19 @@ func dialer(pv types.PrivValidator, logger log.Logger) (*grpc.Server, func(conte func TestSignerClient_GetPubKey(t *testing.T) { - ctx := context.Background() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + mockPV := types.NewMockPV() - logger := log.TestingLogger() - srv, dialer := dialer(mockPV, logger) + logger := log.NewTestingLogger(t) + srv, dialer := dialer(t, mockPV, logger) defer srv.Stop() conn, err := grpc.DialContext(ctx, "", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithContextDialer(dialer), ) - if err != nil { - panic(err) - } + require.NoError(t, err) defer conn.Close() quorumHash, err := mockPV.GetFirstQuorumHash(ctx) @@ -76,27 +70,26 @@ func TestSignerClient_GetPubKey(t *testing.T) { } func TestSignerClient_SignVote(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - ctx := context.Background() quorumHash := crypto.RandQuorumHash() mockPV := types.NewMockPVForQuorum(quorumHash) - logger := log.TestingLogger() - srv, dialer := dialer(mockPV, logger) + logger := log.NewTestingLogger(t) + srv, dialer := dialer(t, mockPV, logger) defer srv.Stop() conn, err := grpc.DialContext(ctx, "", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithContextDialer(dialer), ) - if err != nil { - panic(err) - } + require.NoError(t, err) defer conn.Close() client, err := tmgrpc.NewSignerClient(conn, chainID, logger) require.NoError(t, err) - hash := tmrand.Bytes(tmhash.Size) + hash := tmrand.Bytes(crypto.HashSize) proTxHash := crypto.RandProTxHash() want := &types.Vote{ @@ -135,28 +128,27 @@ func TestSignerClient_SignVote(t *testing.T) { } func TestSignerClient_SignProposal(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - ctx := context.Background() quorumHash := crypto.RandQuorumHash() mockPV := types.NewMockPVForQuorum(quorumHash) - logger := log.TestingLogger() - srv, dialer := dialer(mockPV, logger) + logger := log.NewTestingLogger(t) + srv, dialer := dialer(t, mockPV, logger) defer srv.Stop() conn, err := grpc.DialContext(ctx, "", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithContextDialer(dialer), ) - if err != nil { - panic(err) - } + require.NoError(t, err) defer conn.Close() client, err := tmgrpc.NewSignerClient(conn, chainID, logger) require.NoError(t, err) ts := time.Now() - hash := tmrand.Bytes(tmhash.Size) + hash := tmrand.Bytes(crypto.HashSize) have := &types.Proposal{ Type: tmproto.ProposalType, @@ -177,12 +169,12 @@ func 
TestSignerClient_SignProposal(t *testing.T) { pbHave := have.ToProto() - _, err = client.SignProposal(context.Background(), chainID, btcjson.LLMQType_5_60, quorumHash, pbHave) + _, err = client.SignProposal(ctx, chainID, btcjson.LLMQType_5_60, quorumHash, pbHave) require.NoError(t, err) pbWant := want.ToProto() - _, err = mockPV.SignProposal(context.Background(), chainID, btcjson.LLMQType_5_60, quorumHash, pbWant) + _, err = mockPV.SignProposal(ctx, chainID, btcjson.LLMQType_5_60, quorumHash, pbWant) require.NoError(t, err) assert.Equal(t, pbWant.Signature, pbHave.Signature) diff --git a/privval/grpc/server.go b/privval/grpc/server.go index 12a8a53486..e2b968e576 100644 --- a/privval/grpc/server.go +++ b/privval/grpc/server.go @@ -22,11 +22,9 @@ type SignerServer struct { privVal types.PrivValidator } -func NewSignerServer(chainID string, - privVal types.PrivValidator, log log.Logger) *SignerServer { - +func NewSignerServer(logger log.Logger, chainID string, privVal types.PrivValidator) *SignerServer { return &SignerServer{ - logger: log, + logger: logger, chainID: chainID, privVal: privVal, } @@ -94,8 +92,7 @@ func (ss *SignerServer) GetProTxHash(ctx context.Context, req *privvalproto.ProT // SignVote receives a vote sign requests, attempts to sign it // returns SignedVoteResponse on success and error on failure -func (ss *SignerServer) SignVote(ctx context.Context, req *privvalproto.SignVoteRequest) ( - *privvalproto.SignedVoteResponse, error) { +func (ss *SignerServer) SignVote(ctx context.Context, req *privvalproto.SignVoteRequest) (*privvalproto.SignedVoteResponse, error) { vote := req.Vote stateID, err := types.StateIDFromProto(req.StateId) @@ -114,8 +111,7 @@ func (ss *SignerServer) SignVote(ctx context.Context, req *privvalproto.SignVote // SignProposal receives a proposal sign requests, attempts to sign it // returns SignedProposalResponse on success and error on failure -func (ss *SignerServer) SignProposal(ctx context.Context, req *privvalproto.SignProposalRequest) ( - *privvalproto.SignedProposalResponse, error) { +func (ss *SignerServer) SignProposal(ctx context.Context, req *privvalproto.SignProposalRequest) (*privvalproto.SignedProposalResponse, error) { proposal := req.Proposal _, err := ss.privVal.SignProposal(ctx, req.ChainId, btcjson.LLMQType(req.QuorumType), req.QuorumHash, proposal) diff --git a/privval/grpc/server_test.go b/privval/grpc/server_test.go index 8c0b42a26d..e4ad908b65 100644 --- a/privval/grpc/server_test.go +++ b/privval/grpc/server_test.go @@ -10,7 +10,6 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/crypto/tmhash" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" @@ -35,17 +34,20 @@ func TestGetPubKey(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - ctx := context.Background() - s := tmgrpc.NewSignerServer(ChainID, tc.pv, log.TestingLogger()) - quorumHash, _ := tc.pv.GetFirstQuorumHash(context.Background()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + logger := log.NewTestingLogger(t) + s := tmgrpc.NewSignerServer(logger, ChainID, tc.pv) + quorumHash, _ := tc.pv.GetFirstQuorumHash(ctx) req := &privvalproto.PubKeyRequest{ChainId: ChainID, QuorumHash: quorumHash} - resp, err := s.GetPubKey(context.Background(), req) + resp, err := s.GetPubKey(ctx, req) if tc.err { require.Error(t, err) } else { + 
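The `dialer` helper used by these client tests is the standard `bufconn` recipe for exercising a gRPC service in-process: the server listens on an in-memory pipe and the client reaches it through `grpc.WithContextDialer`, so no TCP port is opened and CI cannot flake on sockets. A self-contained sketch of the same wiring, with the service registration left as a comment (the tests above register the privval API there):

```go
package main

import (
	"context"
	"log"
	"net"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/test/bufconn"
)

func main() {
	// An in-memory listener: bufconn.Listener implements net.Listener.
	lis := bufconn.Listen(1024 * 1024)

	srv := grpc.NewServer()
	// Register services on srv here, as the tests do with
	// privvalproto.RegisterPrivValidatorAPIServer(srv, signerServer).
	go func() {
		if err := srv.Serve(lis); err != nil {
			log.Printf("server exited: %v", err)
		}
	}()
	defer srv.Stop()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// The context dialer routes the (ignored) target address to the pipe.
	conn, err := grpc.DialContext(ctx, "bufnet",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithContextDialer(func(context.Context, string) (net.Conn, error) {
			return lis.Dial()
		}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```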
require.NoError(t, err) quorumHash, err := tc.pv.GetFirstQuorumHash(ctx) require.NoError(t, err) - pk, err := tc.pv.GetPubKey(context.Background(), quorumHash) + pk, err := tc.pv.GetPubKey(ctx, quorumHash) require.NoError(t, err) assert.Equal(t, resp.PubKey.GetBls12381(), pk.Bytes()) } @@ -56,7 +58,7 @@ func TestGetPubKey(t *testing.T) { func TestSignVote(t *testing.T) { - hash := tmrand.Bytes(tmhash.Size) + hash := tmrand.Bytes(crypto.HashSize) proTxHash := crypto.RandProTxHash() testCases := []struct { @@ -104,7 +106,11 @@ func TestSignVote(t *testing.T) { for _, tc := range testCases { tc := tc t.Run(tc.name, func(t *testing.T) { - s := tmgrpc.NewSignerServer(ChainID, tc.pv, log.TestingLogger()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + logger := log.NewTestingLogger(t) + + s := tmgrpc.NewSignerServer(logger, ChainID, tc.pv) quorumHash, _ := tc.pv.GetFirstQuorumHash(context.Background()) req := &privvalproto.SignVoteRequest{ @@ -117,13 +123,13 @@ func TestSignVote(t *testing.T) { LastAppHash: factory.RandomHash(), }, } - resp, err := s.SignVote(context.Background(), req) + resp, err := s.SignVote(ctx, req) if tc.err { require.Error(t, err) } else { pbVote := tc.want.ToProto() - require.NoError(t, tc.pv.SignVote(context.Background(), ChainID, btcjson.LLMQType_5_60, quorumHash, - pbVote, types.StateID{}, log.TestingLogger())) + require.NoError(t, tc.pv.SignVote(ctx, ChainID, btcjson.LLMQType_5_60, quorumHash, + pbVote, types.StateID{}, log.NewTestingLogger(t))) assert.Equal(t, pbVote.BlockSignature, resp.Vote.BlockSignature) } @@ -134,7 +140,7 @@ func TestSignVote(t *testing.T) { func TestSignProposal(t *testing.T) { ts := time.Now() - hash := tmrand.Bytes(tmhash.Size) + hash := tmrand.Bytes(crypto.HashSize) quorumHash := crypto.RandQuorumHash() testCases := []struct { @@ -182,8 +188,11 @@ func TestSignProposal(t *testing.T) { for _, tc := range testCases { tc := tc t.Run(tc.name, func(t *testing.T) { - ctx := context.Background() - s := tmgrpc.NewSignerServer(ChainID, tc.pv, log.TestingLogger()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + logger := log.NewTestingLogger(t) + + s := tmgrpc.NewSignerServer(logger, ChainID, tc.pv) req := &privvalproto.SignProposalRequest{ Proposal: tc.have.ToProto(), diff --git a/privval/grpc/util.go b/privval/grpc/util.go index a17b766768..a3ea6c532d 100644 --- a/privval/grpc/util.go +++ b/privval/grpc/util.go @@ -4,20 +4,20 @@ import ( "context" "crypto/tls" "crypto/x509" - "io/ioutil" "os" "time" grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry" grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/libs/log" - tmnet "github.com/tendermint/tendermint/libs/net" - grpc "google.golang.org/grpc" + "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/keepalive" + + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/libs/log" + tmnet "github.com/tendermint/tendermint/libs/net" ) // DefaultDialOptions constructs a list of grpc dial options @@ -66,7 +66,7 @@ func GenerateTLS(certPath, keyPath, ca string, log log.Logger) grpc.DialOption { } certPool := x509.NewCertPool() - bs, err := ioutil.ReadFile(ca) + bs, err := os.ReadFile(ca) if err != nil { log.Error("failed to read ca cert:", "error", err) os.Exit(1) @@ -89,6 +89,7 @@ func GenerateTLS(certPath, keyPath, ca string, log log.Logger) 
grpc.DialOption { // DialRemoteSigner is a generalized function to dial the gRPC server. func DialRemoteSigner( + ctx context.Context, cfg *config.PrivValidatorConfig, chainID string, logger log.Logger, @@ -111,7 +112,6 @@ func DialRemoteSigner( dialOptions = append(dialOptions, transportSecurity) - ctx := context.Background() _, address := tmnet.ProtocolAndAddress(cfg.ListenAddr) conn, err := grpc.DialContext(ctx, address, dialOptions...) if err != nil { diff --git a/privval/msgs_test.go b/privval/msgs_test.go index 849b627ced..ab0017808d 100644 --- a/privval/msgs_test.go +++ b/privval/msgs_test.go @@ -5,15 +5,13 @@ import ( "testing" "time" - "github.com/tendermint/tendermint/crypto/bls12381" - crypto2 "github.com/tendermint/tendermint/proto/tendermint/crypto" - "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto/bls12381" "github.com/tendermint/tendermint/crypto/encoding" - "github.com/tendermint/tendermint/crypto/tmhash" + cryptoproto "github.com/tendermint/tendermint/proto/tendermint/crypto" privproto "github.com/tendermint/tendermint/proto/tendermint/privval" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" @@ -23,18 +21,19 @@ var stamp = time.Date(2019, 10, 13, 16, 14, 44, 0, time.UTC) func exampleVote() *types.Vote { return &types.Vote{ - Type: tmproto.SignedMsgType(1), + Type: tmproto.PrecommitType, Height: 3, Round: 2, BlockID: types.BlockID{ - Hash: tmhash.Sum([]byte("blockID_hash")), + Hash: crypto.Checksum([]byte("blockID_hash")), PartSetHeader: types.PartSetHeader{ Total: 1000000, - Hash: tmhash.Sum([]byte("blockID_part_set_header_hash")), + Hash: crypto.Checksum([]byte("blockID_part_set_header_hash")), }, }, ValidatorProTxHash: crypto.ProTxHashFromSeedBytes([]byte("validator_pro_tx_hash")), ValidatorIndex: 56789, + Extension: []byte("extension"), } } @@ -48,10 +47,10 @@ func exampleProposal() *types.Proposal { POLRound: 2, Signature: []byte("it's a signature"), BlockID: types.BlockID{ - Hash: tmhash.Sum([]byte("blockID_hash")), + Hash: crypto.Checksum([]byte("blockID_hash")), PartSetHeader: types.PartSetHeader{ Total: 1000000, - Hash: tmhash.Sum([]byte("blockID_part_set_header_hash")), + Hash: crypto.Checksum([]byte("blockID_part_set_header_hash")), }, }, } @@ -86,9 +85,9 @@ func TestPrivvalVectors(t *testing.T) { {"ping response", &privproto.PingResponse{}, "4200"}, {"pubKey request", &privproto.PubKeyRequest{}, "0a00"}, {"pubKey response", &privproto.PubKeyResponse{PubKey: ppk, Error: nil}, "12340a32223011c7f5ac5a6d01fd9dde3840f7ebbb6a20deed6fba72a347dd66da2f8c9c977c6604b2cd2e0148206c2add9a8f5ddd74"}, - {"pubKey response with error", &privproto.PubKeyResponse{PubKey: crypto2.PublicKey{}, Error: remoteError}, "12140a0012100801120c697427732061206572726f72"}, - {"Vote Request", &privproto.SignVoteRequest{Vote: votepb, StateId: &stateIDpb}, "1aa0010a78080110031802224a0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a3220959a8f5ef2be68d0ed3a07ed8cff85991ee7995c2ac17030f742c135f9729fbe38d5bb032a240a2031323334353637383930313233343536373839303132333435363738393031321002"}, - {"Vote Response", &privproto.SignedVoteResponse{Vote: *votepb, Error: nil}, 
"227a0a78080110031802224a0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a3220959a8f5ef2be68d0ed3a07ed8cff85991ee7995c2ac17030f742c135f9729fbe38d5bb03"}, + {"pubKey response with error", &privproto.PubKeyResponse{PubKey: cryptoproto.PublicKey{}, Error: remoteError}, "12140a0012100801120c697427732061206572726f72"}, + {"Vote Request", &privproto.SignVoteRequest{Vote: votepb, StateId: &stateIDpb}, "1aac010a8301080210031802224a0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a3220959a8f5ef2be68d0ed3a07ed8cff85991ee7995c2ac17030f742c135f9729fbe38d5bb035a09657874656e73696f6e2a240a2031323334353637383930313233343536373839303132333435363738393031321002"}, + {"Vote Response", &privproto.SignedVoteResponse{Vote: *votepb, Error: nil}, "2286010a8301080210031802224a0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a3220959a8f5ef2be68d0ed3a07ed8cff85991ee7995c2ac17030f742c135f9729fbe38d5bb035a09657874656e73696f6e"}, {"Vote Response with error", &privproto.SignedVoteResponse{Vote: tmproto.Vote{}, Error: remoteError}, "22180a042202120012100801120c697427732061206572726f72"}, {"Proposal Request", &privproto.SignProposalRequest{Proposal: proposalpb}, "2a700a6e08011003180220022a4a0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a320608f49a8ded053a10697427732061207369676e6174757265"}, {"Proposal Response", &privproto.SignedProposalResponse{Proposal: *proposalpb, Error: nil}, "32700a6e08011003180220022a4a0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a320608f49a8ded053a10697427732061207369676e6174757265"}, @@ -96,12 +95,12 @@ func TestPrivvalVectors(t *testing.T) { } for _, tc := range testCases { - tc := tc - - pm := mustWrapMsg(tc.msg) - bz, err := pm.Marshal() - require.NoError(t, err, tc.testName) + t.Run(tc.testName, func(t *testing.T) { + pm := mustWrapMsg(tc.msg) + bz, err := pm.Marshal() + require.NoError(t, err, tc.testName) - require.Equal(t, tc.expBytes, hex.EncodeToString(bz), tc.testName) + require.Equal(t, tc.expBytes, hex.EncodeToString(bz), tc.testName) + }) } } diff --git a/privval/retry_signer_client.go b/privval/retry_signer_client.go index 8fb3946bcb..282f08b6b5 100644 --- a/privval/retry_signer_client.go +++ b/privval/retry_signer_client.go @@ -6,11 +6,11 @@ import ( "fmt" "time" - "github.com/tendermint/tendermint/libs/log" - "github.com/dashevo/dashd-go/btcjson" "github.com/tendermint/tendermint/crypto" + tmbytes "github.com/tendermint/tendermint/libs/bytes" + "github.com/tendermint/tendermint/libs/log" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" ) @@ -39,15 +39,15 @@ func (sc *RetrySignerClient) IsConnected() bool { return sc.next.IsConnected() } -func (sc *RetrySignerClient) WaitForConnection(maxWait time.Duration) error { - return sc.next.WaitForConnection(maxWait) +func (sc *RetrySignerClient) WaitForConnection(ctx context.Context, maxWait time.Duration) error { + return sc.next.WaitForConnection(ctx, maxWait) } //-------------------------------------------------------- // Implement PrivValidator -func (sc *RetrySignerClient) Ping() error { 
- return sc.next.Ping() +func (sc *RetrySignerClient) Ping(ctx context.Context) error { + return sc.next.Ping(ctx) } func (sc *RetrySignerClient) ExtractIntoValidator(ctx context.Context, quorumHash crypto.QuorumHash) *types.Validator { @@ -157,7 +157,7 @@ func (sc *RetrySignerClient) SignVote( func (sc *RetrySignerClient) SignProposal( ctx context.Context, chainID string, quorumType btcjson.LLMQType, quorumHash crypto.QuorumHash, proposal *tmproto.Proposal, -) ([]byte, error) { +) (tmbytes.HexBytes, error) { var signID []byte var err error for i := 0; i < sc.retries || sc.retries == 0; i++ { diff --git a/privval/rpc_signer_connection.go b/privval/rpc_signer_connection.go deleted file mode 100644 index 8d5240d4ff..0000000000 --- a/privval/rpc_signer_connection.go +++ /dev/null @@ -1 +0,0 @@ -package privval diff --git a/privval/secret_connection.go b/privval/secret_connection.go index ffa5d36edf..1e179cf41f 100644 --- a/privval/secret_connection.go +++ b/privval/secret_connection.go @@ -11,6 +11,7 @@ import ( "io" "math" "net" + "sync" "time" gogotypes "github.com/gogo/protobuf/types" @@ -24,9 +25,8 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/internal/libs/async" "github.com/tendermint/tendermint/internal/libs/protoio" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/libs/async" tmprivval "github.com/tendermint/tendermint/proto/tendermint/privval" ) @@ -80,11 +80,11 @@ type SecretConnection struct { // are independent, so we can use two mtxs. // All .Read are covered by recvMtx, // all .Write are covered by sendMtx. - recvMtx tmsync.Mutex + recvMtx sync.Mutex recvBuffer []byte recvNonce *[aeadNonceSize]byte - sendMtx tmsync.Mutex + sendMtx sync.Mutex sendNonce *[aeadNonceSize]byte } @@ -99,7 +99,10 @@ func MakeSecretConnection(conn io.ReadWriteCloser, locPrivKey crypto.PrivKey) (* ) // Generate ephemeral keys for perfect forward secrecy. - locEphPub, locEphPriv := genEphKeys() + locEphPub, locEphPriv, err := genEphKeys() + if err != nil { + return nil, err + } // Write local ephemeral pubkey and receive one too. // NOTE: every 32-byte string is accepted as a Curve25519 public key (see @@ -132,7 +135,10 @@ func MakeSecretConnection(conn io.ReadWriteCloser, locPrivKey crypto.PrivKey) (* // Generate the secret used for receiving, sending, challenge via HKDF-SHA2 // on the transcript state (which itself also uses HKDF-SHA2 to derive a key // from the dhSecret). 
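The secret-connection hunks that follow complete a panic-to-error migration: a handshake helper can fail on entropy or a short HKDF read, and a remote-signer connection can simply be retried, so these failures now flow back to the caller. The shape of the derivation is sketched below: HKDF-SHA256 expands the DH secret into two AEAD keys plus a 32-byte challenge, with the key order flipped by `locIsLeast` so that one peer's send key is the other's recv key. The info string here is a stand-in for the real `secretConnKeyAndChallengeGen` constant in this file:

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"io"

	"golang.org/x/crypto/hkdf"
)

const aeadKeySize = 32

// deriveSecretsSketch mirrors deriveSecrets after the refactor: short
// reads surface as errors instead of panics.
func deriveSecretsSketch(dhSecret *[32]byte, locIsLeast bool) (recv, send *[aeadKeySize]byte, challenge *[32]byte, err error) {
	r := hkdf.New(sha256.New, dhSecret[:], nil, []byte("EXAMPLE_KEY_AND_CHALLENGE_GEN"))

	// Enough output for two AEAD keys and a 32-byte challenge.
	res := new([2*aeadKeySize + 32]byte)
	if _, err = io.ReadFull(r, res[:]); err != nil {
		return nil, nil, nil, err
	}

	recv, send = new([aeadKeySize]byte), new([aeadKeySize]byte)
	// Flipped assignment keeps the two peers' send/recv keys paired.
	if locIsLeast {
		copy(recv[:], res[0:aeadKeySize])
		copy(send[:], res[aeadKeySize:2*aeadKeySize])
	} else {
		copy(send[:], res[0:aeadKeySize])
		copy(recv[:], res[aeadKeySize:2*aeadKeySize])
	}
	challenge = new([32]byte)
	copy(challenge[:], res[2*aeadKeySize:])
	return recv, send, challenge, nil
}

func main() {
	var dh [32]byte // zero secret, demo only
	r, s, c, err := deriveSecretsSketch(&dh, true)
	if err != nil {
		panic(err)
	}
	fmt.Printf("recv=%x... send=%x... chal=%x...\n", r[:4], s[:4], c[:4])
}
```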
- recvSecret, sendSecret := deriveSecrets(dhSecret, locIsLeast) + recvSecret, sendSecret, err := deriveSecrets(dhSecret, locIsLeast) + if err != nil { + return nil, err + } const challengeSize = 32 var challenge [challengeSize]byte @@ -214,7 +220,10 @@ func (sc *SecretConnection) Write(data []byte) (n int, err error) { // encrypt the frame sc.sendAead.Seal(sealedFrame[:0], sc.sendNonce[:], frame, nil) - incrNonce(sc.sendNonce) + if err := incrNonce(sc.sendNonce); err != nil { + return err + } + // end encryption _, err = sc.conn.Write(sealedFrame) @@ -258,7 +267,9 @@ func (sc *SecretConnection) Read(data []byte) (n int, err error) { if err != nil { return n, fmt.Errorf("failed to decrypt SecretConnection: %w", err) } - incrNonce(sc.recvNonce) + if err = incrNonce(sc.recvNonce); err != nil { + return + } // end decryption // copy checkLength worth into data, @@ -288,14 +299,13 @@ func (sc *SecretConnection) SetWriteDeadline(t time.Time) error { return sc.conn.(net.Conn).SetWriteDeadline(t) } -func genEphKeys() (ephPub, ephPriv *[32]byte) { - var err error +func genEphKeys() (ephPub, ephPriv *[32]byte, err error) { // TODO: Probably not a problem but ask Tony: different from the rust implementation (uses x25519-dalek), // we do not "clamp" the private key scalar: // see: https://github.com/dalek-cryptography/x25519-dalek/blob/34676d336049df2bba763cc076a75e47ae1f170f/src/x25519.rs#L56-L74 ephPub, ephPriv, err = box.GenerateKey(crand.Reader) if err != nil { - panic("Could not generate ephemeral key-pair") + return } return } @@ -339,14 +349,14 @@ func shareEphPubKey(conn io.ReadWriter, locEphPub *[32]byte) (remEphPub *[32]byt func deriveSecrets( dhSecret *[32]byte, locIsLeast bool, -) (recvSecret, sendSecret *[aeadKeySize]byte) { +) (recvSecret, sendSecret *[aeadKeySize]byte, err error) { hash := sha256.New hkdf := hkdf.New(hash, dhSecret[:], nil, secretConnKeyAndChallengeGen) // get enough data for 2 aead keys, and a 32 byte challenge res := new([2*aeadKeySize + 32]byte) - _, err := io.ReadFull(hkdf, res[:]) + _, err = io.ReadFull(hkdf, res[:]) if err != nil { - panic(err) + return nil, nil, err } recvSecret = new([aeadKeySize]byte) @@ -454,13 +464,14 @@ func shareAuthSignature(sc io.ReadWriter, pubKey crypto.PubKey, signature []byte // Due to chacha20poly1305 expecting a 12 byte nonce we do not use the first four // bytes. We only increment a 64 bit unsigned int in the remaining 8 bytes // (little-endian in nonce[4:]). -func incrNonce(nonce *[aeadNonceSize]byte) { +func incrNonce(nonce *[aeadNonceSize]byte) error { counter := binary.LittleEndian.Uint64(nonce[4:]) if counter == math.MaxUint64 { // Terminates the session and makes sure the nonce would not re-used. 
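The nonce counter gets the same treatment: chacha20poly1305 takes a 12-byte nonce, of which bytes 4 through 11 hold a little-endian 64-bit counter, and exhausting the counter now terminates the session with an error rather than killing the process. A runnable sketch of the refactored function:

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
	"math"
)

const aeadNonceSize = 12 // chacha20poly1305 nonce size

// incrNonce mirrors the version above: the first four bytes are unused,
// the remaining eight are a little-endian counter, and an overflow ends
// the session via an error instead of a panic.
func incrNonce(nonce *[aeadNonceSize]byte) error {
	counter := binary.LittleEndian.Uint64(nonce[4:])
	if counter == math.MaxUint64 {
		return errors.New("can't increase nonce without overflow")
	}
	binary.LittleEndian.PutUint64(nonce[4:], counter+1)
	return nil
}

func main() {
	var nonce [aeadNonceSize]byte
	binary.LittleEndian.PutUint64(nonce[4:], math.MaxUint64-1)

	fmt.Println(incrNonce(&nonce)) // <nil>: counter is now MaxUint64
	fmt.Println(incrNonce(&nonce)) // error: would overflow
}
```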
// See https://github.com/tendermint/tendermint/issues/3531 - panic("can't increase nonce without overflow") + return errors.New("can't increase nonce without overflow") } counter++ binary.LittleEndian.PutUint64(nonce[4:], counter) + return nil } diff --git a/privval/signer_client.go b/privval/signer_client.go index ec241eb2a8..3da58e888e 100644 --- a/privval/signer_client.go +++ b/privval/signer_client.go @@ -6,12 +6,12 @@ import ( "fmt" "time" - "github.com/tendermint/tendermint/libs/log" - "github.com/dashevo/dashd-go/btcjson" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/encoding" + tmbytes "github.com/tendermint/tendermint/libs/bytes" + "github.com/tendermint/tendermint/libs/log" privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" @@ -20,6 +20,7 @@ import ( // SignerClient implements PrivValidator. // Handles remote validator connections that provide signing services type SignerClient struct { + logger log.Logger endpoint *SignerListenerEndpoint chainID string } @@ -28,19 +29,28 @@ var _ types.PrivValidator = (*SignerClient)(nil) // NewSignerClient returns an instance of SignerClient. // it will start the endpoint (if not already started) -func NewSignerClient(endpoint *SignerListenerEndpoint, chainID string) (*SignerClient, error) { +func NewSignerClient(ctx context.Context, endpoint *SignerListenerEndpoint, chainID string) (*SignerClient, error) { if !endpoint.IsRunning() { - if err := endpoint.Start(); err != nil { + if err := endpoint.Start(ctx); err != nil { return nil, fmt.Errorf("failed to start listener endpoint: %w", err) } } - return &SignerClient{endpoint: endpoint, chainID: chainID}, nil + return &SignerClient{ + logger: endpoint.logger, + endpoint: endpoint, + chainID: chainID, + }, nil } // Close closes the underlying connection func (sc *SignerClient) Close() error { - return sc.endpoint.Close() + sc.endpoint.Stop() + err := sc.endpoint.Close() + if err != nil { + return err + } + return nil } // IsConnected indicates with the signer is connected to a remote signing service @@ -49,18 +59,18 @@ func (sc *SignerClient) IsConnected() bool { } // WaitForConnection waits maxWait for a connection or returns a timeout error -func (sc *SignerClient) WaitForConnection(maxWait time.Duration) error { - return sc.endpoint.WaitForConnection(maxWait) +func (sc *SignerClient) WaitForConnection(ctx context.Context, maxWait time.Duration) error { + return sc.endpoint.WaitForConnection(ctx, maxWait) } //-------------------------------------------------------- // Implement PrivValidator // Ping sends a ping request to the remote signer -func (sc *SignerClient) Ping() error { - response, err := sc.endpoint.SendRequest(mustWrapMsg(&privvalproto.PingRequest{})) +func (sc *SignerClient) Ping(ctx context.Context) error { + response, err := sc.endpoint.SendRequest(ctx, mustWrapMsg(&privvalproto.PingRequest{})) if err != nil { - sc.endpoint.Logger.Error("SignerClient::Ping", "err", err) + sc.logger.Error("SignerClient::Ping", "err", err) return nil } @@ -88,7 +98,7 @@ func (sc *SignerClient) ExtractIntoValidator(ctx context.Context, quorumHash cry // GetPubKey retrieves a public key from a remote signer // returns an error if client is not able to provide the key func (sc *SignerClient) GetPubKey(ctx context.Context, quorumHash crypto.QuorumHash) (crypto.PubKey, error) { - response, err := sc.endpoint.SendRequest( + response, err := 
sc.endpoint.SendRequest(ctx, mustWrapMsg(&privvalproto.PubKeyRequest{ChainId: sc.chainID, QuorumHash: quorumHash}), ) if err != nil { @@ -112,7 +122,7 @@ func (sc *SignerClient) GetPubKey(ctx context.Context, quorumHash crypto.QuorumH } func (sc *SignerClient) GetProTxHash(ctx context.Context) (crypto.ProTxHash, error) { - response, err := sc.endpoint.SendRequest(mustWrapMsg(&privvalproto.ProTxHashRequest{ChainId: sc.chainID})) + response, err := sc.endpoint.SendRequest(ctx, mustWrapMsg(&privvalproto.ProTxHashRequest{ChainId: sc.chainID})) if err != nil { return nil, fmt.Errorf("send: %w", err) } @@ -141,7 +151,7 @@ func (sc *SignerClient) GetThresholdPublicKey(ctx context.Context, quorumHash cr return nil, fmt.Errorf("quorum hash must be 32 bytes long if requesting public key from dash core") } - response, err := sc.endpoint.SendRequest( + response, err := sc.endpoint.SendRequest(ctx, mustWrapMsg(&privvalproto.ThresholdPubKeyRequest{ChainId: sc.chainID, QuorumHash: quorumHash}), ) if err != nil { @@ -169,8 +179,14 @@ func (sc *SignerClient) GetHeight(ctx context.Context, quorumHash crypto.QuorumH // SignVote requests a remote signer to sign a vote func (sc *SignerClient) SignVote( - ctx context.Context, chainID string, quorumType btcjson.LLMQType, quorumHash crypto.QuorumHash, - vote *tmproto.Vote, stateID types.StateID, logger log.Logger) error { + ctx context.Context, + chainID string, + quorumType btcjson.LLMQType, + quorumHash crypto.QuorumHash, + vote *tmproto.Vote, + stateID types.StateID, + logger log.Logger, +) error { // fmt.Printf("--> sending request to sign vote (%d/%d) %v - %v", vote.Height, vote.Round, vote.BlockID, vote) stateIDProto := stateID.ToProto() @@ -182,7 +198,7 @@ func (sc *SignerClient) SignVote( StateId: &stateIDProto, } - response, err := sc.endpoint.SendRequest(mustWrapMsg(&voteRequest)) + response, err := sc.endpoint.SendRequest(ctx, mustWrapMsg(&voteRequest)) if err != nil { return err } @@ -202,9 +218,13 @@ func (sc *SignerClient) SignVote( // SignProposal requests a remote signer to sign a proposal func (sc *SignerClient) SignProposal( - ctx context.Context, chainID string, quorumType btcjson.LLMQType, quorumHash crypto.QuorumHash, proposal *tmproto.Proposal, -) ([]byte, error) { - response, err := sc.endpoint.SendRequest(mustWrapMsg( + ctx context.Context, + chainID string, + quorumType btcjson.LLMQType, + quorumHash crypto.QuorumHash, + proposal *tmproto.Proposal, +) (tmbytes.HexBytes, error) { + response, err := sc.endpoint.SendRequest(ctx, mustWrapMsg( &privvalproto.SignProposalRequest{Proposal: proposal, ChainId: chainID, QuorumType: int32(quorumType), QuorumHash: quorumHash}, )) diff --git a/privval/signer_client_test.go b/privval/signer_client_test.go index 52cb4be677..3355418765 100644 --- a/privval/signer_client_test.go +++ b/privval/signer_client_test.go @@ -7,12 +7,12 @@ import ( "time" "github.com/dashevo/dashd-go/btcjson" - + "github.com/fortytw2/leaktest" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/crypto/tmhash" + "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" cryptoproto "github.com/tendermint/tendermint/proto/tendermint/crypto" privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval" @@ -27,393 +27,396 @@ type signerTestCase struct { mockPV types.PrivValidator signerClient *SignerClient signerServer *SignerServer + name string + closer context.CancelFunc } -func 
getSignerTestCases(t *testing.T) []signerTestCase { +func getSignerTestCases(ctx context.Context, t *testing.T, logger log.Logger) []signerTestCase { + t.Helper() + testCases := make([]signerTestCase, 0) // Get test cases for each possible dialer (DialTCP / DialUnix / etc) - for _, dtc := range getDialerTestCases(t) { + for idx, dtc := range getDialerTestCases(t) { chainID := tmrand.Str(12) quorumHash := crypto.RandQuorumHash() mockPV := types.NewMockPVForQuorum(quorumHash) + cctx, ccancel := context.WithCancel(ctx) // get a pair of signer listener, signer dialer endpoints - sl, sd := getMockEndpoints(t, dtc.addr, dtc.dialer) - sc, err := NewSignerClient(sl, chainID) + sl, sd := getMockEndpoints(cctx, t, logger, dtc.addr, dtc.dialer) + sc, err := NewSignerClient(cctx, sl, chainID) require.NoError(t, err) ss := NewSignerServer(sd, chainID, mockPV) - err = ss.Start() - require.NoError(t, err) + require.NoError(t, ss.Start(cctx)) - tc := signerTestCase{ + testCases = append(testCases, signerTestCase{ + name: fmt.Sprintf("Case%d%T_%s", idx, dtc.dialer, chainID), + closer: ccancel, chainID: chainID, quorumType: btcjson.LLMQType_5_60, quorumHash: quorumHash, mockPV: mockPV, signerClient: sc, signerServer: ss, - } - - testCases = append(testCases, tc) + }) + t.Cleanup(ss.Wait) + t.Cleanup(sc.endpoint.Wait) } return testCases } func TestSignerClose(t *testing.T) { - for _, tc := range getSignerTestCases(t) { - err := tc.signerClient.Close() - assert.NoError(t, err) + t.Cleanup(leaktest.Check(t)) - err = tc.signerServer.Stop() - assert.NoError(t, err) + bctx, bcancel := context.WithCancel(context.Background()) + defer bcancel() + + logger := log.NewNopLogger() + + for _, tc := range getSignerTestCases(bctx, t, logger) { + t.Run(tc.name, func(t *testing.T) { + defer leaktest.Check(t) + defer func() { + tc.closer() + tc.signerClient.endpoint.Wait() + tc.signerServer.Wait() + }() + + assert.NoError(t, tc.signerClient.Close()) + tc.signerServer.Stop() + }) } } func TestSignerPing(t *testing.T) { - for _, tc := range getSignerTestCases(t) { - tc := tc - t.Cleanup(func() { - if err := tc.signerServer.Stop(); err != nil { - t.Error(err) - } - }) - t.Cleanup(func() { - if err := tc.signerClient.Close(); err != nil { - t.Error(err) - } - }) + t.Cleanup(leaktest.Check(t)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() - err := tc.signerClient.Ping() + for _, tc := range getSignerTestCases(ctx, t, logger) { + err := tc.signerClient.Ping(ctx) assert.NoError(t, err) } } func TestSignerGetPubKey(t *testing.T) { - for _, tc := range getSignerTestCases(t) { - tc := tc - t.Cleanup(func() { - if err := tc.signerServer.Stop(); err != nil { - t.Error(err) - } - }) - t.Cleanup(func() { - if err := tc.signerClient.Close(); err != nil { - t.Error(err) - } - }) + t.Cleanup(leaktest.Check(t)) - pubKey, err := tc.signerClient.GetPubKey(context.Background(), tc.quorumHash) - require.NoError(t, err) - expectedPubKey, err := tc.mockPV.GetPubKey(context.Background(), tc.quorumHash) - require.NoError(t, err) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - assert.Equal(t, expectedPubKey, pubKey) + logger := log.NewNopLogger() - pubKey, err = tc.signerClient.GetPubKey(context.Background(), tc.quorumHash) - require.NoError(t, err) - expectedpk, err := tc.mockPV.GetPubKey(context.Background(), tc.quorumHash) - require.NoError(t, err) - expectedAddr := expectedpk.Address() + for _, tc := range getSignerTestCases(ctx, t, logger) { + 
t.Run(tc.name, func(t *testing.T) { + defer tc.closer() + + pubKey, err := tc.signerClient.GetPubKey(ctx, tc.quorumHash) + require.NoError(t, err) + expectedPubKey, err := tc.mockPV.GetPubKey(ctx, tc.quorumHash) + require.NoError(t, err) - assert.Equal(t, expectedAddr, pubKey.Address()) + assert.Equal(t, expectedPubKey, pubKey) + + pubKey, err = tc.signerClient.GetPubKey(ctx, tc.quorumHash) + require.NoError(t, err) + expectedpk, err := tc.mockPV.GetPubKey(ctx, tc.quorumHash) + require.NoError(t, err) + expectedAddr := expectedpk.Address() + + assert.Equal(t, expectedAddr, pubKey.Address()) + }) } } func TestSignerProposal(t *testing.T) { - for _, tc := range getSignerTestCases(t) { - ts := time.Now() - hash := tmrand.Bytes(tmhash.Size) - have := &types.Proposal{ - Type: tmproto.ProposalType, - Height: 1, - CoreChainLockedHeight: 1, - Round: 2, - POLRound: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, - Timestamp: ts, - } - want := &types.Proposal{ - Type: tmproto.ProposalType, - Height: 1, - CoreChainLockedHeight: 1, - Round: 2, - POLRound: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, - Timestamp: ts, - } - - tc := tc - t.Cleanup(func() { - if err := tc.signerServer.Stop(); err != nil { - t.Error(err) + t.Cleanup(leaktest.Check(t)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + + for _, tc := range getSignerTestCases(ctx, t, logger) { + t.Run(tc.name, func(t *testing.T) { + defer tc.closer() + + ts := time.Now() + hash := tmrand.Bytes(crypto.HashSize) + have := &types.Proposal{ + Type: tmproto.ProposalType, + Height: 1, + CoreChainLockedHeight: 1, + Round: 2, + POLRound: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + Timestamp: ts, } - }) - t.Cleanup(func() { - if err := tc.signerClient.Close(); err != nil { - t.Error(err) + want := &types.Proposal{ + Type: tmproto.ProposalType, + Height: 1, + CoreChainLockedHeight: 1, + Round: 2, + POLRound: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + Timestamp: ts, } - }) - - _, err := tc.mockPV.SignProposal(context.Background(), tc.chainID, tc.quorumType, tc.quorumHash, want.ToProto()) - require.NoError(t, err) + _, err := tc.mockPV.SignProposal(ctx, tc.chainID, tc.quorumType, tc.quorumHash, want.ToProto()) + require.NoError(t, err) + _, err = tc.signerClient.SignProposal(ctx, tc.chainID, tc.quorumType, tc.quorumHash, have.ToProto()) + require.NoError(t, err) - _, err = tc.signerClient.SignProposal(context.Background(), tc.chainID, tc.quorumType, tc.quorumHash, have.ToProto()) - require.NoError(t, err) + assert.Equal(t, want.Signature, have.Signature) + }) - assert.Equal(t, want.Signature, have.Signature) } } func TestSignerVote(t *testing.T) { - for _, tc := range getSignerTestCases(t) { - hash := tmrand.Bytes(tmhash.Size) - valProTxHash := tmrand.Bytes(crypto.DefaultHashSize) - want := &types.Vote{ - Type: tmproto.PrecommitType, - Height: 1, - Round: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, - ValidatorProTxHash: valProTxHash, - ValidatorIndex: 1, - } - - have := &types.Vote{ - Type: tmproto.PrecommitType, - Height: 1, - Round: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, - ValidatorProTxHash: valProTxHash, - ValidatorIndex: 1, - } - - tc := tc - t.Cleanup(func() 
{ - if err := tc.signerServer.Stop(); err != nil { - t.Error(err) + t.Cleanup(leaktest.Check(t)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + + for _, tc := range getSignerTestCases(ctx, t, logger) { + t.Run(tc.name, func(t *testing.T) { + defer tc.closer() + + hash := tmrand.Bytes(crypto.HashSize) + valProTxHash := tmrand.Bytes(crypto.DefaultHashSize) + want := &types.Vote{ + Type: tmproto.PrecommitType, + Height: 1, + Round: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + ValidatorProTxHash: valProTxHash, + ValidatorIndex: 1, } - }) - t.Cleanup(func() { - if err := tc.signerClient.Close(); err != nil { - t.Error(err) + + have := &types.Vote{ + Type: tmproto.PrecommitType, + Height: 1, + Round: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + ValidatorProTxHash: valProTxHash, + ValidatorIndex: 1, } - }) - stateID := types.RandStateID().WithHeight(want.Height - 1) + stateID := types.RandStateID().WithHeight(want.Height - 1) + + require.NoError(t, tc.mockPV.SignVote(ctx, tc.chainID, tc.quorumType, tc.quorumHash, want.ToProto(), stateID, nil)) + require.NoError(t, tc.signerClient.SignVote(ctx, tc.chainID, tc.quorumType, tc.quorumHash, have.ToProto(), stateID, nil)) - require.NoError(t, tc.mockPV.SignVote(context.Background(), tc.chainID, tc.quorumType, tc.quorumHash, want.ToProto(), - stateID, nil)) - require.NoError(t, tc.signerClient.SignVote(context.Background(), tc.chainID, tc.quorumType, tc.quorumHash, have.ToProto(), - stateID, nil)) + assert.Equal(t, want.BlockSignature, have.BlockSignature) + assert.Equal(t, want.StateSignature, have.StateSignature) - assert.Equal(t, want.BlockSignature, have.BlockSignature) - assert.Equal(t, want.StateSignature, have.StateSignature) + }) } } func TestSignerVoteResetDeadline(t *testing.T) { - for _, tc := range getSignerTestCases(t) { - hash := tmrand.Bytes(tmhash.Size) - valProTxHash := tmrand.Bytes(crypto.DefaultHashSize) - want := &types.Vote{ - Type: tmproto.PrecommitType, - Height: 1, - Round: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, - ValidatorProTxHash: valProTxHash, - ValidatorIndex: 1, - } - - have := &types.Vote{ - Type: tmproto.PrecommitType, - Height: 1, - Round: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, - ValidatorProTxHash: valProTxHash, - ValidatorIndex: 1, - } - - tc := tc - t.Cleanup(func() { - if err := tc.signerServer.Stop(); err != nil { - t.Error(err) + t.Cleanup(leaktest.Check(t)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + + for _, tc := range getSignerTestCases(ctx, t, logger) { + t.Run(tc.name, func(t *testing.T) { + hash := tmrand.Bytes(crypto.HashSize) + valProTxHash := tmrand.Bytes(crypto.DefaultHashSize) + want := &types.Vote{ + Type: tmproto.PrecommitType, + Height: 1, + Round: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + ValidatorProTxHash: valProTxHash, + ValidatorIndex: 1, } - }) - t.Cleanup(func() { - if err := tc.signerClient.Close(); err != nil { - t.Error(err) + + have := &types.Vote{ + Type: tmproto.PrecommitType, + Height: 1, + Round: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + ValidatorProTxHash: valProTxHash, + ValidatorIndex: 1, } - }) - 
time.Sleep(testTimeoutReadWrite2o3) + time.Sleep(testTimeoutReadWrite2o3) - stateID := types.RandStateID().WithHeight(want.Height - 1) + stateID := types.RandStateID().WithHeight(want.Height - 1) - require.NoError(t, - tc.mockPV.SignVote(context.Background(), tc.chainID, tc.quorumType, tc.quorumHash, want.ToProto(), stateID, nil)) - require.NoError(t, - tc.signerClient.SignVote(context.Background(), tc.chainID, tc.quorumType, tc.quorumHash, have.ToProto(), stateID, nil)) - assert.Equal(t, want.BlockSignature, have.BlockSignature) - assert.Equal(t, want.StateSignature, have.StateSignature) + require.NoError(t, + tc.mockPV.SignVote(ctx, tc.chainID, tc.quorumType, tc.quorumHash, want.ToProto(), stateID, nil)) + require.NoError(t, + tc.signerClient.SignVote(ctx, tc.chainID, tc.quorumType, tc.quorumHash, have.ToProto(), stateID, nil)) + assert.Equal(t, want.BlockSignature, have.BlockSignature) + assert.Equal(t, want.StateSignature, have.StateSignature) - // TODO(jleni): Clarify what is actually being tested + // TODO(jleni): Clarify what is actually being tested - // This would exceed the deadline if it was not extended by the previous message - time.Sleep(testTimeoutReadWrite2o3) + // This would exceed the deadline if it was not extended by the previous message + time.Sleep(testTimeoutReadWrite2o3) - require.NoError(t, - tc.mockPV.SignVote(context.Background(), tc.chainID, tc.quorumType, tc.quorumHash, want.ToProto(), stateID, nil)) - require.NoError(t, - tc.signerClient.SignVote(context.Background(), tc.chainID, tc.quorumType, tc.quorumHash, have.ToProto(), stateID, nil)) - assert.Equal(t, want.BlockSignature, have.BlockSignature) - assert.Equal(t, want.StateSignature, have.StateSignature) + require.NoError(t, + tc.mockPV.SignVote(ctx, tc.chainID, tc.quorumType, tc.quorumHash, want.ToProto(), stateID, nil)) + require.NoError(t, + tc.signerClient.SignVote(ctx, tc.chainID, tc.quorumType, tc.quorumHash, have.ToProto(), stateID, nil)) + assert.Equal(t, want.BlockSignature, have.BlockSignature) + assert.Equal(t, want.StateSignature, have.StateSignature) + }) } } func TestSignerVoteKeepAlive(t *testing.T) { - for _, tc := range getSignerTestCases(t) { - hash := tmrand.Bytes(tmhash.Size) - valProTxHash := tmrand.Bytes(crypto.DefaultHashSize) - want := &types.Vote{ - Type: tmproto.PrecommitType, - Height: 1, - Round: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, - ValidatorProTxHash: valProTxHash, - ValidatorIndex: 1, - } - - have := &types.Vote{ - Type: tmproto.PrecommitType, - Height: 1, - Round: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, - ValidatorProTxHash: valProTxHash, - ValidatorIndex: 1, - } - - tc := tc - t.Cleanup(func() { - if err := tc.signerServer.Stop(); err != nil { - t.Error(err) + t.Cleanup(leaktest.Check(t)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + + for _, tc := range getSignerTestCases(ctx, t, logger) { + t.Run(tc.name, func(t *testing.T) { + defer tc.closer() + + hash := tmrand.Bytes(crypto.HashSize) + valProTxHash := tmrand.Bytes(crypto.DefaultHashSize) + want := &types.Vote{ + Type: tmproto.PrecommitType, + Height: 1, + Round: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + ValidatorProTxHash: valProTxHash, + ValidatorIndex: 1, } - }) - t.Cleanup(func() { - if err := tc.signerClient.Close(); err != nil { - t.Error(err) + + have := &types.Vote{ + 
Type: tmproto.PrecommitType, + Height: 1, + Round: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + ValidatorProTxHash: valProTxHash, + ValidatorIndex: 1, } - }) - stateID := types.RandStateID().WithHeight(want.Height - 1) + stateID := types.RandStateID().WithHeight(want.Height - 1) - // Check that even if the client does not request a - // signature for a long time. The service is still available + // Check that even if the client does not request a + // signature for a long time. The service is still available - // in this particular case, we use the dialer logger to ensure that - // test messages are properly interleaved in the test logs - tc.signerServer.Logger.Debug("TEST: Forced Wait -------------------------------------------------") - time.Sleep(testTimeoutReadWrite * 3) - tc.signerServer.Logger.Debug("TEST: Forced Wait DONE---------------------------------------------") + // in this particular case, we use the dialer logger to ensure that + // test messages are properly interleaved in the test logs + time.Sleep(testTimeoutReadWrite * 3) - require.NoError(t, - tc.mockPV.SignVote(context.Background(), tc.chainID, tc.quorumType, tc.quorumHash, want.ToProto(), stateID, nil)) - require.NoError(t, - tc.signerClient.SignVote(context.Background(), tc.chainID, tc.quorumType, tc.quorumHash, have.ToProto(), stateID, nil)) + require.NoError(t, + tc.mockPV.SignVote(ctx, tc.chainID, tc.quorumType, tc.quorumHash, want.ToProto(), stateID, nil)) + require.NoError(t, + tc.signerClient.SignVote(ctx, tc.chainID, tc.quorumType, tc.quorumHash, have.ToProto(), stateID, nil)) - assert.Equal(t, want.BlockSignature, have.BlockSignature) - assert.Equal(t, want.StateSignature, have.StateSignature) + assert.Equal(t, want.BlockSignature, have.BlockSignature) + assert.Equal(t, want.StateSignature, have.StateSignature) + }) } } func TestSignerSignProposalErrors(t *testing.T) { - for _, tc := range getSignerTestCases(t) { - // Replace service with a mock that always fails - tc.signerServer.privVal = types.NewErroringMockPV() - tc.mockPV = types.NewErroringMockPV() - - tc := tc - t.Cleanup(func() { - if err := tc.signerServer.Stop(); err != nil { - t.Error(err) - } - }) - t.Cleanup(func() { - if err := tc.signerClient.Close(); err != nil { - t.Error(err) + t.Cleanup(leaktest.Check(t)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + + for _, tc := range getSignerTestCases(ctx, t, logger) { + t.Run(tc.name, func(t *testing.T) { + defer tc.closer() + // Replace service with a mock that always fails + tc.signerServer.privVal = types.NewErroringMockPV() + tc.mockPV = types.NewErroringMockPV() + + hash := tmrand.Bytes(crypto.HashSize) + proposal := &types.Proposal{ + Type: tmproto.ProposalType, + Height: 1, + CoreChainLockedHeight: 1, + Round: 2, + POLRound: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + Signature: []byte("signature"), } - }) - hash := tmrand.Bytes(tmhash.Size) - proposal := &types.Proposal{ - Type: tmproto.ProposalType, - Height: 1, - CoreChainLockedHeight: 1, - Round: 2, - POLRound: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, - Signature: []byte("signature"), - } - - _, err := tc.signerClient.SignProposal(context.Background(), tc.chainID, tc.quorumType, tc.quorumHash, proposal.ToProto()) - require.Equal(t, err.(*RemoteSignerError).Description, types.ErroringMockPVErr.Error()) - - 
_, err = tc.mockPV.SignProposal(context.Background(), tc.chainID, tc.quorumType, tc.quorumHash, proposal.ToProto()) - require.Error(t, err) - - _, err = tc.signerClient.SignProposal(context.Background(), tc.chainID, tc.quorumType, tc.quorumHash, proposal.ToProto()) - require.Error(t, err) + _, err := tc.signerClient.SignProposal(ctx, tc.chainID, tc.quorumType, tc.quorumHash, proposal.ToProto()) + rserr, ok := err.(*RemoteSignerError) + require.True(t, ok, "%T", err) + require.Contains(t, rserr.Error(), types.ErroringMockPVErr.Error()) + + _, err = tc.mockPV.SignProposal(ctx, tc.chainID, tc.quorumType, tc.quorumHash, proposal.ToProto()) + require.Error(t, err) + + _, err = tc.signerClient.SignProposal(ctx, tc.chainID, tc.quorumType, tc.quorumHash, proposal.ToProto()) + require.Error(t, err) + }) } } func TestSignerSignVoteErrors(t *testing.T) { - for _, tc := range getSignerTestCases(t) { - hash := tmrand.Bytes(tmhash.Size) - valProTxHash := tmrand.Bytes(crypto.DefaultHashSize) - vote := &types.Vote{ - Type: tmproto.PrecommitType, - Height: 1, - Round: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, - ValidatorProTxHash: valProTxHash, - ValidatorIndex: 1, - BlockSignature: []byte("signature"), - StateSignature: []byte("stateSignature"), - } - - // Replace signer service privval with one that always fails - tc.signerServer.privVal = types.NewErroringMockPV() - tc.mockPV = types.NewErroringMockPV() - - tc := tc - t.Cleanup(func() { - if err := tc.signerServer.Stop(); err != nil { - t.Error(err) - } - }) - t.Cleanup(func() { - if err := tc.signerClient.Close(); err != nil { - t.Error(err) + t.Cleanup(leaktest.Check(t)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + + for _, tc := range getSignerTestCases(ctx, t, logger) { + t.Run(tc.name, func(t *testing.T) { + defer tc.closer() + + hash := tmrand.Bytes(crypto.HashSize) + valProTxHash := tmrand.Bytes(crypto.DefaultHashSize) + vote := &types.Vote{ + Type: tmproto.PrecommitType, + Height: 1, + Round: 2, + BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + ValidatorProTxHash: valProTxHash, + ValidatorIndex: 1, + BlockSignature: []byte("signature"), + StateSignature: []byte("stateSignature"), } - }) - stateID := types.RandStateID().WithHeight(vote.Height - 1) + stateID := types.RandStateID().WithHeight(vote.Height - 1) - err := tc.signerClient.SignVote(context.Background(), tc.chainID, tc.quorumType, tc.quorumHash, vote.ToProto(), stateID, nil) - require.Equal(t, err.(*RemoteSignerError).Description, types.ErroringMockPVErr.Error()) + // Replace signer service privval with one that always fails + tc.signerServer.privVal = types.NewErroringMockPV() + tc.mockPV = types.NewErroringMockPV() - err = tc.mockPV.SignVote(context.Background(), tc.chainID, tc.quorumType, tc.quorumHash, vote.ToProto(), stateID, nil) - require.Error(t, err) + err := tc.signerClient.SignVote(context.Background(), tc.chainID, tc.quorumType, tc.quorumHash, vote.ToProto(), stateID, nil) + rserr, ok := err.(*RemoteSignerError) + require.True(t, ok, "%T", err) + require.Contains(t, rserr.Error(), types.ErroringMockPVErr.Error()) - err = tc.signerClient.SignVote(context.Background(), tc.chainID, tc.quorumType, tc.quorumHash, vote.ToProto(), stateID, nil) - require.Error(t, err) + err = tc.mockPV.SignVote(ctx, tc.chainID, tc.quorumType, tc.quorumHash, vote.ToProto(), stateID, nil) + require.Error(t, err) + + err = 
tc.signerClient.SignVote(ctx, tc.chainID, tc.quorumType, tc.quorumHash, vote.ToProto(), stateID, nil) + require.Error(t, err) + }) } } -func brokenHandler(ctx context.Context, privVal types.PrivValidator, request privvalproto.Message, - chainID string) (privvalproto.Message, error) { +func brokenHandler(ctx context.Context, privVal types.PrivValidator, request privvalproto.Message, chainID string) (privvalproto.Message, error) { var res privvalproto.Message var err error @@ -439,29 +442,28 @@ func brokenHandler(ctx context.Context, privVal types.PrivValidator, request pri } func TestSignerUnexpectedResponse(t *testing.T) { - for _, tc := range getSignerTestCases(t) { - tc.signerServer.privVal = types.NewMockPVForQuorum(tc.quorumHash) - tc.mockPV = types.NewMockPVForQuorum(tc.quorumHash) + t.Cleanup(leaktest.Check(t)) - tc.signerServer.SetRequestHandler(brokenHandler) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - tc := tc - t.Cleanup(func() { - if err := tc.signerServer.Stop(); err != nil { - t.Error(err) - } - }) - t.Cleanup(func() { - if err := tc.signerClient.Close(); err != nil { - t.Error(err) - } - }) + logger := log.NewNopLogger() - want := &types.Vote{Type: tmproto.PrecommitType, Height: 1} + for _, tc := range getSignerTestCases(ctx, t, logger) { + t.Run(tc.name, func(t *testing.T) { + defer tc.closer() - stateID := types.RandStateID().WithHeight(want.Height - 1) + tc.signerServer.privVal = types.NewMockPVForQuorum(tc.quorumHash) + tc.mockPV = types.NewMockPVForQuorum(tc.quorumHash) - e := tc.signerClient.SignVote(context.Background(), tc.chainID, tc.quorumType, tc.quorumHash, want.ToProto(), stateID, nil) - assert.EqualError(t, e, "empty response") + tc.signerServer.SetRequestHandler(brokenHandler) + + want := &types.Vote{Type: tmproto.PrecommitType, Height: 1} + + stateID := types.RandStateID().WithHeight(want.Height - 1) + + e := tc.signerClient.SignVote(ctx, tc.chainID, tc.quorumType, tc.quorumHash, want.ToProto(), stateID, nil) + assert.EqualError(t, e, "empty response") + }) } } diff --git a/privval/signer_dialer_endpoint.go b/privval/signer_dialer_endpoint.go index 93d26b0439..b291a7ef5e 100644 --- a/privval/signer_dialer_endpoint.go +++ b/privval/signer_dialer_endpoint.go @@ -1,6 +1,7 @@ package privval import ( + "context" "time" "github.com/tendermint/tendermint/libs/log" @@ -58,6 +59,7 @@ func NewSignerDialerEndpoint( retryWait: defaultRetryWaitMilliseconds * time.Millisecond, maxConnRetries: defaultMaxDialRetries, } + sd.signerEndpoint.logger = logger sd.BaseService = *service.NewBaseService(logger, "SignerDialerEndpoint", sd) sd.signerEndpoint.timeoutReadWrite = defaultTimeoutReadWriteSeconds * time.Second @@ -69,28 +71,42 @@ func NewSignerDialerEndpoint( return sd } -func (sd *SignerDialerEndpoint) ensureConnection() error { +func (sd *SignerDialerEndpoint) OnStart(context.Context) error { return nil } +func (sd *SignerDialerEndpoint) OnStop() {} + +func (sd *SignerDialerEndpoint) ensureConnection(ctx context.Context) error { if sd.IsConnected() { return nil } + timer := time.NewTimer(0) + defer timer.Stop() retries := 0 for retries < sd.maxConnRetries { + if err := ctx.Err(); err != nil { + return err + } conn, err := sd.dialer() if err != nil { retries++ - sd.Logger.Debug("SignerDialer: Reconnection failed", "retries", retries, "max", sd.maxConnRetries, "err", err) + sd.logger.Debug("SignerDialer: Reconnection failed", "retries", retries, "max", sd.maxConnRetries, "err", err) + // Wait between retries - time.Sleep(sd.retryWait) + 
timer.Reset(sd.retryWait) + select { + case <-ctx.Done(): + return ctx.Err() + case <-timer.C: + } } else { sd.SetConnection(conn) - sd.Logger.Debug("SignerDialer: Connection Ready") + sd.logger.Debug("SignerDialer: Connection Ready") return nil } } - sd.Logger.Debug("SignerDialer: Max retries exceeded", "retries", retries, "max", sd.maxConnRetries) + sd.logger.Debug("SignerDialer: Max retries exceeded", "retries", retries, "max", sd.maxConnRetries) return ErrNoConnection } diff --git a/privval/signer_endpoint.go b/privval/signer_endpoint.go index 0d46ca6925..8810bdf85b 100644 --- a/privval/signer_endpoint.go +++ b/privval/signer_endpoint.go @@ -1,12 +1,14 @@ package privval import ( + "context" "fmt" "net" + "sync" "time" "github.com/tendermint/tendermint/internal/libs/protoio" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval" ) @@ -17,8 +19,9 @@ const ( type signerEndpoint struct { service.BaseService + logger log.Logger - connMtx tmsync.Mutex + connMtx sync.Mutex conn net.Conn timeoutReadWrite time.Duration @@ -52,11 +55,13 @@ func (se *signerEndpoint) GetAvailableConnection(connectionAvailableCh chan net. } // TryGetConnection retrieves a connection if it is already available -func (se *signerEndpoint) WaitConnection(connectionAvailableCh chan net.Conn, maxWait time.Duration) error { +func (se *signerEndpoint) WaitConnection(ctx context.Context, connectionAvailableCh chan net.Conn, maxWait time.Duration) error { se.connMtx.Lock() defer se.connMtx.Unlock() select { + case <-ctx.Done(): + return ctx.Err() case se.conn = <-connectionAvailableCh: case <-time.After(maxWait): return ErrConnectionTimeout @@ -104,7 +109,7 @@ func (se *signerEndpoint) ReadMessage() (msg privvalproto.Message, err error) { err = fmt.Errorf("empty error: %w", ErrReadTimeout) } - se.Logger.Debug("Dropping [read]", "obj", se) + se.logger.Debug("Dropping [read]", "obj", se) se.dropConnection() } @@ -149,7 +154,7 @@ func (se *signerEndpoint) isConnected() bool { func (se *signerEndpoint) dropConnection() { if se.conn != nil { if err := se.conn.Close(); err != nil { - se.Logger.Error("signerEndpoint::dropConnection", "err", err) + se.logger.Error("signerEndpoint::dropConnection", "err", err) } se.conn = nil } diff --git a/privval/signer_listener_endpoint.go b/privval/signer_listener_endpoint.go index 292e7a4762..12c9159735 100644 --- a/privval/signer_listener_endpoint.go +++ b/privval/signer_listener_endpoint.go @@ -1,11 +1,12 @@ package privval import ( + "context" "fmt" "net" + "sync" "time" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval" @@ -38,7 +39,7 @@ type SignerListenerEndpoint struct { pingTimer *time.Ticker pingInterval time.Duration - instanceMtx tmsync.Mutex // Ensures instance public methods access, i.e. SendRequest + instanceMtx sync.Mutex // Ensures instance public methods access, i.e. SendRequest } // NewSignerListenerEndpoint returns an instance of SignerListenerEndpoint. 
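`ensureConnection` above replaces `time.Sleep` between dial retries with a reusable `time.Timer` raced against `ctx.Done()`, so shutdown no longer has to wait out a full retry interval. The pattern in isolation (all names here are illustrative, not from the codebase):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

var errNoConnection = errors.New("no connection")

// dialWithRetry retries dial up to retries times, sleeping wait between
// attempts, but any wait is interruptible by context cancellation.
func dialWithRetry(ctx context.Context, dial func() error, retries int, wait time.Duration) error {
	timer := time.NewTimer(0)
	defer timer.Stop()
	<-timer.C // drain the initial fire so Reset below behaves predictably

	for i := 0; i < retries; i++ {
		if err := ctx.Err(); err != nil {
			return err
		}
		if err := dial(); err == nil {
			return nil
		}
		timer.Reset(wait)
		select {
		case <-ctx.Done():
			return ctx.Err() // shutdown cuts the wait short
		case <-timer.C:
		}
	}
	return errNoConnection
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	err := dialWithRetry(ctx, func() error { return errors.New("refused") }, 100, 20*time.Millisecond)
	fmt.Println(err) // context deadline exceeded: cancellation, not 2s of retries
}
```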
@@ -52,6 +53,7 @@ func NewSignerListenerEndpoint( timeoutAccept: defaultTimeoutAcceptSeconds * time.Second, } + sl.signerEndpoint.logger = logger sl.BaseService = *service.NewBaseService(logger, "SignerListenerEndpoint", sl) sl.signerEndpoint.timeoutReadWrite = defaultTimeoutReadWriteSeconds * time.Second @@ -63,7 +65,7 @@ func NewSignerListenerEndpoint( } // OnStart implements service.Service. -func (sl *SignerListenerEndpoint) OnStart() error { +func (sl *SignerListenerEndpoint) OnStart(ctx context.Context) error { sl.connectRequestCh = make(chan struct{}) sl.connectionAvailableCh = make(chan net.Conn) @@ -71,8 +73,8 @@ func (sl *SignerListenerEndpoint) OnStart() error { sl.pingInterval = time.Duration(sl.signerEndpoint.timeoutReadWrite.Milliseconds()*2/3) * time.Millisecond sl.pingTimer = time.NewTicker(sl.pingInterval) - go sl.serviceLoop() - go sl.pingLoop() + go sl.serviceLoop(ctx) + go sl.pingLoop(ctx) sl.connectRequestCh <- struct{}{} @@ -88,7 +90,7 @@ func (sl *SignerListenerEndpoint) OnStop() { // Stop listening if sl.listener != nil { if err := sl.listener.Close(); err != nil { - sl.Logger.Error("Closing Listener", "err", err) + sl.logger.Error("Closing Listener", "err", err) sl.listener = nil } } @@ -97,18 +99,18 @@ func (sl *SignerListenerEndpoint) OnStop() { } // WaitForConnection waits maxWait for a connection or returns a timeout error -func (sl *SignerListenerEndpoint) WaitForConnection(maxWait time.Duration) error { +func (sl *SignerListenerEndpoint) WaitForConnection(ctx context.Context, maxWait time.Duration) error { sl.instanceMtx.Lock() defer sl.instanceMtx.Unlock() - return sl.ensureConnection(maxWait) + return sl.ensureConnection(ctx, maxWait) } // SendRequest ensures there is a connection, sends a request and waits for a response -func (sl *SignerListenerEndpoint) SendRequest(request privvalproto.Message) (*privvalproto.Message, error) { +func (sl *SignerListenerEndpoint) SendRequest(ctx context.Context, request privvalproto.Message) (*privvalproto.Message, error) { sl.instanceMtx.Lock() defer sl.instanceMtx.Unlock() - err := sl.ensureConnection(sl.timeoutAccept) + err := sl.ensureConnection(ctx, sl.timeoutAccept) if err != nil { return nil, err } @@ -129,7 +131,7 @@ func (sl *SignerListenerEndpoint) SendRequest(request privvalproto.Message) (*pr return &res, nil } -func (sl *SignerListenerEndpoint) ensureConnection(maxWait time.Duration) error { +func (sl *SignerListenerEndpoint) ensureConnection(ctx context.Context, maxWait time.Duration) error { if sl.IsConnected() { return nil } @@ -140,9 +142,9 @@ func (sl *SignerListenerEndpoint) ensureConnection(maxWait time.Duration) error } // block until connected or timeout - sl.Logger.Info("SignerListener: Blocking for connection") + sl.logger.Info("SignerListener: Blocking for connection") sl.triggerConnect() - return sl.WaitConnection(sl.connectionAvailableCh, maxWait) + return sl.WaitConnection(ctx, sl.connectionAvailableCh, maxWait) } func (sl *SignerListenerEndpoint) acceptNewConnection() (net.Conn, error) { @@ -151,7 +153,7 @@ func (sl *SignerListenerEndpoint) acceptNewConnection() (net.Conn, error) { } // wait for a new conn - sl.Logger.Info("SignerListener: Listening for new connection") + sl.logger.Info("SignerListener: Listening for new connection") conn, err := sl.listener.Accept() if err != nil { return nil, err @@ -172,19 +174,19 @@ func (sl *SignerListenerEndpoint) triggerReconnect() { sl.triggerConnect() } -func (sl *SignerListenerEndpoint) serviceLoop() { +func (sl *SignerListenerEndpoint) 
serviceLoop(ctx context.Context) { for { select { case <-sl.connectRequestCh: { conn, err := sl.acceptNewConnection() if err == nil { - sl.Logger.Info("SignerListener: Connected") + sl.logger.Info("SignerListener: Connected") // We have a good connection, wait for someone that needs one otherwise cancellation select { case sl.connectionAvailableCh <- conn: - case <-sl.Quit(): + case <-ctx.Done(): return } } @@ -194,24 +196,24 @@ func (sl *SignerListenerEndpoint) serviceLoop() { default: } } - case <-sl.Quit(): + case <-ctx.Done(): return } } } -func (sl *SignerListenerEndpoint) pingLoop() { +func (sl *SignerListenerEndpoint) pingLoop(ctx context.Context) { for { select { case <-sl.pingTimer.C: { - _, err := sl.SendRequest(mustWrapMsg(&privvalproto.PingRequest{})) + _, err := sl.SendRequest(ctx, mustWrapMsg(&privvalproto.PingRequest{})) if err != nil { - sl.Logger.Error("SignerListener: Ping timeout") + sl.logger.Error("SignerListener: Ping timeout") sl.triggerReconnect() } } - case <-sl.Quit(): + case <-ctx.Done(): return } } diff --git a/privval/signer_listener_endpoint_test.go b/privval/signer_listener_endpoint_test.go index cbd45e6cee..6049c62450 100644 --- a/privval/signer_listener_endpoint_test.go +++ b/privval/signer_listener_endpoint_test.go @@ -1,10 +1,12 @@ package privval import ( + "context" "net" "testing" "time" + "github.com/fortytw2/leaktest" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -38,6 +40,13 @@ func TestSignerRemoteRetryTCPOnly(t *testing.T) { retries = 10 ) + t.Cleanup(leaktest.Check(t)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + ln, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(t, err) @@ -60,8 +69,7 @@ func TestSignerRemoteRetryTCPOnly(t *testing.T) { } }(ln, attemptCh) - dialerEndpoint := NewSignerDialerEndpoint( - log.TestingLogger(), + dialerEndpoint := NewSignerDialerEndpoint(logger, DialTCPFn(ln.Addr().String(), testTimeoutReadWrite, ed25519.GenPrivKey()), ) SignerDialerEndpointTimeoutReadWrite(time.Millisecond)(dialerEndpoint) @@ -71,13 +79,9 @@ func TestSignerRemoteRetryTCPOnly(t *testing.T) { mockPV := types.NewMockPV() signerServer := NewSignerServer(dialerEndpoint, chainID, mockPV) - err = signerServer.Start() + err = signerServer.Start(ctx) require.NoError(t, err) - t.Cleanup(func() { - if err := signerServer.Stop(); err != nil { - t.Error(err) - } - }) + t.Cleanup(signerServer.Wait) select { case attempts := <-attemptCh: @@ -88,15 +92,22 @@ func TestSignerRemoteRetryTCPOnly(t *testing.T) { } func TestRetryConnToRemoteSigner(t *testing.T) { + t.Cleanup(leaktest.Check(t)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + for _, tc := range getDialerTestCases(t) { var ( - logger = log.TestingLogger() chainID = tmrand.Str(12) mockPV = types.NewMockPV() endpointIsOpenCh = make(chan struct{}) thisConnTimeout = testTimeoutReadWrite - listenerEndpoint = newSignerListenerEndpoint(logger, tc.addr, thisConnTimeout) + listenerEndpoint = newSignerListenerEndpoint(t, logger, tc.addr, thisConnTimeout) ) + t.Cleanup(listenerEndpoint.Wait) dialerEndpoint := NewSignerDialerEndpoint( logger, @@ -107,19 +118,14 @@ func TestRetryConnToRemoteSigner(t *testing.T) { signerServer := NewSignerServer(dialerEndpoint, chainID, mockPV) - startListenerEndpointAsync(t, listenerEndpoint, endpointIsOpenCh) - t.Cleanup(func() { - if err := listenerEndpoint.Stop(); err != nil { - t.Error(err) - } - }) + 
startListenerEndpointAsync(ctx, t, listenerEndpoint, endpointIsOpenCh) - require.NoError(t, signerServer.Start()) + require.NoError(t, signerServer.Start(ctx)) assert.True(t, signerServer.IsRunning()) + t.Cleanup(signerServer.Wait) + <-endpointIsOpenCh - if err := signerServer.Stop(); err != nil { - t.Error(err) - } + signerServer.Stop() dialerEndpoint2 := NewSignerDialerEndpoint( logger, @@ -128,13 +134,10 @@ func TestRetryConnToRemoteSigner(t *testing.T) { signerServer2 := NewSignerServer(dialerEndpoint2, chainID, mockPV) // let some pings pass - require.NoError(t, signerServer2.Start()) + require.NoError(t, signerServer2.Start(ctx)) assert.True(t, signerServer2.IsRunning()) - t.Cleanup(func() { - if err := signerServer2.Stop(); err != nil { - t.Error(err) - } - }) + t.Cleanup(signerServer2.Stop) + t.Cleanup(signerServer2.Wait) // give the client some time to re-establish the conn to the remote signer // should see sth like this in the logs: @@ -145,14 +148,11 @@ func TestRetryConnToRemoteSigner(t *testing.T) { } } -func newSignerListenerEndpoint(logger log.Logger, addr string, timeoutReadWrite time.Duration) *SignerListenerEndpoint { +func newSignerListenerEndpoint(t *testing.T, logger log.Logger, addr string, timeoutReadWrite time.Duration) *SignerListenerEndpoint { proto, address := tmnet.ProtocolAndAddress(addr) ln, err := net.Listen(proto, address) - logger.Info("SignerListener: Listening", "proto", proto, "address", address) - if err != nil { - panic(err) - } + require.NoError(t, err) var listener net.Listener @@ -175,22 +175,30 @@ func newSignerListenerEndpoint(logger log.Logger, addr string, timeoutReadWrite ) } -func startListenerEndpointAsync(t *testing.T, sle *SignerListenerEndpoint, endpointIsOpenCh chan struct{}) { +func startListenerEndpointAsync( + ctx context.Context, + t *testing.T, + sle *SignerListenerEndpoint, + endpointIsOpenCh chan struct{}, +) { + t.Helper() + go func(sle *SignerListenerEndpoint) { - require.NoError(t, sle.Start()) + require.NoError(t, sle.Start(ctx)) assert.True(t, sle.IsRunning()) close(endpointIsOpenCh) }(sle) } func getMockEndpoints( + ctx context.Context, t *testing.T, + logger log.Logger, addr string, socketDialer SocketDialer, ) (*SignerListenerEndpoint, *SignerDialerEndpoint) { var ( - logger = log.TestingLogger() endpointIsOpenCh = make(chan struct{}) dialerEndpoint = NewSignerDialerEndpoint( @@ -198,15 +206,15 @@ func getMockEndpoints( socketDialer, ) - listenerEndpoint = newSignerListenerEndpoint(logger, addr, testTimeoutReadWrite) + listenerEndpoint = newSignerListenerEndpoint(t, logger, addr, testTimeoutReadWrite) ) SignerDialerEndpointTimeoutReadWrite(testTimeoutReadWrite)(dialerEndpoint) SignerDialerEndpointConnRetries(1e6)(dialerEndpoint) - startListenerEndpointAsync(t, listenerEndpoint, endpointIsOpenCh) + startListenerEndpointAsync(ctx, t, listenerEndpoint, endpointIsOpenCh) - require.NoError(t, dialerEndpoint.Start()) + require.NoError(t, dialerEndpoint.Start(ctx)) assert.True(t, dialerEndpoint.IsRunning()) <-endpointIsOpenCh diff --git a/privval/signer_server.go b/privval/signer_server.go index 24bf67cc5f..4945b81506 100644 --- a/privval/signer_server.go +++ b/privval/signer_server.go @@ -3,8 +3,8 @@ package privval import ( "context" "io" + "sync" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/service" privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval" "github.com/tendermint/tendermint/types" @@ -24,7 +24,7 @@ type SignerServer struct { chainID string 
privVal types.PrivValidator - handlerMtx tmsync.Mutex + handlerMtx sync.Mutex validationRequestHandler ValidationRequestHandlerFunc } @@ -36,20 +36,20 @@ func NewSignerServer(endpoint *SignerDialerEndpoint, chainID string, privVal typ validationRequestHandler: DefaultValidationRequestHandler, } - ss.BaseService = *service.NewBaseService(endpoint.Logger, "SignerServer", ss) + ss.BaseService = *service.NewBaseService(endpoint.logger, "SignerServer", ss) return ss } // OnStart implements service.Service. -func (ss *SignerServer) OnStart() error { - go ss.serviceLoop() +func (ss *SignerServer) OnStart(ctx context.Context) error { + go ss.serviceLoop(ctx) return nil } // OnStop implements service.Service. func (ss *SignerServer) OnStop() { - ss.endpoint.Logger.Debug("SignerServer: OnStop calling Close") + ss.endpoint.logger.Debug("SignerServer: OnStop calling Close") _ = ss.endpoint.Close() } @@ -60,7 +60,7 @@ func (ss *SignerServer) SetRequestHandler(validationRequestHandler ValidationReq ss.validationRequestHandler = validationRequestHandler } -func (ss *SignerServer) servicePendingRequest() { +func (ss *SignerServer) servicePendingRequest(ctx context.Context) { if !ss.IsRunning() { return // Ignore error from closing. } @@ -68,7 +68,7 @@ func (ss *SignerServer) servicePendingRequest() { req, err := ss.endpoint.ReadMessage() if err != nil { if err != io.EOF { - ss.Logger.Error("SignerServer: HandleMessage", "err", err) + ss.endpoint.logger.Error("SignerServer: HandleMessage", "err", err) } return } @@ -78,31 +78,29 @@ func (ss *SignerServer) servicePendingRequest() { // limit the scope of the lock ss.handlerMtx.Lock() defer ss.handlerMtx.Unlock() - res, err = ss.validationRequestHandler(context.TODO(), ss.privVal, req, ss.chainID) // todo + res, err = ss.validationRequestHandler(ctx, ss.privVal, req, ss.chainID) // todo if err != nil { // only log the error; we'll reply with an error in res - ss.Logger.Error("SignerServer: handleMessage", "err", err) + ss.endpoint.logger.Error("SignerServer: handleMessage", "err", err) } } err = ss.endpoint.WriteMessage(res) if err != nil { - ss.Logger.Error("SignerServer: writeMessage", "err", err) + ss.endpoint.logger.Error("SignerServer: writeMessage", "err", err) } } -func (ss *SignerServer) serviceLoop() { +func (ss *SignerServer) serviceLoop(ctx context.Context) { for { select { + case <-ctx.Done(): + return default: - err := ss.endpoint.ensureConnection() - if err != nil { + if err := ss.endpoint.ensureConnection(ctx); err != nil { return } - ss.servicePendingRequest() - - case <-ss.Quit(): - return + ss.servicePendingRequest(ctx) } } } diff --git a/privval/socket_dialers_test.go b/privval/socket_dialers_test.go index 32c07c5911..7ec8fe30f6 100644 --- a/privval/socket_dialers_test.go +++ b/privval/socket_dialers_test.go @@ -9,11 +9,21 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/crypto/ed25519" + tmnet "github.com/tendermint/tendermint/libs/net" ) +// getFreeLocalhostAddrPort returns a free localhost:port address +func getFreeLocalhostAddrPort(t *testing.T) string { + t.Helper() + port, err := tmnet.GetFreePort() + require.NoError(t, err) + + return fmt.Sprintf("127.0.0.1:%d", port) +} + func getDialerTestCases(t *testing.T) []dialerTestCase { - tcpAddr := GetFreeLocalhostAddrPort() - unixFilePath, err := testUnixAddr() + tcpAddr := getFreeLocalhostAddrPort(t) + unixFilePath, err := testUnixAddr(t) require.NoError(t, err) unixAddr := fmt.Sprintf("unix://%s", unixFilePath) @@ -31,7 +41,7 @@ func 
getDialerTestCases(t *testing.T) []dialerTestCase { func TestIsConnTimeoutForFundamentalTimeouts(t *testing.T) { // Generate a networking timeout - tcpAddr := GetFreeLocalhostAddrPort() + tcpAddr := getFreeLocalhostAddrPort(t) dialer := DialTCPFn(tcpAddr, time.Millisecond, ed25519.GenPrivKey()) _, err := dialer() assert.Error(t, err) @@ -39,7 +49,7 @@ } func TestIsConnTimeoutForWrappedConnTimeouts(t *testing.T) { - tcpAddr := GetFreeLocalhostAddrPort() + tcpAddr := getFreeLocalhostAddrPort(t) dialer := DialTCPFn(tcpAddr, time.Millisecond, ed25519.GenPrivKey()) _, err := dialer() assert.Error(t, err) diff --git a/privval/socket_listeners_test.go b/privval/socket_listeners_test.go index 5e95ec10ce..e91d111d00 100644 --- a/privval/socket_listeners_test.go +++ b/privval/socket_listeners_test.go @@ -1,12 +1,13 @@ package privval import ( - "io/ioutil" "net" "os" "testing" "time" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto/ed25519" ) @@ -28,14 +29,17 @@ type listenerTestCase struct { // testUnixAddr will attempt to obtain a platform-independent temporary file // name for a Unix socket -func testUnixAddr() (string, error) { - f, err := ioutil.TempFile("", "tendermint-privval-test-*") +func testUnixAddr(t *testing.T) (string, error) { + // N.B. We can't use t.TempDir here because socket filenames have a + // restrictive length limit (~100 bytes) for silly historical reasons. + f, err := os.CreateTemp("", "tendermint-privval-test-*.sock") if err != nil { return "", err } addr := f.Name() f.Close() - os.Remove(addr) + os.Remove(addr) // remove so the test can bind it + t.Cleanup(func() { os.Remove(addr) }) // clean up after the test return addr, nil } @@ -56,7 +60,7 @@ func tcpListenerTestCase(t *testing.T, timeoutAccept, timeoutReadWrite time.Dura } func unixListenerTestCase(t *testing.T, timeoutAccept, timeoutReadWrite time.Duration) listenerTestCase { - addr, err := testUnixAddr() + addr, err := testUnixAddr(t) if err != nil { t.Fatal(err) } @@ -108,9 +112,7 @@ func TestListenerTimeoutReadWrite(t *testing.T) { for _, tc := range listenerTestCases(t, timeoutAccept, timeoutReadWrite) { go func(dialer SocketDialer) { _, err := dialer() - if err != nil { - panic(err) - } + require.NoError(t, err) }(tc.dialer) c, err := tc.listener.Accept() diff --git a/privval/utils.go b/privval/utils.go index 0b8cced342..1d6681b452 100644 --- a/privval/utils.go +++ b/privval/utils.go @@ -51,12 +51,3 @@ func NewSignerListener(listenAddr string, logger log.Logger) (*SignerListenerEnd return pve, nil } - -// GetFreeLocalhostAddrPort returns a free localhost:port address -func GetFreeLocalhostAddrPort() string { - port, err := tmnet.GetFreePort() - if err != nil { - panic(err) - } - return fmt.Sprintf("127.0.0.1:%d", port) -} diff --git a/proto/README.md b/proto/README.md new file mode 100644 index 0000000000..a0701d3bca --- /dev/null +++ b/proto/README.md @@ -0,0 +1,21 @@ +# Protocol Buffers + +This section defines the protocol buffers used in Tendermint. It is split into two directories: `spec`, the types required for all implementations, and `tendermint`, a set of types internal to the Go implementation. All generated Go code is also stored in `tendermint`.
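Because the generated Go code lives alongside the `.proto` sources, downstream Go programs consume these types via the `go_package` paths declared in each file (for example, `github.com/tendermint/tendermint/abci/types`, visible later in this diff). A small, hypothetical usage sketch:

```go
package main

import (
	"fmt"

	abci "github.com/tendermint/tendermint/abci/types"
)

func main() {
	// RequestEcho is one of the generated ABCI request types; gogoproto
	// emits plain Go structs with getters such as GetMessage.
	req := abci.RequestEcho{Message: "ping"}
	fmt.Println(req.GetMessage()) // prints "ping"
}
```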
+More descriptions of the data structures are located in the spec directory as follows: + +- [Block](../spec/core/data_structures.md) +- [ABCI](../spec/abci/README.md) +- [P2P](../spec/p2p/messages/README.md) + +## Process to generate protos + +The `.proto` files within this section are core to the protocol and updates must be treated as such. + +### Steps + +1. Make an issue with the proposed change. + - Within the issue, members from the Tendermint team will leave comments. If there is no consensus on the change, an [RFC](../docs/rfc/README.md) may be requested. + 1a. Submission of an RFC as a pull request should be made to facilitate further discussion. + 1b. Merge the RFC. +2. Make the necessary changes to the `.proto` file(s), [core data structures](../spec/core/data_structures.md) and/or [ABCI protocol](../spec/abci/apps.md). +3. Rebuild the Go protocol buffers by running `make proto-gen`. Ensure that the project builds correctly by running `make build`. diff --git a/proto/buf.lock b/proto/buf.lock new file mode 100644 index 0000000000..8c415e1af0 --- /dev/null +++ b/proto/buf.lock @@ -0,0 +1,7 @@ +# Generated by buf. DO NOT EDIT. +version: v1 +deps: + - remote: buf.build + owner: gogo + repository: protobuf + commit: 4df00b267f944190a229ce3695781e99 diff --git a/buf.yaml b/proto/buf.yaml similarity index 50% rename from buf.yaml rename to proto/buf.yaml index cc4aced576..816db10f76 100644 --- a/buf.yaml +++ b/proto/buf.yaml @@ -1,16 +1,11 @@ -version: v1beta1 - -build: - roots: - - proto - - third_party/proto +version: v1 +deps: + - buf.build/gogo/protobuf +breaking: + use: + - FILE lint: use: - BASIC - FILE_LOWER_SNAKE_CASE - UNARY_RPC - ignore: - - gogoproto -breaking: - use: - - FILE diff --git a/proto/tendermint/abci/types.proto b/proto/tendermint/abci/types.proto index 3e1bb867a2..fac3d6220f 100644 --- a/proto/tendermint/abci/types.proto +++ b/proto/tendermint/abci/types.proto @@ -3,8 +3,6 @@ package tendermint.abci; option go_package = "github.com/tendermint/tendermint/abci/types"; -// For more information on gogo.proto, see: -// https://github.com/gogo/protobuf/blob/master/extensions.md import "tendermint/crypto/proof.proto"; import "tendermint/types/types.proto"; import "tendermint/crypto/keys.proto"; @@ -21,20 +19,25 @@ import "gogoproto/gogo.proto"; message Request { oneof value { - RequestEcho echo = 1; - RequestFlush flush = 2; - RequestInfo info = 3; - RequestInitChain init_chain = 4; - RequestQuery query = 5; - RequestBeginBlock begin_block = 6; - RequestCheckTx check_tx = 7; - RequestDeliverTx deliver_tx = 8; - RequestEndBlock end_block = 9; - RequestCommit commit = 10; - RequestListSnapshots list_snapshots = 11; - RequestOfferSnapshot offer_snapshot = 12; - RequestLoadSnapshotChunk load_snapshot_chunk = 13; - RequestApplySnapshotChunk apply_snapshot_chunk = 14; + RequestEcho echo = 1; + RequestFlush flush = 2; + RequestInfo info = 3; + RequestInitChain init_chain = 4; + RequestQuery query = 5; + RequestBeginBlock begin_block = 6 [deprecated = true]; + RequestCheckTx check_tx = 7; + RequestDeliverTx deliver_tx = 8 [deprecated = true]; + RequestEndBlock end_block = 9 [deprecated = true]; + RequestCommit commit = 10; + RequestListSnapshots list_snapshots = 11; + RequestOfferSnapshot offer_snapshot = 12; + RequestLoadSnapshotChunk load_snapshot_chunk = 13; + RequestApplySnapshotChunk apply_snapshot_chunk = 14; + RequestPrepareProposal prepare_proposal = 15; + RequestProcessProposal process_proposal = 16; + RequestExtendVote extend_vote = 17; + RequestVerifyVoteExtension
verify_vote_extension = 18; + RequestFinalizeBlock finalize_block = 19; } } @@ -72,8 +75,8 @@ message RequestQuery { message RequestBeginBlock { bytes hash = 1; tendermint.types.Header header = 2 [(gogoproto.nullable) = false]; - LastCommitInfo last_commit_info = 3 [(gogoproto.nullable) = false]; - repeated Evidence byzantine_validators = 4 [(gogoproto.nullable) = false]; + CommitInfo last_commit_info = 3 [(gogoproto.nullable) = false]; + repeated Misbehavior byzantine_validators = 4 [(gogoproto.nullable) = false]; } enum CheckTxType { @@ -119,26 +122,86 @@ message RequestApplySnapshotChunk { string sender = 3; } +message RequestPrepareProposal { + // the modified transactions cannot exceed this size. + int64 max_tx_bytes = 1; + // txs is an array of transactions that will be included in a block, + // sent to the app for possible modifications. + repeated bytes txs = 2; + ExtendedCommitInfo local_last_commit = 3 [(gogoproto.nullable) = false]; + repeated Misbehavior byzantine_validators = 4 [(gogoproto.nullable) = false]; + int64 height = 5; + google.protobuf.Timestamp time = 6 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + bytes next_validators_hash = 7; + + bytes proposer_pro_tx_hash = 8; +} + +message RequestProcessProposal { + repeated bytes txs = 1; + CommitInfo proposed_last_commit = 2 [(gogoproto.nullable) = false]; + repeated Misbehavior byzantine_validators = 3 [(gogoproto.nullable) = false]; + // hash is the merkle root hash of the fields of the proposed block. + bytes hash = 4; + int64 height = 5; + google.protobuf.Timestamp time = 6 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + bytes next_validators_hash = 7; + + bytes proposer_pro_tx_hash = 8; +} + +// Extends a vote with application-side injection +message RequestExtendVote { + bytes hash = 1; + int64 height = 2; +} + +// Verify the vote extension +message RequestVerifyVoteExtension { + bytes hash = 1; + bytes validator_pro_tx_hash = 2; + int64 height = 3; + bytes vote_extension = 4; +} + +message RequestFinalizeBlock { + repeated bytes txs = 1; + CommitInfo decided_last_commit = 2 [(gogoproto.nullable) = false]; + repeated Misbehavior byzantine_validators = 3 [(gogoproto.nullable) = false]; + // hash is the merkle root hash of the fields of the proposed block. 
+ bytes hash = 4; + int64 height = 5; + google.protobuf.Timestamp time = 6 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + bytes next_validators_hash = 7; + + bytes proposer_pro_tx_hash = 8; +} + //---------------------------------------- // Response types message Response { oneof value { - ResponseException exception = 1; - ResponseEcho echo = 2; - ResponseFlush flush = 3; - ResponseInfo info = 4; - ResponseInitChain init_chain = 5; - ResponseQuery query = 6; - ResponseBeginBlock begin_block = 7; - ResponseCheckTx check_tx = 8; - ResponseDeliverTx deliver_tx = 9; - ResponseEndBlock end_block = 10; - ResponseCommit commit = 11; - ResponseListSnapshots list_snapshots = 12; - ResponseOfferSnapshot offer_snapshot = 13; - ResponseLoadSnapshotChunk load_snapshot_chunk = 14; - ResponseApplySnapshotChunk apply_snapshot_chunk = 15; + ResponseException exception = 1; + ResponseEcho echo = 2; + ResponseFlush flush = 3; + ResponseInfo info = 4; + ResponseInitChain init_chain = 5; + ResponseQuery query = 6; + ResponseBeginBlock begin_block = 7 [deprecated = true]; + ResponseCheckTx check_tx = 8; + ResponseDeliverTx deliver_tx = 9 [deprecated = true]; + ResponseEndBlock end_block = 10 [deprecated = true]; + ResponseCommit commit = 11; + ResponseListSnapshots list_snapshots = 12; + ResponseOfferSnapshot offer_snapshot = 13; + ResponseLoadSnapshotChunk load_snapshot_chunk = 14; + ResponseApplySnapshotChunk apply_snapshot_chunk = 15; + ResponsePrepareProposal prepare_proposal = 16; + ResponseProcessProposal process_proposal = 17; + ResponseExtendVote extend_vote = 18; + ResponseVerifyVoteExtension verify_vote_extension = 19; + ResponseFinalizeBlock finalize_block = 20; } } @@ -197,15 +260,16 @@ message ResponseCheckTx { bytes data = 2; string log = 3; // nondeterministic string info = 4; // nondeterministic - int64 gas_wanted = 5 [json_name = "gas_wanted"]; - int64 gas_used = 6 [json_name = "gas_used"]; + int64 gas_wanted = 5; + int64 gas_used = 6; repeated Event events = 7 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; string codespace = 8; string sender = 9; int64 priority = 10; // mempool_error is set by Tendermint. - // ABCI applictions creating a ResponseCheckTX should not set mempool_error. + + // ABCI applications creating a ResponseCheckTX should not set mempool_error. 
string mempool_error = 11; } @@ -271,16 +335,75 @@ message ResponseApplySnapshotChunk { } } +message ResponsePrepareProposal { + repeated TxRecord tx_records = 1; + bytes app_hash = 2; + repeated ExecTxResult tx_results = 3; + repeated ValidatorUpdate validator_updates = 4; + tendermint.types.ConsensusParams consensus_param_updates = 5; +} + +message ResponseProcessProposal { + ProposalStatus status = 1; + bytes app_hash = 2; + repeated ExecTxResult tx_results = 3; + repeated ValidatorUpdate validator_updates = 4; + tendermint.types.ConsensusParams consensus_param_updates = 5; + + enum ProposalStatus { + UNKNOWN = 0; + ACCEPT = 1; + REJECT = 2; + } +} + +message ResponseExtendVote { + bytes vote_extension = 1; +} + +message ResponseVerifyVoteExtension { + VerifyStatus status = 1; + + enum VerifyStatus { + UNKNOWN = 0; + ACCEPT = 1; + REJECT = 2; + } +} + +message ResponseFinalizeBlock { + repeated Event events = 1 + [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; + repeated ExecTxResult tx_results = 2; + tendermint.types.ConsensusParams consensus_param_updates = 4; + bytes app_hash = 5; + int64 retain_height = 6; + + tendermint.types.CoreChainLock next_core_chain_lock_update = 100; + ValidatorSetUpdate validator_set_update = 101 [(gogoproto.nullable) = true]; +} + //---------------------------------------- // Misc. -message LastCommitInfo { +message CommitInfo { int32 round = 1; bytes quorum_hash = 3; bytes block_signature = 4; bytes state_signature = 5; } +// ExtendedCommitInfo is similar to CommitInfo except that it is only used in +// the PrepareProposal request such that Tendermint can provide vote extensions +// to the application. +message ExtendedCommitInfo { + // The round at which the block proposer decided in the previous height. + int32 round = 1; + // List of validators' addresses in the last validator set with their voting + // information, including vote extensions. + repeated ExtendedVoteInfo votes = 2 [(gogoproto.nullable) = false]; +} + // Event allows application developers to attach additional information to // ResponseBeginBlock, ResponseEndBlock, ResponseCheckTx and ResponseDeliverTx. // Later, transactions may be queried using these events. @@ -296,14 +419,42 @@ message EventAttribute { bool index = 3; // nondeterministic } +// ExecTxResult contains results of executing one individual transaction. +// +// * Its structure is equivalent to #ResponseDeliverTx which will be deprecated/deleted +message ExecTxResult { + uint32 code = 1; + bytes data = 2; + string log = 3; // nondeterministic + string info = 4; // nondeterministic + int64 gas_wanted = 5; + int64 gas_used = 6; + repeated Event events = 7 + [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; // nondeterministic + string codespace = 8; +} + // TxResult contains results of executing the transaction. // // One usage is indexing transaction results. message TxResult { - int64 height = 1; - uint32 index = 2; - bytes tx = 3; - ResponseDeliverTx result = 4 [(gogoproto.nullable) = false]; + int64 height = 1; + uint32 index = 2; + bytes tx = 3; + ExecTxResult result = 4 [(gogoproto.nullable) = false]; +} + +message TxRecord { + TxAction action = 1; + bytes tx = 2; + + // TxAction contains App-provided information on what to do with a transaction that is part of a raw proposal + enum TxAction { + UNKNOWN = 0; // Unknown action + UNMODIFIED = 1; // The Application did not modify this transaction. + ADDED = 2; // The Application added this transaction. 
+ REMOVED = 3; // The Application wants this transaction removed from the proposal and the mempool. + } } //---------------------------------------- @@ -345,14 +496,24 @@ message VoteInfo { bool signed_last_block = 2; } -enum EvidenceType { +// ExtendedVoteInfo +message ExtendedVoteInfo { + // The validator that sent the vote. + Validator validator = 1 [(gogoproto.nullable) = false]; + // Indicates whether the validator signed the last block, allowing for rewards based on validator availability. + bool signed_last_block = 2; + // Non-deterministic extension provided by the sending validator's application. + bytes vote_extension = 3; +} + +enum MisbehaviorType { UNKNOWN = 0; DUPLICATE_VOTE = 1; LIGHT_CLIENT_ATTACK = 2; } -message Evidence { - EvidenceType type = 1; +message Misbehavior { + MisbehaviorType type = 1; // The offending validator Validator validator = 2 [(gogoproto.nullable) = false]; // The height when the offense occurred @@ -384,15 +545,17 @@ service ABCIApplication { rpc Echo(RequestEcho) returns (ResponseEcho); rpc Flush(RequestFlush) returns (ResponseFlush); rpc Info(RequestInfo) returns (ResponseInfo); - rpc DeliverTx(RequestDeliverTx) returns (ResponseDeliverTx); rpc CheckTx(RequestCheckTx) returns (ResponseCheckTx); rpc Query(RequestQuery) returns (ResponseQuery); rpc Commit(RequestCommit) returns (ResponseCommit); rpc InitChain(RequestInitChain) returns (ResponseInitChain); - rpc BeginBlock(RequestBeginBlock) returns (ResponseBeginBlock); - rpc EndBlock(RequestEndBlock) returns (ResponseEndBlock); rpc ListSnapshots(RequestListSnapshots) returns (ResponseListSnapshots); rpc OfferSnapshot(RequestOfferSnapshot) returns (ResponseOfferSnapshot); rpc LoadSnapshotChunk(RequestLoadSnapshotChunk) returns (ResponseLoadSnapshotChunk); rpc ApplySnapshotChunk(RequestApplySnapshotChunk) returns (ResponseApplySnapshotChunk); + rpc PrepareProposal(RequestPrepareProposal) returns (ResponsePrepareProposal); + rpc ProcessProposal(RequestProcessProposal) returns (ResponseProcessProposal); + rpc ExtendVote(RequestExtendVote) returns (ResponseExtendVote); + rpc VerifyVoteExtension(RequestVerifyVoteExtension) returns (ResponseVerifyVoteExtension); + rpc FinalizeBlock(RequestFinalizeBlock) returns (ResponseFinalizeBlock); } diff --git a/proto/tendermint/blocksync/types.pb.go b/proto/tendermint/blocksync/types.pb.go index fcbef7107a..c002003228 100644 --- a/proto/tendermint/blocksync/types.pb.go +++ b/proto/tendermint/blocksync/types.pb.go @@ -68,7 +68,8 @@ func (m *BlockRequest) GetHeight() int64 { return 0 } -// NoBlockResponse informs the node that the peer does not have block at the requested height +// NoBlockResponse informs the node that the peer does not have block at the +// requested height type NoBlockResponse struct { Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` } diff --git a/proto/tendermint/blocksync/types.proto b/proto/tendermint/blocksync/types.proto index 8c187c793e..4febfd145c 100644 --- a/proto/tendermint/blocksync/types.proto +++ b/proto/tendermint/blocksync/types.proto @@ -10,7 +10,8 @@ message BlockRequest { int64 height = 1; } -// NoBlockResponse informs the node that the peer does not have block at the requested height +// NoBlockResponse informs the node that the peer does not have block at the +// requested height message NoBlockResponse { int64 height = 1; } @@ -21,8 +22,7 @@ message BlockResponse { } // StatusRequest requests the status of a peer. 
-message StatusRequest { -} +message StatusRequest {} // StatusResponse is a peer response to inform their status. message StatusResponse { diff --git a/proto/tendermint/consensus/types.pb.go b/proto/tendermint/consensus/types.pb.go index 7d14ca246a..859ddabc70 100644 --- a/proto/tendermint/consensus/types.pb.go +++ b/proto/tendermint/consensus/types.pb.go @@ -103,8 +103,10 @@ func (m *NewRoundStep) GetLastCommitRound() int32 { return 0 } -// NewValidBlock is sent when a validator observes a valid block B in some round r, -//i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in the round r. +// NewValidBlock is sent when a validator observes a valid block B in some round +// r, +// i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in +// the round r. // In case the block is also committed, then IsCommit flag is set to true. type NewValidBlock struct { Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` @@ -630,7 +632,8 @@ func (m *VoteSetMaj23) GetBlockID() types.BlockID { return types.BlockID{} } -// VoteSetBits is sent to communicate the bit-array of votes seen for the BlockID. +// VoteSetBits is sent to communicate the bit-array of votes seen for the +// BlockID. type VoteSetBits struct { Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` Round int32 `protobuf:"varint,2,opt,name=round,proto3" json:"round,omitempty"` diff --git a/proto/tendermint/consensus/types.proto b/proto/tendermint/consensus/types.proto index 0e1e052bf8..034cf503dd 100644 --- a/proto/tendermint/consensus/types.proto +++ b/proto/tendermint/consensus/types.proto @@ -17,15 +17,18 @@ message NewRoundStep { int32 last_commit_round = 5; } -// NewValidBlock is sent when a validator observes a valid block B in some round r, -//i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in the round r. +// NewValidBlock is sent when a validator observes a valid block B in some round +// r, +// i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in +// the round r. // In case the block is also committed, then IsCommit flag is set to true. message NewValidBlock { int64 height = 1; int32 round = 2; - tendermint.types.PartSetHeader block_part_set_header = 3 [(gogoproto.nullable) = false]; - tendermint.libs.bits.BitArray block_parts = 4; - bool is_commit = 5; + tendermint.types.PartSetHeader block_part_set_header = 3 + [(gogoproto.nullable) = false]; + tendermint.libs.bits.BitArray block_parts = 4; + bool is_commit = 5; } // Proposal is sent when a new block is proposed. @@ -37,7 +40,8 @@ message Proposal { message ProposalPOL { int64 height = 1; int32 proposal_pol_round = 2; - tendermint.libs.bits.BitArray proposal_pol = 3 [(gogoproto.nullable) = false]; + tendermint.libs.bits.BitArray proposal_pol = 3 + [(gogoproto.nullable) = false]; } // BlockPart is sent when gossipping a piece of the proposed block. @@ -76,16 +80,19 @@ message VoteSetMaj23 { int64 height = 1; int32 round = 2; tendermint.types.SignedMsgType type = 3; - tendermint.types.BlockID block_id = 4 [(gogoproto.customname) = "BlockID", (gogoproto.nullable) = false]; + tendermint.types.BlockID block_id = 4 + [(gogoproto.customname) = "BlockID", (gogoproto.nullable) = false]; } -// VoteSetBits is sent to communicate the bit-array of votes seen for the BlockID. +// VoteSetBits is sent to communicate the bit-array of votes seen for the +// BlockID. 
message VoteSetBits { int64 height = 1; int32 round = 2; tendermint.types.SignedMsgType type = 3; - tendermint.types.BlockID block_id = 4 [(gogoproto.customname) = "BlockID", (gogoproto.nullable) = false]; - tendermint.libs.bits.BitArray votes = 5 [(gogoproto.nullable) = false]; + tendermint.types.BlockID block_id = 4 + [(gogoproto.customname) = "BlockID", (gogoproto.nullable) = false]; + tendermint.libs.bits.BitArray votes = 5 [(gogoproto.nullable) = false]; } message Message { diff --git a/proto/tendermint/crypto/crypto.go b/proto/tendermint/crypto/crypto.go new file mode 100644 index 0000000000..50d0769edb --- /dev/null +++ b/proto/tendermint/crypto/crypto.go @@ -0,0 +1,8 @@ +package crypto + +// These functions export type tags for use with internal/jsontypes. + +func (*PublicKey) TypeTag() string { return "tendermint.crypto.PublicKey" } +func (*PublicKey_Ed25519) TypeTag() string { return "tendermint.crypto.PublicKey_Ed25519" } +func (*PublicKey_Secp256K1) TypeTag() string { return "tendermint.crypto.PublicKey_Secp256K1" } +func (*PublicKey_Bls12381) TypeTag() string { return "tendermint.crypto.PublicKey_Bls12381" } diff --git a/proto/tendermint/p2p/pex.go b/proto/tendermint/p2p/pex.go index 38c8239dde..61036142fb 100644 --- a/proto/tendermint/p2p/pex.go +++ b/proto/tendermint/p2p/pex.go @@ -13,10 +13,6 @@ func (m *PexMessage) Wrap(pb proto.Message) error { m.Sum = &PexMessage_PexRequest{PexRequest: msg} case *PexResponse: m.Sum = &PexMessage_PexResponse{PexResponse: msg} - case *PexRequestV2: - m.Sum = &PexMessage_PexRequestV2{PexRequestV2: msg} - case *PexResponseV2: - m.Sum = &PexMessage_PexResponseV2{PexResponseV2: msg} default: return fmt.Errorf("unknown pex message: %T", msg) } @@ -31,10 +27,6 @@ func (m *PexMessage) Unwrap() (proto.Message, error) { return msg.PexRequest, nil case *PexMessage_PexResponse: return msg.PexResponse, nil - case *PexMessage_PexRequestV2: - return msg.PexRequestV2, nil - case *PexMessage_PexResponseV2: - return msg.PexResponseV2, nil default: return nil, fmt.Errorf("unknown pex message: %T", msg) } diff --git a/proto/tendermint/p2p/pex.pb.go b/proto/tendermint/p2p/pex.pb.go index 63882c3643..15ccce15e5 100644 --- a/proto/tendermint/p2p/pex.pb.go +++ b/proto/tendermint/p2p/pex.pb.go @@ -24,9 +24,7 @@ var _ = math.Inf const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type PexAddress struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - IP string `protobuf:"bytes,2,opt,name=ip,proto3" json:"ip,omitempty"` - Port uint32 `protobuf:"varint,3,opt,name=port,proto3" json:"port,omitempty"` + URL string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` } func (m *PexAddress) Reset() { *m = PexAddress{} } @@ -62,27 +60,13 @@ func (m *PexAddress) XXX_DiscardUnknown() { var xxx_messageInfo_PexAddress proto.InternalMessageInfo -func (m *PexAddress) GetID() string { - if m != nil { - return m.ID - } - return "" -} - -func (m *PexAddress) GetIP() string { +func (m *PexAddress) GetURL() string { if m != nil { - return m.IP + return m.URL } return "" } -func (m *PexAddress) GetPort() uint32 { - if m != nil { - return m.Port - } - return 0 -} - type PexRequest struct { } @@ -163,136 +147,10 @@ func (m *PexResponse) GetAddresses() []PexAddress { return nil } -type PexAddressV2 struct { - URL string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` -} - -func (m *PexAddressV2) Reset() { *m = PexAddressV2{} } -func (m *PexAddressV2) String() string { return proto.CompactTextString(m) } -func 
(*PexAddressV2) ProtoMessage() {} -func (*PexAddressV2) Descriptor() ([]byte, []int) { - return fileDescriptor_81c2f011fd13be57, []int{3} -} -func (m *PexAddressV2) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PexAddressV2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PexAddressV2.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PexAddressV2) XXX_Merge(src proto.Message) { - xxx_messageInfo_PexAddressV2.Merge(m, src) -} -func (m *PexAddressV2) XXX_Size() int { - return m.Size() -} -func (m *PexAddressV2) XXX_DiscardUnknown() { - xxx_messageInfo_PexAddressV2.DiscardUnknown(m) -} - -var xxx_messageInfo_PexAddressV2 proto.InternalMessageInfo - -func (m *PexAddressV2) GetURL() string { - if m != nil { - return m.URL - } - return "" -} - -type PexRequestV2 struct { -} - -func (m *PexRequestV2) Reset() { *m = PexRequestV2{} } -func (m *PexRequestV2) String() string { return proto.CompactTextString(m) } -func (*PexRequestV2) ProtoMessage() {} -func (*PexRequestV2) Descriptor() ([]byte, []int) { - return fileDescriptor_81c2f011fd13be57, []int{4} -} -func (m *PexRequestV2) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PexRequestV2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PexRequestV2.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PexRequestV2) XXX_Merge(src proto.Message) { - xxx_messageInfo_PexRequestV2.Merge(m, src) -} -func (m *PexRequestV2) XXX_Size() int { - return m.Size() -} -func (m *PexRequestV2) XXX_DiscardUnknown() { - xxx_messageInfo_PexRequestV2.DiscardUnknown(m) -} - -var xxx_messageInfo_PexRequestV2 proto.InternalMessageInfo - -type PexResponseV2 struct { - Addresses []PexAddressV2 `protobuf:"bytes,1,rep,name=addresses,proto3" json:"addresses"` -} - -func (m *PexResponseV2) Reset() { *m = PexResponseV2{} } -func (m *PexResponseV2) String() string { return proto.CompactTextString(m) } -func (*PexResponseV2) ProtoMessage() {} -func (*PexResponseV2) Descriptor() ([]byte, []int) { - return fileDescriptor_81c2f011fd13be57, []int{5} -} -func (m *PexResponseV2) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PexResponseV2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PexResponseV2.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PexResponseV2) XXX_Merge(src proto.Message) { - xxx_messageInfo_PexResponseV2.Merge(m, src) -} -func (m *PexResponseV2) XXX_Size() int { - return m.Size() -} -func (m *PexResponseV2) XXX_DiscardUnknown() { - xxx_messageInfo_PexResponseV2.DiscardUnknown(m) -} - -var xxx_messageInfo_PexResponseV2 proto.InternalMessageInfo - -func (m *PexResponseV2) GetAddresses() []PexAddressV2 { - if m != nil { - return m.Addresses - } - return nil -} - type PexMessage struct { // Types that are valid to be assigned to Sum: // *PexMessage_PexRequest // *PexMessage_PexResponse - // *PexMessage_PexRequestV2 - // *PexMessage_PexResponseV2 Sum isPexMessage_Sum `protobuf_oneof:"sum"` } @@ -300,7 +158,7 @@ func (m *PexMessage) Reset() { *m = PexMessage{} } func (m 
*PexMessage) String() string { return proto.CompactTextString(m) } func (*PexMessage) ProtoMessage() {} func (*PexMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_81c2f011fd13be57, []int{6} + return fileDescriptor_81c2f011fd13be57, []int{3} } func (m *PexMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -336,22 +194,14 @@ type isPexMessage_Sum interface { } type PexMessage_PexRequest struct { - PexRequest *PexRequest `protobuf:"bytes,1,opt,name=pex_request,json=pexRequest,proto3,oneof" json:"pex_request,omitempty"` + PexRequest *PexRequest `protobuf:"bytes,3,opt,name=pex_request,json=pexRequest,proto3,oneof" json:"pex_request,omitempty"` } type PexMessage_PexResponse struct { - PexResponse *PexResponse `protobuf:"bytes,2,opt,name=pex_response,json=pexResponse,proto3,oneof" json:"pex_response,omitempty"` -} -type PexMessage_PexRequestV2 struct { - PexRequestV2 *PexRequestV2 `protobuf:"bytes,3,opt,name=pex_request_v2,json=pexRequestV2,proto3,oneof" json:"pex_request_v2,omitempty"` -} -type PexMessage_PexResponseV2 struct { - PexResponseV2 *PexResponseV2 `protobuf:"bytes,4,opt,name=pex_response_v2,json=pexResponseV2,proto3,oneof" json:"pex_response_v2,omitempty"` + PexResponse *PexResponse `protobuf:"bytes,4,opt,name=pex_response,json=pexResponse,proto3,oneof" json:"pex_response,omitempty"` } -func (*PexMessage_PexRequest) isPexMessage_Sum() {} -func (*PexMessage_PexResponse) isPexMessage_Sum() {} -func (*PexMessage_PexRequestV2) isPexMessage_Sum() {} -func (*PexMessage_PexResponseV2) isPexMessage_Sum() {} +func (*PexMessage_PexRequest) isPexMessage_Sum() {} +func (*PexMessage_PexResponse) isPexMessage_Sum() {} func (m *PexMessage) GetSum() isPexMessage_Sum { if m != nil { @@ -374,27 +224,11 @@ func (m *PexMessage) GetPexResponse() *PexResponse { return nil } -func (m *PexMessage) GetPexRequestV2() *PexRequestV2 { - if x, ok := m.GetSum().(*PexMessage_PexRequestV2); ok { - return x.PexRequestV2 - } - return nil -} - -func (m *PexMessage) GetPexResponseV2() *PexResponseV2 { - if x, ok := m.GetSum().(*PexMessage_PexResponseV2); ok { - return x.PexResponseV2 - } - return nil -} - // XXX_OneofWrappers is for the internal use of the proto package. 
func (*PexMessage) XXX_OneofWrappers() []interface{} { return []interface{}{ (*PexMessage_PexRequest)(nil), (*PexMessage_PexResponse)(nil), - (*PexMessage_PexRequestV2)(nil), - (*PexMessage_PexResponseV2)(nil), } } @@ -402,42 +236,33 @@ func init() { proto.RegisterType((*PexAddress)(nil), "tendermint.p2p.PexAddress") proto.RegisterType((*PexRequest)(nil), "tendermint.p2p.PexRequest") proto.RegisterType((*PexResponse)(nil), "tendermint.p2p.PexResponse") - proto.RegisterType((*PexAddressV2)(nil), "tendermint.p2p.PexAddressV2") - proto.RegisterType((*PexRequestV2)(nil), "tendermint.p2p.PexRequestV2") - proto.RegisterType((*PexResponseV2)(nil), "tendermint.p2p.PexResponseV2") proto.RegisterType((*PexMessage)(nil), "tendermint.p2p.PexMessage") } func init() { proto.RegisterFile("tendermint/p2p/pex.proto", fileDescriptor_81c2f011fd13be57) } var fileDescriptor_81c2f011fd13be57 = []byte{ - // 407 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xdd, 0x8a, 0xda, 0x40, - 0x14, 0xc7, 0xf3, 0x61, 0x2d, 0x9e, 0x44, 0x0b, 0x43, 0x29, 0xa9, 0x6d, 0xa3, 0xe4, 0xca, 0xde, - 0x24, 0x30, 0xa5, 0x97, 0x2d, 0x36, 0x08, 0xb5, 0x50, 0xa9, 0x1d, 0xd8, 0x5c, 0xec, 0x8d, 0xe8, - 0x66, 0xc8, 0x06, 0x56, 0x33, 0x9b, 0x49, 0x16, 0x1f, 0x63, 0xdf, 0x61, 0x5f, 0xc6, 0x4b, 0x2f, - 0xf7, 0x4a, 0x96, 0xf8, 0x22, 0x8b, 0x13, 0x31, 0x23, 0xba, 0x7b, 0x37, 0xe7, 0x7f, 0xbe, 0x7e, - 0xe7, 0xcc, 0x01, 0x2b, 0xa3, 0x8b, 0x90, 0xa6, 0xf3, 0x78, 0x91, 0x79, 0x0c, 0x33, 0x8f, 0xd1, - 0xa5, 0xcb, 0xd2, 0x24, 0x4b, 0x50, 0xab, 0xf2, 0xb8, 0x0c, 0xb3, 0xf6, 0xfb, 0x28, 0x89, 0x12, - 0xe1, 0xf2, 0x76, 0xaf, 0x32, 0xca, 0x19, 0x03, 0x8c, 0xe9, 0xf2, 0x57, 0x18, 0xa6, 0x94, 0x73, - 0xf4, 0x01, 0xb4, 0x38, 0xb4, 0xd4, 0xae, 0xda, 0x6b, 0xf8, 0xf5, 0x62, 0xd3, 0xd1, 0xfe, 0x0c, - 0x88, 0x16, 0x87, 0x42, 0x67, 0x96, 0x26, 0xe9, 0x63, 0xa2, 0xc5, 0x0c, 0x21, 0xa8, 0xb1, 0x24, - 0xcd, 0x2c, 0xbd, 0xab, 0xf6, 0x9a, 0x44, 0xbc, 0x1d, 0x53, 0x54, 0x24, 0xf4, 0x36, 0xa7, 0x3c, - 0x73, 0x46, 0x60, 0x08, 0x8b, 0xb3, 0x64, 0xc1, 0x29, 0xfa, 0x09, 0x8d, 0x69, 0xd9, 0x8b, 0x72, - 0x4b, 0xed, 0xea, 0x3d, 0x03, 0xb7, 0xdd, 0x63, 0x50, 0xb7, 0xe2, 0xf1, 0x6b, 0xab, 0x4d, 0x47, - 0x21, 0x55, 0x8a, 0xf3, 0x15, 0xcc, 0xca, 0x1d, 0x60, 0xf4, 0x11, 0xf4, 0x3c, 0xbd, 0xd9, 0x13, - 0xbf, 0x2d, 0x36, 0x1d, 0xfd, 0x82, 0xfc, 0x25, 0x3b, 0xcd, 0x69, 0x89, 0xd0, 0x3d, 0x47, 0x80, - 0x9d, 0xff, 0xd0, 0x94, 0x48, 0x02, 0x8c, 0xfa, 0xa7, 0x2c, 0x9f, 0x5f, 0x66, 0x09, 0xf0, 0x29, - 0xcd, 0x83, 0x26, 0x66, 0x1d, 0x51, 0xce, 0xa7, 0x11, 0x45, 0x3f, 0xc0, 0x60, 0x74, 0x39, 0x49, - 0xcb, 0x96, 0x02, 0xea, 0xfc, 0x78, 0x7b, 0xa8, 0xa1, 0x42, 0x80, 0x1d, 0x2c, 0xd4, 0x07, 0xb3, - 0x4c, 0x2f, 0x09, 0xc5, 0xba, 0x0d, 0xfc, 0xe9, 0x6c, 0x7e, 0x19, 0x32, 0x54, 0x88, 0xc1, 0xa4, - 0xed, 0x0e, 0xa0, 0x25, 0x01, 0x4c, 0xee, 0xb0, 0xf8, 0x98, 0xf3, 0x63, 0x1d, 0x16, 0x33, 0x54, - 0x88, 0xc9, 0x24, 0x1b, 0xfd, 0x86, 0x77, 0x32, 0xc7, 0xae, 0x4c, 0x4d, 0x94, 0xf9, 0xf2, 0x0a, - 0x8a, 0xa8, 0xd3, 0x64, 0xb2, 0xe0, 0xbf, 0x01, 0x9d, 0xe7, 0x73, 0xff, 0xdf, 0xaa, 0xb0, 0xd5, - 0x75, 0x61, 0xab, 0x4f, 0x85, 0xad, 0xde, 0x6f, 0x6d, 0x65, 0xbd, 0xb5, 0x95, 0xc7, 0xad, 0xad, - 0x5c, 0x7e, 0x8f, 0xe2, 0xec, 0x3a, 0x9f, 0xb9, 0x57, 0xc9, 0xdc, 0x93, 0xee, 0x58, 0x3e, 0x69, - 0x71, 0xaf, 0xc7, 0x37, 0x3e, 0xab, 0x0b, 0xf5, 0xdb, 0x73, 0x00, 0x00, 0x00, 0xff, 0xff, 0x9f, - 0x9b, 0xfd, 0x75, 0xfc, 0x02, 0x00, 0x00, + // 311 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 
0x92, 0x28, 0x49, 0xcd, 0x4b, + 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x2f, 0x30, 0x2a, 0xd0, 0x2f, 0x48, 0xad, 0xd0, 0x2b, + 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x43, 0xc8, 0xe8, 0x15, 0x18, 0x15, 0x48, 0x89, 0xa4, 0xe7, + 0xa7, 0xe7, 0x83, 0xa5, 0xf4, 0x41, 0x2c, 0x88, 0x2a, 0x25, 0x63, 0x2e, 0xae, 0x80, 0xd4, 0x0a, + 0xc7, 0x94, 0x94, 0xa2, 0xd4, 0xe2, 0x62, 0x21, 0x49, 0x2e, 0xe6, 0xd2, 0xa2, 0x1c, 0x09, 0x46, + 0x05, 0x46, 0x0d, 0x4e, 0x27, 0xf6, 0x47, 0xf7, 0xe4, 0x99, 0x43, 0x83, 0x7c, 0x82, 0x40, 0x62, + 0x5e, 0x2c, 0x1c, 0x4c, 0x02, 0xcc, 0x5e, 0x2c, 0x1c, 0xcc, 0x02, 0x2c, 0x4a, 0x3c, 0x60, 0x4d, + 0x41, 0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x4a, 0xbe, 0x5c, 0xdc, 0x60, 0x5e, 0x71, 0x41, 0x7e, + 0x5e, 0x71, 0xaa, 0x90, 0x1d, 0x17, 0x67, 0x22, 0xc4, 0xb8, 0xd4, 0x62, 0x09, 0x46, 0x05, 0x66, + 0x0d, 0x6e, 0x23, 0x29, 0x3d, 0x54, 0xb7, 0xe8, 0x21, 0xac, 0x74, 0x62, 0x39, 0x71, 0x4f, 0x9e, + 0x21, 0x08, 0xa1, 0x45, 0x69, 0x01, 0x23, 0xd8, 0x74, 0xdf, 0xd4, 0xe2, 0xe2, 0xc4, 0xf4, 0x54, + 0x21, 0x5b, 0x2e, 0xee, 0x82, 0xd4, 0x8a, 0xf8, 0x22, 0x88, 0x65, 0x12, 0xcc, 0x0a, 0x8c, 0x38, + 0x0c, 0x84, 0x3a, 0xc7, 0x83, 0x21, 0x88, 0xab, 0x00, 0xce, 0x13, 0x72, 0xe0, 0xe2, 0x81, 0x68, + 0x87, 0xb8, 0x4e, 0x82, 0x05, 0xac, 0x5f, 0x1a, 0xab, 0x7e, 0x88, 0x12, 0x0f, 0x86, 0x20, 0xee, + 0x02, 0x04, 0xd7, 0x89, 0x95, 0x8b, 0xb9, 0xb8, 0x34, 0xd7, 0x8b, 0x85, 0x83, 0x51, 0x80, 0x09, + 0x12, 0x0a, 0x4e, 0xfe, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, + 0xe3, 0x84, 0xc7, 0x72, 0x0c, 0x17, 0x1e, 0xcb, 0x31, 0xdc, 0x78, 0x2c, 0xc7, 0x10, 0x65, 0x9a, + 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x8f, 0x14, 0x33, 0xc8, 0x91, 0x04, + 0x8e, 0x01, 0xd4, 0x58, 0x4b, 0x62, 0x03, 0x8b, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xa7, + 0x1d, 0xdd, 0x6f, 0xce, 0x01, 0x00, 0x00, } func (m *PexAddress) Marshal() (dAtA []byte, err error) { @@ -460,22 +285,10 @@ func (m *PexAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.Port != 0 { - i = encodeVarintPex(dAtA, i, uint64(m.Port)) - i-- - dAtA[i] = 0x18 - } - if len(m.IP) > 0 { - i -= len(m.IP) - copy(dAtA[i:], m.IP) - i = encodeVarintPex(dAtA, i, uint64(len(m.IP))) - i-- - dAtA[i] = 0x12 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintPex(dAtA, i, uint64(len(m.ID))) + if len(m.URL) > 0 { + i -= len(m.URL) + copy(dAtA[i:], m.URL) + i = encodeVarintPex(dAtA, i, uint64(len(m.URL))) i-- dAtA[i] = 0xa } @@ -542,96 +355,6 @@ func (m *PexResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *PexAddressV2) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PexAddressV2) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PexAddressV2) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.URL) > 0 { - i -= len(m.URL) - copy(dAtA[i:], m.URL) - i = encodeVarintPex(dAtA, i, uint64(len(m.URL))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *PexRequestV2) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PexRequestV2) MarshalTo(dAtA []byte) (int, error) { - size := 
m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PexRequestV2) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *PexResponseV2) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PexResponseV2) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PexResponseV2) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Addresses) > 0 { - for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Addresses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPex(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - func (m *PexMessage) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -681,7 +404,7 @@ func (m *PexMessage_PexRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintPex(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0xa + dAtA[i] = 0x1a } return len(dAtA) - i, nil } @@ -702,48 +425,6 @@ func (m *PexMessage_PexResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) i = encodeVarintPex(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 - } - return len(dAtA) - i, nil -} -func (m *PexMessage_PexRequestV2) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PexMessage_PexRequestV2) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.PexRequestV2 != nil { - { - size, err := m.PexRequestV2.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPex(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - return len(dAtA) - i, nil -} -func (m *PexMessage_PexResponseV2) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PexMessage_PexResponseV2) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.PexResponseV2 != nil { - { - size, err := m.PexResponseV2.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintPex(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x22 } return len(dAtA) - i, nil @@ -765,17 +446,10 @@ func (m *PexAddress) Size() (n int) { } var l int _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovPex(uint64(l)) - } - l = len(m.IP) + l = len(m.URL) if l > 0 { n += 1 + l + sovPex(uint64(l)) } - if m.Port != 0 { - n += 1 + sovPex(uint64(m.Port)) - } return n } @@ -803,100 +477,39 @@ func (m *PexResponse) Size() (n int) { return n } -func (m *PexAddressV2) Size() (n int) { +func (m *PexMessage) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.URL) - if l > 0 { - n += 1 + l + sovPex(uint64(l)) + if m.Sum != nil { + n += m.Sum.Size() } return n } -func (m *PexRequestV2) Size() (n int) { +func (m *PexMessage_PexRequest) Size() (n int) { if m == nil { return 0 } var l int _ = l + if m.PexRequest != nil { + l = m.PexRequest.Size() + n += 1 + l + sovPex(uint64(l)) + } return n } - -func (m *PexResponseV2) Size() (n int) { +func (m *PexMessage_PexResponse) Size() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.Addresses) > 0 { - for _, e := range m.Addresses { - l = e.Size() - n += 
1 + l + sovPex(uint64(l)) - } - } - return n -} - -func (m *PexMessage) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Sum != nil { - n += m.Sum.Size() - } - return n -} - -func (m *PexMessage_PexRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.PexRequest != nil { - l = m.PexRequest.Size() - n += 1 + l + sovPex(uint64(l)) - } - return n -} -func (m *PexMessage_PexResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.PexResponse != nil { - l = m.PexResponse.Size() - n += 1 + l + sovPex(uint64(l)) - } - return n -} -func (m *PexMessage_PexRequestV2) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.PexRequestV2 != nil { - l = m.PexRequestV2.Size() - n += 1 + l + sovPex(uint64(l)) - } - return n -} -func (m *PexMessage_PexResponseV2) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.PexResponseV2 != nil { - l = m.PexResponseV2.Size() - n += 1 + l + sovPex(uint64(l)) + if m.PexResponse != nil { + l = m.PexResponse.Size() + n += 1 + l + sovPex(uint64(l)) } return n } @@ -938,39 +551,7 @@ func (m *PexAddress) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPex - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -998,27 +579,8 @@ func (m *PexAddress) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.IP = string(dAtA[iNdEx:postIndex]) + m.URL = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) - } - m.Port = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Port |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } default: iNdEx = preIndex skippy, err := skipPex(dAtA[iNdEx:]) @@ -1174,222 +736,6 @@ func (m *PexResponse) Unmarshal(dAtA []byte) error { } return nil } -func (m *PexAddressV2) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PexAddressV2: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PexAddressV2: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - 
return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPex - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.URL = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPex(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PexRequestV2) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PexRequestV2: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PexRequestV2: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipPex(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PexResponseV2) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PexResponseV2: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PexResponseV2: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPex - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Addresses = append(m.Addresses, PexAddressV2{}) - if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPex(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return 
ErrInvalidLengthPex - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *PexMessage) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -1419,7 +765,7 @@ func (m *PexMessage) Unmarshal(dAtA []byte) error { return fmt.Errorf("proto: PexMessage: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: + case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field PexRequest", wireType) } @@ -1454,7 +800,7 @@ func (m *PexMessage) Unmarshal(dAtA []byte) error { } m.Sum = &PexMessage_PexRequest{v} iNdEx = postIndex - case 2: + case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field PexResponse", wireType) } @@ -1489,76 +835,6 @@ func (m *PexMessage) Unmarshal(dAtA []byte) error { } m.Sum = &PexMessage_PexResponse{v} iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PexRequestV2", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPex - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &PexRequestV2{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Sum = &PexMessage_PexRequestV2{v} - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PexResponseV2", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPex - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPex - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthPex - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &PexResponseV2{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Sum = &PexMessage_PexResponseV2{v} - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipPex(dAtA[iNdEx:]) diff --git a/proto/tendermint/p2p/pex.proto b/proto/tendermint/p2p/pex.proto index 1f78c98643..5457434441 100644 --- a/proto/tendermint/p2p/pex.proto +++ b/proto/tendermint/p2p/pex.proto @@ -6,9 +6,9 @@ option go_package = "github.com/tendermint/tendermint/proto/tendermint/p2p"; import "gogoproto/gogo.proto"; message PexAddress { - string id = 1 [(gogoproto.customname) = "ID"]; - string ip = 2 [(gogoproto.customname) = "IP"]; - uint32 port = 3; + string url = 1 [(gogoproto.customname) = "URL"]; + + reserved 2, 3; // See https://github.com/tendermint/spec/pull/352 } message PexRequest {} @@ -17,21 +17,10 @@ message PexResponse { repeated PexAddress addresses = 1 [(gogoproto.nullable) = false]; } -message PexAddressV2 { - string url = 1 [(gogoproto.customname) = "URL"]; -} - -message PexRequestV2 {} - -message PexResponseV2 { - repeated PexAddressV2 addresses = 1 [(gogoproto.nullable) = false]; -} - message PexMessage { + reserved 1, 2; // See https://github.com/tendermint/spec/pull/352 oneof sum { - PexRequest pex_request = 1; - PexResponse pex_response = 2; - PexRequestV2 pex_request_v2 = 3; - 
PexResponseV2 pex_response_v2 = 4; + PexRequest pex_request = 3; + PexResponse pex_response = 4; } } diff --git a/proto/tendermint/p2p/types.proto b/proto/tendermint/p2p/types.proto index 0000baee69..2afed8494f 100644 --- a/proto/tendermint/p2p/types.proto +++ b/proto/tendermint/p2p/types.proto @@ -38,7 +38,9 @@ message PeerInfo { message PeerAddressInfo { string address = 1; - google.protobuf.Timestamp last_dial_success = 2 [(gogoproto.stdtime) = true]; - google.protobuf.Timestamp last_dial_failure = 3 [(gogoproto.stdtime) = true]; - uint32 dial_failures = 4; + google.protobuf.Timestamp last_dial_success = 2 + [(gogoproto.stdtime) = true]; + google.protobuf.Timestamp last_dial_failure = 3 + [(gogoproto.stdtime) = true]; + uint32 dial_failures = 4; } diff --git a/proto/tendermint/privval/service.proto b/proto/tendermint/privval/service.proto index 4ceac5695a..c42eba64d8 100644 --- a/proto/tendermint/privval/service.proto +++ b/proto/tendermint/privval/service.proto @@ -1,6 +1,6 @@ syntax = "proto3"; package tendermint.privval; -option go_package = "github.com/tendermint/tendermint/proto/tendermint/privval"; +option go_package = "github.com/tendermint/tendermint/proto/tendermint/privval"; import "tendermint/privval/types.proto"; diff --git a/proto/tendermint/rpc/grpc/types.proto b/proto/tendermint/rpc/grpc/types.proto deleted file mode 100644 index ee948a4065..0000000000 --- a/proto/tendermint/rpc/grpc/types.proto +++ /dev/null @@ -1,32 +0,0 @@ -syntax = "proto3"; -package tendermint.rpc.grpc; -option go_package = "github.com/tendermint/tendermint/rpc/grpc;coregrpc"; - -import "tendermint/abci/types.proto"; - -//---------------------------------------- -// Request types - -message RequestPing {} - -message RequestBroadcastTx { - bytes tx = 1; -} - -//---------------------------------------- -// Response types - -message ResponsePing {} - -message ResponseBroadcastTx { - tendermint.abci.ResponseCheckTx check_tx = 1; - tendermint.abci.ResponseDeliverTx deliver_tx = 2; -} - -//---------------------------------------- -// Service Definition - -service BroadcastAPI { - rpc Ping(RequestPing) returns (ResponsePing); - rpc BroadcastTx(RequestBroadcastTx) returns (ResponseBroadcastTx); -} diff --git a/proto/tendermint/state/types.pb.go b/proto/tendermint/state/types.pb.go index 4bb1439c32..e9c3754518 100644 --- a/proto/tendermint/state/types.pb.go +++ b/proto/tendermint/state/types.pb.go @@ -34,9 +34,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package // of the various ABCI calls during block processing. // It is persisted to disk for each height before calling Commit. 
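The regenerated struct below collapses the three legacy result fields (deliver_txs, begin_block, end_block) into the single finalize_block result. As a minimal migration sketch: the txResults helper below is hypothetical, and the TxResults field name on ResponseFinalizeBlock is an assumption this hunk does not show; only the GetFinalizeBlock getter further down is confirmed here.

// Hypothetical migration helper, assuming the imports
//   tmstate "github.com/tendermint/tendermint/proto/tendermint/state"
//   abci    "github.com/tendermint/tendermint/abci/types"
func txResults(r *tmstate.ABCIResponses) []*abci.ExecTxResult {
	// Before: r.GetDeliverTxs() / r.GetEndBlock() / r.GetBeginBlock().
	// After: everything hangs off the single FinalizeBlock response.
	fb := r.GetFinalizeBlock() // generated getter, defined below
	if fb == nil {
		return nil // nothing recorded for this height
	}
	return fb.TxResults // field name assumed for illustration
}
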
type ABCIResponses struct { - DeliverTxs []*types.ResponseDeliverTx `protobuf:"bytes,1,rep,name=deliver_txs,json=deliverTxs,proto3" json:"deliver_txs,omitempty"` - EndBlock *types.ResponseEndBlock `protobuf:"bytes,2,opt,name=end_block,json=endBlock,proto3" json:"end_block,omitempty"` - BeginBlock *types.ResponseBeginBlock `protobuf:"bytes,3,opt,name=begin_block,json=beginBlock,proto3" json:"begin_block,omitempty"` + FinalizeBlock *types.ResponseFinalizeBlock `protobuf:"bytes,2,opt,name=finalize_block,json=finalizeBlock,proto3" json:"finalize_block,omitempty"` } func (m *ABCIResponses) Reset() { *m = ABCIResponses{} } @@ -72,23 +70,9 @@ func (m *ABCIResponses) XXX_DiscardUnknown() { var xxx_messageInfo_ABCIResponses proto.InternalMessageInfo -func (m *ABCIResponses) GetDeliverTxs() []*types.ResponseDeliverTx { +func (m *ABCIResponses) GetFinalizeBlock() *types.ResponseFinalizeBlock { if m != nil { - return m.DeliverTxs - } - return nil -} - -func (m *ABCIResponses) GetEndBlock() *types.ResponseEndBlock { - if m != nil { - return m.EndBlock - } - return nil -} - -func (m *ABCIResponses) GetBeginBlock() *types.ResponseBeginBlock { - if m != nil { - return m.BeginBlock + return m.FinalizeBlock } return nil } @@ -446,60 +430,57 @@ func init() { func init() { proto.RegisterFile("tendermint/state/types.proto", fileDescriptor_ccfacf933f22bf93) } var fileDescriptor_ccfacf933f22bf93 = []byte{ - // 843 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x96, 0xcf, 0x6f, 0xe2, 0x46, - 0x14, 0xc7, 0x71, 0xd9, 0x5d, 0x60, 0x1c, 0x60, 0x77, 0xb2, 0x07, 0x2f, 0xdb, 0x35, 0x2c, 0xdb, - 0x56, 0xa8, 0x07, 0x23, 0x6d, 0x0f, 0x55, 0x2f, 0x91, 0x62, 0x53, 0x35, 0xa8, 0x51, 0xd5, 0x3a, - 0x51, 0x0e, 0xbd, 0x58, 0x83, 0x3d, 0xd8, 0x56, 0xc1, 0xb6, 0x3c, 0x03, 0x4d, 0xff, 0x80, 0xde, - 0x73, 0xed, 0x7f, 0x94, 0x63, 0x8e, 0x55, 0x0e, 0x69, 0x45, 0xfe, 0x91, 0x6a, 0x7e, 0xd8, 0x1e, - 0x20, 0x91, 0x52, 0xed, 0x6d, 0x3c, 0xef, 0xfb, 0x3e, 0xf3, 0xf5, 0xf3, 0xbc, 0x07, 0xe0, 0x73, - 0x8a, 0x93, 0x00, 0xe7, 0xcb, 0x38, 0xa1, 0x63, 0x42, 0x11, 0xc5, 0x63, 0xfa, 0x47, 0x86, 0x89, - 0x95, 0xe5, 0x29, 0x4d, 0xe1, 0xcb, 0x2a, 0x6a, 0xf1, 0x68, 0xef, 0x75, 0x98, 0x86, 0x29, 0x0f, - 0x8e, 0xd9, 0x4a, 0xe8, 0x7a, 0x6f, 0x15, 0x0a, 0x9a, 0xf9, 0xb1, 0x0a, 0xe9, 0xa9, 0x47, 0xf0, - 0xfd, 0xad, 0xe8, 0x60, 0x2f, 0xba, 0x46, 0x8b, 0x38, 0x40, 0x34, 0xcd, 0xa5, 0xe2, 0xdd, 0x9e, - 0x22, 0x43, 0x39, 0x5a, 0x16, 0x00, 0x53, 0x09, 0xaf, 0x71, 0x4e, 0xe2, 0x34, 0xd9, 0x3a, 0xa0, - 0x1f, 0xa6, 0x69, 0xb8, 0xc0, 0x63, 0xfe, 0x34, 0x5b, 0xcd, 0xc7, 0x34, 0x5e, 0x62, 0x42, 0xd1, - 0x32, 0x13, 0x82, 0xe1, 0xad, 0x06, 0xda, 0xc7, 0xb6, 0x33, 0x75, 0x31, 0xc9, 0xd2, 0x84, 0x60, - 0x02, 0x1d, 0xa0, 0x07, 0x78, 0x11, 0xaf, 0x71, 0xee, 0xd1, 0x4b, 0x62, 0x68, 0x83, 0xfa, 0x48, - 0xff, 0x38, 0xb4, 0x94, 0x62, 0xb0, 0x97, 0xb4, 0x8a, 0x84, 0x89, 0xd0, 0x9e, 0x5f, 0xba, 0x20, - 0x28, 0x96, 0x04, 0x1e, 0x81, 0x16, 0x4e, 0x02, 0x6f, 0xb6, 0x48, 0xfd, 0xdf, 0x8c, 0xcf, 0x06, - 0xda, 0x48, 0xff, 0xf8, 0xfe, 0x51, 0xc4, 0xf7, 0x49, 0x60, 0x33, 0xa1, 0xdb, 0xc4, 0x72, 0x05, - 0x27, 0x40, 0x9f, 0xe1, 0x30, 0x4e, 0x24, 0xa1, 0xce, 0x09, 0x1f, 0x1e, 0x25, 0xd8, 0x4c, 0x2b, - 0x18, 0x60, 0x56, 0xae, 0x87, 0x7f, 0x6a, 0xa0, 0x73, 0x51, 0x14, 0x94, 0x4c, 0x93, 0x79, 0x0a, - 0x1d, 0xd0, 0x2e, 0x4b, 0xec, 0x11, 0x4c, 0x0d, 0x8d, 0xa3, 0x4d, 0x15, 0x2d, 0x0a, 0x58, 0x26, - 0x9e, 0x61, 0xea, 0x1e, 0xac, 0x95, 0x27, 0x68, 0x81, 0xc3, 0x05, 0x22, 0xd4, 0x8b, 0x70, 0x1c, - 0x46, 0xd4, 0xf3, 0x23, 0x94, 0x84, 
0x38, 0xe0, 0xef, 0x59, 0x77, 0x5f, 0xb1, 0xd0, 0x09, 0x8f, - 0x38, 0x22, 0x30, 0xfc, 0x4b, 0x03, 0x87, 0x0e, 0xf3, 0x99, 0x90, 0x15, 0xf9, 0x99, 0x7f, 0x3f, - 0x6e, 0xc6, 0x05, 0x2f, 0xfd, 0x62, 0xdb, 0x13, 0xdf, 0x55, 0xfa, 0x79, 0xbf, 0xef, 0x67, 0x07, - 0x60, 0x3f, 0xbb, 0xbe, 0xeb, 0xd7, 0xdc, 0xae, 0xbf, 0xbd, 0xfd, 0xbf, 0xbd, 0x45, 0xa0, 0x71, - 0x21, 0x2e, 0x0e, 0x3c, 0x06, 0xad, 0x92, 0x26, 0x7d, 0xbc, 0x53, 0x7d, 0xc8, 0x0b, 0x56, 0x39, - 0x91, 0x1e, 0xaa, 0x2c, 0xd8, 0x03, 0x4d, 0x92, 0xce, 0xe9, 0xef, 0x28, 0xc7, 0xfc, 0xc8, 0x96, - 0x5b, 0x3e, 0x0f, 0x6f, 0x9b, 0xe0, 0xf9, 0x19, 0xeb, 0x23, 0xf8, 0x1d, 0x68, 0x48, 0x96, 0x3c, - 0xe6, 0x8d, 0xb5, 0xdb, 0x6b, 0x96, 0x34, 0x25, 0x8f, 0x28, 0xf4, 0xf0, 0x2b, 0xd0, 0xf4, 0x23, - 0x14, 0x27, 0x5e, 0x2c, 0xde, 0xa9, 0x65, 0xeb, 0x9b, 0xbb, 0x7e, 0xc3, 0x61, 0x7b, 0xd3, 0x89, - 0xdb, 0xe0, 0xc1, 0x69, 0x00, 0xbf, 0x04, 0x9d, 0x38, 0x89, 0x69, 0x8c, 0x16, 0xb2, 0x12, 0x46, - 0x87, 0x57, 0xa0, 0x2d, 0x77, 0x45, 0x11, 0xe0, 0xd7, 0x80, 0x97, 0x44, 0x5c, 0xb3, 0x42, 0x59, - 0xe7, 0xca, 0x2e, 0x0b, 0xf0, 0x7b, 0x24, 0xb5, 0x2e, 0x68, 0x2b, 0xda, 0x38, 0x30, 0x9e, 0xed, - 0x7b, 0x17, 0x9f, 0x8a, 0x67, 0x4d, 0x27, 0xf6, 0x21, 0xf3, 0xbe, 0xb9, 0xeb, 0xeb, 0xa7, 0x05, - 0x6a, 0x3a, 0x71, 0xf5, 0x92, 0x3b, 0x0d, 0x4a, 0x26, 0x7f, 0x67, 0xc6, 0x9c, 0x3f, 0xc6, 0xe4, - 0x95, 0xdb, 0x65, 0xca, 0x4d, 0xc1, 0x14, 0x0f, 0x01, 0x3c, 0x05, 0x5d, 0xc5, 0x27, 0x6b, 0x78, - 0xe3, 0x39, 0xa7, 0xf6, 0x2c, 0x31, 0x0d, 0xac, 0x62, 0x1a, 0x58, 0xe7, 0xc5, 0x34, 0xb0, 0x9b, - 0x0c, 0x7b, 0xf5, 0x4f, 0x5f, 0x73, 0xdb, 0xa5, 0x3f, 0x16, 0x85, 0x3f, 0x82, 0x0f, 0x9c, 0xe6, - 0xa7, 0x39, 0xf6, 0x44, 0xe9, 0x59, 0x0c, 0x07, 0xdb, 0x35, 0x0b, 0x06, 0xda, 0xa8, 0xed, 0x9a, - 0x4c, 0xea, 0xa4, 0x39, 0xe6, 0xdf, 0xe3, 0x94, 0xeb, 0xd4, 0x12, 0x5e, 0x80, 0xd7, 0x09, 0xbe, - 0xdc, 0x83, 0x19, 0x98, 0xfb, 0xeb, 0x3f, 0x74, 0xe9, 0x15, 0x16, 0xbf, 0x0b, 0x9a, 0xfb, 0x8a, - 0x21, 0xb6, 0x02, 0xf0, 0x07, 0xd0, 0xe5, 0xdc, 0xb2, 0x4b, 0x89, 0xf1, 0xe2, 0x49, 0x7d, 0xdd, - 0x61, 0x69, 0xd5, 0x88, 0x80, 0x47, 0x00, 0x28, 0x8c, 0xc6, 0x93, 0x18, 0x4a, 0x06, 0x33, 0xc2, - 0xab, 0xa5, 0x40, 0x9a, 0x4f, 0x33, 0xc2, 0xd2, 0x14, 0x23, 0x0e, 0x30, 0xd5, 0x36, 0xae, 0x78, - 0x65, 0x47, 0xb7, 0xf8, 0x2d, 0x7d, 0x5b, 0x75, 0x74, 0x95, 0x2d, 0x7b, 0xfb, 0xc1, 0xf9, 0x02, - 0x3e, 0x71, 0xbe, 0xfc, 0x04, 0xbe, 0xd8, 0x9a, 0x2f, 0x3b, 0xfc, 0xd2, 0x9e, 0xce, 0xed, 0x0d, - 0x94, 0x81, 0xb3, 0x0d, 0x2a, 0x3c, 0x16, 0x1d, 0x98, 0x63, 0xb2, 0x5a, 0x50, 0xe2, 0x45, 0x88, - 0x44, 0xc6, 0xc1, 0x40, 0x1b, 0x1d, 0x88, 0x0e, 0x74, 0xc5, 0xfe, 0x09, 0x22, 0x11, 0x7c, 0x03, - 0x9a, 0x28, 0xcb, 0x84, 0xa4, 0xcd, 0x25, 0x0d, 0x94, 0x65, 0x2c, 0x64, 0xff, 0x72, 0xbd, 0x31, - 0xb5, 0x9b, 0x8d, 0xa9, 0xfd, 0xbb, 0x31, 0xb5, 0xab, 0x7b, 0xb3, 0x76, 0x73, 0x6f, 0xd6, 0xfe, - 0xbe, 0x37, 0x6b, 0xbf, 0x7e, 0x1b, 0xc6, 0x34, 0x5a, 0xcd, 0x2c, 0x3f, 0x5d, 0x8e, 0xd5, 0x1f, - 0xd3, 0x6a, 0x29, 0x7e, 0xd1, 0x77, 0xff, 0x0b, 0xcc, 0x5e, 0xf0, 0xfd, 0x6f, 0xfe, 0x0b, 0x00, - 0x00, 0xff, 0xff, 0xe3, 0xad, 0x37, 0x54, 0x26, 0x08, 0x00, 0x00, + // 791 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x4b, 0x6f, 0xd3, 0x4c, + 0x14, 0x8d, 0xbf, 0x3e, 0x92, 0x4c, 0xea, 0xa4, 0x75, 0xbb, 0x70, 0xd3, 0xaf, 0x4e, 0x08, 0x50, + 0x55, 0x2c, 0x1c, 0x09, 0x16, 0x88, 0x0d, 0x52, 0x93, 0x0a, 0x1a, 0x51, 0x10, 0xb8, 0xa8, 0x0b, + 0x16, 0x58, 0x13, 0x7b, 0x12, 0x5b, 0x24, 0xb6, 0xe5, 0x99, 0x94, 0xc7, 0x9e, 0x7d, 0xb7, 0xfc, + 0xa3, 0x2e, 0xbb, 0x44, 0x2c, 0x0a, 0x4a, 
0xff, 0x08, 0x9a, 0x3b, 0x7e, 0x4c, 0x92, 0x22, 0x15, + 0xb1, 0xf3, 0xdc, 0x73, 0xee, 0x99, 0x33, 0x77, 0xee, 0x1d, 0xa3, 0xff, 0x19, 0x09, 0x5c, 0x12, + 0x8f, 0xfd, 0x80, 0xb5, 0x29, 0xc3, 0x8c, 0xb4, 0xd9, 0xe7, 0x88, 0x50, 0x33, 0x8a, 0x43, 0x16, + 0x6a, 0xeb, 0x39, 0x6a, 0x02, 0x5a, 0xdf, 0x1a, 0x86, 0xc3, 0x10, 0xc0, 0x36, 0xff, 0x12, 0xbc, + 0xfa, 0x8e, 0xa4, 0x82, 0xfb, 0x8e, 0x2f, 0x8b, 0xd4, 0xe5, 0x2d, 0x20, 0x3e, 0x83, 0x36, 0x17, + 0xd0, 0x33, 0x3c, 0xf2, 0x5d, 0xcc, 0xc2, 0x38, 0x61, 0xec, 0x2e, 0x30, 0x22, 0x1c, 0xe3, 0x71, + 0x2a, 0x60, 0x48, 0xf0, 0x19, 0x89, 0xa9, 0x1f, 0x06, 0x33, 0x1b, 0x34, 0x86, 0x61, 0x38, 0x1c, + 0x91, 0x36, 0xac, 0xfa, 0x93, 0x41, 0x9b, 0xf9, 0x63, 0x42, 0x19, 0x1e, 0x47, 0x82, 0xd0, 0x7a, + 0x8f, 0xd4, 0x83, 0x4e, 0xb7, 0x67, 0x11, 0x1a, 0x85, 0x01, 0x25, 0x54, 0x7b, 0x89, 0xaa, 0x03, + 0x3f, 0xc0, 0x23, 0xff, 0x0b, 0xb1, 0xfb, 0xa3, 0xd0, 0xf9, 0xa0, 0xff, 0xd7, 0x54, 0xf6, 0x2b, + 0x0f, 0xf7, 0x4c, 0xa9, 0x1c, 0xfc, 0x98, 0x66, 0x9a, 0xf3, 0x2c, 0xa1, 0x77, 0x38, 0xdb, 0x52, + 0x07, 0xf2, 0xb2, 0xf5, 0x55, 0x41, 0xd5, 0xd3, 0xf4, 0x4c, 0xb4, 0x17, 0x0c, 0x42, 0xad, 0x8b, + 0xd4, 0xec, 0x94, 0x36, 0x25, 0x4c, 0x57, 0x60, 0x03, 0x43, 0xde, 0x40, 0x9c, 0x21, 0x4b, 0x3c, + 0x21, 0xcc, 0x5a, 0x3b, 0x93, 0x56, 0x9a, 0x89, 0x36, 0x47, 0x98, 0x32, 0xdb, 0x23, 0xfe, 0xd0, + 0x63, 0xb6, 0xe3, 0xe1, 0x60, 0x48, 0x5c, 0xf0, 0xba, 0x64, 0x6d, 0x70, 0xe8, 0x08, 0x90, 0xae, + 0x00, 0x5a, 0xdf, 0x14, 0xb4, 0xd9, 0xe5, 0x6e, 0x03, 0x3a, 0xa1, 0xaf, 0xa1, 0x84, 0x60, 0xc6, + 0x42, 0xeb, 0x4e, 0x1a, 0xb6, 0x45, 0x69, 0x13, 0x3f, 0x77, 0x16, 0xfd, 0xcc, 0x09, 0x74, 0x96, + 0x2f, 0xae, 0x1a, 0x05, 0xab, 0xe6, 0xcc, 0x86, 0xff, 0xda, 0x9b, 0x87, 0x8a, 0xa7, 0xe2, 0xee, + 0xb4, 0x03, 0x54, 0xce, 0xd4, 0x12, 0x1f, 0xbb, 0xb2, 0x8f, 0xe4, 0x8e, 0x73, 0x27, 0x89, 0x87, + 0x3c, 0x4b, 0xab, 0xa3, 0x12, 0x0d, 0x07, 0xec, 0x23, 0x8e, 0x09, 0x6c, 0x59, 0xb6, 0xb2, 0x75, + 0xeb, 0x47, 0x09, 0xad, 0x9c, 0xf0, 0x56, 0xd6, 0x9e, 0xa0, 0x62, 0xa2, 0x95, 0x6c, 0xb3, 0x6d, + 0xce, 0xb7, 0xbb, 0x99, 0x98, 0x4a, 0xb6, 0x48, 0xf9, 0xda, 0x1e, 0x2a, 0x39, 0x1e, 0xf6, 0x03, + 0xdb, 0x17, 0x67, 0x2a, 0x77, 0x2a, 0xd3, 0xab, 0x46, 0xb1, 0xcb, 0x63, 0xbd, 0x43, 0xab, 0x08, + 0x60, 0xcf, 0xd5, 0xee, 0xa3, 0xaa, 0x1f, 0xf8, 0xcc, 0xc7, 0xa3, 0xa4, 0x12, 0x7a, 0x15, 0x2a, + 0xa0, 0x26, 0x51, 0x51, 0x04, 0xed, 0x01, 0x82, 0x92, 0x88, 0x66, 0x4b, 0x99, 0x4b, 0xc0, 0xac, + 0x71, 0x00, 0xfa, 0x28, 0xe1, 0x5a, 0x48, 0x95, 0xb8, 0xbe, 0xab, 0x2f, 0x2f, 0x7a, 0x17, 0x57, + 0x05, 0x59, 0xbd, 0xc3, 0xce, 0x26, 0xf7, 0x3e, 0xbd, 0x6a, 0x54, 0x8e, 0x53, 0xa9, 0xde, 0xa1, + 0x55, 0xc9, 0x74, 0x7b, 0x6e, 0xa6, 0x09, 0x67, 0xe6, 0x9a, 0x83, 0x3f, 0x69, 0x42, 0xe5, 0xe6, + 0x35, 0x93, 0xa0, 0xd0, 0x14, 0x0b, 0x57, 0x3b, 0x46, 0x35, 0xc9, 0x27, 0x9f, 0x39, 0x7d, 0x05, + 0x54, 0xeb, 0xa6, 0x18, 0x48, 0x33, 0x1d, 0x48, 0xf3, 0x6d, 0x3a, 0x90, 0x9d, 0x12, 0x97, 0x3d, + 0xff, 0xd9, 0x50, 0x2c, 0x35, 0xf3, 0xc7, 0x51, 0xed, 0x05, 0xba, 0x0b, 0x6a, 0x4e, 0x18, 0x13, + 0x5b, 0x94, 0x9e, 0x63, 0xc4, 0x9d, 0xad, 0x99, 0xdb, 0x54, 0xf6, 0x55, 0xcb, 0xe0, 0xd4, 0x6e, + 0x18, 0x13, 0xb8, 0x8f, 0x63, 0xe0, 0xc9, 0x25, 0x3c, 0x45, 0x5b, 0x01, 0xf9, 0xb4, 0x20, 0xa6, + 0x13, 0xf0, 0xd7, 0xb8, 0xa9, 0xe9, 0x25, 0x2d, 0xe8, 0x05, 0xc5, 0xda, 0xe0, 0x12, 0x33, 0x80, + 0xf6, 0x1c, 0xd5, 0x40, 0x37, 0x9b, 0x52, 0xaa, 0xaf, 0xde, 0x6a, 0xae, 0xab, 0x3c, 0x2d, 0x7f, + 0x22, 0xb4, 0xa7, 0x08, 0x49, 0x1a, 0xc5, 0x5b, 0x69, 0x48, 0x19, 0xdc, 0x08, 0x54, 0x4b, 0x12, + 0x29, 0xdd, 0xce, 0x08, 0x4f, 0x93, 0x8c, 0x74, 0x91, 0x21, 0x8f, 
0x71, 0xae, 0x97, 0x4d, 0x74, + 0x19, 0xba, 0x74, 0x27, 0x9f, 0xe8, 0x3c, 0x3b, 0x99, 0xed, 0x1b, 0xdf, 0x17, 0xf4, 0x8f, 0xef, + 0xcb, 0x2b, 0x74, 0x6f, 0xe6, 0x7d, 0x99, 0xd3, 0xcf, 0xec, 0x55, 0xc0, 0x5e, 0x53, 0x7a, 0x70, + 0x66, 0x85, 0x52, 0x8f, 0xe9, 0x04, 0xc6, 0x84, 0x4e, 0x46, 0x8c, 0xda, 0x1e, 0xa6, 0x9e, 0xbe, + 0xd6, 0x54, 0xf6, 0xd7, 0xc4, 0x04, 0x5a, 0x22, 0x7e, 0x84, 0xa9, 0xa7, 0x6d, 0xa3, 0x12, 0x8e, + 0x22, 0x41, 0x51, 0x81, 0x52, 0xc4, 0x51, 0xc4, 0xa1, 0xce, 0x9b, 0x8b, 0xa9, 0xa1, 0x5c, 0x4e, + 0x0d, 0xe5, 0xd7, 0xd4, 0x50, 0xce, 0xaf, 0x8d, 0xc2, 0xe5, 0xb5, 0x51, 0xf8, 0x7e, 0x6d, 0x14, + 0xde, 0x3d, 0x1e, 0xfa, 0xcc, 0x9b, 0xf4, 0x4d, 0x27, 0x1c, 0xb7, 0xe5, 0xff, 0x59, 0xfe, 0x29, + 0x7e, 0xaa, 0xf3, 0xbf, 0xe3, 0xfe, 0x2a, 0xc4, 0x1f, 0xfd, 0x0e, 0x00, 0x00, 0xff, 0xff, 0xd4, + 0xe7, 0x3a, 0x65, 0xa9, 0x07, 0x00, 0x00, } func (m *ABCIResponses) Marshal() (dAtA []byte, err error) { @@ -522,21 +503,9 @@ func (m *ABCIResponses) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.BeginBlock != nil { + if m.FinalizeBlock != nil { { - size, err := m.BeginBlock.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.EndBlock != nil { - { - size, err := m.EndBlock.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.FinalizeBlock.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -546,20 +515,6 @@ func (m *ABCIResponses) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x12 } - if len(m.DeliverTxs) > 0 { - for iNdEx := len(m.DeliverTxs) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.DeliverTxs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } return len(dAtA) - i, nil } @@ -809,12 +764,12 @@ func (m *State) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x32 } - n12, err12 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.LastBlockTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.LastBlockTime):]) - if err12 != nil { - return 0, err12 + n11, err11 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.LastBlockTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.LastBlockTime):]) + if err11 != nil { + return 0, err11 } - i -= n12 - i = encodeVarintTypes(dAtA, i, uint64(n12)) + i -= n11 + i = encodeVarintTypes(dAtA, i, uint64(n11)) i-- dAtA[i] = 0x2a { @@ -869,18 +824,8 @@ func (m *ABCIResponses) Size() (n int) { } var l int _ = l - if len(m.DeliverTxs) > 0 { - for _, e := range m.DeliverTxs { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) - } - } - if m.EndBlock != nil { - l = m.EndBlock.Size() - n += 1 + l + sovTypes(uint64(l)) - } - if m.BeginBlock != nil { - l = m.BeginBlock.Size() + if m.FinalizeBlock != nil { + l = m.FinalizeBlock.Size() n += 1 + l + sovTypes(uint64(l)) } return n @@ -1028,79 +973,9 @@ func (m *ABCIResponses) Unmarshal(dAtA []byte) error { return fmt.Errorf("proto: ABCIResponses: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeliverTxs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return 
ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DeliverTxs = append(m.DeliverTxs, &types.ResponseDeliverTx{}) - if err := m.DeliverTxs[len(m.DeliverTxs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EndBlock", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.EndBlock == nil { - m.EndBlock = &types.ResponseEndBlock{} - } - if err := m.EndBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BeginBlock", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FinalizeBlock", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1127,10 +1002,10 @@ func (m *ABCIResponses) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.BeginBlock == nil { - m.BeginBlock = &types.ResponseBeginBlock{} + if m.FinalizeBlock == nil { + m.FinalizeBlock = &types.ResponseFinalizeBlock{} } - if err := m.BeginBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.FinalizeBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex diff --git a/proto/tendermint/state/types.proto b/proto/tendermint/state/types.proto index 851c5b3fcc..046031eacb 100644 --- a/proto/tendermint/state/types.proto +++ b/proto/tendermint/state/types.proto @@ -15,9 +15,7 @@ import "google/protobuf/timestamp.proto"; // of the various ABCI calls during block processing. // It is persisted to disk for each height before calling Commit. 
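One wire-level note before the message itself: unlike the pex.proto change earlier in this patch, the hunk below drops tags 1 and 3 without a reserved statement, while finalize_block keeps its original tag 2. Decoding stays backward-tolerant anyway, because the generated Unmarshal above routes unrecognized tags through skipTypes. A minimal sketch of that behaviour (a hypothetical check, using only the generated API and the tmstate import assumed above):

func decodesOldBlob() {
	// A blob persisted by an older node may still carry field 1
	// (deliver_txs). The tag byte is (1<<3)|2 = 0x0a (field 1,
	// length-delimited), followed here by a zero length.
	old := []byte{0x0a, 0x00}
	var r tmstate.ABCIResponses
	if err := r.Unmarshal(old); err != nil {
		panic(err) // not reached: the unknown tag is skipped, not rejected
	}
	// r.FinalizeBlock is nil; the stale field is silently dropped.
}
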
message ABCIResponses { - repeated tendermint.abci.ResponseDeliverTx deliver_txs = 1; - tendermint.abci.ResponseEndBlock end_block = 2; - tendermint.abci.ResponseBeginBlock begin_block = 3; + tendermint.abci.ResponseFinalizeBlock finalize_block = 2; } // ValidatorsInfo represents the latest validator set, or the last height it changed diff --git a/proto/tendermint/statesync/message_test.go b/proto/tendermint/statesync/message_test.go index 13bd633af9..f04c46e86a 100644 --- a/proto/tendermint/statesync/message_test.go +++ b/proto/tendermint/statesync/message_test.go @@ -3,13 +3,14 @@ package statesync_test import ( "encoding/hex" "testing" + "time" "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto/bls12381" ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - "github.com/tendermint/tendermint/types" ) func TestValidateMsg(t *testing.T) { @@ -186,21 +187,42 @@ func TestStateSyncVectors(t *testing.T) { { "ParamsResponse", &ssproto.ParamsResponse{ - Height: 9001, - ConsensusParams: types.DefaultConsensusParams().ToProto(), + Height: 9001, + ConsensusParams: tmproto.ConsensusParams{ + Block: &tmproto.BlockParams{ + MaxBytes: 10, + MaxGas: 20, + }, + Evidence: &tmproto.EvidenceParams{ + MaxAgeNumBlocks: 10, + MaxAgeDuration: 300, + MaxBytes: 100, + }, + Validator: &tmproto.ValidatorParams{ + PubKeyTypes: []string{bls12381.KeyType}, + }, + Synchrony: &tmproto.SynchronyParams{ + MessageDelay: durationPtr(550), + Precision: durationPtr(90), + }, + }, }, - "423508a94612300a10088080c00a10ffffffffffffffffff01120e08a08d0612040880c60a188080401a0a0a08626c7331323338312200", + "422d08a94612280a04080a10141209080a120310ac0218641a0a0a08626c7331323338312a090a0310a6041202105a", }, } for _, tc := range testCases { - tc := tc - - msg := new(ssproto.Message) - require.NoError(t, msg.Wrap(tc.msg)) + t.Run(tc.testName, func(t *testing.T) { + msg := new(ssproto.Message) + require.NoError(t, msg.Wrap(tc.msg)) - bz, err := msg.Marshal() - require.NoError(t, err) - require.Equal(t, tc.expBytes, hex.EncodeToString(bz), tc.testName) + bz, err := msg.Marshal() + require.NoError(t, err) + require.Equal(t, tc.expBytes, hex.EncodeToString(bz)) + }) } } + +func durationPtr(t time.Duration) *time.Duration { + return &t +} diff --git a/proto/tendermint/statesync/types.proto b/proto/tendermint/statesync/types.proto index fcfd05f687..94e22e8340 100644 --- a/proto/tendermint/statesync/types.proto +++ b/proto/tendermint/statesync/types.proto @@ -1,12 +1,12 @@ syntax = "proto3"; package tendermint.statesync; +option go_package = "github.com/tendermint/tendermint/proto/tendermint/statesync"; + import "gogoproto/gogo.proto"; import "tendermint/types/types.proto"; import "tendermint/types/params.proto"; -option go_package = "github.com/tendermint/tendermint/proto/tendermint/statesync"; - message Message { oneof sum { SnapshotsRequest snapshots_request = 1; @@ -58,5 +58,6 @@ message ParamsRequest { message ParamsResponse { uint64 height = 1; - tendermint.types.ConsensusParams consensus_params = 2 [(gogoproto.nullable) = false]; -} \ No newline at end of file + tendermint.types.ConsensusParams consensus_params = 2 + [(gogoproto.nullable) = false]; +} diff --git a/proto/tendermint/types/canonical.pb.go b/proto/tendermint/types/canonical.pb.go index 0bdbe94864..ef66fdc351 100644 --- a/proto/tendermint/types/canonical.pb.go +++ b/proto/tendermint/types/canonical.pb.go @@ -300,48 +300,121 @@ func 
(m *CanonicalVote) GetChainID() string { return "" } +// CanonicalVoteExtension provides us a way to serialize a vote extension from +// a particular validator such that we can sign over those serialized bytes. +type CanonicalVoteExtension struct { + Extension []byte `protobuf:"bytes,1,opt,name=extension,proto3" json:"extension,omitempty"` + Height int64 `protobuf:"fixed64,2,opt,name=height,proto3" json:"height,omitempty"` + Round int64 `protobuf:"fixed64,3,opt,name=round,proto3" json:"round,omitempty"` + ChainId string `protobuf:"bytes,4,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` +} + +func (m *CanonicalVoteExtension) Reset() { *m = CanonicalVoteExtension{} } +func (m *CanonicalVoteExtension) String() string { return proto.CompactTextString(m) } +func (*CanonicalVoteExtension) ProtoMessage() {} +func (*CanonicalVoteExtension) Descriptor() ([]byte, []int) { + return fileDescriptor_8d1a1a84ff7267ed, []int{4} +} +func (m *CanonicalVoteExtension) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CanonicalVoteExtension) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CanonicalVoteExtension.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CanonicalVoteExtension) XXX_Merge(src proto.Message) { + xxx_messageInfo_CanonicalVoteExtension.Merge(m, src) +} +func (m *CanonicalVoteExtension) XXX_Size() int { + return m.Size() +} +func (m *CanonicalVoteExtension) XXX_DiscardUnknown() { + xxx_messageInfo_CanonicalVoteExtension.DiscardUnknown(m) +} + +var xxx_messageInfo_CanonicalVoteExtension proto.InternalMessageInfo + +func (m *CanonicalVoteExtension) GetExtension() []byte { + if m != nil { + return m.Extension + } + return nil +} + +func (m *CanonicalVoteExtension) GetHeight() int64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *CanonicalVoteExtension) GetRound() int64 { + if m != nil { + return m.Round + } + return 0 +} + +func (m *CanonicalVoteExtension) GetChainId() string { + if m != nil { + return m.ChainId + } + return "" +} + func init() { proto.RegisterType((*CanonicalBlockID)(nil), "tendermint.types.CanonicalBlockID") proto.RegisterType((*CanonicalPartSetHeader)(nil), "tendermint.types.CanonicalPartSetHeader") proto.RegisterType((*CanonicalProposal)(nil), "tendermint.types.CanonicalProposal") proto.RegisterType((*CanonicalVote)(nil), "tendermint.types.CanonicalVote") + proto.RegisterType((*CanonicalVoteExtension)(nil), "tendermint.types.CanonicalVoteExtension") } func init() { proto.RegisterFile("tendermint/types/canonical.proto", fileDescriptor_8d1a1a84ff7267ed) } var fileDescriptor_8d1a1a84ff7267ed = []byte{ - // 485 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x53, 0xbf, 0x6f, 0xd3, 0x40, - 0x14, 0xce, 0xa5, 0x6e, 0xe2, 0x5c, 0x1b, 0x08, 0xa7, 0xaa, 0xb2, 0x22, 0x64, 0x5b, 0x1e, 0x90, - 0x59, 0x6c, 0xa9, 0x1d, 0xd8, 0x5d, 0x06, 0x82, 0x40, 0x14, 0xb7, 0xea, 0xc0, 0x12, 0x5d, 0xec, - 0xc3, 0xb6, 0x70, 0x7c, 0x27, 0xfb, 0x32, 0x74, 0xe1, 0x6f, 0xe8, 0x9f, 0xd5, 0xb1, 0x23, 0x2c, - 0x01, 0x39, 0x12, 0x7f, 0x07, 0xba, 0xe7, 0x24, 0x8e, 0x5a, 0x60, 0x41, 0xea, 0x62, 0xbd, 0x1f, - 0x9f, 0xbf, 0xf7, 0xdd, 0xf7, 0xf4, 0xb0, 0x2d, 0x59, 0x11, 0xb3, 0x72, 0x9e, 0x15, 0xd2, 0x97, - 0xd7, 0x82, 0x55, 0x7e, 0x44, 0x0b, 0x5e, 0x64, 0x11, 0xcd, 0x3d, 0x51, 0x72, 0xc9, 0xc9, 0xa8, - 0x45, 0x78, 0x80, 0x18, 
0x1f, 0x25, 0x3c, 0xe1, 0xd0, 0xf4, 0x55, 0xd4, 0xe0, 0xc6, 0xcf, 0x1f, - 0x30, 0xc1, 0x77, 0xdd, 0xb5, 0x12, 0xce, 0x93, 0x9c, 0xf9, 0x90, 0xcd, 0x16, 0x9f, 0x7d, 0x99, - 0xcd, 0x59, 0x25, 0xe9, 0x5c, 0x34, 0x00, 0xe7, 0x2b, 0x1e, 0x9d, 0x6d, 0x26, 0x07, 0x39, 0x8f, - 0xbe, 0x4c, 0x5e, 0x13, 0x82, 0xb5, 0x94, 0x56, 0xa9, 0x81, 0x6c, 0xe4, 0x1e, 0x86, 0x10, 0x93, - 0x2b, 0xfc, 0x54, 0xd0, 0x52, 0x4e, 0x2b, 0x26, 0xa7, 0x29, 0xa3, 0x31, 0x2b, 0x8d, 0xae, 0x8d, - 0xdc, 0x83, 0x13, 0xd7, 0xbb, 0x2f, 0xd4, 0xdb, 0x12, 0x9e, 0xd3, 0x52, 0x5e, 0x30, 0xf9, 0x06, - 0xf0, 0x81, 0x76, 0xbb, 0xb4, 0x3a, 0xe1, 0x50, 0xec, 0x16, 0x9d, 0x00, 0x1f, 0xff, 0x19, 0x4e, - 0x8e, 0xf0, 0xbe, 0xe4, 0x92, 0xe6, 0x20, 0x63, 0x18, 0x36, 0xc9, 0x56, 0x5b, 0xb7, 0xd5, 0xe6, - 0x7c, 0xef, 0xe2, 0x67, 0x2d, 0x49, 0xc9, 0x05, 0xaf, 0x68, 0x4e, 0x4e, 0xb1, 0xa6, 0xe4, 0xc0, - 0xef, 0x4f, 0x4e, 0xac, 0x87, 0x32, 0x2f, 0xb2, 0xa4, 0x60, 0xf1, 0xfb, 0x2a, 0xb9, 0xbc, 0x16, - 0x2c, 0x04, 0x30, 0x39, 0xc6, 0xbd, 0x94, 0x65, 0x49, 0x2a, 0x61, 0xc0, 0x28, 0x5c, 0x67, 0x4a, - 0x4c, 0xc9, 0x17, 0x45, 0x6c, 0xec, 0x41, 0xb9, 0x49, 0xc8, 0x4b, 0x3c, 0x10, 0x3c, 0x9f, 0x36, - 0x1d, 0xcd, 0x46, 0xee, 0x5e, 0x70, 0x58, 0x2f, 0x2d, 0xfd, 0xfc, 0xc3, 0xbb, 0x50, 0xd5, 0x42, - 0x5d, 0xf0, 0x1c, 0x22, 0xf2, 0x16, 0xeb, 0x33, 0x65, 0xef, 0x34, 0x8b, 0x8d, 0x7d, 0x30, 0xce, - 0xf9, 0x87, 0x71, 0xeb, 0x4d, 0x04, 0x07, 0xf5, 0xd2, 0xea, 0xaf, 0x93, 0xb0, 0x0f, 0x04, 0x93, - 0x98, 0x04, 0x78, 0xb0, 0x5d, 0xa3, 0xd1, 0x03, 0xb2, 0xb1, 0xd7, 0x2c, 0xda, 0xdb, 0x2c, 0xda, - 0xbb, 0xdc, 0x20, 0x02, 0x5d, 0xf9, 0x7e, 0xf3, 0xc3, 0x42, 0x61, 0xfb, 0x1b, 0x79, 0x81, 0xf5, - 0x28, 0xa5, 0x59, 0xa1, 0xf4, 0xf4, 0x6d, 0xe4, 0x0e, 0x9a, 0x59, 0x67, 0xaa, 0xa6, 0x66, 0x41, - 0x73, 0x12, 0x3b, 0xbf, 0x10, 0x1e, 0x6e, 0x65, 0x5d, 0x71, 0xc9, 0x1e, 0xc3, 0xd7, 0x5d, 0xb3, - 0xb4, 0xff, 0x34, 0x6b, 0xf7, 0xa1, 0xbd, 0xbf, 0x3f, 0x34, 0xf8, 0x78, 0x5b, 0x9b, 0xe8, 0xae, - 0x36, 0xd1, 0xcf, 0xda, 0x44, 0x37, 0x2b, 0xb3, 0x73, 0xb7, 0x32, 0x3b, 0xdf, 0x56, 0x66, 0xe7, - 0xd3, 0xab, 0x24, 0x93, 0xe9, 0x62, 0xe6, 0x45, 0x7c, 0xee, 0xef, 0x1e, 0x5b, 0x1b, 0x36, 0x47, - 0x79, 0xff, 0x10, 0x67, 0x3d, 0xa8, 0x9f, 0xfe, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x80, 0x68, 0xba, - 0x81, 0xed, 0x03, 0x00, 0x00, + // 520 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x54, 0x4f, 0x6b, 0x9c, 0x40, + 0x14, 0xdf, 0xd9, 0x98, 0x5d, 0x9d, 0x64, 0xdb, 0xed, 0x10, 0x82, 0x5d, 0x82, 0x8a, 0x87, 0x62, + 0x2f, 0x0a, 0xc9, 0xa1, 0x77, 0xd3, 0x42, 0xb7, 0xb4, 0x34, 0x35, 0x21, 0x87, 0x5e, 0x96, 0x59, + 0x9d, 0xaa, 0xd4, 0x75, 0x44, 0x67, 0xa1, 0xb9, 0xb4, 0x5f, 0x21, 0x1f, 0x2b, 0xc7, 0x1c, 0xdb, + 0xcb, 0xb6, 0xb8, 0xd0, 0xcf, 0x51, 0x66, 0x74, 0x55, 0x92, 0xb2, 0x50, 0x0a, 0xb9, 0xc8, 0xfb, + 0xf3, 0x9b, 0xf7, 0x7e, 0xef, 0xf7, 0x9c, 0x81, 0x06, 0x23, 0x69, 0x40, 0xf2, 0x45, 0x9c, 0x32, + 0x87, 0x5d, 0x65, 0xa4, 0x70, 0x7c, 0x9c, 0xd2, 0x34, 0xf6, 0x71, 0x62, 0x67, 0x39, 0x65, 0x14, + 0x8d, 0x5b, 0x84, 0x2d, 0x10, 0x93, 0x83, 0x90, 0x86, 0x54, 0x24, 0x1d, 0x6e, 0x55, 0xb8, 0xc9, + 0xd1, 0xbd, 0x4a, 0xe2, 0x5b, 0x67, 0xf5, 0x90, 0xd2, 0x30, 0x21, 0x8e, 0xf0, 0xe6, 0xcb, 0x4f, + 0x0e, 0x8b, 0x17, 0xa4, 0x60, 0x78, 0x91, 0x55, 0x00, 0xf3, 0x2b, 0x1c, 0x9f, 0x6e, 0x3a, 0xbb, + 0x09, 0xf5, 0x3f, 0x4f, 0x5f, 0x22, 0x04, 0xa5, 0x08, 0x17, 0x91, 0x0a, 0x0c, 0x60, 0xed, 0x7b, + 0xc2, 0x46, 0x97, 0xf0, 0x71, 0x86, 0x73, 0x36, 0x2b, 0x08, 0x9b, 0x45, 0x04, 0x07, 0x24, 0x57, + 0xfb, 0x06, 0xb0, 0xf6, 0x8e, 0x2d, 0xfb, 0x2e, 0x51, 0xbb, 0x29, 
0x78, 0x86, 0x73, 0x76, 0x4e, + 0xd8, 0x6b, 0x81, 0x77, 0xa5, 0x9b, 0x95, 0xde, 0xf3, 0x46, 0x59, 0x37, 0x68, 0xba, 0xf0, 0xf0, + 0xef, 0x70, 0x74, 0x00, 0x77, 0x19, 0x65, 0x38, 0x11, 0x34, 0x46, 0x5e, 0xe5, 0x34, 0xdc, 0xfa, + 0x2d, 0x37, 0xf3, 0x47, 0x1f, 0x3e, 0x69, 0x8b, 0xe4, 0x34, 0xa3, 0x05, 0x4e, 0xd0, 0x09, 0x94, + 0x38, 0x1d, 0x71, 0xfc, 0xd1, 0xb1, 0x7e, 0x9f, 0xe6, 0x79, 0x1c, 0xa6, 0x24, 0x78, 0x57, 0x84, + 0x17, 0x57, 0x19, 0xf1, 0x04, 0x18, 0x1d, 0xc2, 0x41, 0x44, 0xe2, 0x30, 0x62, 0xa2, 0xc1, 0xd8, + 0xab, 0x3d, 0x4e, 0x26, 0xa7, 0xcb, 0x34, 0x50, 0x77, 0x44, 0xb8, 0x72, 0xd0, 0x73, 0xa8, 0x64, + 0x34, 0x99, 0x55, 0x19, 0xc9, 0x00, 0xd6, 0x8e, 0xbb, 0x5f, 0xae, 0x74, 0xf9, 0xec, 0xfd, 0x5b, + 0x8f, 0xc7, 0x3c, 0x39, 0xa3, 0x89, 0xb0, 0xd0, 0x1b, 0x28, 0xcf, 0xb9, 0xbc, 0xb3, 0x38, 0x50, + 0x77, 0x85, 0x70, 0xe6, 0x16, 0xe1, 0xea, 0x4d, 0xb8, 0x7b, 0xe5, 0x4a, 0x1f, 0xd6, 0x8e, 0x37, + 0x14, 0x05, 0xa6, 0x01, 0x72, 0xa1, 0xd2, 0xac, 0x51, 0x1d, 0x88, 0x62, 0x13, 0xbb, 0x5a, 0xb4, + 0xbd, 0x59, 0xb4, 0x7d, 0xb1, 0x41, 0xb8, 0x32, 0xd7, 0xfd, 0xfa, 0xa7, 0x0e, 0xbc, 0xf6, 0x18, + 0x7a, 0x06, 0x65, 0x3f, 0xc2, 0x71, 0xca, 0xf9, 0x0c, 0x0d, 0x60, 0x29, 0x55, 0xaf, 0x53, 0x1e, + 0xe3, 0xbd, 0x44, 0x72, 0x1a, 0x98, 0xbf, 0x01, 0x1c, 0x35, 0xb4, 0x2e, 0x29, 0x23, 0x0f, 0xa1, + 0x6b, 0x57, 0x2c, 0xe9, 0x3f, 0xc5, 0xea, 0x0e, 0x3a, 0xd8, 0x32, 0xe8, 0xb7, 0xce, 0x8f, 0xc8, + 0xe7, 0x7c, 0xf5, 0x85, 0x91, 0xb4, 0x88, 0x69, 0x8a, 0x8e, 0xa0, 0x42, 0x36, 0x4e, 0x7d, 0x27, + 0xda, 0xc0, 0x3f, 0x4e, 0xf6, 0xb4, 0xc3, 0x86, 0x4f, 0xa6, 0x34, 0x04, 0xdc, 0x0f, 0x37, 0xa5, + 0x06, 0x6e, 0x4b, 0x0d, 0xfc, 0x2a, 0x35, 0x70, 0xbd, 0xd6, 0x7a, 0xb7, 0x6b, 0xad, 0xf7, 0x7d, + 0xad, 0xf5, 0x3e, 0xbe, 0x08, 0x63, 0x16, 0x2d, 0xe7, 0xb6, 0x4f, 0x17, 0x4e, 0xf7, 0xb6, 0xb7, + 0x66, 0xf5, 0x2a, 0xdc, 0x7d, 0x09, 0xe6, 0x03, 0x11, 0x3f, 0xf9, 0x13, 0x00, 0x00, 0xff, 0xff, + 0x2a, 0x19, 0x0f, 0x11, 0x6e, 0x04, 0x00, 0x00, } func (m *CanonicalBlockID) Marshal() (dAtA []byte, err error) { @@ -550,6 +623,55 @@ func (m *CanonicalVote) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *CanonicalVoteExtension) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CanonicalVoteExtension) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CanonicalVoteExtension) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChainId) > 0 { + i -= len(m.ChainId) + copy(dAtA[i:], m.ChainId) + i = encodeVarintCanonical(dAtA, i, uint64(len(m.ChainId))) + i-- + dAtA[i] = 0x22 + } + if m.Round != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.Round)) + i-- + dAtA[i] = 0x19 + } + if m.Height != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.Height)) + i-- + dAtA[i] = 0x11 + } + if len(m.Extension) > 0 { + i -= len(m.Extension) + copy(dAtA[i:], m.Extension) + i = encodeVarintCanonical(dAtA, i, uint64(len(m.Extension))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func encodeVarintCanonical(dAtA []byte, offset int, v uint64) int { offset -= sovCanonical(v) base := offset @@ -649,6 +771,29 @@ func (m *CanonicalVote) Size() (n int) { return n } +func (m *CanonicalVoteExtension) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = 
len(m.Extension) + if l > 0 { + n += 1 + l + sovCanonical(uint64(l)) + } + if m.Height != 0 { + n += 9 + } + if m.Round != 0 { + n += 9 + } + l = len(m.ChainId) + if l > 0 { + n += 1 + l + sovCanonical(uint64(l)) + } + return n +} + func sovCanonical(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -1241,6 +1386,142 @@ func (m *CanonicalVote) Unmarshal(dAtA []byte) error { } return nil } +func (m *CanonicalVoteExtension) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCanonical + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CanonicalVoteExtension: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CanonicalVoteExtension: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extension", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCanonical + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCanonical + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthCanonical + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Extension = append(m.Extension[:0], dAtA[iNdEx:postIndex]...) + if m.Extension == nil { + m.Extension = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.Height = int64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) + } + m.Round = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.Round = int64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCanonical + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCanonical + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCanonical + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCanonical(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCanonical + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipCanonical(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/proto/tendermint/types/canonical.proto 
b/proto/tendermint/types/canonical.proto index ebbc5d8a0d..930cda496b 100644 --- a/proto/tendermint/types/canonical.proto +++ b/proto/tendermint/types/canonical.proto @@ -34,3 +34,12 @@ message CanonicalVote { CanonicalBlockID block_id = 4 [(gogoproto.customname) = "BlockID"]; string chain_id = 6 [(gogoproto.customname) = "ChainID"]; } + +// CanonicalVoteExtension provides us a way to serialize a vote extension from +// a particular validator such that we can sign over those serialized bytes. +message CanonicalVoteExtension { + bytes extension = 1; + sfixed64 height = 2; + sfixed64 round = 3; + string chain_id = 4; +} diff --git a/proto/tendermint/types/evidence.pb.go b/proto/tendermint/types/evidence.pb.go index 753d62a3ce..7de47caa19 100644 --- a/proto/tendermint/types/evidence.pb.go +++ b/proto/tendermint/types/evidence.pb.go @@ -99,7 +99,8 @@ func (*Evidence) XXX_OneofWrappers() []interface{} { } } -// DuplicateVoteEvidence contains evidence of a validator signed two conflicting votes. +// DuplicateVoteEvidence contains evidence of a validator signed two conflicting +// votes. type DuplicateVoteEvidence struct { VoteA *Vote `protobuf:"bytes,1,opt,name=vote_a,json=voteA,proto3" json:"vote_a,omitempty"` VoteB *Vote `protobuf:"bytes,2,opt,name=vote_b,json=voteB,proto3" json:"vote_b,omitempty"` diff --git a/proto/tendermint/types/evidence.proto b/proto/tendermint/types/evidence.proto index f7dca3f9ad..b09ae65715 100644 --- a/proto/tendermint/types/evidence.proto +++ b/proto/tendermint/types/evidence.proto @@ -6,7 +6,6 @@ option go_package = "github.com/tendermint/tendermint/proto/tendermint/types"; import "gogoproto/gogo.proto"; import "google/protobuf/timestamp.proto"; import "tendermint/types/types.proto"; -import "tendermint/types/validator.proto"; message Evidence { oneof sum { @@ -14,13 +13,15 @@ message Evidence { } } -// DuplicateVoteEvidence contains evidence of a validator signed two conflicting votes. +// DuplicateVoteEvidence contains evidence of a validator signed two conflicting +// votes. 
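One aside on the canonical.proto hunk above, before the evidence message definition continues. A protobuf tag byte is field_number<<3 | wire_type, which is why the generated marshaller for CanonicalVoteExtension writes 0x0a (extension, length-delimited), 0x11 (height, fixed64), 0x19 (round, fixed64) and 0x22 (chain_id); the same arithmetic explains the 0x0a to 0x1a and 0x12 to 0x22 tag changes in the pex.pb.go hunk at the top of this section, where pex_request and pex_response moved to field numbers 3 and 4. A short usage sketch follows; the function name is hypothetical, and the tmproto import matches the one used in message_test.go above.

// Hypothetical helper producing the bytes a validator signs over.
func voteExtensionSignBytes(chainID string, height, round int64, ext []byte) ([]byte, error) {
	canonical := tmproto.CanonicalVoteExtension{
		Extension: ext,
		Height:    height,
		Round:     round,
		ChainId:   chainID,
	}
	return canonical.Marshal() // deterministic sign-bytes for the extension
}

Using sfixed64 for height and round keeps each at a fixed eight bytes on the wire, so the sign-bytes layout does not vary with the magnitude of either value.
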
message DuplicateVoteEvidence { tendermint.types.Vote vote_a = 1; tendermint.types.Vote vote_b = 2; int64 total_voting_power = 3; int64 validator_power = 4; - google.protobuf.Timestamp timestamp = 5 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + google.protobuf.Timestamp timestamp = 5 + [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; } message EvidenceList { diff --git a/proto/tendermint/types/params.pb.go b/proto/tendermint/types/params.pb.go index 5a9f103a9c..41d417b915 100644 --- a/proto/tendermint/types/params.pb.go +++ b/proto/tendermint/types/params.pb.go @@ -34,6 +34,8 @@ type ConsensusParams struct { Evidence *EvidenceParams `protobuf:"bytes,2,opt,name=evidence,proto3" json:"evidence,omitempty"` Validator *ValidatorParams `protobuf:"bytes,3,opt,name=validator,proto3" json:"validator,omitempty"` Version *VersionParams `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"` + Synchrony *SynchronyParams `protobuf:"bytes,5,opt,name=synchrony,proto3" json:"synchrony,omitempty"` + Timeout *TimeoutParams `protobuf:"bytes,6,opt,name=timeout,proto3" json:"timeout,omitempty"` } func (m *ConsensusParams) Reset() { *m = ConsensusParams{} } @@ -97,6 +99,20 @@ func (m *ConsensusParams) GetVersion() *VersionParams { return nil } +func (m *ConsensusParams) GetSynchrony() *SynchronyParams { + if m != nil { + return m.Synchrony + } + return nil +} + +func (m *ConsensusParams) GetTimeout() *TimeoutParams { + if m != nil { + return m.Timeout + } + return nil +} + // BlockParams contains limits on the block size. type BlockParams struct { // Max block size, in bytes. @@ -167,8 +183,8 @@ type EvidenceParams struct { // mechanism for handling [Nothing-At-Stake // attacks](https://github.com/ethereum/wiki/wiki/Proof-of-Stake-FAQ#what-is-the-nothing-at-stake-problem-and-how-can-it-be-fixed). MaxAgeDuration time.Duration `protobuf:"bytes,2,opt,name=max_age_duration,json=maxAgeDuration,proto3,stdduration" json:"max_age_duration"` - // This sets the maximum size of total evidence in bytes that can be committed in a single block. - // and should fall comfortably under the max block bytes. + // This sets the maximum size of total evidence in bytes that can be committed + // in a single block. and should fall comfortably under the max block bytes. // Default is 1048576 or 1MB MaxBytes int64 `protobuf:"varint,3,opt,name=max_bytes,json=maxBytes,proto3" json:"max_bytes,omitempty"` } @@ -373,6 +389,183 @@ func (m *HashedParams) GetBlockMaxGas() int64 { return 0 } +// SynchronyParams configure the bounds under which a proposed block's timestamp is considered valid. +// These parameters are part of the proposer-based timestamps algorithm. For more information, +// see the specification of proposer-based timestamps: +// https://github.com/tendermint/tendermint/tree/master/spec/consensus/proposer-based-timestamp +type SynchronyParams struct { + // message_delay bounds how long a proposal message may take to reach all validators on a network + // and still be considered valid. + MessageDelay *time.Duration `protobuf:"bytes,1,opt,name=message_delay,json=messageDelay,proto3,stdduration" json:"message_delay,omitempty"` + // precision bounds how skewed a proposer's clock may be from any validator + // on the network while still producing valid proposals. 
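+	//
+	// As a sketch of how the two bounds combine (the actual predicate lives
+	// in the consensus code and the proposer-based-timestamps spec, not in
+	// this generated file): a proposal timestamp ts received at local time
+	// now is considered timely when
+	//
+	//	ts-precision <= now && now <= ts+message_delay+precision
+	//
+	// that is, the proposer's clock may lead or lag by at most precision,
+	// with up to message_delay of transit time allowed for the proposal.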
+	Precision *time.Duration `protobuf:"bytes,2,opt,name=precision,proto3,stdduration" json:"precision,omitempty"`
+}
+
+func (m *SynchronyParams) Reset()         { *m = SynchronyParams{} }
+func (m *SynchronyParams) String() string { return proto.CompactTextString(m) }
+func (*SynchronyParams) ProtoMessage()    {}
+func (*SynchronyParams) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e12598271a686f57, []int{6}
+}
+func (m *SynchronyParams) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SynchronyParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_SynchronyParams.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *SynchronyParams) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SynchronyParams.Merge(m, src)
+}
+func (m *SynchronyParams) XXX_Size() int {
+	return m.Size()
+}
+func (m *SynchronyParams) XXX_DiscardUnknown() {
+	xxx_messageInfo_SynchronyParams.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SynchronyParams proto.InternalMessageInfo
+
+func (m *SynchronyParams) GetMessageDelay() *time.Duration {
+	if m != nil {
+		return m.MessageDelay
+	}
+	return nil
+}
+
+func (m *SynchronyParams) GetPrecision() *time.Duration {
+	if m != nil {
+		return m.Precision
+	}
+	return nil
+}
+
+// TimeoutParams configure the timeouts for the steps of the Tendermint consensus algorithm.
+type TimeoutParams struct {
+	// These fields configure the timeouts for the propose step of the Tendermint
+	// consensus algorithm: propose is the initial timeout and propose_delta
+	// determines how much the timeout grows in subsequent rounds.
+	// For the first round, this propose timeout is used and for every subsequent
+	// round, the timeout grows by propose_delta.
+	//
+	// For example:
+	// With propose = 10ms, propose_delta = 5ms, the first round's propose phase
+	// timeout would be 10ms, the second round's would be 15ms, the third 20ms and so on.
+	//
+	// If a node waiting for a proposal message does not receive one matching its
+	// current height and round before this timeout, the node will issue a
+	// nil prevote for the round and advance to the next step.
+	Propose      *time.Duration `protobuf:"bytes,1,opt,name=propose,proto3,stdduration" json:"propose,omitempty"`
+	ProposeDelta *time.Duration `protobuf:"bytes,2,opt,name=propose_delta,json=proposeDelta,proto3,stdduration" json:"propose_delta,omitempty"`
+	// vote along with vote_delta configure the timeout for both of the prevote and
+	// precommit steps of the Tendermint consensus algorithm.
+	//
+	// These parameters influence the vote step timeouts in the same way that
+	// the propose and propose_delta parameters do for the proposal step.
+	//
+	// The vote timeout does not begin until a quorum of votes has been received. Once
+	// a quorum of votes has been seen and this timeout elapses, Tendermint will
+	// proceed to the next step of the consensus algorithm. If Tendermint receives
+	// all of the remaining votes before the end of the timeout, it will proceed
+	// to the next step immediately.
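+	//
+	// A worked sketch of the shared growth rule (hypothetical helper, for
+	// illustration only):
+	//
+	//	func voteTimeout(vote, voteDelta time.Duration, round int32) time.Duration {
+	//		return vote + voteDelta*time.Duration(round)
+	//	}
+	//
+	// With vote = 1s and vote_delta = 500ms this gives 1s in the first
+	// round, 1.5s in the second, 2s in the third, and so on, matching the
+	// propose and propose_delta behaviour described above.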
+ Vote *time.Duration `protobuf:"bytes,3,opt,name=vote,proto3,stdduration" json:"vote,omitempty"` + VoteDelta *time.Duration `protobuf:"bytes,4,opt,name=vote_delta,json=voteDelta,proto3,stdduration" json:"vote_delta,omitempty"` + // commit configures how long Tendermint will wait after receiving a quorum of + // precommits before beginning consensus for the next height. This can be + // used to allow slow precommits to arrive for inclusion in the next height before progressing. + Commit *time.Duration `protobuf:"bytes,5,opt,name=commit,proto3,stdduration" json:"commit,omitempty"` + // bypass_commit_timeout configures the node to proceed immediately to + // the next height once the node has received all precommits for a block, forgoing + // the remaining commit timeout. + // Setting bypass_commit_timeout false (the default) causes Tendermint to wait + // for the full commit timeout. + BypassCommitTimeout bool `protobuf:"varint,6,opt,name=bypass_commit_timeout,json=bypassCommitTimeout,proto3" json:"bypass_commit_timeout,omitempty"` +} + +func (m *TimeoutParams) Reset() { *m = TimeoutParams{} } +func (m *TimeoutParams) String() string { return proto.CompactTextString(m) } +func (*TimeoutParams) ProtoMessage() {} +func (*TimeoutParams) Descriptor() ([]byte, []int) { + return fileDescriptor_e12598271a686f57, []int{7} +} +func (m *TimeoutParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TimeoutParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TimeoutParams.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TimeoutParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_TimeoutParams.Merge(m, src) +} +func (m *TimeoutParams) XXX_Size() int { + return m.Size() +} +func (m *TimeoutParams) XXX_DiscardUnknown() { + xxx_messageInfo_TimeoutParams.DiscardUnknown(m) +} + +var xxx_messageInfo_TimeoutParams proto.InternalMessageInfo + +func (m *TimeoutParams) GetPropose() *time.Duration { + if m != nil { + return m.Propose + } + return nil +} + +func (m *TimeoutParams) GetProposeDelta() *time.Duration { + if m != nil { + return m.ProposeDelta + } + return nil +} + +func (m *TimeoutParams) GetVote() *time.Duration { + if m != nil { + return m.Vote + } + return nil +} + +func (m *TimeoutParams) GetVoteDelta() *time.Duration { + if m != nil { + return m.VoteDelta + } + return nil +} + +func (m *TimeoutParams) GetCommit() *time.Duration { + if m != nil { + return m.Commit + } + return nil +} + +func (m *TimeoutParams) GetBypassCommitTimeout() bool { + if m != nil { + return m.BypassCommitTimeout + } + return false +} + func init() { proto.RegisterType((*ConsensusParams)(nil), "tendermint.types.ConsensusParams") proto.RegisterType((*BlockParams)(nil), "tendermint.types.BlockParams") @@ -380,44 +573,57 @@ func init() { proto.RegisterType((*ValidatorParams)(nil), "tendermint.types.ValidatorParams") proto.RegisterType((*VersionParams)(nil), "tendermint.types.VersionParams") proto.RegisterType((*HashedParams)(nil), "tendermint.types.HashedParams") + proto.RegisterType((*SynchronyParams)(nil), "tendermint.types.SynchronyParams") + proto.RegisterType((*TimeoutParams)(nil), "tendermint.types.TimeoutParams") } func init() { proto.RegisterFile("tendermint/types/params.proto", fileDescriptor_e12598271a686f57) } var fileDescriptor_e12598271a686f57 = []byte{ - // 498 bytes of a gzipped FileDescriptorProto - 0x1f, 
0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x93, 0xc1, 0x6a, 0xd4, 0x40, - 0x1c, 0xc6, 0x77, 0x9a, 0xda, 0xee, 0xfe, 0xe3, 0x76, 0xcb, 0x20, 0x18, 0x2b, 0xcd, 0xae, 0x39, - 0x48, 0x41, 0x48, 0xc4, 0x22, 0x22, 0x08, 0xe2, 0x56, 0xa9, 0x20, 0x15, 0x09, 0xea, 0xa1, 0x97, - 0x30, 0xd9, 0x8c, 0x69, 0xe8, 0x4e, 0x66, 0xc8, 0x24, 0xcb, 0xee, 0xcd, 0x47, 0xf0, 0xe8, 0x23, - 0xe8, 0x9b, 0xf4, 0xd8, 0xa3, 0x27, 0x95, 0xdd, 0x17, 0x91, 0x4c, 0x32, 0xa6, 0x9b, 0xf6, 0x36, - 0x33, 0xdf, 0xef, 0x9b, 0xe1, 0xfb, 0x86, 0x3f, 0xec, 0xe7, 0x34, 0x8d, 0x68, 0xc6, 0x92, 0x34, - 0xf7, 0xf2, 0x85, 0xa0, 0xd2, 0x13, 0x24, 0x23, 0x4c, 0xba, 0x22, 0xe3, 0x39, 0xc7, 0xbb, 0x8d, - 0xec, 0x2a, 0x79, 0xef, 0x4e, 0xcc, 0x63, 0xae, 0x44, 0xaf, 0x5c, 0x55, 0xdc, 0x9e, 0x1d, 0x73, - 0x1e, 0x4f, 0xa9, 0xa7, 0x76, 0x61, 0xf1, 0xc5, 0x8b, 0x8a, 0x8c, 0xe4, 0x09, 0x4f, 0x2b, 0xdd, - 0xf9, 0xba, 0x01, 0x83, 0x23, 0x9e, 0x4a, 0x9a, 0xca, 0x42, 0x7e, 0x50, 0x2f, 0xe0, 0x43, 0xb8, - 0x15, 0x4e, 0xf9, 0xe4, 0xdc, 0x42, 0x23, 0x74, 0x60, 0x3e, 0xd9, 0x77, 0xdb, 0x6f, 0xb9, 0xe3, - 0x52, 0xae, 0x68, 0xbf, 0x62, 0xf1, 0x0b, 0xe8, 0xd2, 0x59, 0x12, 0xd1, 0x74, 0x42, 0xad, 0x0d, - 0xe5, 0x1b, 0x5d, 0xf7, 0xbd, 0xa9, 0x89, 0xda, 0xfa, 0xdf, 0x81, 0x5f, 0x42, 0x6f, 0x46, 0xa6, - 0x49, 0x44, 0x72, 0x9e, 0x59, 0x86, 0xb2, 0x3f, 0xb8, 0x6e, 0xff, 0xac, 0x91, 0xda, 0xdf, 0x78, - 0xf0, 0x73, 0xd8, 0x9e, 0xd1, 0x4c, 0x26, 0x3c, 0xb5, 0x36, 0x95, 0x7d, 0x78, 0x83, 0xbd, 0x02, - 0x6a, 0xb3, 0xe6, 0x9d, 0x23, 0x30, 0xaf, 0xe4, 0xc1, 0xf7, 0xa1, 0xc7, 0xc8, 0x3c, 0x08, 0x17, - 0x39, 0x95, 0xaa, 0x01, 0xc3, 0xef, 0x32, 0x32, 0x1f, 0x97, 0x7b, 0x7c, 0x17, 0xb6, 0x4b, 0x31, - 0x26, 0x52, 0x85, 0x34, 0xfc, 0x2d, 0x46, 0xe6, 0xc7, 0x44, 0x3a, 0x3f, 0x11, 0xec, 0xac, 0xa7, - 0xc3, 0x8f, 0x00, 0x97, 0x2c, 0x89, 0x69, 0x90, 0x16, 0x2c, 0x50, 0x35, 0xe9, 0x1b, 0x07, 0x8c, - 0xcc, 0x5f, 0xc5, 0xf4, 0x7d, 0xc1, 0xd4, 0xd3, 0x12, 0x9f, 0xc0, 0xae, 0x86, 0xf5, 0x0f, 0xd5, - 0x35, 0xde, 0x73, 0xab, 0x2f, 0x74, 0xf5, 0x17, 0xba, 0xaf, 0x6b, 0x60, 0xdc, 0xbd, 0xf8, 0x3d, - 0xec, 0x7c, 0xff, 0x33, 0x44, 0xfe, 0x4e, 0x75, 0x9f, 0x56, 0xd6, 0x43, 0x18, 0xeb, 0x21, 0x9c, - 0xa7, 0x30, 0x68, 0x35, 0x89, 0x1d, 0xe8, 0x8b, 0x22, 0x0c, 0xce, 0xe9, 0x22, 0x50, 0x5d, 0x59, - 0x68, 0x64, 0x1c, 0xf4, 0x7c, 0x53, 0x14, 0xe1, 0x3b, 0xba, 0xf8, 0x58, 0x1e, 0x39, 0x8f, 0xa1, - 0xbf, 0xd6, 0x20, 0x1e, 0x82, 0x49, 0x84, 0x08, 0x74, 0xef, 0x65, 0xb2, 0x4d, 0x1f, 0x88, 0x10, - 0x35, 0xe6, 0x9c, 0xc2, 0xed, 0xb7, 0x44, 0x9e, 0xd1, 0xa8, 0x36, 0x3c, 0x84, 0x81, 0x6a, 0x21, - 0x68, 0x17, 0xdc, 0x57, 0xc7, 0x27, 0xba, 0x65, 0x07, 0xfa, 0x0d, 0xd7, 0x74, 0x6d, 0x6a, 0xea, - 0x98, 0xc8, 0xf1, 0xa7, 0x1f, 0x4b, 0x1b, 0x5d, 0x2c, 0x6d, 0x74, 0xb9, 0xb4, 0xd1, 0xdf, 0xa5, - 0x8d, 0xbe, 0xad, 0xec, 0xce, 0xe5, 0xca, 0xee, 0xfc, 0x5a, 0xd9, 0x9d, 0xd3, 0x67, 0x71, 0x92, - 0x9f, 0x15, 0xa1, 0x3b, 0xe1, 0xcc, 0xbb, 0x3a, 0x48, 0xcd, 0xb2, 0x9a, 0x94, 0xf6, 0x90, 0x85, - 0x5b, 0xea, 0xfc, 0xf0, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x18, 0x54, 0x4f, 0xe1, 0x7f, 0x03, - 0x00, 0x00, + // 680 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x94, 0xcf, 0x6e, 0xd3, 0x4a, + 0x14, 0xc6, 0xe3, 0x26, 0x4d, 0x93, 0x93, 0xa6, 0xa9, 0xe6, 0xde, 0xab, 0xeb, 0xdb, 0xab, 0x3a, + 0xc5, 0x0b, 0x54, 0x09, 0xc9, 0x41, 0xad, 0x50, 0x85, 0xc4, 0x1f, 0x91, 0x06, 0x81, 0x84, 0x8a, + 0x90, 0x29, 0x2c, 0xba, 0xb1, 0xc6, 0xc9, 0xe0, 0x5a, 0x8d, 0x3d, 0x96, 0xc7, 0x8e, 0xe2, 0xb7, + 0x60, 0x85, 0x78, 0x04, 0x78, 0x93, 0x2e, 0xbb, 0x64, 0x05, 0x28, 
0x7d, 0x03, 0xd6, 0x2c, 0xd0, + 0xfc, 0x6b, 0x9a, 0x94, 0xd2, 0xac, 0xe2, 0xcc, 0xf9, 0x7e, 0xfe, 0x3c, 0xdf, 0x39, 0x33, 0xb0, + 0x99, 0x91, 0x78, 0x40, 0xd2, 0x28, 0x8c, 0xb3, 0x4e, 0x56, 0x24, 0x84, 0x75, 0x12, 0x9c, 0xe2, + 0x88, 0x39, 0x49, 0x4a, 0x33, 0x8a, 0xd6, 0xa7, 0x65, 0x47, 0x94, 0x37, 0xfe, 0x0e, 0x68, 0x40, + 0x45, 0xb1, 0xc3, 0x9f, 0xa4, 0x6e, 0xc3, 0x0a, 0x28, 0x0d, 0x86, 0xa4, 0x23, 0xfe, 0xf9, 0xf9, + 0xbb, 0xce, 0x20, 0x4f, 0x71, 0x16, 0xd2, 0x58, 0xd6, 0xed, 0x9f, 0x4b, 0xd0, 0xda, 0xa7, 0x31, + 0x23, 0x31, 0xcb, 0xd9, 0x2b, 0xe1, 0x80, 0x76, 0x61, 0xd9, 0x1f, 0xd2, 0xfe, 0x89, 0x69, 0x6c, + 0x19, 0xdb, 0x8d, 0x9d, 0x4d, 0x67, 0xde, 0xcb, 0xe9, 0xf2, 0xb2, 0x54, 0xbb, 0x52, 0x8b, 0x1e, + 0x40, 0x8d, 0x8c, 0xc2, 0x01, 0x89, 0xfb, 0xc4, 0x5c, 0x12, 0xdc, 0xd6, 0x55, 0xee, 0xa9, 0x52, + 0x28, 0xf4, 0x82, 0x40, 0x8f, 0xa1, 0x3e, 0xc2, 0xc3, 0x70, 0x80, 0x33, 0x9a, 0x9a, 0x65, 0x81, + 0xdf, 0xba, 0x8a, 0xbf, 0xd5, 0x12, 0xc5, 0x4f, 0x19, 0x74, 0x1f, 0x56, 0x46, 0x24, 0x65, 0x21, + 0x8d, 0xcd, 0x8a, 0xc0, 0xdb, 0xbf, 0xc1, 0xa5, 0x40, 0xc1, 0x5a, 0xcf, 0xbd, 0x59, 0x11, 0xf7, + 0x8f, 0x53, 0x1a, 0x17, 0xe6, 0xf2, 0x75, 0xde, 0xaf, 0xb5, 0x44, 0x7b, 0x5f, 0x30, 0xdc, 0x3b, + 0x0b, 0x23, 0x42, 0xf3, 0xcc, 0xac, 0x5e, 0xe7, 0x7d, 0x28, 0x05, 0xda, 0x5b, 0xe9, 0xed, 0x7d, + 0x68, 0x5c, 0xca, 0x12, 0xfd, 0x0f, 0xf5, 0x08, 0x8f, 0x3d, 0xbf, 0xc8, 0x08, 0x13, 0xe9, 0x97, + 0xdd, 0x5a, 0x84, 0xc7, 0x5d, 0xfe, 0x1f, 0xfd, 0x0b, 0x2b, 0xbc, 0x18, 0x60, 0x26, 0x02, 0x2e, + 0xbb, 0xd5, 0x08, 0x8f, 0x9f, 0x61, 0x66, 0x7f, 0x36, 0x60, 0x6d, 0x36, 0x59, 0x74, 0x07, 0x10, + 0xd7, 0xe2, 0x80, 0x78, 0x71, 0x1e, 0x79, 0xa2, 0x45, 0xfa, 0x8d, 0xad, 0x08, 0x8f, 0x9f, 0x04, + 0xe4, 0x65, 0x1e, 0x09, 0x6b, 0x86, 0x0e, 0x60, 0x5d, 0x8b, 0xf5, 0x74, 0xa8, 0x16, 0xfe, 0xe7, + 0xc8, 0xf1, 0x71, 0xf4, 0xf8, 0x38, 0x3d, 0x25, 0xe8, 0xd6, 0x4e, 0xbf, 0xb6, 0x4b, 0x1f, 0xbf, + 0xb5, 0x0d, 0x77, 0x4d, 0xbe, 0x4f, 0x57, 0x66, 0x37, 0x51, 0x9e, 0xdd, 0x84, 0x7d, 0x0f, 0x5a, + 0x73, 0x5d, 0x44, 0x36, 0x34, 0x93, 0xdc, 0xf7, 0x4e, 0x48, 0xe1, 0x89, 0xac, 0x4c, 0x63, 0xab, + 0xbc, 0x5d, 0x77, 0x1b, 0x49, 0xee, 0xbf, 0x20, 0xc5, 0x21, 0x5f, 0xb2, 0xef, 0x42, 0x73, 0xa6, + 0x7b, 0xa8, 0x0d, 0x0d, 0x9c, 0x24, 0x9e, 0xee, 0x39, 0xdf, 0x59, 0xc5, 0x05, 0x9c, 0x24, 0x4a, + 0x66, 0x1f, 0xc1, 0xea, 0x73, 0xcc, 0x8e, 0xc9, 0x40, 0x01, 0xb7, 0xa1, 0x25, 0x52, 0xf0, 0xe6, + 0x03, 0x6e, 0x8a, 0xe5, 0x03, 0x9d, 0xb2, 0x0d, 0xcd, 0xa9, 0x6e, 0x9a, 0x75, 0x43, 0xab, 0x78, + 0xe0, 0x1f, 0x0c, 0x68, 0xcd, 0xcd, 0x03, 0xea, 0x41, 0x33, 0x22, 0x8c, 0x89, 0x10, 0xc9, 0x10, + 0x17, 0xea, 0xf0, 0xfc, 0x21, 0xc1, 0x8a, 0x48, 0x6f, 0x55, 0x51, 0x3d, 0x0e, 0xa1, 0x87, 0x50, + 0x4f, 0x52, 0xd2, 0x0f, 0xd9, 0x42, 0x3d, 0x90, 0x6f, 0x98, 0x12, 0xf6, 0x8f, 0x25, 0x68, 0xce, + 0x4c, 0x1a, 0x9f, 0xcd, 0x24, 0xa5, 0x09, 0x65, 0x64, 0xd1, 0x0f, 0xd2, 0x7a, 0xbe, 0x23, 0xf5, + 0xc8, 0x77, 0x94, 0xe1, 0x45, 0xbf, 0x67, 0x55, 0x51, 0x3d, 0x0e, 0xa1, 0x5d, 0xa8, 0x8c, 0x68, + 0x46, 0xd4, 0xa1, 0xbe, 0x11, 0x16, 0x62, 0xf4, 0x08, 0x80, 0xff, 0x2a, 0xdf, 0xca, 0x82, 0x39, + 0x70, 0x44, 0x9a, 0xee, 0x41, 0xb5, 0x4f, 0xa3, 0x28, 0xcc, 0xd4, 0x79, 0xbe, 0x91, 0x55, 0x72, + 0xb4, 0x03, 0xff, 0xf8, 0x45, 0x82, 0x19, 0xf3, 0xe4, 0x82, 0x77, 0xf9, 0x60, 0xd7, 0xdc, 0xbf, + 0x64, 0x71, 0x5f, 0xd4, 0x54, 0xd0, 0xdd, 0x37, 0x9f, 0x26, 0x96, 0x71, 0x3a, 0xb1, 0x8c, 0xb3, + 0x89, 0x65, 0x7c, 0x9f, 0x58, 0xc6, 0xfb, 0x73, 0xab, 0x74, 0x76, 0x6e, 0x95, 0xbe, 0x9c, 0x5b, + 0xa5, 0xa3, 0xbd, 0x20, 0xcc, 0x8e, 0x73, 0xdf, 0xe9, 0xd3, 0xa8, 0x73, 0xf9, 0x4a, 0x9f, 
0x3e, + 0xca, 0x3b, 0x7b, 0xfe, 0xba, 0xf7, 0xab, 0x62, 0x7d, 0xf7, 0x57, 0x00, 0x00, 0x00, 0xff, 0xff, + 0xfc, 0x06, 0xae, 0x9f, 0x09, 0x06, 0x00, 0x00, } func (this *ConsensusParams) Equal(that interface{}) bool { @@ -451,6 +657,12 @@ func (this *ConsensusParams) Equal(that interface{}) bool { if !this.Version.Equal(that1.Version) { return false } + if !this.Synchrony.Equal(that1.Synchrony) { + return false + } + if !this.Timeout.Equal(that1.Timeout) { + return false + } return true } func (this *BlockParams) Equal(that interface{}) bool { @@ -590,6 +802,114 @@ func (this *HashedParams) Equal(that interface{}) bool { } return true } +func (this *SynchronyParams) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SynchronyParams) + if !ok { + that2, ok := that.(SynchronyParams) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.MessageDelay != nil && that1.MessageDelay != nil { + if *this.MessageDelay != *that1.MessageDelay { + return false + } + } else if this.MessageDelay != nil { + return false + } else if that1.MessageDelay != nil { + return false + } + if this.Precision != nil && that1.Precision != nil { + if *this.Precision != *that1.Precision { + return false + } + } else if this.Precision != nil { + return false + } else if that1.Precision != nil { + return false + } + return true +} +func (this *TimeoutParams) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*TimeoutParams) + if !ok { + that2, ok := that.(TimeoutParams) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Propose != nil && that1.Propose != nil { + if *this.Propose != *that1.Propose { + return false + } + } else if this.Propose != nil { + return false + } else if that1.Propose != nil { + return false + } + if this.ProposeDelta != nil && that1.ProposeDelta != nil { + if *this.ProposeDelta != *that1.ProposeDelta { + return false + } + } else if this.ProposeDelta != nil { + return false + } else if that1.ProposeDelta != nil { + return false + } + if this.Vote != nil && that1.Vote != nil { + if *this.Vote != *that1.Vote { + return false + } + } else if this.Vote != nil { + return false + } else if that1.Vote != nil { + return false + } + if this.VoteDelta != nil && that1.VoteDelta != nil { + if *this.VoteDelta != *that1.VoteDelta { + return false + } + } else if this.VoteDelta != nil { + return false + } else if that1.VoteDelta != nil { + return false + } + if this.Commit != nil && that1.Commit != nil { + if *this.Commit != *that1.Commit { + return false + } + } else if this.Commit != nil { + return false + } else if that1.Commit != nil { + return false + } + if this.BypassCommitTimeout != that1.BypassCommitTimeout { + return false + } + return true +} func (m *ConsensusParams) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -610,6 +930,30 @@ func (m *ConsensusParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Timeout != nil { + { + size, err := m.Timeout.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintParams(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.Synchrony != nil { + { + size, err := m.Synchrony.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size 
+ i = encodeVarintParams(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } if m.Version != nil { { size, err := m.Version.MarshalToSizedBuffer(dAtA[:i]) @@ -719,12 +1063,12 @@ func (m *EvidenceParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x18 } - n5, err5 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.MaxAgeDuration, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.MaxAgeDuration):]) - if err5 != nil { - return 0, err5 + n7, err7 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.MaxAgeDuration, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.MaxAgeDuration):]) + if err7 != nil { + return 0, err7 } - i -= n5 - i = encodeVarintParams(dAtA, i, uint64(n5)) + i -= n7 + i = encodeVarintParams(dAtA, i, uint64(n7)) i-- dAtA[i] = 0x12 if m.MaxAgeNumBlocks != 0 { @@ -828,63 +1172,197 @@ func (m *HashedParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func encodeVarintParams(dAtA []byte, offset int, v uint64) int { - offset -= sovParams(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *SynchronyParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - dAtA[offset] = uint8(v) - return base + return dAtA[:n], nil } -func (m *ConsensusParams) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Block != nil { - l = m.Block.Size() - n += 1 + l + sovParams(uint64(l)) - } - if m.Evidence != nil { - l = m.Evidence.Size() - n += 1 + l + sovParams(uint64(l)) - } - if m.Validator != nil { - l = m.Validator.Size() - n += 1 + l + sovParams(uint64(l)) - } - if m.Version != nil { - l = m.Version.Size() - n += 1 + l + sovParams(uint64(l)) - } - return n + +func (m *SynchronyParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *BlockParams) Size() (n int) { - if m == nil { - return 0 - } +func (m *SynchronyParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.MaxBytes != 0 { - n += 1 + sovParams(uint64(m.MaxBytes)) + if m.Precision != nil { + n8, err8 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.Precision, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.Precision):]) + if err8 != nil { + return 0, err8 + } + i -= n8 + i = encodeVarintParams(dAtA, i, uint64(n8)) + i-- + dAtA[i] = 0x12 } - if m.MaxGas != 0 { - n += 1 + sovParams(uint64(m.MaxGas)) + if m.MessageDelay != nil { + n9, err9 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.MessageDelay, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.MessageDelay):]) + if err9 != nil { + return 0, err9 + } + i -= n9 + i = encodeVarintParams(dAtA, i, uint64(n9)) + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *EvidenceParams) Size() (n int) { - if m == nil { - return 0 +func (m *TimeoutParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - var l int - _ = l + return dAtA[:n], nil +} + +func (m *TimeoutParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TimeoutParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.BypassCommitTimeout { + i-- + if 
m.BypassCommitTimeout { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.Commit != nil { + n10, err10 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.Commit, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.Commit):]) + if err10 != nil { + return 0, err10 + } + i -= n10 + i = encodeVarintParams(dAtA, i, uint64(n10)) + i-- + dAtA[i] = 0x2a + } + if m.VoteDelta != nil { + n11, err11 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.VoteDelta, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.VoteDelta):]) + if err11 != nil { + return 0, err11 + } + i -= n11 + i = encodeVarintParams(dAtA, i, uint64(n11)) + i-- + dAtA[i] = 0x22 + } + if m.Vote != nil { + n12, err12 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.Vote, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.Vote):]) + if err12 != nil { + return 0, err12 + } + i -= n12 + i = encodeVarintParams(dAtA, i, uint64(n12)) + i-- + dAtA[i] = 0x1a + } + if m.ProposeDelta != nil { + n13, err13 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.ProposeDelta, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.ProposeDelta):]) + if err13 != nil { + return 0, err13 + } + i -= n13 + i = encodeVarintParams(dAtA, i, uint64(n13)) + i-- + dAtA[i] = 0x12 + } + if m.Propose != nil { + n14, err14 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.Propose, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.Propose):]) + if err14 != nil { + return 0, err14 + } + i -= n14 + i = encodeVarintParams(dAtA, i, uint64(n14)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintParams(dAtA []byte, offset int, v uint64) int { + offset -= sovParams(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ConsensusParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Block != nil { + l = m.Block.Size() + n += 1 + l + sovParams(uint64(l)) + } + if m.Evidence != nil { + l = m.Evidence.Size() + n += 1 + l + sovParams(uint64(l)) + } + if m.Validator != nil { + l = m.Validator.Size() + n += 1 + l + sovParams(uint64(l)) + } + if m.Version != nil { + l = m.Version.Size() + n += 1 + l + sovParams(uint64(l)) + } + if m.Synchrony != nil { + l = m.Synchrony.Size() + n += 1 + l + sovParams(uint64(l)) + } + if m.Timeout != nil { + l = m.Timeout.Size() + n += 1 + l + sovParams(uint64(l)) + } + return n +} + +func (m *BlockParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxBytes != 0 { + n += 1 + sovParams(uint64(m.MaxBytes)) + } + if m.MaxGas != 0 { + n += 1 + sovParams(uint64(m.MaxGas)) + } + return n +} + +func (m *EvidenceParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l if m.MaxAgeNumBlocks != 0 { n += 1 + sovParams(uint64(m.MaxAgeNumBlocks)) } @@ -938,6 +1416,55 @@ func (m *HashedParams) Size() (n int) { return n } +func (m *SynchronyParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MessageDelay != nil { + l = github_com_gogo_protobuf_types.SizeOfStdDuration(*m.MessageDelay) + n += 1 + l + sovParams(uint64(l)) + } + if m.Precision != nil { + l = github_com_gogo_protobuf_types.SizeOfStdDuration(*m.Precision) + n += 1 + l + sovParams(uint64(l)) + } + return n +} + +func (m *TimeoutParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Propose != nil { + l = 
github_com_gogo_protobuf_types.SizeOfStdDuration(*m.Propose) + n += 1 + l + sovParams(uint64(l)) + } + if m.ProposeDelta != nil { + l = github_com_gogo_protobuf_types.SizeOfStdDuration(*m.ProposeDelta) + n += 1 + l + sovParams(uint64(l)) + } + if m.Vote != nil { + l = github_com_gogo_protobuf_types.SizeOfStdDuration(*m.Vote) + n += 1 + l + sovParams(uint64(l)) + } + if m.VoteDelta != nil { + l = github_com_gogo_protobuf_types.SizeOfStdDuration(*m.VoteDelta) + n += 1 + l + sovParams(uint64(l)) + } + if m.Commit != nil { + l = github_com_gogo_protobuf_types.SizeOfStdDuration(*m.Commit) + n += 1 + l + sovParams(uint64(l)) + } + if m.BypassCommitTimeout { + n += 2 + } + return n +} + func sovParams(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -1117,6 +1644,78 @@ func (m *ConsensusParams) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Synchrony", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Synchrony == nil { + m.Synchrony = &SynchronyParams{} + } + if err := m.Synchrony.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timeout", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Timeout == nil { + m.Timeout = &TimeoutParams{} + } + if err := m.Timeout.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipParams(dAtA[iNdEx:]) @@ -1586,6 +2185,378 @@ func (m *HashedParams) Unmarshal(dAtA []byte) error { } return nil } +func (m *SynchronyParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SynchronyParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SynchronyParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MessageDelay", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + 
} + if msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MessageDelay == nil { + m.MessageDelay = new(time.Duration) + } + if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(m.MessageDelay, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Precision", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Precision == nil { + m.Precision = new(time.Duration) + } + if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(m.Precision, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipParams(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TimeoutParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TimeoutParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TimeoutParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Propose", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Propose == nil { + m.Propose = new(time.Duration) + } + if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(m.Propose, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProposeDelta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.ProposeDelta == nil { + m.ProposeDelta = new(time.Duration) + } + if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(m.ProposeDelta, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Vote == nil { + m.Vote = new(time.Duration) + } + if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(m.Vote, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VoteDelta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VoteDelta == nil { + m.VoteDelta = new(time.Duration) + } + if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(m.VoteDelta, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Commit == nil { + m.Commit = new(time.Duration) + } + if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(m.Commit, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BypassCommitTimeout", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.BypassCommitTimeout = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipParams(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipParams(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/proto/tendermint/types/params.proto b/proto/tendermint/types/params.proto index cc926b64e0..466ba464fe 100644 --- a/proto/tendermint/types/params.proto +++ 
b/proto/tendermint/types/params.proto @@ -15,6 +15,8 @@ message ConsensusParams { EvidenceParams evidence = 2; ValidatorParams validator = 3; VersionParams version = 4; + SynchronyParams synchrony = 5; + TimeoutParams timeout = 6; } // BlockParams contains limits on the block size. @@ -43,8 +45,8 @@ message EvidenceParams { google.protobuf.Duration max_age_duration = 2 [(gogoproto.nullable) = false, (gogoproto.stdduration) = true]; - // This sets the maximum size of total evidence in bytes that can be committed in a single block. - // and should fall comfortably under the max block bytes. + // This sets the maximum size of total evidence in bytes that can be committed + // in a single block, and should fall comfortably under the max block bytes. // Default is 1048576 or 1MB int64 max_bytes = 3; } @@ -67,3 +69,61 @@ message HashedParams { int64 block_max_bytes = 1; int64 block_max_gas = 2; } + +// SynchronyParams configure the bounds under which a proposed block's timestamp is considered valid. +// These parameters are part of the proposer-based timestamps algorithm. For more information, +// see the specification of proposer-based timestamps: +// https://github.com/tendermint/tendermint/tree/master/spec/consensus/proposer-based-timestamp +message SynchronyParams { + // message_delay bounds how long a proposal message may take to reach all validators on a network + // and still be considered valid. + google.protobuf.Duration message_delay = 1 [(gogoproto.stdduration) = true]; + // precision bounds how skewed a proposer's clock may be from any validator + // on the network while still producing valid proposals. + google.protobuf.Duration precision = 2 [(gogoproto.stdduration) = true]; +} + +// TimeoutParams configure the timeouts for the steps of the Tendermint consensus algorithm. +message TimeoutParams { + // These fields configure the timeouts for the propose step of the Tendermint + // consensus algorithm: propose is the initial timeout and propose_delta + // determines how much the timeout grows in subsequent rounds. + // For the first round, this propose timeout is used and for every subsequent + // round, the timeout grows by propose_delta. + // + // For example: + // With propose = 10ms, propose_delta = 5ms, the first round's propose phase + // timeout would be 10ms, the second round's would be 15ms, the third 20ms and so on. + // + // If a node waiting for a proposal message does not receive one matching its + // current height and round before this timeout, the node will issue a + // nil prevote for the round and advance to the next step. + google.protobuf.Duration propose = 1 [(gogoproto.stdduration) = true]; + google.protobuf.Duration propose_delta = 2 [(gogoproto.stdduration) = true]; + + // vote along with vote_delta configure the timeout for both of the prevote and + // precommit steps of the Tendermint consensus algorithm. + // + // These parameters influence the vote step timeouts in the same way that + // the propose and propose_delta parameters do for the proposal step. + // + // The vote timeout does not begin until a quorum of votes has been received. Once + // a quorum of votes has been seen and this timeout elapses, Tendermint will + // proceed to the next step of the consensus algorithm. If Tendermint receives + // all of the remaining votes before the end of the timeout, it will proceed + // to the next step immediately.
+ google.protobuf.Duration vote = 3 [(gogoproto.stdduration) = true]; + google.protobuf.Duration vote_delta = 4 [(gogoproto.stdduration) = true]; + + // commit configures how long Tendermint will wait after receiving a quorum of + // precommits before beginning consensus for the next height. This can be + // used to allow slow precommits to arrive for inclusion in the next height before progressing. + google.protobuf.Duration commit = 5 [(gogoproto.stdduration) = true]; + + // bypass_commit_timeout configures the node to proceed immediately to + // the next height once the node has received all precommits for a block, forgoing + // the remaining commit timeout. + // Setting bypass_commit_timeout false (the default) causes Tendermint to wait + // for the full commit timeout. + bool bypass_commit_timeout = 6; +} diff --git a/proto/tendermint/types/types.pb.go b/proto/tendermint/types/types.pb.go index 01862bc857..f3065c2661 100644 --- a/proto/tendermint/types/types.pb.go +++ b/proto/tendermint/types/types.pb.go @@ -30,7 +30,7 @@ var _ = time.Kitchen // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -// BlockIdFlag indicates which BlcokID the signature is for +// BlockIdFlag indicates which BlockID the signature is for type BlockIDFlag int32 const ( @@ -608,6 +608,12 @@ type Vote struct { ValidatorIndex int32 `protobuf:"varint,7,opt,name=validator_index,json=validatorIndex,proto3" json:"validator_index,omitempty"` BlockSignature []byte `protobuf:"bytes,8,opt,name=block_signature,json=blockSignature,proto3" json:"block_signature,omitempty"` StateSignature []byte `protobuf:"bytes,10,opt,name=state_signature,json=stateSignature,proto3" json:"state_signature,omitempty"` + // Vote extension provided by the application. Only valid for precommit + // messages. + Extension []byte `protobuf:"bytes,11,opt,name=extension,proto3" json:"extension,omitempty"` + // Vote extension signature by the validator if they participated in + // consensus for the associated block. Only valid for precommit messages. + ExtensionSignature []byte `protobuf:"bytes,12,opt,name=extension_signature,json=extensionSignature,proto3" json:"extension_signature,omitempty"` } func (m *Vote) Reset() { *m = Vote{} } @@ -699,7 +705,22 @@ func (m *Vote) GetStateSignature() []byte { return nil } -// Commit contains the evidence that a block was committed by a set of validators. +func (m *Vote) GetExtension() []byte { + if m != nil { + return m.Extension + } + return nil +} + +func (m *Vote) GetExtensionSignature() []byte { + if m != nil { + return m.ExtensionSignature + } + return nil +} + +// Commit contains the evidence that a block was committed by a set of +// validators. type Commit struct { Height int64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` Round int32 `protobuf:"varint,2,opt,name=round,proto3" json:"round,omitempty"` @@ -1072,7 +1093,8 @@ func (m *BlockMeta) GetHasCoreChainLock() bool { return false } -// TxProof represents a Merkle proof of the presence of a transaction in the Merkle tree. +// TxProof represents a Merkle proof of the presence of a transaction in the +// Merkle tree. 
type TxProof struct { RootHash []byte `protobuf:"bytes,1,opt,name=root_hash,json=rootHash,proto3" json:"root_hash,omitempty"` Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` @@ -1155,101 +1177,103 @@ func init() { func init() { proto.RegisterFile("tendermint/types/types.proto", fileDescriptor_d3a6e55e2345de56) } var fileDescriptor_d3a6e55e2345de56 = []byte{ - // 1491 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0xcf, 0x6f, 0x1b, 0x45, - 0x14, 0xce, 0xc6, 0x4e, 0x6c, 0x3f, 0xdb, 0xb1, 0xb3, 0xa4, 0xad, 0xe3, 0xb6, 0x8e, 0xb5, 0x88, - 0x36, 0x44, 0xe0, 0x94, 0x16, 0x51, 0xa8, 0xc4, 0x21, 0x76, 0xdc, 0xd6, 0x6a, 0x7e, 0x98, 0xb5, - 0x1b, 0x04, 0x97, 0xd5, 0xc6, 0x3b, 0xb1, 0x97, 0xda, 0x3b, 0xcb, 0xee, 0x38, 0x24, 0xbd, 0x72, - 0x00, 0xe5, 0xd4, 0x13, 0xb7, 0x48, 0x48, 0x70, 0xe0, 0x4f, 0xe9, 0xb1, 0xb7, 0x72, 0xa1, 0xa0, - 0xf4, 0xc2, 0x81, 0x0b, 0xff, 0x01, 0x9a, 0x37, 0xb3, 0xde, 0xb5, 0x93, 0x40, 0x15, 0x71, 0xb1, - 0x76, 0xde, 0xfb, 0xde, 0xcc, 0x9b, 0xef, 0x7d, 0xf3, 0x66, 0x0c, 0xd7, 0x18, 0x71, 0x2c, 0xe2, - 0x0d, 0x6c, 0x87, 0xad, 0xb2, 0x43, 0x97, 0xf8, 0xe2, 0xb7, 0xe2, 0x7a, 0x94, 0x51, 0x35, 0x1f, - 0x7a, 0x2b, 0x68, 0x2f, 0x2e, 0x74, 0x69, 0x97, 0xa2, 0x73, 0x95, 0x7f, 0x09, 0x5c, 0x71, 0xa9, - 0x4b, 0x69, 0xb7, 0x4f, 0x56, 0x71, 0xb4, 0x3b, 0xdc, 0x5b, 0x65, 0xf6, 0x80, 0xf8, 0xcc, 0x1c, - 0xb8, 0x12, 0x70, 0x3d, 0xb2, 0x4c, 0xc7, 0x3b, 0x74, 0x19, 0xe5, 0x58, 0xba, 0x27, 0xdd, 0xa5, - 0x88, 0x7b, 0x9f, 0x78, 0xbe, 0x4d, 0x9d, 0x68, 0x1e, 0xc5, 0xf2, 0xa9, 0x2c, 0xf7, 0xcd, 0xbe, - 0x6d, 0x99, 0x8c, 0x7a, 0x02, 0xa1, 0x7d, 0x02, 0xd9, 0xa6, 0xe9, 0xb1, 0x16, 0x61, 0x0f, 0x89, - 0x69, 0x11, 0x4f, 0x5d, 0x80, 0x19, 0x46, 0x99, 0xd9, 0x2f, 0x28, 0x65, 0x65, 0x39, 0xab, 0x8b, - 0x81, 0xaa, 0x42, 0xbc, 0x67, 0xfa, 0xbd, 0xc2, 0x74, 0x59, 0x59, 0xce, 0xe8, 0xf8, 0xad, 0xf5, - 0x20, 0xce, 0x43, 0x79, 0x84, 0xed, 0x58, 0xe4, 0x20, 0x88, 0xc0, 0x01, 0xb7, 0xee, 0x1e, 0x32, - 0xe2, 0xcb, 0x10, 0x31, 0x50, 0x3f, 0x84, 0x19, 0xcc, 0xbf, 0x10, 0x2b, 0x2b, 0xcb, 0xe9, 0xdb, - 0x85, 0x4a, 0x84, 0x28, 0xb1, 0xbf, 0x4a, 0x93, 0xfb, 0xab, 0xf1, 0xe7, 0xaf, 0x96, 0xa6, 0x74, - 0x01, 0xd6, 0xfa, 0x90, 0xa8, 0xf6, 0x69, 0xe7, 0x49, 0x63, 0x7d, 0x94, 0x88, 0x12, 0x26, 0xa2, - 0x6e, 0x42, 0xce, 0x35, 0x3d, 0x66, 0xf8, 0x84, 0x19, 0x3d, 0xdc, 0x05, 0x2e, 0x9a, 0xbe, 0xbd, - 0x54, 0x99, 0xac, 0x43, 0x65, 0x6c, 0xb3, 0x72, 0x95, 0xac, 0x1b, 0x35, 0x6a, 0x75, 0x48, 0xb4, - 0x98, 0xc9, 0x48, 0x63, 0x5d, 0xd5, 0x20, 0xdb, 0x37, 0x7d, 0x66, 0x98, 0xae, 0x6b, 0x44, 0x96, - 0x4d, 0x73, 0xe3, 0x9a, 0xeb, 0x3e, 0xe4, 0xab, 0x5f, 0x86, 0xd9, 0x1e, 0xb1, 0xbb, 0x3d, 0x86, - 0x8b, 0xc6, 0x74, 0x39, 0xd2, 0x5e, 0xce, 0xc0, 0xac, 0xe4, 0xf4, 0x53, 0x48, 0xc8, 0xea, 0xe0, - 0x04, 0xe9, 0xdb, 0xd7, 0xa3, 0x89, 0x49, 0x57, 0xa5, 0x46, 0x1d, 0x9f, 0x38, 0xfe, 0xd0, 0x97, - 0x69, 0x05, 0x31, 0xea, 0x0d, 0x48, 0x76, 0x7a, 0xa6, 0xed, 0x18, 0xb6, 0x85, 0x6b, 0xa4, 0xaa, - 0xe9, 0x93, 0x57, 0x4b, 0x89, 0x1a, 0xb7, 0x35, 0xd6, 0xf5, 0x04, 0x3a, 0x1b, 0x56, 0x24, 0x93, - 0x58, 0x34, 0x13, 0xf5, 0x2e, 0x14, 0x3a, 0xd4, 0x23, 0x86, 0x98, 0x84, 0x13, 0x49, 0x2c, 0x43, - 0x22, 0x2d, 0xac, 0xd9, 0x25, 0xee, 0xc7, 0xf9, 0x36, 0xd0, 0xfb, 0x50, 0x04, 0x7e, 0x0c, 0x71, - 0x2e, 0xc8, 0x42, 0x1c, 0x93, 0x2e, 0x56, 0x84, 0x5a, 0x2b, 0x81, 0x5a, 0x2b, 0xed, 0x40, 0xad, - 0xd5, 0x24, 0xcf, 0xf8, 0xd9, 0xef, 0x4b, 0x8a, 0x8e, 0x11, 0x6a, 0x4d, 0x12, 0xb7, 0xcb, 0x57, - 0xe3, 0x79, 0xcf, 0xe0, 0x14, 0x8b, 0xa7, 0x0b, 0x22, 0x0b, 0x2b, 0xf7, 0x8c, 
0xcc, 0x0a, 0x93, - 0xa5, 0x2e, 0x43, 0x1e, 0x27, 0xe9, 0xd0, 0xc1, 0xc0, 0x66, 0xa2, 0x00, 0xb3, 0x58, 0x80, 0x39, - 0x6e, 0xaf, 0xa1, 0x19, 0x6b, 0x70, 0x15, 0x52, 0x96, 0xc9, 0x4c, 0x01, 0x49, 0x20, 0x24, 0xc9, - 0x0d, 0xe8, 0xbc, 0x09, 0xb9, 0x91, 0xea, 0x7d, 0x01, 0x49, 0x8a, 0x59, 0x42, 0x33, 0x02, 0x6f, - 0xc1, 0x82, 0x43, 0x0e, 0x98, 0x31, 0x89, 0x4e, 0x21, 0x5a, 0xe5, 0xbe, 0x9d, 0xf1, 0x88, 0x77, - 0x60, 0xae, 0x13, 0x54, 0x4d, 0x60, 0x01, 0xb1, 0xd9, 0x91, 0x15, 0x61, 0x8b, 0x90, 0x1c, 0x29, - 0x28, 0x8d, 0x80, 0x84, 0x29, 0xd5, 0xb3, 0x02, 0xf3, 0xb8, 0x47, 0x8f, 0xf8, 0xc3, 0x3e, 0x93, - 0x93, 0x64, 0x10, 0x93, 0xe3, 0x0e, 0x5d, 0xd8, 0x11, 0xfb, 0x36, 0x64, 0xc9, 0xbe, 0x6d, 0x11, - 0xa7, 0x43, 0x04, 0x2e, 0x8b, 0xb8, 0x4c, 0x60, 0x44, 0xd0, 0x2a, 0x2c, 0xb8, 0x1e, 0x75, 0xa9, - 0x4f, 0x3c, 0xc3, 0xf5, 0xa8, 0xc1, 0x0e, 0x04, 0x96, 0x20, 0x76, 0x3e, 0xf0, 0x35, 0x3d, 0xda, - 0x3e, 0x08, 0x76, 0x2d, 0x8d, 0x16, 0xea, 0x3c, 0x50, 0xea, 0x5e, 0x59, 0x59, 0x8e, 0xeb, 0x6a, - 0xe0, 0x5b, 0x73, 0xdd, 0x1d, 0xe1, 0xd1, 0xbe, 0x53, 0x20, 0x5b, 0x8b, 0x0a, 0x86, 0xef, 0x02, - 0x15, 0x26, 0xca, 0x2d, 0xa5, 0x25, 0xda, 0x41, 0x8e, 0x3b, 0xb0, 0xa2, 0x52, 0x54, 0x37, 0x20, - 0x17, 0xc5, 0x86, 0x5d, 0x25, 0x1b, 0x22, 0x79, 0x5e, 0xd7, 0x20, 0xe5, 0xdb, 0x5d, 0xc7, 0x64, - 0x43, 0x8f, 0xa0, 0xa0, 0x33, 0x7a, 0x68, 0xb8, 0x17, 0xff, 0xf3, 0xc7, 0x25, 0x45, 0x2b, 0x40, - 0x7c, 0xdd, 0x64, 0xa6, 0x9a, 0x87, 0x18, 0x3b, 0xf0, 0x0b, 0x4a, 0x39, 0xb6, 0x9c, 0xd1, 0xf9, - 0xa7, 0xf6, 0x6a, 0x1a, 0xe2, 0x3b, 0x94, 0x11, 0xf5, 0x0e, 0xc4, 0xb9, 0xd0, 0x30, 0x9b, 0xb9, - 0xb3, 0x3a, 0x42, 0xcb, 0xee, 0x3a, 0xc4, 0xda, 0xf4, 0xbb, 0xed, 0x43, 0x97, 0xe8, 0x08, 0x3e, - 0xef, 0x4c, 0xf3, 0xa6, 0xe6, 0xd1, 0xa1, 0x63, 0x61, 0x3e, 0x33, 0xba, 0x18, 0xa8, 0x75, 0x48, - 0x8e, 0x74, 0x1e, 0xff, 0x2f, 0x9d, 0xe7, 0xb8, 0xce, 0xf9, 0xf1, 0x95, 0x06, 0x3d, 0xb1, 0x2b, - 0xe5, 0xfe, 0x01, 0x5c, 0x1a, 0x29, 0x6f, 0xac, 0x74, 0x42, 0xf3, 0xea, 0xc8, 0x19, 0xd6, 0x2e, - 0x2a, 0x6d, 0x43, 0x34, 0xe1, 0x04, 0x66, 0x16, 0x4a, 0xbb, 0x81, 0xdd, 0xf8, 0x26, 0xe4, 0x44, - 0x8a, 0x21, 0xa5, 0xf2, 0x0c, 0xa0, 0xb9, 0x15, 0x58, 0x39, 0xd0, 0xe7, 0xcd, 0x2f, 0x02, 0x14, - 0x92, 0x9e, 0x43, 0xf3, 0x08, 0xa8, 0xfd, 0x36, 0x0d, 0xb3, 0xe2, 0x04, 0x46, 0xd8, 0x52, 0xce, - 0x66, 0x6b, 0xfa, 0x3c, 0xb6, 0x62, 0x17, 0x67, 0xab, 0x0e, 0x49, 0x91, 0xa8, 0x6d, 0x61, 0x13, - 0x3b, 0x73, 0x1a, 0xd9, 0xc7, 0xc3, 0x69, 0xa4, 0x41, 0x4f, 0x60, 0x6c, 0xc3, 0x52, 0x97, 0x20, - 0xfd, 0xf5, 0x90, 0x7a, 0xc3, 0x41, 0xf4, 0x94, 0x80, 0x30, 0x21, 0xc5, 0xf7, 0x60, 0x91, 0xf5, - 0x3c, 0xe2, 0xf7, 0x68, 0xdf, 0x32, 0x26, 0x39, 0xdc, 0x43, 0xf8, 0x95, 0x11, 0xa0, 0x3a, 0x4e, - 0xe6, 0x58, 0xec, 0x24, 0xad, 0xdd, 0x89, 0xd8, 0xd6, 0x38, 0xbf, 0x7f, 0x4d, 0x43, 0xb2, 0x89, - 0x67, 0xcf, 0xec, 0xff, 0xbf, 0x22, 0xbe, 0xf0, 0x75, 0x70, 0xb6, 0xfa, 0xaf, 0x42, 0xca, 0xa5, - 0x7d, 0x43, 0x78, 0xe2, 0xe8, 0x49, 0xba, 0xb4, 0xaf, 0x9f, 0x2a, 0xf6, 0xcc, 0xc5, 0x8b, 0x5d, - 0x85, 0xd4, 0xe8, 0x65, 0x84, 0xc7, 0xe1, 0x4d, 0x6f, 0xa3, 0x30, 0x6c, 0xbc, 0x9f, 0x24, 0x26, - 0xfa, 0x89, 0xe6, 0x41, 0x46, 0x70, 0x28, 0xaf, 0xec, 0x5b, 0x9c, 0x3c, 0x7c, 0x4a, 0x28, 0xa7, - 0x5f, 0x2a, 0x22, 0x6d, 0x81, 0xd4, 0x25, 0x8e, 0x47, 0x88, 0x8b, 0x4a, 0x3e, 0x3e, 0xce, 0x88, - 0x10, 0xe7, 0x45, 0x97, 0x38, 0xed, 0x07, 0x05, 0x60, 0x83, 0x33, 0x8b, 0xfb, 0xe5, 0x77, 0xa6, - 0x8f, 0x29, 0x18, 0x63, 0x2b, 0x97, 0xce, 0xab, 0xb6, 0x5c, 0x3f, 0xe3, 0x47, 0xf3, 0xae, 0x41, - 0x36, 0xec, 0x08, 0x3e, 0x09, 0x92, 0x39, 0x63, 0x92, 0xd1, 0x55, 0xd6, 0x22, 0x4c, 0xcf, 0xec, - 0x47, 
0x46, 0xda, 0xdf, 0x0a, 0xa4, 0x30, 0xa7, 0x4d, 0xc2, 0xcc, 0xb1, 0x1a, 0x2a, 0x17, 0xaf, - 0xe1, 0x75, 0x80, 0xe0, 0xf8, 0x3c, 0x25, 0x52, 0x92, 0x29, 0xd9, 0x7d, 0x9e, 0x12, 0xf5, 0xa3, - 0x11, 0xe1, 0xb1, 0x7f, 0x27, 0x5c, 0xbe, 0x14, 0x02, 0xda, 0xaf, 0x40, 0xc2, 0x19, 0x0e, 0x0c, - 0xde, 0xfe, 0xe3, 0x42, 0xe6, 0xce, 0x70, 0xd0, 0x3e, 0xf0, 0xd5, 0xf7, 0xe1, 0xad, 0x9e, 0xe9, - 0x1b, 0x13, 0x52, 0x47, 0x85, 0x27, 0xf5, 0x7c, 0xcf, 0xf4, 0xc7, 0xae, 0x30, 0xed, 0x2b, 0x48, - 0xb4, 0x0f, 0xf0, 0xed, 0xc9, 0x15, 0xed, 0x51, 0xca, 0xa2, 0x2f, 0xbe, 0x24, 0x37, 0x60, 0x3f, - 0x50, 0x21, 0xce, 0x5f, 0x16, 0xc1, 0x4b, 0x98, 0x7f, 0xab, 0x95, 0x37, 0x7c, 0xd5, 0xca, 0xf7, - 0xec, 0xca, 0x4b, 0x05, 0xd2, 0x92, 0x9f, 0xfb, 0x7d, 0xb3, 0xcb, 0x3b, 0x7f, 0x75, 0x63, 0xbb, - 0xf6, 0xc8, 0x68, 0xac, 0x1b, 0xf7, 0x37, 0xd6, 0x1e, 0x18, 0x8f, 0xb7, 0x1e, 0x6d, 0x6d, 0x7f, - 0xbe, 0x95, 0x9f, 0x2a, 0x5e, 0x3e, 0x3a, 0x2e, 0xab, 0x11, 0xec, 0x63, 0xe7, 0x89, 0x43, 0xbf, - 0x71, 0xf8, 0x35, 0x3f, 0x1e, 0xb2, 0x56, 0x6d, 0xd5, 0xb7, 0xda, 0x79, 0xa5, 0x78, 0xe9, 0xe8, - 0xb8, 0x3c, 0x1f, 0x89, 0x58, 0xdb, 0xf5, 0x89, 0xc3, 0x4e, 0x07, 0xd4, 0xb6, 0x37, 0x37, 0x1b, - 0xed, 0xfc, 0xf4, 0xa9, 0x00, 0xd9, 0xd5, 0xdf, 0x85, 0xf9, 0xf1, 0x80, 0xad, 0xc6, 0x46, 0x3e, - 0x56, 0x54, 0x8f, 0x8e, 0xcb, 0x73, 0x11, 0xf4, 0x96, 0xdd, 0x2f, 0x26, 0xbf, 0xff, 0xa9, 0x34, - 0xf5, 0xcb, 0xcf, 0x25, 0x65, 0xe5, 0xdb, 0x69, 0xc8, 0x8e, 0xf5, 0x22, 0xf5, 0x3d, 0xb8, 0xd2, - 0x6a, 0x3c, 0xd8, 0xaa, 0xaf, 0x1b, 0x9b, 0xad, 0x07, 0x46, 0xfb, 0x8b, 0x66, 0x3d, 0xb2, 0xbb, - 0xdc, 0xd1, 0x71, 0x39, 0x2d, 0xb7, 0x74, 0x1e, 0xba, 0xa9, 0xd7, 0x77, 0xb6, 0xdb, 0xf5, 0xbc, - 0x22, 0xd0, 0x4d, 0x8f, 0xec, 0x53, 0x46, 0x10, 0x7d, 0x0b, 0x16, 0xcf, 0x40, 0x8f, 0x36, 0x36, - 0x7f, 0x74, 0x5c, 0xce, 0x36, 0x3d, 0x22, 0x8e, 0x1b, 0x46, 0xac, 0xc0, 0xe5, 0xc9, 0x08, 0x09, - 0x8f, 0x15, 0xe7, 0x8e, 0x8e, 0xcb, 0x50, 0x0b, 0xb1, 0x15, 0x28, 0x9c, 0x9e, 0x7d, 0xbb, 0xb9, - 0xdd, 0x5a, 0xdb, 0xc8, 0x97, 0x8b, 0xf9, 0xa3, 0xe3, 0x72, 0x26, 0x68, 0xd0, 0x1c, 0x1f, 0xb2, - 0x50, 0xfd, 0xec, 0xf9, 0x49, 0x49, 0x79, 0x71, 0x52, 0x52, 0xfe, 0x38, 0x29, 0x29, 0xcf, 0x5e, - 0x97, 0xa6, 0x5e, 0xbc, 0x2e, 0x4d, 0xfd, 0xfa, 0xba, 0x34, 0xf5, 0xe5, 0xdd, 0xae, 0xcd, 0x7a, - 0xc3, 0xdd, 0x4a, 0x87, 0x0e, 0x56, 0xa3, 0xff, 0xcd, 0xc2, 0x4f, 0xf1, 0x1f, 0x71, 0xf2, 0x7f, - 0xdb, 0xee, 0x2c, 0xda, 0xef, 0xfc, 0x13, 0x00, 0x00, 0xff, 0xff, 0x3e, 0x1d, 0xfb, 0x32, 0x78, - 0x0e, 0x00, 0x00, + // 1524 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0xcd, 0x6e, 0x1b, 0x47, + 0x12, 0xd6, 0x88, 0x94, 0x48, 0x16, 0x49, 0x91, 0x1a, 0xcb, 0x36, 0x45, 0xdb, 0x14, 0xc1, 0xc5, + 0xda, 0x5a, 0x61, 0x97, 0xf2, 0xda, 0x8b, 0xf5, 0xae, 0x81, 0x3d, 0x88, 0x14, 0x6d, 0x13, 0xd6, + 0x0f, 0x77, 0x48, 0x2b, 0x48, 0x2e, 0x83, 0x11, 0xa7, 0x45, 0x4e, 0x4c, 0x4e, 0x4f, 0x66, 0x9a, + 0x0a, 0xe5, 0x6b, 0x0e, 0x09, 0x74, 0xf2, 0x29, 0x37, 0x01, 0x01, 0x92, 0x43, 0x2e, 0x79, 0x0f, + 0x1f, 0x7d, 0x73, 0x2e, 0x71, 0x02, 0xf9, 0x92, 0x43, 0x2e, 0x79, 0x83, 0xa0, 0xab, 0x7b, 0x7e, + 0x48, 0x49, 0x89, 0x21, 0xe4, 0x42, 0x4c, 0x57, 0x7d, 0x55, 0x5d, 0x5d, 0xf5, 0x75, 0x75, 0x11, + 0x6e, 0x32, 0x62, 0x9b, 0xc4, 0x1d, 0x5a, 0x36, 0x5b, 0x67, 0x47, 0x0e, 0xf1, 0xc4, 0x6f, 0xd5, + 0x71, 0x29, 0xa3, 0x6a, 0x3e, 0xd4, 0x56, 0x51, 0x5e, 0x5c, 0xea, 0xd1, 0x1e, 0x45, 0xe5, 0x3a, + 0xff, 0x12, 0xb8, 0xe2, 0x4a, 0x8f, 0xd2, 0xde, 0x80, 0xac, 0xe3, 0x6a, 0x7f, 0x74, 0xb0, 0xce, + 0xac, 0x21, 0xf1, 0x98, 0x31, 0x74, 0x24, 0xe0, 0x56, 0x64, 
0x9b, 0xae, 0x7b, 0xe4, 0x30, 0xca, + 0xb1, 0xf4, 0x40, 0xaa, 0x4b, 0x11, 0xf5, 0x21, 0x71, 0x3d, 0x8b, 0xda, 0xd1, 0x38, 0x8a, 0xe5, + 0x33, 0x51, 0x1e, 0x1a, 0x03, 0xcb, 0x34, 0x18, 0x75, 0x05, 0xa2, 0xf2, 0x5f, 0xc8, 0xb6, 0x0c, + 0x97, 0xb5, 0x09, 0x7b, 0x42, 0x0c, 0x93, 0xb8, 0xea, 0x12, 0xcc, 0x31, 0xca, 0x8c, 0x41, 0x41, + 0x29, 0x2b, 0xab, 0x59, 0x4d, 0x2c, 0x54, 0x15, 0xe2, 0x7d, 0xc3, 0xeb, 0x17, 0x66, 0xcb, 0xca, + 0x6a, 0x46, 0xc3, 0xef, 0x4a, 0x1f, 0xe2, 0xdc, 0x94, 0x5b, 0x58, 0xb6, 0x49, 0xc6, 0xbe, 0x05, + 0x2e, 0xb8, 0x74, 0xff, 0x88, 0x11, 0x4f, 0x9a, 0x88, 0x85, 0xfa, 0x2f, 0x98, 0xc3, 0xf8, 0x0b, + 0xb1, 0xb2, 0xb2, 0x9a, 0xbe, 0x57, 0xa8, 0x46, 0x12, 0x25, 0xce, 0x57, 0x6d, 0x71, 0x7d, 0x2d, + 0xfe, 0xea, 0xed, 0xca, 0x8c, 0x26, 0xc0, 0x95, 0x01, 0x24, 0x6a, 0x03, 0xda, 0x7d, 0xde, 0xdc, + 0x0c, 0x02, 0x51, 0xc2, 0x40, 0xd4, 0x6d, 0xc8, 0x39, 0x86, 0xcb, 0x74, 0x8f, 0x30, 0xbd, 0x8f, + 0xa7, 0xc0, 0x4d, 0xd3, 0xf7, 0x56, 0xaa, 0xd3, 0x75, 0xa8, 0x4e, 0x1c, 0x56, 0xee, 0x92, 0x75, + 0xa2, 0xc2, 0x4a, 0x03, 0x12, 0x6d, 0x66, 0x30, 0xd2, 0xdc, 0x54, 0x2b, 0x90, 0x1d, 0x18, 0x1e, + 0xd3, 0x0d, 0xc7, 0xd1, 0x23, 0xdb, 0xa6, 0xb9, 0x70, 0xc3, 0x71, 0x9e, 0xf0, 0xdd, 0xaf, 0xc1, + 0x7c, 0x9f, 0x58, 0xbd, 0x3e, 0xc3, 0x4d, 0x63, 0x9a, 0x5c, 0x55, 0xde, 0xcc, 0xc1, 0xbc, 0xcc, + 0xe9, 0xff, 0x20, 0x21, 0xab, 0x83, 0x0e, 0xd2, 0xf7, 0x6e, 0x45, 0x03, 0x93, 0xaa, 0x6a, 0x9d, + 0xda, 0x1e, 0xb1, 0xbd, 0x91, 0x27, 0xc3, 0xf2, 0x6d, 0xd4, 0xdb, 0x90, 0xec, 0xf6, 0x0d, 0xcb, + 0xd6, 0x2d, 0x13, 0xf7, 0x48, 0xd5, 0xd2, 0xa7, 0x6f, 0x57, 0x12, 0x75, 0x2e, 0x6b, 0x6e, 0x6a, + 0x09, 0x54, 0x36, 0xcd, 0x48, 0x24, 0xb1, 0x68, 0x24, 0xea, 0x03, 0x28, 0x74, 0xa9, 0x4b, 0x74, + 0xe1, 0x84, 0x27, 0x92, 0x98, 0xba, 0x44, 0x9a, 0x58, 0xb3, 0xab, 0x5c, 0x8f, 0xfe, 0xb6, 0x50, + 0xfb, 0x44, 0x18, 0xfe, 0x07, 0xe2, 0x9c, 0x90, 0x85, 0x38, 0x06, 0x5d, 0xac, 0x0a, 0xb6, 0x56, + 0x7d, 0xb6, 0x56, 0x3b, 0x3e, 0x5b, 0x6b, 0x49, 0x1e, 0xf1, 0xcb, 0x1f, 0x57, 0x14, 0x0d, 0x2d, + 0xd4, 0xba, 0x4c, 0xdc, 0x3e, 0xdf, 0x8d, 0xc7, 0x3d, 0x87, 0x2e, 0x96, 0xcf, 0x16, 0x44, 0x16, + 0x56, 0x9e, 0x19, 0x33, 0x2b, 0x44, 0xa6, 0xba, 0x0a, 0x79, 0x74, 0xd2, 0xa5, 0xc3, 0xa1, 0xc5, + 0x44, 0x01, 0xe6, 0xb1, 0x00, 0x0b, 0x5c, 0x5e, 0x47, 0x31, 0xd6, 0xe0, 0x06, 0xa4, 0x4c, 0x83, + 0x19, 0x02, 0x92, 0x40, 0x48, 0x92, 0x0b, 0x50, 0x79, 0x07, 0x72, 0x01, 0xeb, 0x3d, 0x01, 0x49, + 0x0a, 0x2f, 0xa1, 0x18, 0x81, 0x77, 0x61, 0xc9, 0x26, 0x63, 0xa6, 0x4f, 0xa3, 0x53, 0x88, 0x56, + 0xb9, 0x6e, 0x6f, 0xd2, 0xe2, 0xaf, 0xb0, 0xd0, 0xf5, 0xab, 0x26, 0xb0, 0x80, 0xd8, 0x6c, 0x20, + 0x45, 0xd8, 0x32, 0x24, 0x03, 0x06, 0xa5, 0x11, 0x90, 0x30, 0x24, 0x7b, 0xd6, 0x60, 0x11, 0xcf, + 0xe8, 0x12, 0x6f, 0x34, 0x60, 0xd2, 0x49, 0x06, 0x31, 0x39, 0xae, 0xd0, 0x84, 0x1c, 0xb1, 0x7f, + 0x81, 0x2c, 0x39, 0xb4, 0x4c, 0x62, 0x77, 0x89, 0xc0, 0x65, 0x11, 0x97, 0xf1, 0x85, 0x08, 0x5a, + 0x87, 0x25, 0xc7, 0xa5, 0x0e, 0xf5, 0x88, 0xab, 0x3b, 0x2e, 0xd5, 0xd9, 0x58, 0x60, 0x09, 0x62, + 0x17, 0x7d, 0x5d, 0xcb, 0xa5, 0x9d, 0xb1, 0x7f, 0x6a, 0x29, 0x34, 0x91, 0xe7, 0x3e, 0x53, 0x0f, + 0xca, 0xca, 0x6a, 0x5c, 0x53, 0x7d, 0xdd, 0x86, 0xe3, 0xec, 0x09, 0x4d, 0xe5, 0x73, 0x05, 0xb2, + 0xf5, 0x28, 0x61, 0xf8, 0x29, 0x90, 0x61, 0xa2, 0xdc, 0x92, 0x5a, 0xa2, 0x1d, 0xe4, 0xb8, 0x02, + 0x2b, 0x2a, 0x49, 0x75, 0x1b, 0x72, 0x51, 0x6c, 0xd8, 0x55, 0xb2, 0x21, 0x92, 0xc7, 0x75, 0x13, + 0x52, 0x9e, 0xd5, 0xb3, 0x0d, 0x36, 0x72, 0x09, 0x12, 0x3a, 0xa3, 0x85, 0x82, 0x87, 0xf1, 0x9f, + 0xbf, 0x5a, 0x51, 0x2a, 0x05, 0x88, 0x6f, 0x1a, 0xcc, 0x50, 0xf3, 0x10, 0x63, 0x63, 
0xaf, 0xa0, + 0x94, 0x63, 0xab, 0x19, 0x8d, 0x7f, 0x56, 0xbe, 0x8b, 0x41, 0x7c, 0x8f, 0x32, 0xa2, 0xde, 0x87, + 0x38, 0x27, 0x1a, 0x46, 0xb3, 0x70, 0x5e, 0x47, 0x68, 0x5b, 0x3d, 0x9b, 0x98, 0xdb, 0x5e, 0xaf, + 0x73, 0xe4, 0x10, 0x0d, 0xc1, 0x17, 0xdd, 0x69, 0xde, 0xd4, 0x5c, 0x3a, 0xb2, 0x4d, 0x8c, 0x67, + 0x4e, 0x13, 0x0b, 0xb5, 0x01, 0xc9, 0x80, 0xe7, 0xf1, 0x3f, 0xe2, 0x79, 0x8e, 0xf3, 0x9c, 0x5f, + 0x5f, 0x29, 0xd0, 0x12, 0xfb, 0x92, 0xee, 0xff, 0x84, 0xab, 0x01, 0xf3, 0x26, 0x4a, 0x27, 0x38, + 0xaf, 0x06, 0xca, 0xb0, 0x76, 0x51, 0x6a, 0xeb, 0xa2, 0x09, 0x27, 0x30, 0xb2, 0x90, 0xda, 0x4d, + 0xec, 0xc6, 0x77, 0x20, 0x27, 0x42, 0x0c, 0x53, 0x2a, 0xef, 0x00, 0x8a, 0xdb, 0xbe, 0x94, 0x03, + 0x3d, 0xde, 0xfc, 0x22, 0x40, 0x41, 0xe9, 0x05, 0x14, 0x87, 0xc0, 0x9b, 0x90, 0x22, 0x63, 0x46, + 0x6c, 0xe4, 0x8a, 0x20, 0x75, 0x28, 0x50, 0xd7, 0xe1, 0x4a, 0xb0, 0x88, 0xb8, 0x12, 0xc4, 0x56, + 0x03, 0x55, 0xe0, 0xae, 0xf2, 0xc3, 0x2c, 0xcc, 0x8b, 0x0b, 0x1d, 0x49, 0xbe, 0x72, 0x7e, 0xf2, + 0x67, 0x2f, 0x4a, 0x7e, 0xec, 0xf2, 0xc9, 0x6f, 0x40, 0x52, 0x9c, 0xdb, 0x32, 0xb1, 0x27, 0x9e, + 0xeb, 0x46, 0x3e, 0x0b, 0xa1, 0x1b, 0x29, 0xd0, 0x12, 0x68, 0xdb, 0x34, 0xd5, 0x15, 0x48, 0x7f, + 0x32, 0xa2, 0xee, 0x68, 0x18, 0xbd, 0x74, 0x20, 0x44, 0x58, 0xb1, 0x87, 0xb0, 0xcc, 0xfa, 0x2e, + 0xf1, 0xfa, 0x74, 0x60, 0xea, 0xd3, 0x25, 0x39, 0x40, 0xf8, 0xf5, 0x00, 0x50, 0x9b, 0xac, 0xcd, + 0x84, 0xed, 0x74, 0x95, 0x7a, 0x53, 0xb6, 0xed, 0x89, 0x72, 0x55, 0x7e, 0x99, 0x85, 0x64, 0x0b, + 0xaf, 0xb2, 0x31, 0xf8, 0x73, 0xef, 0xc4, 0xa5, 0x5f, 0x97, 0xf3, 0x2f, 0xd3, 0x0d, 0x48, 0x39, + 0x74, 0xa0, 0x0b, 0x4d, 0x1c, 0x35, 0x49, 0x87, 0x0e, 0xb4, 0x33, 0xc5, 0x9e, 0xbb, 0x7c, 0xb1, + 0x6b, 0x90, 0x0a, 0x06, 0x2d, 0xbc, 0x5d, 0xef, 0xfb, 0xb8, 0x85, 0x66, 0x93, 0xed, 0x29, 0x31, + 0xd5, 0x9e, 0x2a, 0x2e, 0x64, 0x44, 0x0e, 0xe5, 0x04, 0x70, 0x97, 0x27, 0x0f, 0x27, 0x13, 0xe5, + 0xec, 0xe0, 0x23, 0xc2, 0x16, 0x48, 0x4d, 0xe2, 0xb8, 0x85, 0x78, 0xf7, 0xe4, 0x2c, 0x73, 0x8e, + 0x85, 0xb8, 0x2f, 0x9a, 0xc4, 0x55, 0xbe, 0x54, 0x00, 0xb6, 0x78, 0x66, 0xf1, 0xbc, 0xfc, 0x09, + 0xf6, 0x30, 0x04, 0x7d, 0x62, 0xe7, 0xd2, 0x45, 0xd5, 0x96, 0xfb, 0x67, 0xbc, 0x68, 0xdc, 0x75, + 0xc8, 0x86, 0x0d, 0xc6, 0x23, 0x7e, 0x30, 0xe7, 0x38, 0x09, 0x5e, 0xc6, 0x36, 0x61, 0x5a, 0xe6, + 0x30, 0xb2, 0xaa, 0xfc, 0xaa, 0x40, 0x0a, 0x63, 0xda, 0x26, 0xcc, 0x98, 0xa8, 0xa1, 0x72, 0xf9, + 0x1a, 0xde, 0x02, 0xf0, 0xaf, 0xcf, 0x0b, 0x22, 0x29, 0x99, 0x92, 0xcd, 0xec, 0x05, 0x51, 0xff, + 0x1d, 0x24, 0x3c, 0xf6, 0xfb, 0x09, 0x97, 0x83, 0x87, 0x9f, 0xf6, 0xeb, 0x90, 0xb0, 0x47, 0x43, + 0x9d, 0xbf, 0x26, 0x71, 0x41, 0x73, 0x7b, 0x34, 0xec, 0x8c, 0x3d, 0xf5, 0x1f, 0x70, 0xa5, 0x6f, + 0x78, 0xfa, 0x14, 0xd5, 0x91, 0xe1, 0x49, 0x2d, 0xdf, 0x37, 0xbc, 0x89, 0x17, 0xb1, 0xf2, 0x31, + 0x24, 0x3a, 0x63, 0x1c, 0x65, 0x39, 0xa3, 0x5d, 0x4a, 0x59, 0x74, 0x80, 0x4c, 0x72, 0x01, 0xf6, + 0x03, 0x15, 0xe2, 0x7c, 0x50, 0xf1, 0x07, 0x6b, 0xfe, 0xad, 0x56, 0xdf, 0x73, 0x48, 0x96, 0xe3, + 0xf1, 0xda, 0x1b, 0x05, 0xd2, 0x32, 0x3f, 0x8f, 0x06, 0x46, 0x8f, 0x3f, 0x24, 0xb5, 0xad, 0xdd, + 0xfa, 0x53, 0xbd, 0xb9, 0xa9, 0x3f, 0xda, 0xda, 0x78, 0xac, 0x3f, 0xdb, 0x79, 0xba, 0xb3, 0xfb, + 0xc1, 0x4e, 0x7e, 0xa6, 0x78, 0xed, 0xf8, 0xa4, 0xac, 0x46, 0xb0, 0xcf, 0xec, 0xe7, 0x36, 0xfd, + 0x94, 0xf7, 0xeb, 0xa5, 0x49, 0x93, 0x8d, 0x5a, 0xbb, 0xb1, 0xd3, 0xc9, 0x2b, 0xc5, 0xab, 0xc7, + 0x27, 0xe5, 0xc5, 0x88, 0xc5, 0xc6, 0xbe, 0x47, 0x6c, 0x76, 0xd6, 0xa0, 0xbe, 0xbb, 0xbd, 0xdd, + 0xec, 0xe4, 0x67, 0xcf, 0x18, 0xc8, 0xae, 0xfe, 0x37, 0x58, 0x9c, 0x34, 0xd8, 0x69, 0x6e, 0xe5, + 0x63, 0x45, 
0xf5, 0xf8, 0xa4, 0xbc, 0x10, 0x41, 0xef, 0x58, 0x83, 0x62, 0xf2, 0x8b, 0xaf, 0x4b, + 0x33, 0xdf, 0x7e, 0x53, 0x52, 0xd6, 0x3e, 0x9b, 0x85, 0xec, 0x44, 0x2f, 0x52, 0xff, 0x0e, 0xd7, + 0xdb, 0xcd, 0xc7, 0x3b, 0x8d, 0x4d, 0x7d, 0xbb, 0xfd, 0x58, 0xef, 0x7c, 0xd8, 0x6a, 0x44, 0x4e, + 0x97, 0x3b, 0x3e, 0x29, 0xa7, 0xe5, 0x91, 0x2e, 0x42, 0xb7, 0xb4, 0xc6, 0xde, 0x6e, 0xa7, 0x91, + 0x57, 0x04, 0xba, 0xe5, 0x92, 0x43, 0xca, 0x08, 0xa2, 0xef, 0xc2, 0xf2, 0x39, 0xe8, 0xe0, 0x60, + 0x8b, 0xc7, 0x27, 0xe5, 0x6c, 0xcb, 0x25, 0xe2, 0xba, 0xa1, 0xc5, 0x1a, 0x5c, 0x9b, 0xb6, 0x90, + 0xf0, 0x58, 0x71, 0xe1, 0xf8, 0xa4, 0x0c, 0xf5, 0x10, 0x5b, 0x85, 0xc2, 0x59, 0xef, 0xbb, 0xad, + 0xdd, 0xf6, 0xc6, 0x56, 0xbe, 0x5c, 0xcc, 0x1f, 0x9f, 0x94, 0x33, 0x7e, 0x83, 0xe6, 0xf8, 0x30, + 0x0b, 0xb5, 0xff, 0xbf, 0x3a, 0x2d, 0x29, 0xaf, 0x4f, 0x4b, 0xca, 0x4f, 0xa7, 0x25, 0xe5, 0xe5, + 0xbb, 0xd2, 0xcc, 0xeb, 0x77, 0xa5, 0x99, 0xef, 0xdf, 0x95, 0x66, 0x3e, 0x7a, 0xd0, 0xb3, 0x58, + 0x7f, 0xb4, 0x5f, 0xed, 0xd2, 0xe1, 0x7a, 0xf4, 0xaf, 0x5e, 0xf8, 0x29, 0xfe, 0x72, 0x4e, 0xff, + 0x0d, 0xdc, 0x9f, 0x47, 0xf9, 0xfd, 0xdf, 0x02, 0x00, 0x00, 0xff, 0xff, 0x08, 0x23, 0xcc, 0xd7, + 0xc7, 0x0e, 0x00, 0x00, } func (this *CoreChainLock) Equal(that interface{}) bool { @@ -1673,6 +1697,20 @@ func (m *Vote) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.ExtensionSignature) > 0 { + i -= len(m.ExtensionSignature) + copy(dAtA[i:], m.ExtensionSignature) + i = encodeVarintTypes(dAtA, i, uint64(len(m.ExtensionSignature))) + i-- + dAtA[i] = 0x62 + } + if len(m.Extension) > 0 { + i -= len(m.Extension) + copy(dAtA[i:], m.Extension) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Extension))) + i-- + dAtA[i] = 0x5a + } if len(m.StateSignature) > 0 { i -= len(m.StateSignature) copy(dAtA[i:], m.StateSignature) @@ -2299,6 +2337,14 @@ func (m *Vote) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } + l = len(m.Extension) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.ExtensionSignature) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } return n } @@ -3910,6 +3956,74 @@ func (m *Vote) Unmarshal(dAtA []byte) error { m.StateSignature = []byte{} } iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extension", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Extension = append(m.Extension[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Extension == nil { + m.Extension = []byte{} + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExtensionSignature", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExtensionSignature = append(m.ExtensionSignature[:0], dAtA[iNdEx:postIndex]...) + if m.ExtensionSignature == nil { + m.ExtensionSignature = []byte{} + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) diff --git a/proto/tendermint/types/types.proto b/proto/tendermint/types/types.proto index 7426de15d0..c17d0b3c4a 100644 --- a/proto/tendermint/types/types.proto +++ b/proto/tendermint/types/types.proto @@ -9,15 +9,18 @@ import "tendermint/crypto/proof.proto"; import "tendermint/version/types.proto"; import "tendermint/types/validator.proto"; -// BlockIdFlag indicates which BlcokID the signature is for +// BlockIdFlag indicates which BlockID the signature is for enum BlockIDFlag { option (gogoproto.goproto_enum_stringer) = true; option (gogoproto.goproto_enum_prefix) = false; - BLOCK_ID_FLAG_UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "BlockIDFlagUnknown"]; - BLOCK_ID_FLAG_ABSENT = 1 [(gogoproto.enumvalue_customname) = "BlockIDFlagAbsent"]; - BLOCK_ID_FLAG_COMMIT = 2 [(gogoproto.enumvalue_customname) = "BlockIDFlagCommit"]; - BLOCK_ID_FLAG_NIL = 3 [(gogoproto.enumvalue_customname) = "BlockIDFlagNil"]; + BLOCK_ID_FLAG_UNKNOWN = 0 + [(gogoproto.enumvalue_customname) = "BlockIDFlagUnknown"]; + BLOCK_ID_FLAG_ABSENT = 1 + [(gogoproto.enumvalue_customname) = "BlockIDFlagAbsent"]; + BLOCK_ID_FLAG_COMMIT = 2 + [(gogoproto.enumvalue_customname) = "BlockIDFlagCommit"]; + BLOCK_ID_FLAG_NIL = 3 [(gogoproto.enumvalue_customname) = "BlockIDFlagNil"]; } // SignedMsgType is a type of signed message in the consensus. @@ -25,14 +28,19 @@ enum SignedMsgType { option (gogoproto.goproto_enum_stringer) = true; option (gogoproto.goproto_enum_prefix) = false; - SIGNED_MSG_TYPE_UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "UnknownType"]; + SIGNED_MSG_TYPE_UNKNOWN = 0 + [(gogoproto.enumvalue_customname) = "UnknownType"]; // Votes - SIGNED_MSG_TYPE_PREVOTE = 1 [(gogoproto.enumvalue_customname) = "PrevoteType"]; - SIGNED_MSG_TYPE_PRECOMMIT = 2 [(gogoproto.enumvalue_customname) = "PrecommitType"]; - SIGNED_MSG_TYPE_COMMIT = 3 [(gogoproto.enumvalue_customname) = "CommitType"]; + SIGNED_MSG_TYPE_PREVOTE = 1 + [(gogoproto.enumvalue_customname) = "PrevoteType"]; + SIGNED_MSG_TYPE_PRECOMMIT = 2 + [(gogoproto.enumvalue_customname) = "PrecommitType"]; + SIGNED_MSG_TYPE_COMMIT = 3 + [(gogoproto.enumvalue_customname) = "CommitType"]; // Proposals - SIGNED_MSG_TYPE_PROPOSAL = 32 [(gogoproto.enumvalue_customname) = "ProposalType"]; + SIGNED_MSG_TYPE_PROPOSAL = 32 + [(gogoproto.enumvalue_customname) = "ProposalType"]; } // PartsetHeader @@ -64,11 +72,12 @@ message StateID { // Header defines the structure of a Tendermint block header. 
message Header { // basic block info - tendermint.version.Consensus version = 1 [(gogoproto.nullable) = false]; - string chain_id = 2 [(gogoproto.customname) = "ChainID"]; - int64 height = 3; + tendermint.version.Consensus version = 1 [(gogoproto.nullable) = false]; + string chain_id = 2 [(gogoproto.customname) = "ChainID"]; + int64 height = 3; uint32 core_chain_locked_height = 100; - google.protobuf.Timestamp time = 4 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + google.protobuf.Timestamp time = 4 + [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; // prev block info BlockID last_block_id = 5 [(gogoproto.nullable) = false]; @@ -82,7 +91,8 @@ message Header { bytes next_validators_hash = 9; // validators for the next block bytes consensus_hash = 10; // consensus params for current block bytes app_hash = 11; // state after txs from the previous block - bytes last_results_hash = 12; // root hash of all results from the txs from the previous block + bytes last_results_hash = + 12; // root hash of all results from the txs from the previous block // consensus info bytes evidence_hash = 13; // evidence included in the block @@ -113,15 +123,26 @@ message Vote { SignedMsgType type = 1; int64 height = 2; int32 round = 3; - BlockID block_id = 4 - [(gogoproto.nullable) = false, (gogoproto.customname) = "BlockID"]; // zero if vote is nil. + BlockID block_id = 4 [ + (gogoproto.nullable) = false, + (gogoproto.customname) = "BlockID" + ]; // zero if vote is nil. + bytes validator_pro_tx_hash = 6; int32 validator_index = 7; bytes block_signature = 8; bytes state_signature = 10; + + // Vote extension provided by the application. Only valid for precommit + // messages. + bytes extension = 11; + // Vote extension signature by the validator if they participated in + // consensus for the associated block. Only valid for precommit messages. + bytes extension_signature = 12; } -// Commit contains the evidence that a block was committed by a set of validators. +// Commit contains the evidence that a block was committed by a set of +// validators. message Commit { int64 height = 1; int32 round = 2; @@ -138,8 +159,10 @@ message Proposal { uint32 core_chain_locked_height = 100; int32 round = 3; int32 pol_round = 4; - BlockID block_id = 5 [(gogoproto.customname) = "BlockID", (gogoproto.nullable) = false]; - google.protobuf.Timestamp timestamp = 6 [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + BlockID block_id = 5 + [(gogoproto.customname) = "BlockID", (gogoproto.nullable) = false]; + google.protobuf.Timestamp timestamp = 6 + [(gogoproto.nullable) = false, (gogoproto.stdtime) = true]; bytes signature = 7; } @@ -154,14 +177,17 @@ message LightBlock { } message BlockMeta { - BlockID block_id = 1 [(gogoproto.customname) = "BlockID", (gogoproto.nullable) = false]; - int64 block_size = 2; - Header header = 3 [(gogoproto.nullable) = false]; - int64 num_txs = 4; + BlockID block_id = 1 + [(gogoproto.customname) = "BlockID", (gogoproto.nullable) = false]; + int64 block_size = 2; + Header header = 3 [(gogoproto.nullable) = false]; + int64 num_txs = 4; + bool has_core_chain_lock = 100; } -// TxProof represents a Merkle proof of the presence of a transaction in the Merkle tree. +// TxProof represents a Merkle proof of the presence of a transaction in the +// Merkle tree. 
message TxProof { bytes root_hash = 1; bytes data = 2; diff --git a/proto/tendermint/version/types.pb.go b/proto/tendermint/version/types.pb.go index 6e224392e8..76a94fd3c0 100644 --- a/proto/tendermint/version/types.pb.go +++ b/proto/tendermint/version/types.pb.go @@ -23,9 +23,9 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -// Consensus captures the consensus rules for processing a block in the blockchain, -// including all blockchain data structures and the rules of the application's -// state transition machine. +// Consensus captures the consensus rules for processing a block in the +// blockchain, including all blockchain data structures and the rules of the +// application's state transition machine. type Consensus struct { Block uint64 `protobuf:"varint,1,opt,name=block,proto3" json:"block,omitempty"` App uint64 `protobuf:"varint,2,opt,name=app,proto3" json:"app,omitempty"` diff --git a/proto/tendermint/version/types.proto b/proto/tendermint/version/types.proto index 3c4e4cc533..37124dd4e7 100644 --- a/proto/tendermint/version/types.proto +++ b/proto/tendermint/version/types.proto @@ -5,9 +5,9 @@ option go_package = "github.com/tendermint/tendermint/proto/tendermint/version"; import "gogoproto/gogo.proto"; -// Consensus captures the consensus rules for processing a block in the blockchain, -// including all blockchain data structures and the rules of the application's -// state transition machine. +// Consensus captures the consensus rules for processing a block in the +// blockchain, including all blockchain data structures and the rules of the +// application's state transition machine. message Consensus { option (gogoproto.equal) = true; diff --git a/rpc/client/event_test.go b/rpc/client/event_test.go index 54d19a2012..e59fcc83b1 100644 --- a/rpc/client/event_test.go +++ b/rpc/client/event_test.go @@ -3,7 +3,6 @@ package client_test import ( "context" "fmt" - "reflect" "testing" "time" @@ -17,7 +16,7 @@ import ( "github.com/tendermint/tendermint/types" ) -var waitForEventTimeout = 8 * time.Second +const waitForEventTimeout = 2 * time.Second // MakeTxKV returns a text transaction, allong with expected key, value pair func MakeTxKV() ([]byte, []byte, []byte) { @@ -26,164 +25,48 @@ func MakeTxKV() ([]byte, []byte, []byte) { return k, v, append(k, append([]byte("="), v...)...) } -func TestHeaderEvents(t *testing.T) { - n, conf := NodeSuite(t) - - for i, c := range GetClients(t, n, conf) { - i, c := i, c - t.Run(reflect.TypeOf(c).String(), func(t *testing.T) { - // start for this test it if it wasn't already running - if !c.IsRunning() { - // if so, then we start it, listen, and stop it. - err := c.Start() - require.Nil(t, err, "%d: %+v", i, err) - t.Cleanup(func() { - if err := c.Stop(); err != nil { - t.Error(err) - } - }) - } - - evt, err := client.WaitForOneEvent(c, types.EventNewBlockHeaderValue, waitForEventTimeout) - require.Nil(t, err, "%d: %+v", i, err) - _, ok := evt.(types.EventDataNewBlockHeader) - require.True(t, ok, "%d: %#v", i, evt) - // TODO: more checks... - }) - } -} - -// subscribe to new blocks and make sure height increments by 1 -func TestBlockEvents(t *testing.T) { - n, conf := NodeSuite(t) - for _, c := range GetClients(t, n, conf) { - c := c - t.Run(reflect.TypeOf(c).String(), func(t *testing.T) { - - // start for this test it if it wasn't already running - if !c.IsRunning() { - // if so, then we start it, listen, and stop it. 
- err := c.Start() - require.Nil(t, err) - t.Cleanup(func() { - if err := c.Stop(); err != nil { - t.Error(err) - } - }) - } - - const subscriber = "TestBlockEvents" - - eventCh, err := c.Subscribe(context.Background(), subscriber, types.QueryForEvent(types.EventNewBlockValue).String()) - require.NoError(t, err) - t.Cleanup(func() { - if err := c.UnsubscribeAll(context.Background(), subscriber); err != nil { - t.Error(err) - } - }) - - var firstBlockHeight int64 - for i := int64(0); i < 3; i++ { - event := <-eventCh - blockEvent, ok := event.Data.(types.EventDataNewBlock) - require.True(t, ok) - - block := blockEvent.Block - - if firstBlockHeight == 0 { - firstBlockHeight = block.Header.Height - } - - require.Equal(t, firstBlockHeight+i, block.Header.Height) - } - }) - } -} - -func TestTxEventsSentWithBroadcastTxAsync(t *testing.T) { testTxEventsSent(t, "async") } -func TestTxEventsSentWithBroadcastTxSync(t *testing.T) { testTxEventsSent(t, "sync") } - -func testTxEventsSent(t *testing.T, broadcastMethod string) { - n, conf := NodeSuite(t) - for _, c := range GetClients(t, n, conf) { - c := c - t.Run(reflect.TypeOf(c).String(), func(t *testing.T) { - - // start for this test it if it wasn't already running - if !c.IsRunning() { - // if so, then we start it, listen, and stop it. - err := c.Start() - require.Nil(t, err) - t.Cleanup(func() { - if err := c.Stop(); err != nil { - t.Error(err) - } - }) - } - - // make the tx - _, _, tx := MakeTxKV() - - // send - go func() { - var ( - txres *coretypes.ResultBroadcastTx - err error - ctx = context.Background() - ) - switch broadcastMethod { - case "async": - txres, err = c.BroadcastTxAsync(ctx, tx) - case "sync": - txres, err = c.BroadcastTxSync(ctx, tx) - default: - panic(fmt.Sprintf("Unknown broadcastMethod %s", broadcastMethod)) - } - if assert.NoError(t, err) { - assert.Equal(t, txres.Code, abci.CodeTypeOK) - } - }() - - // and wait for confirmation - evt, err := client.WaitForOneEvent(c, types.EventTxValue, waitForEventTimeout) - require.Nil(t, err) - - // and make sure it has the proper info - txe, ok := evt.(types.EventDataTx) - require.True(t, ok) - - // make sure this is the proper tx - require.EqualValues(t, tx, txe.Tx) - require.True(t, txe.Result.IsOK()) - }) - } -} - -// Test HTTPClient resubscribes upon disconnect && subscription error. -// Test Local client resubscribes upon subscription error. 
-func TestClientsResubscribe(t *testing.T) {
- // TODO(melekes)
-}
-
-func TestHTTPReturnsErrorIfClientIsNotRunning(t *testing.T) {
- ctx, cancel := context.WithCancel(context.Background())
+func testTxEventsSent(ctx context.Context, t *testing.T, broadcastMethod string, c client.Client) {
+ t.Helper()
+ // make the tx
+ _, _, tx := MakeTxKV()
+
+ // send
+ done := make(chan struct{})
+ go func() {
+ defer close(done)
+ var (
+ txres *coretypes.ResultBroadcastTx
+ err error
+ )
+ switch broadcastMethod {
+ case "async":
+ txres, err = c.BroadcastTxAsync(ctx, tx)
+ case "sync":
+ txres, err = c.BroadcastTxSync(ctx, tx)
+ default:
+ require.FailNowf(t, "Unknown broadcastMethod %s", broadcastMethod)
+ }
+ if assert.NoError(t, err) {
+ assert.Equal(t, txres.Code, abci.CodeTypeOK)
+ }
+ }()
+
+ // and wait for confirmation
+ ectx, cancel := context.WithTimeout(ctx, waitForEventTimeout)
 defer cancel()
- _, conf := NodeSuite(t)
-
- c := getHTTPClient(t, conf)
-
- // on Subscribe
- _, err := c.Subscribe(ctx, "TestHeaderEvents",
- types.QueryForEvent(types.EventNewBlockHeaderValue).String())
- assert.Error(t, err)
+ // Wait for the transaction we sent to be confirmed.
+ query := fmt.Sprintf(`tm.event = '%s' AND tx.hash = '%X'`,
+ types.EventTxValue, types.Tx(tx).Hash())
+ evt, err := client.WaitForOneEvent(ectx, c, query)
+ require.NoError(t, err)

- // on Unsubscribe
- err = c.Unsubscribe(ctx, "TestHeaderEvents",
- types.QueryForEvent(types.EventNewBlockHeaderValue).String())
- assert.Error(t, err)
+ // and make sure it has the proper info
+ txe, ok := evt.(types.EventDataTx)
+ require.True(t, ok)

- // on UnsubscribeAll
- err = c.UnsubscribeAll(ctx, "TestHeaderEvents")
- assert.Error(t, err)
+ // make sure this is the proper tx
+ require.EqualValues(t, tx, txe.Tx)
+ require.True(t, txe.Result.IsOK())
+ <-done
 }
diff --git a/rpc/client/eventstream/eventstream.go b/rpc/client/eventstream/eventstream.go
new file mode 100644
index 0000000000..59cfc8b5f0
--- /dev/null
+++ b/rpc/client/eventstream/eventstream.go
@@ -0,0 +1,193 @@
+// Package eventstream implements a convenience client for the Events method
+// of the Tendermint RPC service, allowing clients to observe a resumable
+// stream of events matching a query.
+package eventstream
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/tendermint/tendermint/rpc/coretypes"
+)
+
+// Client is the subset of the RPC client interface consumed by Stream.
+type Client interface {
+ Events(ctx context.Context, req *coretypes.RequestEvents) (*coretypes.ResultEvents, error)
+}
+
+// ErrStopRunning is returned by a Run callback to signal that no more events
+// are wanted and that Run should return.
+var ErrStopRunning = errors.New("stop accepting events")
+
+// A Stream captures the state of a streaming event subscription.
+type Stream struct {
+ filter *coretypes.EventFilter // the query being streamed
+ batchSize int // request batch size
+ newestSeen string // from the latest item matching our query
+ waitTime time.Duration // the long-polling interval
+ client Client
+}
+
+// New constructs a new stream for the given query and options.
+// If opts == nil, the stream uses default values as described by
+// StreamOptions. This function will panic if cli == nil.
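+//
+// As a hedged illustration only (cli, ctx, and the query string here are
+// assumed names, not part of this change), a caller might use the stream
+// like so:
+//
+//	s := eventstream.New(cli, `tm.event = 'NewBlock'`, &eventstream.StreamOptions{
+//		BatchSize: 32,
+//	})
+//	err := s.Run(ctx, func(itm *coretypes.EventItem) error {
+//		fmt.Println("event at cursor", itm.Cursor)
+//		return nil // or ErrStopRunning to stop cleanly
+//	})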
+func New(cli Client, query string, opts *StreamOptions) *Stream { + if cli == nil { + panic("eventstream: nil client") + } + return &Stream{ + filter: &coretypes.EventFilter{Query: query}, + batchSize: opts.batchSize(), + newestSeen: opts.resumeFrom(), + waitTime: opts.waitTime(), + client: cli, + } +} + +// Run polls the service for events matching the query, and calls accept for +// each such event. Run handles pagination transparently, and delivers events +// to accept in order of publication. +// +// Run continues until ctx ends or accept reports an error. If accept returns +// ErrStopRunning, Run returns nil; otherwise Run returns the error reported by +// accept or ctx. Run also returns an error if the server reports an error +// from the Events method. +// +// If the stream falls behind the event log on the server, Run will stop and +// report an error of concrete type *MissedItemsError. Call Reset to reset the +// stream to the head of the log, and call Run again to resume. +func (s *Stream) Run(ctx context.Context, accept func(*coretypes.EventItem) error) error { + for { + items, err := s.fetchPages(ctx) + if err != nil { + return err + } + + // Deliver events from the current batch to the receiver. We visit the + // batch in reverse order so the receiver sees them in forward order. + for i := len(items) - 1; i >= 0; i-- { + if err := ctx.Err(); err != nil { + return err + } + + itm := items[i] + err := accept(itm) + if itm.Cursor > s.newestSeen { + s.newestSeen = itm.Cursor // update the latest delivered + } + if errors.Is(err, ErrStopRunning) { + return nil + } else if err != nil { + return err + } + } + } +} + +// Reset updates the stream's current cursor position to the head of the log. +// This method may safely be called only when Run is not executing. +func (s *Stream) Reset() { s.newestSeen = "" } + +// fetchPages fetches the next batch of matching results. If there are multiple +// pages, all the matching pages are retrieved. An error is reported if the +// current scan position falls out of the event log window. +func (s *Stream) fetchPages(ctx context.Context) ([]*coretypes.EventItem, error) { + var pageCursor string // if non-empty, page through items before this + var items []*coretypes.EventItem + + // Fetch the next paginated batch of matching responses. + for { + rsp, err := s.client.Events(ctx, &coretypes.RequestEvents{ + Filter: s.filter, + MaxItems: s.batchSize, + After: s.newestSeen, + Before: pageCursor, + WaitTime: s.waitTime, + }) + if err != nil { + return nil, err + } + + // If the oldest item in the log is newer than our most recent item, + // it means we might have missed some events matching our query. + if s.newestSeen != "" && s.newestSeen < rsp.Oldest { + return nil, &MissedItemsError{ + Query: s.filter.Query, + NewestSeen: s.newestSeen, + OldestPresent: rsp.Oldest, + } + } + items = append(items, rsp.Items...) + + if rsp.More { + // There are more results matching this request, leave the baseline + // where it is and set the page cursor so that subsequent requests + // will get the next chunk. + pageCursor = items[len(items)-1].Cursor + } else if len(items) != 0 { + // We got everything matching so far. + return items, nil + } + } +} + +// StreamOptions are optional settings for a Stream value. A nil *StreamOptions +// is ready for use and provides default values as described. +type StreamOptions struct { + // How many items to request per call to the service. The stream may pin + // this value to a minimum default batch size. 
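+ // (For instance, given the minimum of 16 enforced by batchSize below, a
+ // requested BatchSize of 8 would be raised to 16.)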
+ BatchSize int + + // If set, resume streaming from this cursor. Typically this is set to the + // cursor of the most recently-received matching value. If empty, streaming + // begins at the head of the log (the default). + ResumeFrom string + + // Specifies the long poll interval. The stream may pin this value to a + // minimum default poll interval. + WaitTime time.Duration +} + +func (o *StreamOptions) batchSize() int { + const minBatchSize = 16 + if o == nil || o.BatchSize < minBatchSize { + return minBatchSize + } + return o.BatchSize +} + +func (o *StreamOptions) resumeFrom() string { + if o == nil { + return "" + } + return o.ResumeFrom +} + +func (o *StreamOptions) waitTime() time.Duration { + const minWaitTime = 5 * time.Second + if o == nil || o.WaitTime < minWaitTime { + return minWaitTime + } + return o.WaitTime +} + +// MissedItemsError is an error that indicates the stream missed (lost) some +// number of events matching the specified query. +type MissedItemsError struct { + // The cursor of the newest matching item the stream has observed. + NewestSeen string + + // The oldest cursor in the log at the point the miss was detected. + // Any matching events between NewestSeen and OldestPresent are lost. + OldestPresent string + + // The active query. + Query string +} + +// Error satisfies the error interface. +func (e *MissedItemsError) Error() string { + return fmt.Sprintf("missed events matching %q between %q and %q", e.Query, e.NewestSeen, e.OldestPresent) +} diff --git a/rpc/client/eventstream/eventstream_test.go b/rpc/client/eventstream/eventstream_test.go new file mode 100644 index 0000000000..8cd9df30f8 --- /dev/null +++ b/rpc/client/eventstream/eventstream_test.go @@ -0,0 +1,274 @@ +package eventstream_test + +import ( + "context" + "errors" + "fmt" + "testing" + "time" + + "github.com/fortytw2/leaktest" + "github.com/google/go-cmp/cmp" + + "github.com/tendermint/tendermint/internal/eventlog" + "github.com/tendermint/tendermint/internal/eventlog/cursor" + rpccore "github.com/tendermint/tendermint/internal/rpc/core" + "github.com/tendermint/tendermint/rpc/client/eventstream" + "github.com/tendermint/tendermint/rpc/coretypes" + "github.com/tendermint/tendermint/types" +) + +func TestStream_filterOrder(t *testing.T) { + defer leaktest.Check(t) + + s := newStreamTester(t, `tm.event = 'good'`, eventlog.LogSettings{ + WindowSize: 30 * time.Second, + }, nil) + + // Verify that events are delivered in forward time order (i.e., that the + // stream unpacks the pages correctly) and that events not matching the + // query (here, type="bad") are skipped. + // + // The minimum batch size is 16 and half the events we publish match, so we + // publish > 32 items (> 16 good) to ensure we exercise paging. + etype := [2]string{"good", "bad"} + var items []testItem + for i := 0; i < 40; i++ { + s.advance(100 * time.Millisecond) + text := fmt.Sprintf("item%d", i) + cur := s.publish(etype[i%2], text) + + // Even-numbered items match the target type. + if i%2 == 0 { + items = append(items, makeTestItem(cur, text)) + } + } + + s.start() + for _, itm := range items { + s.mustItem(t, itm) + } + s.stopWait() +} + +func TestStream_lostItem(t *testing.T) { + defer leaktest.Check(t) + + s := newStreamTester(t, ``, eventlog.LogSettings{ + WindowSize: 30 * time.Second, + }, nil) + + // Publish an item and let the client observe it. + cur := s.publish("ok", "whatever") + s.start() + s.mustItem(t, makeTestItem(cur, "whatever")) + s.stopWait() + + // Time passes, and cur expires out of the window. 
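+ // (The log window above is 30s, so advancing 50s pushes cur well past it.)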
+ s.advance(50 * time.Second)
+ next1 := s.publish("ok", "more stuff")
+ s.advance(15 * time.Second)
+ next2 := s.publish("ok", "still more stuff")
+
+ // At this point, the oldest item in the log is newer than the point at
+ // which we continued, so we should get an error.
+ s.start()
+ var missed *eventstream.MissedItemsError
+ if err := s.mustError(t); !errors.As(err, &missed) {
+ t.Errorf("Wrong error: got %v, want %T", err, missed)
+ } else {
+ t.Logf("Correctly reported missed item: %v", missed)
+ }
+
+ // If we reset the stream and continue from head, we should catch up.
+ s.stopWait()
+ s.stream.Reset()
+ s.start()
+
+ s.mustItem(t, makeTestItem(next1, "more stuff"))
+ s.mustItem(t, makeTestItem(next2, "still more stuff"))
+ s.stopWait()
+}
+
+func TestMinPollTime(t *testing.T) {
+ defer leaktest.Check(t)
+
+ s := newStreamTester(t, ``, eventlog.LogSettings{
+ WindowSize: 30 * time.Second,
+ }, nil)
+
+ s.publish("bad", "whatever")
+
+ // Waiting for an item on a log with no matching events incurs a minimum
+ // wait time and reports no events.
+ ctx := context.Background()
+ filter := &coretypes.EventFilter{Query: `tm.event = 'good'`}
+
+ t.Run("NoneMatch", func(t *testing.T) {
+ start := time.Now()
+
+ // Request a very short delay, and affirm we got the server's minimum.
+ rsp, err := s.env.Events(ctx, &coretypes.RequestEvents{
+ Filter: filter,
+ MaxItems: 1,
+ WaitTime: 10 * time.Millisecond,
+ })
+ if err != nil {
+ t.Fatalf("Events failed: %v", err)
+ } else if elapsed := time.Since(start); elapsed < time.Second {
+ t.Errorf("Events returned too quickly: got %v, wanted 1s", elapsed)
+ } else if len(rsp.Items) != 0 {
+ t.Errorf("Events returned %d items, expected none", len(rsp.Items))
+ }
+ })
+
+ s.publish("good", "whatever")
+
+ // Waiting for an available matching item incurs no delay.
+ t.Run("SomeMatch", func(t *testing.T) {
+ start := time.Now()
+
+ // Request a long-ish delay and affirm we don't block for it.
+ // Check for this by ensuring we return sooner than the minimum delay,
+ // since we don't know the exact timing.
+ rsp, err := s.env.Events(ctx, &coretypes.RequestEvents{
+ Filter: filter,
+ MaxItems: 1,
+ WaitTime: 10 * time.Second,
+ })
+ if err != nil {
+ t.Fatalf("Events failed: %v", err)
+ } else if elapsed := time.Since(start); elapsed > 500*time.Millisecond {
+ t.Errorf("Events returned too slowly: got %v, wanted immediate", elapsed)
+ } else if len(rsp.Items) == 0 {
+ t.Error("Events returned no items, wanted at least 1")
+ }
+ })
+}
+
+// testItem is a wrapper for comparing item results in a friendly output format
+// for the cmp package.
+type testItem struct {
+ Cursor string
+ Data string
+
+ // N.B. Fields exported to simplify use in cmp.
+}
+
+func makeTestItem(cur, data string) testItem {
+ return testItem{
+ Cursor: cur,
+ Data: fmt.Sprintf(`{"type":%q,"value":%q}`, types.EventDataString("").TypeTag(), data),
+ }
+}
+
+// streamTester is a simulation harness for an eventstream.Stream. It simulates
+// the production service by plumbing an event log into a stub RPC environment,
+// into which the test can publish events and advance the perceived time to
+// exercise various cases of the stream.
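+//
+// The tests above follow the same general shape: publish events, start the
+// receiver, assert on the delivered items, then stopWait to shut down.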
+type streamTester struct {
+ log *eventlog.Log
+ env *rpccore.Environment
+ clock int64
+ index int64
+ stream *eventstream.Stream
+ errc chan error
+ recv chan *coretypes.EventItem
+ stop func()
+}
+
+func newStreamTester(t *testing.T, query string, logOpts eventlog.LogSettings, streamOpts *eventstream.StreamOptions) *streamTester {
+ t.Helper()
+ s := new(streamTester)
+
+ // Plumb a time source controlled by the tester into the event log.
+ logOpts.Source = cursor.Source{
+ TimeIndex: s.timeNow,
+ }
+ lg, err := eventlog.New(logOpts)
+ if err != nil {
+ t.Fatalf("Creating event log: %v", err)
+ }
+ s.log = lg
+ s.env = &rpccore.Environment{EventLog: lg}
+ s.stream = eventstream.New(s, query, streamOpts)
+ return s
+}
+
+// start starts the stream receiver, which runs until it is terminated by
+// calling stop.
+func (s *streamTester) start() {
+ ctx, cancel := context.WithCancel(context.Background())
+ s.errc = make(chan error, 1)
+ s.recv = make(chan *coretypes.EventItem)
+ s.stop = cancel
+ go func() {
+ defer close(s.errc)
+ s.errc <- s.stream.Run(ctx, func(itm *coretypes.EventItem) error {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case s.recv <- itm:
+ return nil
+ }
+ })
+ }()
+}
+
+// publish adds a single event to the event log at the present moment.
+func (s *streamTester) publish(etype, payload string) string {
+ _ = s.log.Add(etype, types.EventDataString(payload))
+ s.index++
+ return fmt.Sprintf("%016x-%04x", s.clock, s.index)
+}
+
+// wait blocks until either an item is received or the runner stops.
+func (s *streamTester) wait() (*coretypes.EventItem, error) {
+ select {
+ case itm := <-s.recv:
+ return itm, nil
+ case err := <-s.errc:
+ return nil, err
+ }
+}
+
+// mustItem waits for an item and fails if either an error occurs or the item
+// does not match want.
+func (s *streamTester) mustItem(t *testing.T, want testItem) {
+ t.Helper()
+
+ itm, err := s.wait()
+ if err != nil {
+ t.Fatalf("Receive: got error %v, want item %v", err, want)
+ }
+ got := testItem{Cursor: itm.Cursor, Data: string(itm.Data)}
+ if diff := cmp.Diff(want, got); diff != "" {
+ t.Errorf("Item: (-want, +got)\n%s", diff)
+ }
+}
+
+// mustError waits for an error and fails if an item is returned.
+func (s *streamTester) mustError(t *testing.T) error {
+ t.Helper()
+ itm, err := s.wait()
+ if err == nil {
+ t.Fatalf("Receive: got item %v, want error", itm)
+ }
+ return err
+}
+
+// stopWait stops the runner and waits for it to terminate.
+func (s *streamTester) stopWait() { s.stop(); s.wait() } //nolint:errcheck
+
+// timeNow reports the current simulated time index.
+func (s *streamTester) timeNow() int64 { return s.clock }
+
+// advance moves the simulated time index.
+func (s *streamTester) advance(d time.Duration) { s.clock += int64(d) }
+
+// Events implements the eventstream.Client interface by delegating to a stub
+// environment as if it were a local RPC client. This works because the Events
+// method only requires the event log; the other fields are unused.
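+//
+// A compile-time assertion of this delegation (a sketch, not part of this
+// change) would be:
+//
+//	var _ eventstream.Client = (*streamTester)(nil)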
+func (s *streamTester) Events(ctx context.Context, req *coretypes.RequestEvents) (*coretypes.ResultEvents, error) { + return s.env.Events(ctx, req) +} diff --git a/rpc/client/evidence_test.go b/rpc/client/evidence_test.go index 9bf06a3862..3d240e51c8 100644 --- a/rpc/client/evidence_test.go +++ b/rpc/client/evidence_test.go @@ -1,21 +1,14 @@ package client_test import ( - "bytes" "context" "testing" "time" "github.com/dashevo/dashd-go/btcjson" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/libs" - - abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/crypto/encoding" - "github.com/tendermint/tendermint/crypto/tmhash" tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/privval" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" @@ -23,16 +16,13 @@ import ( "github.com/tendermint/tendermint/types" ) -// For some reason the empty node used in tests has a time of -// 2018-10-10 08:20:13.695936996 +0000 UTC -// this is because the test genesis time is set here -// so in order to validate evidence we need evidence to be the same time -var defaultTestTime = time.Date(2018, 10, 10, 8, 20, 13, 695936996, time.UTC) - func newEvidence(t *testing.T, val *privval.FilePV, vote *types.Vote, vote2 *types.Vote, chainID string, stateID types.StateID, - quorumType btcjson.LLMQType, quorumHash crypto.QuorumHash) *types.DuplicateVoteEvidence { + quorumType btcjson.LLMQType, + quorumHash crypto.QuorumHash, + timestamp time.Time, +) *types.DuplicateVoteEvidence { t.Helper() var err error @@ -57,7 +47,7 @@ func newEvidence(t *testing.T, val *privval.FilePV, validator := types.NewValidator(privKey.PubKey(), types.DefaultDashVotingPower, val.Key.ProTxHash, "") valSet := types.NewValidatorSet([]*types.Validator{validator}, validator.PubKey, quorumType, quorumHash, true) - ev, err := types.NewDuplicateVoteEvidence(vote, vote2, defaultTestTime, valSet) + ev, err := types.NewDuplicateVoteEvidence(vote, vote2, timestamp, valSet) require.NoError(t, err) return ev } @@ -68,6 +58,7 @@ func makeEvidences( chainID string, quorumType btcjson.LLMQType, quorumHash crypto.QuorumHash, + timestamp time.Time, ) (correct *types.DuplicateVoteEvidence, fakes []*types.DuplicateVoteEvidence) { vote := types.Vote{ ValidatorProTxHash: val.Key.ProTxHash, @@ -76,10 +67,10 @@ func makeEvidences( Round: 0, Type: tmproto.PrevoteType, BlockID: types.BlockID{ - Hash: tmhash.Sum(tmrand.Bytes(tmhash.Size)), + Hash: crypto.Checksum(tmrand.Bytes(crypto.HashSize)), PartSetHeader: types.PartSetHeader{ Total: 1000, - Hash: tmhash.Sum([]byte("partset")), + Hash: crypto.Checksum([]byte("partset")), }, }, } @@ -87,8 +78,8 @@ func makeEvidences( stateID := types.RandStateID().WithHeight(vote.Height - 1) vote2 := vote - vote2.BlockID.Hash = tmhash.Sum([]byte("blockhash2")) - correct = newEvidence(t, val, &vote, &vote2, chainID, stateID, quorumType, quorumHash) + vote2.BlockID.Hash = crypto.Checksum([]byte("blockhash2")) + correct = newEvidence(t, val, &vote, &vote2, chainID, stateID, quorumType, quorumHash, timestamp) fakes = make([]*types.DuplicateVoteEvidence, 0) @@ -96,104 +87,39 @@ func makeEvidences( { v := vote2 v.ValidatorProTxHash = []byte("some_pro_tx_hash") - fakes = append(fakes, newEvidence(t, val, &vote, &v, chainID, stateID, quorumType, quorumHash)) + fakes = append(fakes, newEvidence(t, val, &vote, &v, chainID, stateID, quorumType, quorumHash, 
timestamp)) } // different height { v := vote2 v.Height = vote.Height + 1 - fakes = append(fakes, newEvidence(t, val, &vote, &v, chainID, stateID, quorumType, quorumHash)) + fakes = append(fakes, newEvidence(t, val, &vote, &v, chainID, stateID, quorumType, quorumHash, timestamp)) } // different round { v := vote2 v.Round = vote.Round + 1 - fakes = append(fakes, newEvidence(t, val, &vote, &v, chainID, stateID, quorumType, quorumHash)) + fakes = append(fakes, newEvidence(t, val, &vote, &v, chainID, stateID, quorumType, quorumHash, timestamp)) } // different type { v := vote2 v.Type = tmproto.PrecommitType - fakes = append(fakes, newEvidence(t, val, &vote, &v, chainID, stateID, quorumType, quorumHash)) + fakes = append(fakes, newEvidence(t, val, &vote, &v, chainID, stateID, quorumType, quorumHash, timestamp)) } // exactly same vote { v := vote - fakes = append(fakes, newEvidence(t, val, &vote, &v, chainID, stateID, quorumType, quorumHash)) + fakes = append(fakes, newEvidence(t, val, &vote, &v, chainID, stateID, quorumType, quorumHash, timestamp)) } return correct, fakes } -func TestBroadcastEvidence_DuplicateVoteEvidence(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, config := NodeSuite(t) - chainID := config.ChainID() - - pv, err := privval.LoadOrGenFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile()) - require.NoError(t, err) - - for i, c := range GetClients(t, n, config) { - vals, err := c.Validators(ctx, libs.Int64Ptr(1), nil, nil, libs.BoolPtr(true)) - require.NoError(t, err) - correct, fakes := makeEvidences(t, pv, chainID, vals.QuorumType, *vals.QuorumHash) - t.Logf("client %d", i) - - // make sure that the node has produced enough blocks - waitForBlock(ctx, t, c, 2) - - result, err := c.BroadcastEvidence(ctx, correct) - require.NoError(t, err, "BroadcastEvidence(%s) failed", correct) - assert.Equal(t, correct.Hash(), result.Hash, "expected result hash to match evidence hash") - - status, err := c.Status(ctx) - require.NoError(t, err) - err = client.WaitForHeight(c, status.SyncInfo.LatestBlockHeight+2, nil) - require.NoError(t, err) - - pubKey, err := pv.GetFirstPubKey(context.Background()) - require.NoError(t, err, "private validator must have a public key") - rawpub := pubKey.Bytes() - result2, err := c.ABCIQuery(context.Background(), "/vsu", []byte{}) - require.NoError(t, err) - qres := result2.Response - require.True(t, qres.IsOK()) - - var vsu abci.ValidatorSetUpdate - err = abci.ReadMessage(bytes.NewReader(qres.Value), &vsu) - require.NoError(t, err, "Error reading query result, value %v", qres.Value) - require.Equal(t, 1, len(vsu.ValidatorUpdates)) - v := vsu.ValidatorUpdates[0] - require.Equal(t, pv.Key.ProTxHash.Bytes(), v.ProTxHash) - - pk, err := encoding.PubKeyFromProto(*v.PubKey) - require.NoError(t, err) - - require.EqualValues(t, rawpub, pk, "Stored PubKey not equal with expected, value %v", string(qres.Value)) - require.Equal(t, types.DefaultDashVotingPower, v.Power, - "Stored Power not equal with expected, value %v", string(qres.Value)) - - for _, fake := range fakes { - _, err := c.BroadcastEvidence(ctx, fake) - require.Error(t, err, "BroadcastEvidence(%s) succeeded, but the evidence was fake", fake) - } - } -} - -func TestBroadcastEmptyEvidence(t *testing.T) { - n, conf := NodeSuite(t) - for _, c := range GetClients(t, n, conf) { - _, err := c.BroadcastEvidence(context.Background(), nil) - assert.Error(t, err) - } -} - func waitForBlock(ctx context.Context, t *testing.T, c client.Client, height 
int64) { timer := time.NewTimer(0 * time.Millisecond) defer timer.Stop() diff --git a/rpc/client/examples_test.go b/rpc/client/examples_test.go index fcb6b61dd2..d919e0d74c 100644 --- a/rpc/client/examples_test.go +++ b/rpc/client/examples_test.go @@ -3,8 +3,13 @@ package client_test import ( "bytes" "context" - "fmt" "log" + "net/http" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/abci/example/kvstore" rpchttp "github.com/tendermint/tendermint/rpc/client/http" @@ -12,16 +17,14 @@ import ( rpctest "github.com/tendermint/tendermint/rpc/test" ) -func ExampleHTTP_simple() { +func TestHTTPSimple(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() // Start a tendermint node (and kvstore) in the background to test against app := kvstore.NewApplication() - conf, err := rpctest.CreateConfig("ExampleHTTP_simple") - if err != nil { - log.Fatal(err) - } + conf, err := rpctest.CreateConfig(t, "ExampleHTTP_simple") + require.NoError(t, err) _, closer, err := rpctest.StartTendermint(ctx, conf, app, rpctest.SuppressStdout) if err != nil { @@ -32,9 +35,7 @@ func ExampleHTTP_simple() { // Create our RPC client rpcAddr := conf.RPC.ListenAddress c, err := rpchttp.New(rpcAddr) - if err != nil { - log.Fatal(err) //nolint:gocritic - } + require.NoError(t, err) // Create a transaction k := []byte("name") @@ -43,49 +44,37 @@ func ExampleHTTP_simple() { // Broadcast the transaction and wait for it to commit (rather use // c.BroadcastTxSync though in production). - bres, err := c.BroadcastTxCommit(context.Background(), tx) + bres, err := c.BroadcastTxCommit(ctx, tx) + require.NoError(t, err) if err != nil { log.Fatal(err) } - if bres.CheckTx.IsErr() || bres.DeliverTx.IsErr() { + if bres.CheckTx.IsErr() || bres.TxResult.IsErr() { log.Fatal("BroadcastTxCommit transaction failed") } // Now try to fetch the value for the key - qres, err := c.ABCIQuery(context.Background(), "/key", k) - if err != nil { - log.Fatal(err) - } - if qres.Response.IsErr() { - log.Fatal("ABCIQuery failed") - } - if !bytes.Equal(qres.Response.Key, k) { - log.Fatal("returned key does not match queried key") - } - if !bytes.Equal(qres.Response.Value, v) { - log.Fatal("returned value does not match sent value") - } - - fmt.Println("Sent tx :", string(tx)) - fmt.Println("Queried for :", string(qres.Response.Key)) - fmt.Println("Got value :", string(qres.Response.Value)) - - // Output: - // Sent tx : name=satoshi - // Queried for : name - // Got value : satoshi + qres, err := c.ABCIQuery(ctx, "/key", k) + require.NoError(t, err) + require.False(t, qres.Response.IsErr(), "ABCIQuery failed") + require.True(t, bytes.Equal(qres.Response.Key, k), + "returned key does not match queried key") + require.True(t, bytes.Equal(qres.Response.Value, v), + "returned value does not match sent value [%s]", string(v)) + + assert.Equal(t, "name=satoshi", string(tx), "sent tx") + assert.Equal(t, "name", string(qres.Response.Key), "queried for") + assert.Equal(t, "satoshi", string(qres.Response.Value), "got value") } -func ExampleHTTP_batching() { +func TestHTTPBatching(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() // Start a tendermint node (and kvstore) in the background to test against app := kvstore.NewApplication() - conf, err := rpctest.CreateConfig("ExampleHTTP_batching") - if err != nil { - log.Fatal(err) - } + conf, err := rpctest.CreateConfig(t, "ExampleHTTP_batching") + require.NoError(t, err) _, 
closer, err := rpctest.StartTendermint(ctx, conf, app, rpctest.SuppressStdout)
 if err != nil {
@@ -94,10 +83,8 @@ func ExampleHTTP_batching() {
 defer func() { _ = closer(ctx) }()

 rpcAddr := conf.RPC.ListenAddress
- c, err := rpchttp.New(rpcAddr)
- if err != nil {
- log.Fatal(err)
- }
+ c, err := rpchttp.NewWithClient(rpcAddr, http.DefaultClient)
+ require.NoError(t, err)

 // Create our two transactions
 k1 := []byte("firstName")
@@ -117,41 +104,51 @@ func ExampleHTTP_batching() {
 for _, tx := range txs {
 // Broadcast the transaction and wait for it to commit (rather use
 // c.BroadcastTxSync though in production).
- if _, err := batch.BroadcastTxCommit(context.Background(), tx); err != nil {
- log.Fatal(err) //nolint:gocritic
- }
+ _, err := batch.BroadcastTxSync(ctx, tx)
+ require.NoError(t, err)
 }

 // Send the batch of 2 transactions
- if _, err := batch.Send(context.Background()); err != nil {
- log.Fatal(err)
- }
-
- // Now let's query for the original results as a batch
- keys := [][]byte{k1, k2}
- for _, key := range keys {
- if _, err := batch.ABCIQuery(context.Background(), "/key", key); err != nil {
- log.Fatal(err)
- }
- }
+ _, err = batch.Send(ctx)
+ require.NoError(t, err)
+
+ // Wait for the transactions to land; we could poll further to confirm
+ // they have landed definitively.
+ require.Eventually(t,
+ func() bool {
+ // Now let's query for the original results as a batch
+ exists := 0
+ for _, key := range [][]byte{k1, k2} {
+ _, err := batch.ABCIQuery(ctx, "/key", key)
+ if err == nil {
+ exists++
+ }
+ }
+ return exists == 2
+ },
+ 10*time.Second,
+ time.Second,
+ )

 // Send the 2 queries and keep the results
- results, err := batch.Send(context.Background())
- if err != nil {
- log.Fatal(err)
- }
+ results, err := batch.Send(ctx)
+ require.NoError(t, err)
+ require.Len(t, results, 2)

 // Each result in the returned list is the deserialized result of each
 // respective ABCIQuery response
 for _, result := range results {
 qr, ok := result.(*coretypes.ResultABCIQuery)
- if !ok {
- log.Fatal("invalid result type from ABCIQuery request")
+ require.True(t, ok, "invalid result type from ABCIQuery request")
+
+ switch string(qr.Response.Key) {
+ case "firstName":
+ require.Equal(t, "satoshi", string(qr.Response.Value))
+ case "lastName":
+ require.Equal(t, "nakamoto", string(qr.Response.Value))
+ default:
+ t.Fatalf("encountered unknown key %q", string(qr.Response.Key))
 }
- fmt.Println(string(qr.Response.Key), "=", string(qr.Response.Value))
 }
-
- // Output:
- // firstName = satoshi
- // lastName = nakamoto
}
diff --git a/rpc/client/helpers.go b/rpc/client/helpers.go
index 58e48dbba6..05694afff0 100644
--- a/rpc/client/helpers.go
+++ b/rpc/client/helpers.go
@@ -2,12 +2,11 @@ package client

 import (
 "context"
- "errors"
 "fmt"
- "sync"
 "time"

- "github.com/tendermint/tendermint/libs/log"
+ "github.com/tendermint/tendermint/internal/jsontypes"
+ "github.com/tendermint/tendermint/rpc/coretypes"
 "github.com/tendermint/tendermint/types"
 )

@@ -34,13 +33,13 @@ func DefaultWaitStrategy(delta int64) (abort error) {
 //
 // If waiter is nil, we use DefaultWaitStrategy, but you can also
 // provide your own implementation
-func WaitForHeight(c StatusClient, h int64, waiter Waiter) error {
+func WaitForHeight(ctx context.Context, c StatusClient, h int64, waiter Waiter) error {
 if waiter == nil {
 waiter = DefaultWaitStrategy
 }
 delta := int64(1)
 for delta > 0 {
- s, err := c.Status(context.Background())
+ s, err := c.Status(ctx)
 if err != nil {
 return err
 }
@@ -54,116 +53,25 @@ func WaitForHeight(c 
StatusClient, h int64, waiter Waiter) error { return nil } -// WaitForOneEvent subscribes to a websocket event for the given -// event time and returns upon receiving it one time, or -// when the timeout duration has expired. -// -// This handles subscribing and unsubscribing under the hood -func WaitForOneEvent(c EventsClient, eventValue string, timeout time.Duration) (types.TMEventData, error) { - const subscriber = "helpers" - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() - - // register for the next event of this type - eventCh, err := c.Subscribe(ctx, subscriber, types.QueryForEvent(eventValue).String()) - if err != nil { - return nil, fmt.Errorf("failed to subscribe: %w", err) - } - - // make sure to un-register after the test is over - defer func() { - if deferErr := c.UnsubscribeAll(ctx, subscriber); deferErr != nil { - panic(err) +// WaitForOneEvent waits for the first event matching the given query on c, or +// until ctx ends. It reports an error if ctx ends before a matching event is +// received. +func WaitForOneEvent(ctx context.Context, c EventsClient, query string) (types.EventData, error) { + for { + rsp, err := c.Events(ctx, &coretypes.RequestEvents{ + Filter: &coretypes.EventFilter{Query: query}, + MaxItems: 1, + WaitTime: 10 * time.Second, // duration doesn't matter, limited by ctx timeout + }) + if err != nil { + return nil, err + } else if len(rsp.Items) == 0 { + continue // continue polling until ctx expires } - }() - - select { - case event := <-eventCh: - return event.Data, nil - case <-ctx.Done(): - return nil, errors.New("timed out waiting for event") - } -} - -var ( - // ErrClientRunning is returned by Start when the client is already running. - ErrClientRunning = errors.New("client already running") - - // ErrClientNotRunning is returned by Stop when the client is not running. - ErrClientNotRunning = errors.New("client is not running") -) - -// RunState is a helper that a client implementation can embed to implement -// common plumbing for keeping track of run state and logging. -// -// TODO(creachadair): This type is a temporary measure, and will be removed. -// See the discussion on #6971. -type RunState struct { - Logger log.Logger - - mu sync.Mutex - name string - isRunning bool - quit chan struct{} -} - -// NewRunState returns a new unstarted run state tracker with the given logging -// label and log sink. If logger == nil, a no-op logger is provided by default. -func NewRunState(name string, logger log.Logger) *RunState { - if logger == nil { - logger = log.NewNopLogger() - } - return &RunState{ - name: name, - Logger: logger, - } -} - -// Start sets the state to running, or reports an error. -func (r *RunState) Start() error { - r.mu.Lock() - defer r.mu.Unlock() - if r.isRunning { - r.Logger.Error("not starting client, it is already started", "client", r.name) - return ErrClientRunning - } - r.Logger.Info("starting client", "client", r.name) - r.isRunning = true - r.quit = make(chan struct{}) - return nil -} - -// Stop sets the state to not running, or reports an error. 
-func (r *RunState) Stop() error { - r.mu.Lock() - defer r.mu.Unlock() - if !r.isRunning { - r.Logger.Error("not stopping client; it is already stopped", "client", r.name) - return ErrClientNotRunning + var result types.EventData + if err := jsontypes.Unmarshal(rsp.Items[0].Data, &result); err != nil { + return nil, err + } + return result, nil } - r.Logger.Info("stopping client", "client", r.name) - r.isRunning = false - close(r.quit) - return nil -} - -// SetLogger updates the log sink. -func (r *RunState) SetLogger(logger log.Logger) { - r.mu.Lock() - defer r.mu.Unlock() - r.Logger = logger -} - -// IsRunning reports whether the state is running. -func (r *RunState) IsRunning() bool { - r.mu.Lock() - defer r.mu.Unlock() - return r.isRunning -} - -// Quit returns a channel that is closed when a call to Stop succeeds. -func (r *RunState) Quit() <-chan struct{} { - r.mu.Lock() - defer r.mu.Unlock() - return r.quit } diff --git a/rpc/client/helpers_test.go b/rpc/client/helpers_test.go index 60732b9914..a66becbd58 100644 --- a/rpc/client/helpers_test.go +++ b/rpc/client/helpers_test.go @@ -1,6 +1,7 @@ package client_test import ( + "context" "errors" "strings" "testing" @@ -14,7 +15,8 @@ import ( ) func TestWaitForHeight(t *testing.T) { - assert, require := assert.New(t), require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // test with error result - immediate failure m := &mock.StatusMock{ @@ -25,11 +27,12 @@ func TestWaitForHeight(t *testing.T) { r := mock.NewStatusRecorder(m) // connection failure always leads to error - err := client.WaitForHeight(r, 8, nil) - require.NotNil(err) - require.Equal("bye", err.Error()) + err := client.WaitForHeight(ctx, r, 8, nil) + require.Error(t, err) + require.Equal(t, "bye", err.Error()) + // we called status once to check - require.Equal(1, len(r.Calls)) + require.Equal(t, 1, len(r.Calls)) // now set current block height to 10 m.Call = mock.Call{ @@ -37,17 +40,19 @@ func TestWaitForHeight(t *testing.T) { } // we will not wait for more than 10 blocks - err = client.WaitForHeight(r, 40, nil) - require.NotNil(err) - require.True(strings.Contains(err.Error(), "aborting")) + err = client.WaitForHeight(ctx, r, 40, nil) + require.Error(t, err) + require.True(t, strings.Contains(err.Error(), "aborting")) + // we called status once more to check - require.Equal(2, len(r.Calls)) + require.Equal(t, 2, len(r.Calls)) // waiting for the past returns immediately - err = client.WaitForHeight(r, 5, nil) - require.Nil(err) + err = client.WaitForHeight(ctx, r, 5, nil) + require.NoError(t, err) + // we called status once more to check - require.Equal(3, len(r.Calls)) + require.Equal(t, 3, len(r.Calls)) // since we can't update in a background goroutine (test --race) // we use the callback to update the status height @@ -58,20 +63,21 @@ func TestWaitForHeight(t *testing.T) { } // we wait for a few blocks - err = client.WaitForHeight(r, 12, myWaiter) - require.Nil(err) + err = client.WaitForHeight(ctx, r, 12, myWaiter) + require.NoError(t, err) + // we called status once to check - require.Equal(5, len(r.Calls)) + require.Equal(t, 5, len(r.Calls)) pre := r.Calls[3] - require.Nil(pre.Error) + require.Nil(t, pre.Error) prer, ok := pre.Response.(*coretypes.ResultStatus) - require.True(ok) - assert.Equal(int64(10), prer.SyncInfo.LatestBlockHeight) + require.True(t, ok) + assert.Equal(t, int64(10), prer.SyncInfo.LatestBlockHeight) post := r.Calls[4] - require.Nil(post.Error) + require.Nil(t, post.Error) postr, ok := 
post.Response.(*coretypes.ResultStatus) - require.True(ok) - assert.Equal(int64(15), postr.SyncInfo.LatestBlockHeight) + require.True(t, ok) + assert.Equal(t, int64(15), postr.SyncInfo.LatestBlockHeight) } diff --git a/rpc/client/http/http.go b/rpc/client/http/http.go index c6c0e51b42..c42ce6b5b8 100644 --- a/rpc/client/http/http.go +++ b/rpc/client/http/http.go @@ -2,6 +2,7 @@ package http import ( "context" + "errors" "fmt" "net/http" "time" @@ -122,28 +123,18 @@ func NewWithTimeout(remote string, t time.Duration) (*HTTP, error) { return NewWithClient(remote, c) } -// NewWithClient allows you to set a custom http client. An error is returned -// on invalid remote. The function panics when client is nil. +// NewWithClient constructs an RPC client using a custom HTTP client. +// An error is reported if c == nil or remote is an invalid address. func NewWithClient(remote string, c *http.Client) (*HTTP, error) { if c == nil { - panic("nil http.Client") - } - return NewWithClientAndWSOptions(remote, c, DefaultWSOptions()) -} - -// NewWithClientAndWSOptions allows you to set a custom http client and -// WebSocket options. An error is returned on invalid remote. The function -// panics when client is nil. -func NewWithClientAndWSOptions(remote string, c *http.Client, wso WSOptions) (*HTTP, error) { - if c == nil { - panic("nil http.Client") + return nil, errors.New("nil client") } rpc, err := jsonrpcclient.NewWithHTTPClient(remote, c) if err != nil { return nil, err } - wsEvents, err := newWsEvents(remote, wso) + wsEvents, err := newWsEvents(remote) if err != nil { return nil, err } @@ -203,98 +194,70 @@ func (b *BatchHTTP) Count() int { func (c *baseRPCClient) Status(ctx context.Context) (*coretypes.ResultStatus, error) { result := new(coretypes.ResultStatus) - _, err := c.caller.Call(ctx, "status", map[string]interface{}{}, result) - if err != nil { + if err := c.caller.Call(ctx, "status", nil, result); err != nil { return nil, err } - return result, nil } func (c *baseRPCClient) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) { result := new(coretypes.ResultABCIInfo) - _, err := c.caller.Call(ctx, "abci_info", map[string]interface{}{}, result) - if err != nil { + if err := c.caller.Call(ctx, "abci_info", nil, result); err != nil { return nil, err } - return result, nil } -func (c *baseRPCClient) ABCIQuery( - ctx context.Context, - path string, - data bytes.HexBytes, -) (*coretypes.ResultABCIQuery, error) { +func (c *baseRPCClient) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*coretypes.ResultABCIQuery, error) { return c.ABCIQueryWithOptions(ctx, path, data, rpcclient.DefaultABCIQueryOptions) } -func (c *baseRPCClient) ABCIQueryWithOptions( - ctx context.Context, - path string, - data bytes.HexBytes, - opts rpcclient.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) { +func (c *baseRPCClient) ABCIQueryWithOptions(ctx context.Context, path string, data bytes.HexBytes, opts rpcclient.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) { result := new(coretypes.ResultABCIQuery) - _, err := c.caller.Call(ctx, "abci_query", - map[string]interface{}{"path": path, "data": data, "height": opts.Height, "prove": opts.Prove}, - result) - if err != nil { + if err := c.caller.Call(ctx, "abci_query", &coretypes.RequestABCIQuery{ + Path: path, + Data: data, + Height: coretypes.Int64(opts.Height), + Prove: opts.Prove, + }, result); err != nil { return nil, err } - return result, nil } -func (c *baseRPCClient) BroadcastTxCommit( - ctx context.Context, - tx types.Tx, 
-) (*coretypes.ResultBroadcastTxCommit, error) { +func (c *baseRPCClient) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) { result := new(coretypes.ResultBroadcastTxCommit) - _, err := c.caller.Call(ctx, "broadcast_tx_commit", map[string]interface{}{"tx": tx}, result) - if err != nil { + if err := c.caller.Call(ctx, "broadcast_tx_commit", &coretypes.RequestBroadcastTx{ + Tx: tx, + }, result); err != nil { return nil, err } return result, nil } -func (c *baseRPCClient) BroadcastTxAsync( - ctx context.Context, - tx types.Tx, -) (*coretypes.ResultBroadcastTx, error) { +func (c *baseRPCClient) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { return c.broadcastTX(ctx, "broadcast_tx_async", tx) } -func (c *baseRPCClient) BroadcastTxSync( - ctx context.Context, - tx types.Tx, -) (*coretypes.ResultBroadcastTx, error) { +func (c *baseRPCClient) BroadcastTxSync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { return c.broadcastTX(ctx, "broadcast_tx_sync", tx) } -func (c *baseRPCClient) broadcastTX( - ctx context.Context, - route string, - tx types.Tx, -) (*coretypes.ResultBroadcastTx, error) { +func (c *baseRPCClient) broadcastTX(ctx context.Context, route string, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { result := new(coretypes.ResultBroadcastTx) - _, err := c.caller.Call(ctx, route, map[string]interface{}{"tx": tx}, result) - if err != nil { + if err := c.caller.Call(ctx, route, &coretypes.RequestBroadcastTx{Tx: tx}, result); err != nil { return nil, err } return result, nil } -func (c *baseRPCClient) UnconfirmedTxs( - ctx context.Context, - limit *int, -) (*coretypes.ResultUnconfirmedTxs, error) { +func (c *baseRPCClient) UnconfirmedTxs(ctx context.Context, page *int, perPage *int) (*coretypes.ResultUnconfirmedTxs, error) { result := new(coretypes.ResultUnconfirmedTxs) - params := make(map[string]interface{}) - if limit != nil { - params["limit"] = limit - } - _, err := c.caller.Call(ctx, "unconfirmed_txs", params, result) - if err != nil { + + if err := c.caller.Call(ctx, "unconfirmed_txs", &coretypes.RequestUnconfirmedTxs{ + Page: coretypes.Int64Ptr(page), + PerPage: coretypes.Int64Ptr(perPage), + }, result); err != nil { return nil, err } return result, nil @@ -302,8 +265,7 @@ func (c *baseRPCClient) UnconfirmedTxs( func (c *baseRPCClient) NumUnconfirmedTxs(ctx context.Context) (*coretypes.ResultUnconfirmedTxs, error) { result := new(coretypes.ResultUnconfirmedTxs) - _, err := c.caller.Call(ctx, "num_unconfirmed_txs", map[string]interface{}{}, result) - if err != nil { + if err := c.caller.Call(ctx, "num_unconfirmed_txs", nil, result); err != nil { return nil, err } return result, nil @@ -311,16 +273,14 @@ func (c *baseRPCClient) NumUnconfirmedTxs(ctx context.Context) (*coretypes.Resul func (c *baseRPCClient) CheckTx(ctx context.Context, tx types.Tx) (*coretypes.ResultCheckTx, error) { result := new(coretypes.ResultCheckTx) - _, err := c.caller.Call(ctx, "check_tx", map[string]interface{}{"tx": tx}, result) - if err != nil { + if err := c.caller.Call(ctx, "check_tx", &coretypes.RequestCheckTx{Tx: tx}, result); err != nil { return nil, err } return result, nil } func (c *baseRPCClient) RemoveTx(ctx context.Context, txKey types.TxKey) error { - _, err := c.caller.Call(ctx, "remove_tx", map[string]interface{}{"tx_key": txKey}, nil) - if err != nil { + if err := c.caller.Call(ctx, "remove_tx", &coretypes.RequestRemoveTx{TxKey: txKey}, nil); err != nil { return err } return 
nil @@ -328,8 +288,7 @@ func (c *baseRPCClient) RemoveTx(ctx context.Context, txKey types.TxKey) error { func (c *baseRPCClient) NetInfo(ctx context.Context) (*coretypes.ResultNetInfo, error) { result := new(coretypes.ResultNetInfo) - _, err := c.caller.Call(ctx, "net_info", map[string]interface{}{}, result) - if err != nil { + if err := c.caller.Call(ctx, "net_info", nil, result); err != nil { return nil, err } return result, nil @@ -337,8 +296,7 @@ func (c *baseRPCClient) NetInfo(ctx context.Context) (*coretypes.ResultNetInfo, func (c *baseRPCClient) DumpConsensusState(ctx context.Context) (*coretypes.ResultDumpConsensusState, error) { result := new(coretypes.ResultDumpConsensusState) - _, err := c.caller.Call(ctx, "dump_consensus_state", map[string]interface{}{}, result) - if err != nil { + if err := c.caller.Call(ctx, "dump_consensus_state", nil, result); err != nil { return nil, err } return result, nil @@ -346,24 +304,25 @@ func (c *baseRPCClient) DumpConsensusState(ctx context.Context) (*coretypes.Resu func (c *baseRPCClient) ConsensusState(ctx context.Context) (*coretypes.ResultConsensusState, error) { result := new(coretypes.ResultConsensusState) - _, err := c.caller.Call(ctx, "consensus_state", map[string]interface{}{}, result) - if err != nil { + if err := c.caller.Call(ctx, "consensus_state", nil, result); err != nil { return nil, err } return result, nil } -func (c *baseRPCClient) ConsensusParams( - ctx context.Context, - height *int64, -) (*coretypes.ResultConsensusParams, error) { +func (c *baseRPCClient) ConsensusParams(ctx context.Context, height *int64) (*coretypes.ResultConsensusParams, error) { result := new(coretypes.ResultConsensusParams) - params := make(map[string]interface{}) - if height != nil { - params["height"] = height + if err := c.caller.Call(ctx, "consensus_params", &coretypes.RequestConsensusParams{ + Height: (*coretypes.Int64)(height), + }, result); err != nil { + return nil, err } - _, err := c.caller.Call(ctx, "consensus_params", params, result) - if err != nil { + return result, nil +} + +func (c *baseRPCClient) Events(ctx context.Context, req *coretypes.RequestEvents) (*coretypes.ResultEvents, error) { + result := new(coretypes.ResultEvents) + if err := c.caller.Call(ctx, "events", req, result); err != nil { return nil, err } return result, nil @@ -371,23 +330,18 @@ func (c *baseRPCClient) ConsensusParams( func (c *baseRPCClient) Health(ctx context.Context) (*coretypes.ResultHealth, error) { result := new(coretypes.ResultHealth) - _, err := c.caller.Call(ctx, "health", map[string]interface{}{}, result) - if err != nil { + if err := c.caller.Call(ctx, "health", nil, result); err != nil { return nil, err } return result, nil } -func (c *baseRPCClient) BlockchainInfo( - ctx context.Context, - minHeight, - maxHeight int64, -) (*coretypes.ResultBlockchainInfo, error) { +func (c *baseRPCClient) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { result := new(coretypes.ResultBlockchainInfo) - _, err := c.caller.Call(ctx, "blockchain", - map[string]interface{}{"minHeight": minHeight, "maxHeight": maxHeight}, - result) - if err != nil { + if err := c.caller.Call(ctx, "blockchain", &coretypes.RequestBlockchainInfo{ + MinHeight: coretypes.Int64(minHeight), + MaxHeight: coretypes.Int64(maxHeight), + }, result); err != nil { return nil, err } return result, nil @@ -395,8 +349,7 @@ func (c *baseRPCClient) BlockchainInfo( func (c *baseRPCClient) Genesis(ctx context.Context) (*coretypes.ResultGenesis, error) { 
result := new(coretypes.ResultGenesis) - _, err := c.caller.Call(ctx, "genesis", map[string]interface{}{}, result) - if err != nil { + if err := c.caller.Call(ctx, "genesis", nil, result); err != nil { return nil, err } return result, nil @@ -404,8 +357,9 @@ func (c *baseRPCClient) Genesis(ctx context.Context) (*coretypes.ResultGenesis, func (c *baseRPCClient) GenesisChunked(ctx context.Context, id uint) (*coretypes.ResultGenesisChunk, error) { result := new(coretypes.ResultGenesisChunk) - _, err := c.caller.Call(ctx, "genesis_chunked", map[string]interface{}{"chunk": id}, result) - if err != nil { + if err := c.caller.Call(ctx, "genesis_chunked", &coretypes.RequestGenesisChunked{ + Chunk: coretypes.Int64(id), + }, result); err != nil { return nil, err } return result, nil @@ -413,12 +367,9 @@ func (c *baseRPCClient) GenesisChunked(ctx context.Context, id uint) (*coretypes func (c *baseRPCClient) Block(ctx context.Context, height *int64) (*coretypes.ResultBlock, error) { result := new(coretypes.ResultBlock) - params := make(map[string]interface{}) - if height != nil { - params["height"] = height - } - _, err := c.caller.Call(ctx, "block", params, result) - if err != nil { + if err := c.caller.Call(ctx, "block", &coretypes.RequestBlockInfo{ + Height: (*coretypes.Int64)(height), + }, result); err != nil { return nil, err } return result, nil @@ -426,27 +377,17 @@ func (c *baseRPCClient) Block(ctx context.Context, height *int64) (*coretypes.Re func (c *baseRPCClient) BlockByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultBlock, error) { result := new(coretypes.ResultBlock) - params := map[string]interface{}{ - "hash": hash, - } - _, err := c.caller.Call(ctx, "block_by_hash", params, result) - if err != nil { + if err := c.caller.Call(ctx, "block_by_hash", &coretypes.RequestBlockByHash{Hash: hash}, result); err != nil { return nil, err } return result, nil } -func (c *baseRPCClient) BlockResults( - ctx context.Context, - height *int64, -) (*coretypes.ResultBlockResults, error) { +func (c *baseRPCClient) BlockResults(ctx context.Context, height *int64) (*coretypes.ResultBlockResults, error) { result := new(coretypes.ResultBlockResults) - params := make(map[string]interface{}) - if height != nil { - params["height"] = height - } - _, err := c.caller.Call(ctx, "block_results", params, result) - if err != nil { + if err := c.caller.Call(ctx, "block_results", &coretypes.RequestBlockInfo{ + Height: (*coretypes.Int64)(height), + }, result); err != nil { return nil, err } return result, nil @@ -454,12 +395,9 @@ func (c *baseRPCClient) BlockResults( func (c *baseRPCClient) Header(ctx context.Context, height *int64) (*coretypes.ResultHeader, error) { result := new(coretypes.ResultHeader) - params := make(map[string]interface{}) - if height != nil { - params["height"] = height - } - _, err := c.caller.Call(ctx, "header", params, result) - if err != nil { + if err := c.caller.Call(ctx, "header", &coretypes.RequestBlockInfo{ + Height: (*coretypes.Int64)(height), + }, result); err != nil { return nil, err } return result, nil @@ -467,11 +405,9 @@ func (c *baseRPCClient) Header(ctx context.Context, height *int64) (*coretypes.R func (c *baseRPCClient) HeaderByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultHeader, error) { result := new(coretypes.ResultHeader) - params := map[string]interface{}{ - "hash": hash, - } - _, err := c.caller.Call(ctx, "header_by_hash", params, result) - if err != nil { + if err := c.caller.Call(ctx, "header_by_hash", &coretypes.RequestBlockByHash{ 
+ Hash: hash, + }, result); err != nil { return nil, err } return result, nil @@ -479,12 +415,9 @@ func (c *baseRPCClient) HeaderByHash(ctx context.Context, hash bytes.HexBytes) ( func (c *baseRPCClient) Commit(ctx context.Context, height *int64) (*coretypes.ResultCommit, error) { result := new(coretypes.ResultCommit) - params := make(map[string]interface{}) - if height != nil { - params["height"] = height - } - _, err := c.caller.Call(ctx, "commit", params, result) - if err != nil { + if err := c.caller.Call(ctx, "commit", &coretypes.RequestBlockInfo{ + Height: (*coretypes.Int64)(height), + }, result); err != nil { return nil, err } if result.Commit == nil { @@ -498,70 +431,35 @@ func (c *baseRPCClient) Commit(ctx context.Context, height *int64) (*coretypes.R func (c *baseRPCClient) Tx(ctx context.Context, hash bytes.HexBytes, prove bool) (*coretypes.ResultTx, error) { result := new(coretypes.ResultTx) - params := map[string]interface{}{ - "hash": hash, - "prove": prove, - } - _, err := c.caller.Call(ctx, "tx", params, result) - if err != nil { + if err := c.caller.Call(ctx, "tx", &coretypes.RequestTx{Hash: hash, Prove: prove}, result); err != nil { return nil, err } return result, nil } -func (c *baseRPCClient) TxSearch( - ctx context.Context, - query string, - prove bool, - page, - perPage *int, - orderBy string, -) (*coretypes.ResultTxSearch, error) { - +func (c *baseRPCClient) TxSearch(ctx context.Context, query string, prove bool, page, perPage *int, orderBy string) (*coretypes.ResultTxSearch, error) { result := new(coretypes.ResultTxSearch) - params := map[string]interface{}{ - "query": query, - "prove": prove, - "order_by": orderBy, - } - - if page != nil { - params["page"] = page - } - if perPage != nil { - params["per_page"] = perPage - } - - _, err := c.caller.Call(ctx, "tx_search", params, result) - if err != nil { + if err := c.caller.Call(ctx, "tx_search", &coretypes.RequestTxSearch{ + Query: query, + Prove: prove, + OrderBy: orderBy, + Page: coretypes.Int64Ptr(page), + PerPage: coretypes.Int64Ptr(perPage), + }, result); err != nil { return nil, err } return result, nil } -func (c *baseRPCClient) BlockSearch( - ctx context.Context, - query string, - page, perPage *int, - orderBy string, -) (*coretypes.ResultBlockSearch, error) { - +func (c *baseRPCClient) BlockSearch(ctx context.Context, query string, page, perPage *int, orderBy string) (*coretypes.ResultBlockSearch, error) { result := new(coretypes.ResultBlockSearch) - params := map[string]interface{}{ - "query": query, - "order_by": orderBy, - } - - if page != nil { - params["page"] = page - } - if perPage != nil { - params["per_page"] = perPage - } - - _, err := c.caller.Call(ctx, "block_search", params, result) - if err != nil { + if err := c.caller.Call(ctx, "block_search", &coretypes.RequestBlockSearch{ + Query: query, + OrderBy: orderBy, + Page: coretypes.Int64Ptr(page), + PerPage: coretypes.Int64Ptr(perPage), + }, result); err != nil { return nil, err } @@ -571,38 +469,26 @@ func (c *baseRPCClient) BlockSearch( func (c *baseRPCClient) Validators( ctx context.Context, height *int64, - page, - perPage *int, + page, perPage *int, requestQuorumInfo *bool, ) (*coretypes.ResultValidators, error) { result := new(coretypes.ResultValidators) - params := make(map[string]interface{}) - if page != nil { - params["page"] = page - } - if perPage != nil { - params["per_page"] = perPage - } - if height != nil { - params["height"] = height - } - if requestQuorumInfo != nil { - params["request_quorum_info"] = requestQuorumInfo - } - _, 
err := c.caller.Call(ctx, "validators", params, result) - if err != nil { + if err := c.caller.Call(ctx, "validators", &coretypes.RequestValidators{ + Height: (*coretypes.Int64)(height), + Page: coretypes.Int64Ptr(page), + PerPage: coretypes.Int64Ptr(perPage), + RequestQuorumInfo: requestQuorumInfo, + }, result); err != nil { return nil, err } return result, nil } -func (c *baseRPCClient) BroadcastEvidence( - ctx context.Context, - ev types.Evidence, -) (*coretypes.ResultBroadcastEvidence, error) { +func (c *baseRPCClient) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*coretypes.ResultBroadcastEvidence, error) { result := new(coretypes.ResultBroadcastEvidence) - _, err := c.caller.Call(ctx, "broadcast_evidence", map[string]interface{}{"evidence": ev}, result) - if err != nil { + if err := c.caller.Call(ctx, "broadcast_evidence", &coretypes.RequestBroadcastEvidence{ + Evidence: ev, + }, result); err != nil { return nil, err } return result, nil diff --git a/rpc/client/http/ws.go b/rpc/client/http/ws.go index 0f908e271b..e9a5ac829f 100644 --- a/rpc/client/http/ws.go +++ b/rpc/client/http/ws.go @@ -2,53 +2,25 @@ package http import ( "context" - "errors" + "encoding/json" "fmt" "strings" + "sync" "time" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - tmjson "github.com/tendermint/tendermint/libs/json" - "github.com/tendermint/tendermint/libs/pubsub" + "github.com/tendermint/tendermint/internal/pubsub" + "github.com/tendermint/tendermint/libs/log" rpcclient "github.com/tendermint/tendermint/rpc/client" "github.com/tendermint/tendermint/rpc/coretypes" jsonrpcclient "github.com/tendermint/tendermint/rpc/jsonrpc/client" ) -// WSOptions for the WS part of the HTTP client. -type WSOptions struct { - Path string // path (e.g. "/ws") - - jsonrpcclient.WSOptions // WSClient options -} - -// DefaultWSOptions returns default WS options. -// See jsonrpcclient.DefaultWSOptions. -func DefaultWSOptions() WSOptions { - return WSOptions{ - Path: "/websocket", - WSOptions: jsonrpcclient.DefaultWSOptions(), - } -} - -// Validate performs a basic validation of WSOptions. -func (wso WSOptions) Validate() error { - if len(wso.Path) <= 1 { - return errors.New("empty Path") - } - if wso.Path[0] != '/' { - return errors.New("leading slash is missing in Path") - } - - return nil -} - -// wsEvents is a wrapper around WSClient, which implements EventsClient. +// wsEvents is a wrapper around WSClient, which implements SubscriptionClient. 
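Every conversion in this hunk follows the same shape: the ad-hoc `map[string]interface{}` parameter bags become typed request structs, with optional values carried as pointers (`(*coretypes.Int64)(height)`, `coretypes.Int64Ptr(page)`). Below is a self-contained sketch of that pattern using toy stand-ins for the coretypes helpers; the string encoding of Int64 is an assumption based on how it is used here.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Int64 stands in for coretypes.Int64: an integer the RPC layer encodes as a
// JSON string (an assumption based on usage above; see coretypes for the real
// definition).
type Int64 int64

func (n Int64) MarshalJSON() ([]byte, error) {
	return json.Marshal(fmt.Sprint(int64(n)))
}

// RequestBlockInfo stands in for the struct used by Block, BlockResults,
// Header, and Commit above: a nil Height means "latest block".
type RequestBlockInfo struct {
	Height *Int64 `json:"height,omitempty"`
}

func main() {
	h := int64(42)
	req := RequestBlockInfo{Height: (*Int64)(&h)} // same cast as (*coretypes.Int64)(height)
	b, _ := json.Marshal(req)
	fmt.Println(string(b)) // {"height":"42"}
}
```

The pointer fields keep "parameter omitted" distinguishable from "parameter zero", which is what the old map-based code expressed by leaving keys out.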
type wsEvents struct { - *rpcclient.RunState - ws *jsonrpcclient.WSClient + Logger log.Logger + ws *jsonrpcclient.WSClient - mtx tmsync.RWMutex + mtx sync.RWMutex subscriptions map[string]*wsSubscription } @@ -58,27 +30,16 @@ type wsSubscription struct { query string } -var _ rpcclient.EventsClient = (*wsEvents)(nil) - -func newWsEvents(remote string, wso WSOptions) (*wsEvents, error) { - // validate options - if err := wso.Validate(); err != nil { - return nil, fmt.Errorf("invalid WSOptions: %w", err) - } - - // remove the trailing / from the remote else the websocket endpoint - // won't parse correctly - if remote[len(remote)-1] == '/' { - remote = remote[:len(remote)-1] - } +var _ rpcclient.SubscriptionClient = (*wsEvents)(nil) +func newWsEvents(remote string) (*wsEvents, error) { w := &wsEvents{ + Logger: log.NewNopLogger(), subscriptions: make(map[string]*wsSubscription), } - w.RunState = rpcclient.NewRunState("wsEvents", nil) var err error - w.ws, err = jsonrpcclient.NewWSWithOptions(remote, wso.Path, wso.WSOptions) + w.ws, err = jsonrpcclient.NewWS(strings.TrimSuffix(remote, "/"), "/websocket") if err != nil { return nil, fmt.Errorf("can't create WS client: %w", err) } @@ -86,27 +47,24 @@ func newWsEvents(remote string, wso WSOptions) (*wsEvents, error) { // resubscribe immediately w.redoSubscriptionsAfter(0 * time.Second) }) - w.ws.SetLogger(w.Logger) + w.ws.Logger = w.Logger return w, nil } // Start starts the websocket client and the event loop. -func (w *wsEvents) Start() error { - if err := w.ws.Start(); err != nil { +func (w *wsEvents) Start(ctx context.Context) error { + if err := w.ws.Start(ctx); err != nil { return err } - go w.eventListener() + go w.eventListener(ctx) return nil } -// IsRunning reports whether the websocket client is running. -func (w *wsEvents) IsRunning() bool { return w.ws.IsRunning() } - // Stop shuts down the websocket client. func (w *wsEvents) Stop() error { return w.ws.Stop() } -// Subscribe implements EventsClient by using WSClient to subscribe given +// Subscribe implements SubscriptionClient by using WSClient to subscribe given // subscriber to query. By default, it returns a channel with cap=1. Error is // returned if it fails to subscribe. // @@ -120,11 +78,6 @@ func (w *wsEvents) Stop() error { return w.ws.Stop() } // It returns an error if wsEvents is not running. func (w *wsEvents) Subscribe(ctx context.Context, subscriber, query string, outCapacity ...int) (out <-chan coretypes.ResultEvent, err error) { - - if !w.IsRunning() { - return nil, rpcclient.ErrClientNotRunning - } - if err := w.ws.Subscribe(ctx, query); err != nil { return nil, err } @@ -144,20 +97,17 @@ func (w *wsEvents) Subscribe(ctx context.Context, subscriber, query string, return outc, nil } -// Unsubscribe implements EventsClient by using WSClient to unsubscribe given -// subscriber from query. +// Unsubscribe implements SubscriptionClient by using WSClient to unsubscribe +// given subscriber from query. // // It returns an error if wsEvents is not running. 
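The Subscribe contract quoted above has two sharp edges for callers: the context only covers the initial request, and the returned channel is never closed. A usage sketch against the SubscriptionClient interface; the subscriber ID and query string are placeholders.

```go
package main

import (
	"context"
	"fmt"

	rpcclient "github.com/tendermint/tendermint/rpc/client"
)

// consumeEvents drains a subscription until ctx ends, then unsubscribes.
func consumeEvents(ctx context.Context, sc rpcclient.SubscriptionClient) error {
	const subscriber, query = "demo-client", "tm.event = 'NewBlock'"
	ch, err := sc.Subscribe(ctx, subscriber, query)
	if err != nil {
		return err
	}
	// The ctx given to Subscribe does not stop delivery, so clean up explicitly.
	defer func() { _ = sc.Unsubscribe(context.Background(), subscriber, query) }()
	for {
		select {
		case ev := <-ch: // the channel is never closed; ctx is the only exit
			fmt.Println("matched query:", ev.Query)
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}

func main() {} // placeholder so the sketch compiles standalone
```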
func (w *wsEvents) Unsubscribe(ctx context.Context, subscriber, query string) error { - if !w.IsRunning() { - return rpcclient.ErrClientNotRunning - } - if err := w.ws.Unsubscribe(ctx, query); err != nil { return err } w.mtx.Lock() + defer w.mtx.Unlock() info, ok := w.subscriptions[query] if ok { if info.id != "" { @@ -165,27 +115,22 @@ func (w *wsEvents) Unsubscribe(ctx context.Context, subscriber, query string) er } delete(w.subscriptions, info.query) } - w.mtx.Unlock() return nil } -// UnsubscribeAll implements EventsClient by using WSClient to unsubscribe -// given subscriber from all the queries. +// UnsubscribeAll implements SubscriptionClient by using WSClient to +// unsubscribe given subscriber from all the queries. // // It returns an error if wsEvents is not running. func (w *wsEvents) UnsubscribeAll(ctx context.Context, subscriber string) error { - if !w.IsRunning() { - return rpcclient.ErrClientNotRunning - } - if err := w.ws.UnsubscribeAll(ctx); err != nil { return err } w.mtx.Lock() + defer w.mtx.Unlock() w.subscriptions = make(map[string]*wsSubscription) - w.mtx.Unlock() return nil } @@ -195,7 +140,7 @@ func (w *wsEvents) UnsubscribeAll(ctx context.Context, subscriber string) error func (w *wsEvents) redoSubscriptionsAfter(d time.Duration) { time.Sleep(d) - ctx := context.Background() + ctx := context.TODO() w.mtx.Lock() defer w.mtx.Unlock() @@ -216,7 +161,7 @@ func isErrAlreadySubscribed(err error) bool { return strings.Contains(err.Error(), pubsub.ErrAlreadySubscribed.Error()) } -func (w *wsEvents) eventListener() { +func (w *wsEvents) eventListener(ctx context.Context) { for { select { case resp, ok := <-w.ws.ResponsesCh: @@ -239,7 +184,7 @@ func (w *wsEvents) eventListener() { } result := new(coretypes.ResultEvent) - err := tmjson.Unmarshal(resp.Result, result) + err := json.Unmarshal(resp.Result, result) if err != nil { w.Logger.Error("failed to unmarshal response", "err", err) continue @@ -258,11 +203,11 @@ func (w *wsEvents) eventListener() { if ok { select { case out.res <- *result: - case <-w.Quit(): + case <-ctx.Done(): return } } - case <-w.Quit(): + case <-ctx.Done(): return } } diff --git a/rpc/client/interface.go b/rpc/client/interface.go index 9de460cf1c..61405ab14f 100644 --- a/rpc/client/interface.go +++ b/rpc/client/interface.go @@ -32,26 +32,21 @@ import ( // Client describes the interface of Tendermint RPC client implementations. type Client interface { - // These methods define the operational structure of the client. - - // Start the client. Start must report an error if the client is running. - Start() error - - // Stop the client. Stop must report an error if the client is not running. - Stop() error - - // IsRunning reports whether the client is running. - IsRunning() bool + // Start the client, which will run until the context terminates. + // An error from Start indicates the client could not start. + Start(context.Context) error // These embedded interfaces define the callable methods of the service. 
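This interface change is the core of the hunk: the Start/Stop/IsRunning triple collapses into a single Start(context.Context), so shutdown is signalled by cancelling the context rather than calling Stop. A caller-side sketch; the rpchttp.New constructor name, its signature, and the address are assumptions for illustration.

```go
package main

import (
	"context"
	"log"
	"time"

	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
)

func main() {
	// Constructor and address assumed for illustration.
	c, err := rpchttp.New("tcp://127.0.0.1:26657")
	if err != nil {
		log.Fatal(err)
	}

	// Start now takes the context that bounds the client's whole lifetime;
	// cancellation replaces the removed Stop and IsRunning calls.
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	if err := c.Start(ctx); err != nil {
		log.Fatal(err)
	}
	// ... use c (Status, Block, Events, ...) until ctx.Done() ...
}
```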
+ ABCIClient EventsClient + EvidenceClient HistoryClient + MempoolClient NetworkClient SignClient StatusClient - EvidenceClient - MempoolClient + SubscriptionClient } // ABCIClient groups together the functionality that principally affects the @@ -81,8 +76,7 @@ type SignClient interface { Header(ctx context.Context, height *int64) (*coretypes.ResultHeader, error) HeaderByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultHeader, error) Commit(ctx context.Context, height *int64) (*coretypes.ResultCommit, error) - Validators(ctx context.Context, height *int64, page, perPage *int, - requestQuorumInfo *bool) (*coretypes.ResultValidators, error) + Validators(ctx context.Context, height *int64, page, perPage *int, requestQuorumInfo *bool) (*coretypes.ResultValidators, error) Tx(ctx context.Context, hash bytes.HexBytes, prove bool) (*coretypes.ResultTx, error) // TxSearch defines a method to search for a paginated set of transactions by @@ -96,7 +90,7 @@ type SignClient interface { ) (*coretypes.ResultTxSearch, error) // BlockSearch defines a method to search for a paginated set of blocks by - // BeginBlock and EndBlock event search criteria. + // FinalizeBlock event search criteria. BlockSearch( ctx context.Context, query string, @@ -127,26 +121,47 @@ type NetworkClient interface { Health(context.Context) (*coretypes.ResultHealth, error) } -// EventsClient is reactive, you can subscribe to any message, given the proper -// string. see tendermint/types/events.go +// EventsClient exposes the methods to retrieve events from the consensus engine. type EventsClient interface { - // Subscribe subscribes given subscriber to query. Returns a channel with - // cap=1 onto which events are published. An error is returned if it fails to - // subscribe. outCapacity can be used optionally to set capacity for the - // channel. Channel is never closed to prevent accidental reads. + // Events fetches a batch of events from the server matching the given query + // and time range. + Events(ctx context.Context, req *coretypes.RequestEvents) (*coretypes.ResultEvents, error) +} + +// TODO(creachadair): This interface should be removed once the streaming event +// interface is removed in Tendermint v0.37. +type SubscriptionClient interface { + // Subscribe issues a subscription request for the given subscriber ID and + // query. This method does not block: If subscription fails, it reports an + // error, and if subscription succeeds it returns a channel that delivers + // matching events until the subscription is stopped. The channel is never + // closed; the client is responsible for knowing when no further data will + // be sent. // - // ctx cannot be used to unsubscribe. To unsubscribe, use either Unsubscribe - // or UnsubscribeAll. + // The context only governs the initial subscription, it does not control + // the lifetime of the channel. To cancel a subscription call Unsubscribe or + // UnsubscribeAll. + // + // Deprecated: This method will be removed in Tendermint v0.37, use Events + // instead. Subscribe(ctx context.Context, subscriber, query string, outCapacity ...int) (out <-chan coretypes.ResultEvent, err error) + // Unsubscribe unsubscribes given subscriber from query. + // + // Deprecated: This method will be removed in Tendermint v0.37, use Events + // instead. Unsubscribe(ctx context.Context, subscriber, query string) error + // UnsubscribeAll unsubscribes given subscriber from all the queries. + // + // Deprecated: This method will be removed in Tendermint v0.37, use Events + // instead. 
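Since Subscribe, Unsubscribe, and UnsubscribeAll are all deprecated in favour of Events, a consumer migrates to a cursor-driven poll loop. A sketch of that loop follows; the RequestEvents/ResultEvents field names (Filter, MaxItems, After, Items, Newest) are assumptions recalled from this branch's coretypes, not a confirmed API.

```go
package main

import (
	"context"

	rpcclient "github.com/tendermint/tendermint/rpc/client"
	"github.com/tendermint/tendermint/rpc/coretypes"
)

// pollEvents sketches the cursor-based replacement for Subscribe: call Events
// in a loop, advancing a cursor so each item is delivered once. Field names
// here are assumptions; check coretypes on this branch for the real ones.
func pollEvents(ctx context.Context, c rpcclient.EventsClient) error {
	var cursor string
	for {
		rsp, err := c.Events(ctx, &coretypes.RequestEvents{
			Filter:   &coretypes.EventFilter{Query: "tm.event = 'Tx'"}, // assumed filter type
			MaxItems: 50,
			After:    cursor, // empty on the first call
		})
		if err != nil {
			return err
		}
		for _, item := range rsp.Items {
			_ = item // handle one event item
		}
		cursor = rsp.Newest // resume after the newest item seen so far
	}
}

func main() {}
```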
UnsubscribeAll(ctx context.Context, subscriber string) error } // MempoolClient shows us data about current mempool state. type MempoolClient interface { - UnconfirmedTxs(ctx context.Context, limit *int) (*coretypes.ResultUnconfirmedTxs, error) + UnconfirmedTxs(ctx context.Context, page, perPage *int) (*coretypes.ResultUnconfirmedTxs, error) NumUnconfirmedTxs(context.Context) (*coretypes.ResultUnconfirmedTxs, error) CheckTx(context.Context, types.Tx) (*coretypes.ResultCheckTx, error) RemoveTx(context.Context, types.TxKey) error diff --git a/rpc/client/local/local.go b/rpc/client/local/local.go index 2bc132fdf4..b95a77aeda 100644 --- a/rpc/client/local/local.go +++ b/rpc/client/local/local.go @@ -6,14 +6,14 @@ import ( "fmt" "time" + "github.com/tendermint/tendermint/internal/eventbus" + "github.com/tendermint/tendermint/internal/pubsub" + "github.com/tendermint/tendermint/internal/pubsub/query" rpccore "github.com/tendermint/tendermint/internal/rpc/core" "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/pubsub" - "github.com/tendermint/tendermint/libs/pubsub/query" rpcclient "github.com/tendermint/tendermint/rpc/client" "github.com/tendermint/tendermint/rpc/coretypes" - rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" ) @@ -38,9 +38,8 @@ don't need to do anything). It will keep trying indefinitely with exponential backoff (10ms -> 20ms -> 40ms) until successful. */ type Local struct { - *types.EventBus + *eventbus.EventBus Logger log.Logger - ctx *rpctypes.Context env *rpccore.Environment } @@ -48,72 +47,67 @@ type Local struct { // local RPC client constructor needs to build a local client. type NodeService interface { RPCEnvironment() *rpccore.Environment - EventBus() *types.EventBus + EventBus() *eventbus.EventBus } // New configures a client that calls the Node directly. -func New(node NodeService) (*Local, error) { +func New(logger log.Logger, node NodeService) (*Local, error) { env := node.RPCEnvironment() if env == nil { return nil, errors.New("rpc is nil") } return &Local{ EventBus: node.EventBus(), - Logger: log.NewNopLogger(), - ctx: &rpctypes.Context{}, + Logger: logger, env: env, }, nil } var _ rpcclient.Client = (*Local)(nil) -// SetLogger allows to set a logger on the client. 
-func (c *Local) SetLogger(l log.Logger) { - c.Logger = l -} - func (c *Local) Status(ctx context.Context) (*coretypes.ResultStatus, error) { - return c.env.Status(c.ctx) + return c.env.Status(ctx) } func (c *Local) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) { - return c.env.ABCIInfo(c.ctx) + return c.env.ABCIInfo(ctx) } func (c *Local) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*coretypes.ResultABCIQuery, error) { return c.ABCIQueryWithOptions(ctx, path, data, rpcclient.DefaultABCIQueryOptions) } -func (c *Local) ABCIQueryWithOptions( - ctx context.Context, - path string, - data bytes.HexBytes, - opts rpcclient.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) { - return c.env.ABCIQuery(c.ctx, path, data, opts.Height, opts.Prove) +func (c *Local) ABCIQueryWithOptions(ctx context.Context, path string, data bytes.HexBytes, opts rpcclient.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) { + return c.env.ABCIQuery(ctx, &coretypes.RequestABCIQuery{ + Path: path, Data: data, Height: coretypes.Int64(opts.Height), Prove: opts.Prove, + }) } func (c *Local) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) { - return c.env.BroadcastTxCommit(c.ctx, tx) + return c.env.BroadcastTxCommit(ctx, &coretypes.RequestBroadcastTx{Tx: tx}) } func (c *Local) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { - return c.env.BroadcastTxAsync(c.ctx, tx) + return c.env.BroadcastTxAsync(ctx, &coretypes.RequestBroadcastTx{Tx: tx}) } func (c *Local) BroadcastTxSync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { - return c.env.BroadcastTxSync(c.ctx, tx) + return c.env.BroadcastTxSync(ctx, &coretypes.RequestBroadcastTx{Tx: tx}) } -func (c *Local) UnconfirmedTxs(ctx context.Context, limit *int) (*coretypes.ResultUnconfirmedTxs, error) { - return c.env.UnconfirmedTxs(c.ctx, limit) +func (c *Local) UnconfirmedTxs(ctx context.Context, page, perPage *int) (*coretypes.ResultUnconfirmedTxs, error) { + return c.env.UnconfirmedTxs(ctx, &coretypes.RequestUnconfirmedTxs{ + Page: coretypes.Int64Ptr(page), + PerPage: coretypes.Int64Ptr(perPage), + }) } func (c *Local) NumUnconfirmedTxs(ctx context.Context) (*coretypes.ResultUnconfirmedTxs, error) { - return c.env.NumUnconfirmedTxs(c.ctx) + return c.env.NumUnconfirmedTxs(ctx) } func (c *Local) CheckTx(ctx context.Context, tx types.Tx) (*coretypes.ResultCheckTx, error) { - return c.env.CheckTx(c.ctx, tx) + return c.env.CheckTx(ctx, &coretypes.RequestCheckTx{Tx: tx}) } func (c *Local) RemoveTx(ctx context.Context, txKey types.TxKey) error { @@ -121,194 +115,190 @@ func (c *Local) RemoveTx(ctx context.Context, txKey types.TxKey) error { } func (c *Local) NetInfo(ctx context.Context) (*coretypes.ResultNetInfo, error) { - return c.env.NetInfo(c.ctx) + return c.env.NetInfo(ctx) } func (c *Local) DumpConsensusState(ctx context.Context) (*coretypes.ResultDumpConsensusState, error) { - return c.env.DumpConsensusState(c.ctx) + return c.env.DumpConsensusState(ctx) } func (c *Local) ConsensusState(ctx context.Context) (*coretypes.ResultConsensusState, error) { - return c.env.GetConsensusState(c.ctx) + return c.env.GetConsensusState(ctx) } func (c *Local) ConsensusParams(ctx context.Context, height *int64) (*coretypes.ResultConsensusParams, error) { - return c.env.ConsensusParams(c.ctx, height) -} - -func (c *Local) Health(ctx context.Context) (*coretypes.ResultHealth, error) { - return c.env.Health(c.ctx) + return 
c.env.ConsensusParams(ctx, &coretypes.RequestConsensusParams{Height: (*coretypes.Int64)(height)}) } -func (c *Local) DialSeeds(ctx context.Context, seeds []string) (*coretypes.ResultDialSeeds, error) { - return c.env.UnsafeDialSeeds(c.ctx, seeds) +func (c *Local) Events(ctx context.Context, req *coretypes.RequestEvents) (*coretypes.ResultEvents, error) { + return c.env.Events(ctx, req) } -func (c *Local) DialPeers( - ctx context.Context, - peers []string, - persistent, - unconditional, - private bool, -) (*coretypes.ResultDialPeers, error) { - return c.env.UnsafeDialPeers(c.ctx, peers, persistent, unconditional, private) +func (c *Local) Health(ctx context.Context) (*coretypes.ResultHealth, error) { + return c.env.Health(ctx) } func (c *Local) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { - return c.env.BlockchainInfo(c.ctx, minHeight, maxHeight) + return c.env.BlockchainInfo(ctx, &coretypes.RequestBlockchainInfo{ + MinHeight: coretypes.Int64(minHeight), + MaxHeight: coretypes.Int64(maxHeight), + }) } func (c *Local) Genesis(ctx context.Context) (*coretypes.ResultGenesis, error) { - return c.env.Genesis(c.ctx) + return c.env.Genesis(ctx) } func (c *Local) GenesisChunked(ctx context.Context, id uint) (*coretypes.ResultGenesisChunk, error) { - return c.env.GenesisChunked(c.ctx, id) + return c.env.GenesisChunked(ctx, &coretypes.RequestGenesisChunked{Chunk: coretypes.Int64(id)}) } func (c *Local) Block(ctx context.Context, height *int64) (*coretypes.ResultBlock, error) { - return c.env.Block(c.ctx, height) + return c.env.Block(ctx, &coretypes.RequestBlockInfo{Height: (*coretypes.Int64)(height)}) } func (c *Local) BlockByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultBlock, error) { - return c.env.BlockByHash(c.ctx, hash) + return c.env.BlockByHash(ctx, &coretypes.RequestBlockByHash{Hash: hash}) } func (c *Local) BlockResults(ctx context.Context, height *int64) (*coretypes.ResultBlockResults, error) { - return c.env.BlockResults(c.ctx, height) + return c.env.BlockResults(ctx, &coretypes.RequestBlockInfo{Height: (*coretypes.Int64)(height)}) } func (c *Local) Header(ctx context.Context, height *int64) (*coretypes.ResultHeader, error) { - return c.env.Header(c.ctx, height) + return c.env.Header(ctx, &coretypes.RequestBlockInfo{Height: (*coretypes.Int64)(height)}) } func (c *Local) HeaderByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultHeader, error) { - return c.env.HeaderByHash(c.ctx, hash) + return c.env.HeaderByHash(ctx, &coretypes.RequestBlockByHash{Hash: hash}) } func (c *Local) Commit(ctx context.Context, height *int64) (*coretypes.ResultCommit, error) { - return c.env.Commit(c.ctx, height) + return c.env.Commit(ctx, &coretypes.RequestBlockInfo{Height: (*coretypes.Int64)(height)}) } func (c *Local) Validators(ctx context.Context, height *int64, page, perPage *int, requestQuorumInfo *bool) (*coretypes.ResultValidators, error) { - return c.env.Validators(c.ctx, height, page, perPage, requestQuorumInfo) + return c.env.Validators(ctx, &coretypes.RequestValidators{ + Height: (*coretypes.Int64)(height), + Page: coretypes.Int64Ptr(page), + PerPage: coretypes.Int64Ptr(perPage), + RequestQuorumInfo: requestQuorumInfo, + }) } func (c *Local) Tx(ctx context.Context, hash bytes.HexBytes, prove bool) (*coretypes.ResultTx, error) { - return c.env.Tx(c.ctx, hash, prove) + return c.env.Tx(ctx, &coretypes.RequestTx{Hash: hash, Prove: prove}) } -func (c *Local) TxSearch( - _ context.Context, - queryString string, 
- prove bool, - page, - perPage *int, - orderBy string, -) (*coretypes.ResultTxSearch, error) { - return c.env.TxSearch(c.ctx, queryString, prove, page, perPage, orderBy) +func (c *Local) TxSearch(ctx context.Context, queryString string, prove bool, page, perPage *int, orderBy string) (*coretypes.ResultTxSearch, error) { + return c.env.TxSearch(ctx, &coretypes.RequestTxSearch{ + Query: queryString, + Prove: prove, + Page: coretypes.Int64Ptr(page), + PerPage: coretypes.Int64Ptr(perPage), + OrderBy: orderBy, + }) } -func (c *Local) BlockSearch( - _ context.Context, - queryString string, - page, perPage *int, - orderBy string, -) (*coretypes.ResultBlockSearch, error) { - return c.env.BlockSearch(c.ctx, queryString, page, perPage, orderBy) +func (c *Local) BlockSearch(ctx context.Context, queryString string, page, perPage *int, orderBy string) (*coretypes.ResultBlockSearch, error) { + return c.env.BlockSearch(ctx, &coretypes.RequestBlockSearch{ + Query: queryString, + Page: coretypes.Int64Ptr(page), + PerPage: coretypes.Int64Ptr(perPage), + OrderBy: orderBy, + }) } func (c *Local) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*coretypes.ResultBroadcastEvidence, error) { - return c.env.BroadcastEvidence(c.ctx, ev) + return c.env.BroadcastEvidence(ctx, &coretypes.RequestBroadcastEvidence{Evidence: ev}) } -func (c *Local) Subscribe( - ctx context.Context, - subscriber, - queryString string, - outCapacity ...int) (out <-chan coretypes.ResultEvent, err error) { +func (c *Local) Subscribe(ctx context.Context, subscriber, queryString string, capacity ...int) (<-chan coretypes.ResultEvent, error) { q, err := query.New(queryString) if err != nil { return nil, fmt.Errorf("failed to parse query: %w", err) } - outCap := 1 - if len(outCapacity) > 0 { - outCap = outCapacity[0] + limit, quota := 1, 0 + if len(capacity) > 0 { + limit = capacity[0] + if len(capacity) > 1 { + quota = capacity[1] + } } - var sub types.Subscription - if outCap > 0 { - sub, err = c.EventBus.Subscribe(ctx, subscriber, q, outCap) - } else { - sub, err = c.EventBus.SubscribeUnbuffered(ctx, subscriber, q) + ctx, cancel := context.WithCancel(ctx) + go func() { c.Wait(); cancel() }() + + subArgs := pubsub.SubscribeArgs{ + ClientID: subscriber, + Query: q, + Quota: quota, + Limit: limit, } + sub, err := c.EventBus.SubscribeWithArgs(ctx, subArgs) if err != nil { return nil, fmt.Errorf("failed to subscribe: %w", err) } - outc := make(chan coretypes.ResultEvent, outCap) - go c.eventsRoutine(sub, subscriber, q, outc) + outc := make(chan coretypes.ResultEvent, 1) + go c.eventsRoutine(ctx, sub, subArgs, outc) return outc, nil } -func (c *Local) eventsRoutine( - sub types.Subscription, - subscriber string, - q pubsub.Query, - outc chan<- coretypes.ResultEvent) { +func (c *Local) eventsRoutine(ctx context.Context, sub eventbus.Subscription, subArgs pubsub.SubscribeArgs, outc chan<- coretypes.ResultEvent) { + qstr := subArgs.Query.String() for { - select { - case msg := <-sub.Out(): - result := coretypes.ResultEvent{ - SubscriptionID: msg.SubscriptionID(), - Query: q.String(), - Data: msg.Data(), - Events: msg.Events(), - } - - if cap(outc) == 0 { - outc <- result - } else { - select { - case outc <- result: - default: - c.Logger.Error("wanted to publish ResultEvent, but out channel is full", - "result", result, "query", result.Query) - } - } - case <-sub.Canceled(): - if sub.Err() == pubsub.ErrUnsubscribed { - return + msg, err := sub.Next(ctx) + if errors.Is(err, pubsub.ErrUnsubscribed) { + return // client unsubscribed + } else if 
err != nil { + c.Logger.Error("subscription was canceled, resubscribing", + "err", err, "query", subArgs.Query.String()) + sub = c.resubscribe(ctx, subArgs) + if sub == nil { + return // client terminated } - - c.Logger.Error("subscription was canceled, resubscribing...", "err", sub.Err(), "query", q.String()) - sub = c.resubscribe(subscriber, q) - if sub == nil { // client was stopped - return - } - case <-c.Quit(): + continue + } + select { + case outc <- coretypes.ResultEvent{ + SubscriptionID: msg.SubscriptionID(), + Query: qstr, + Data: msg.Data(), + Events: msg.Events(), + }: + case <-ctx.Done(): return } } } // Try to resubscribe with exponential backoff. -func (c *Local) resubscribe(subscriber string, q pubsub.Query) types.Subscription { +func (c *Local) resubscribe(ctx context.Context, subArgs pubsub.SubscribeArgs) eventbus.Subscription { + timer := time.NewTimer(0) + defer timer.Stop() + attempts := 0 for { if !c.IsRunning() { return nil } - sub, err := c.EventBus.Subscribe(context.Background(), subscriber, q) + sub, err := c.EventBus.SubscribeWithArgs(ctx, subArgs) if err == nil { return sub } attempts++ - time.Sleep((10 << uint(attempts)) * time.Millisecond) // 10ms -> 20ms -> 40ms + timer.Reset((10 << uint(attempts)) * time.Millisecond) // 10ms -> 20ms -> 40ms + select { + case <-timer.C: + continue + case <-ctx.Done(): + return nil + } } } diff --git a/rpc/client/main_test.go b/rpc/client/main_test.go index de0336a4cf..45a83afbbb 100644 --- a/rpc/client/main_test.go +++ b/rpc/client/main_test.go @@ -2,39 +2,36 @@ package client_test import ( "context" - "fmt" - "io/ioutil" - "os" "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" rpctest "github.com/tendermint/tendermint/rpc/test" ) -func NodeSuite(t *testing.T) (service.Service, *config.Config) { +func NodeSuite(ctx context.Context, t *testing.T, logger log.Logger) (service.Service, *config.Config) { t.Helper() - ctx, cancel := context.WithCancel(context.Background()) - - conf, err := rpctest.CreateConfig(t.Name()) - require.NoError(t, err) + ctx, cancel := context.WithCancel(ctx) - // start a tendermint node in the background to test against - dir, err := ioutil.TempDir("/tmp", fmt.Sprint("rpc-client-test-", t.Name())) + conf, err := rpctest.CreateConfig(t, t.Name()) require.NoError(t, err) - app := kvstore.NewPersistentKVStoreApplication(dir) + app := kvstore.NewApplication() + // start a tendermint node in the background to test against. 
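The reworked NodeSuite hands lifecycle control to the caller: it takes the test's context and logger and registers teardown via t.Cleanup. A sketch of a test built on it; log.NewTestingLogger is assumed to exist in this branch's libs/log.

```go
package client_test

import (
	"context"
	"testing"

	"github.com/tendermint/tendermint/libs/log"
)

// TestAgainstNode shows the new harness shape: the caller owns the context
// and supplies the logger, and cleanup is handled inside NodeSuite.
func TestAgainstNode(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	logger := log.NewTestingLogger(t) // assumed constructor
	node, conf := NodeSuite(ctx, t, logger)
	_ = node // a service.Service; stopped by the cleanup NodeSuite registers
	// ... dial conf.RPC.ListenAddress and exercise a client against it ...
	_ = conf
}
```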
node, closer, err := rpctest.StartTendermint(ctx, conf, app, rpctest.SuppressStdout) require.NoError(t, err) t.Cleanup(func() { - _ = closer(ctx) cancel() - app.Close() - _ = os.RemoveAll(dir) + assert.NoError(t, closer(ctx)) + assert.NoError(t, app.Close()) + node.Wait() }) return node, conf } diff --git a/rpc/client/mock/abci.go b/rpc/client/mock/abci.go index 700b08f5e5..a6aebdb14b 100644 --- a/rpc/client/mock/abci.go +++ b/rpc/client/mock/abci.go @@ -25,46 +25,65 @@ var ( ) func (a ABCIApp) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) { - return &coretypes.ResultABCIInfo{Response: a.App.Info(proxy.RequestInfo)}, nil + res, err := a.App.Info(ctx, &proxy.RequestInfo) + if err != nil { + return nil, err + } + + return &coretypes.ResultABCIInfo{Response: *res}, nil } func (a ABCIApp) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*coretypes.ResultABCIQuery, error) { return a.ABCIQueryWithOptions(ctx, path, data, client.DefaultABCIQueryOptions) } -func (a ABCIApp) ABCIQueryWithOptions( - ctx context.Context, - path string, - data bytes.HexBytes, - opts client.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) { - q := a.App.Query(abci.RequestQuery{ +func (a ABCIApp) ABCIQueryWithOptions(ctx context.Context, path string, data bytes.HexBytes, opts client.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) { + q, err := a.App.Query(ctx, &abci.RequestQuery{ Data: data, Path: path, Height: opts.Height, Prove: opts.Prove, }) - return &coretypes.ResultABCIQuery{Response: q}, nil + if err != nil { + return nil, err + } + + return &coretypes.ResultABCIQuery{Response: *q}, nil } // NOTE: Caller should call a.App.Commit() separately, // this function does not actually wait for a commit. // TODO: Make it wait for a commit and set res.Height appropriately. func (a ABCIApp) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) { - res := coretypes.ResultBroadcastTxCommit{} - res.CheckTx = a.App.CheckTx(abci.RequestCheckTx{Tx: tx}) + resp, err := a.App.CheckTx(ctx, &abci.RequestCheckTx{Tx: tx}) + if err != nil { + return nil, err + } + + res := &coretypes.ResultBroadcastTxCommit{CheckTx: *resp} if res.CheckTx.IsErr() { - return &res, nil + return res, nil + } + + fb, err := a.App.FinalizeBlock(ctx, &abci.RequestFinalizeBlock{Txs: [][]byte{tx}}) + if err != nil { + return nil, err } - res.DeliverTx = a.App.DeliverTx(abci.RequestDeliverTx{Tx: tx}) + + res.TxResult = *fb.TxResults[0] res.Height = -1 // TODO - return &res, nil + return res, nil } func (a ABCIApp) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { - c := a.App.CheckTx(abci.RequestCheckTx{Tx: tx}) + c, err := a.App.CheckTx(ctx, &abci.RequestCheckTx{Tx: tx}) + if err != nil { + return nil, err + } + // and this gets written in a background thread... if !c.IsErr() { - go func() { a.App.DeliverTx(abci.RequestDeliverTx{Tx: tx}) }() + go func() { _, _ = a.App.FinalizeBlock(ctx, &abci.RequestFinalizeBlock{Txs: [][]byte{tx}}) }() } return &coretypes.ResultBroadcastTx{ Code: c.Code, @@ -76,10 +95,14 @@ func (a ABCIApp) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*coretypes. } func (a ABCIApp) BroadcastTxSync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { - c := a.App.CheckTx(abci.RequestCheckTx{Tx: tx}) + c, err := a.App.CheckTx(ctx, &abci.RequestCheckTx{Tx: tx}) + if err != nil { + return nil, err + } + // and this gets written in a background thread... 
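BroadcastTxSync repeats the BroadcastTxAsync pattern above: with DeliverTx removed, a transaction is executed by handing a one-element batch to FinalizeBlock and reading the matching entry of TxResults. A condensed sketch of that call shape, assuming App satisfies abci.Application as this file's usage suggests.

```go
package main

import (
	"context"

	abci "github.com/tendermint/tendermint/abci/types"
)

// execOneTx executes a single transaction through FinalizeBlock and returns
// its result; TxResults align index-for-index with the submitted Txs.
func execOneTx(ctx context.Context, app abci.Application, tx []byte) (*abci.ExecTxResult, error) {
	fb, err := app.FinalizeBlock(ctx, &abci.RequestFinalizeBlock{Txs: [][]byte{tx}})
	if err != nil {
		return nil, err
	}
	return fb.TxResults[0], nil // one result per tx, in submission order
}

func main() {}
```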
if !c.IsErr() { - go func() { a.App.DeliverTx(abci.RequestDeliverTx{Tx: tx}) }() + go func() { _, _ = a.App.FinalizeBlock(ctx, &abci.RequestFinalizeBlock{Txs: [][]byte{tx}}) }() } return &coretypes.ResultBroadcastTx{ Code: c.Code, @@ -112,11 +135,7 @@ func (m ABCIMock) ABCIQuery(ctx context.Context, path string, data bytes.HexByte return m.ABCIQueryWithOptions(ctx, path, data, client.DefaultABCIQueryOptions) } -func (m ABCIMock) ABCIQueryWithOptions( - ctx context.Context, - path string, - data bytes.HexBytes, - opts client.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) { +func (m ABCIMock) ABCIQueryWithOptions(ctx context.Context, path string, data bytes.HexBytes, opts client.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) { res, err := m.Query.GetResponse(QueryArgs{path, data, opts.Height, opts.Prove}) if err != nil { return nil, err @@ -184,11 +203,7 @@ func (r *ABCIRecorder) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, return res, err } -func (r *ABCIRecorder) ABCIQuery( - ctx context.Context, - path string, - data bytes.HexBytes, -) (*coretypes.ResultABCIQuery, error) { +func (r *ABCIRecorder) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*coretypes.ResultABCIQuery, error) { return r.ABCIQueryWithOptions(ctx, path, data, client.DefaultABCIQueryOptions) } diff --git a/rpc/client/mock/abci_test.go b/rpc/client/mock/abci_test.go index 25fbbc05d2..e35ccf29ca 100644 --- a/rpc/client/mock/abci_test.go +++ b/rpc/client/mock/abci_test.go @@ -19,7 +19,8 @@ import ( ) func TestABCIMock(t *testing.T) { - assert, require := assert.New(t), require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() key, value := []byte("foo"), []byte("bar") height := int64(10) @@ -37,8 +38,8 @@ func TestABCIMock(t *testing.T) { BroadcastCommit: mock.Call{ Args: goodTx, Response: &coretypes.ResultBroadcastTxCommit{ - CheckTx: abci.ResponseCheckTx{Data: bytes.HexBytes("stand")}, - DeliverTx: abci.ResponseDeliverTx{Data: bytes.HexBytes("deliver")}, + CheckTx: abci.ResponseCheckTx{Data: bytes.HexBytes("stand")}, + TxResult: abci.ExecTxResult{Data: bytes.HexBytes("deliver")}, }, Error: errors.New("bad tx"), }, @@ -46,40 +47,41 @@ func TestABCIMock(t *testing.T) { } // now, let's try to make some calls - _, err := m.ABCIInfo(context.Background()) - require.NotNil(err) - assert.Equal("foobar", err.Error()) + _, err := m.ABCIInfo(ctx) + require.Error(t, err) + assert.Equal(t, "foobar", err.Error()) // query always returns the response - _query, err := m.ABCIQueryWithOptions(context.Background(), "/", nil, client.ABCIQueryOptions{Prove: false}) + _query, err := m.ABCIQueryWithOptions(ctx, "/", nil, client.ABCIQueryOptions{Prove: false}) query := _query.Response - require.Nil(err) - require.NotNil(query) - assert.EqualValues(key, query.Key) - assert.EqualValues(value, query.Value) - assert.Equal(height, query.Height) + require.NoError(t, err) + require.NotNil(t, query) + assert.EqualValues(t, key, query.Key) + assert.EqualValues(t, value, query.Value) + assert.Equal(t, height, query.Height) // non-commit calls always return errors - _, err = m.BroadcastTxSync(context.Background(), goodTx) - require.NotNil(err) - assert.Equal("must commit", err.Error()) - _, err = m.BroadcastTxAsync(context.Background(), goodTx) - require.NotNil(err) - assert.Equal("must commit", err.Error()) + _, err = m.BroadcastTxSync(ctx, goodTx) + require.Error(t, err) + assert.Equal(t, "must commit", err.Error()) + _, err = m.BroadcastTxAsync(ctx, goodTx) + require.Error(t, 
err) + assert.Equal(t, "must commit", err.Error()) // commit depends on the input - _, err = m.BroadcastTxCommit(context.Background(), badTx) - require.NotNil(err) - assert.Equal("bad tx", err.Error()) - bres, err := m.BroadcastTxCommit(context.Background(), goodTx) - require.Nil(err, "%+v", err) - assert.EqualValues(0, bres.CheckTx.Code) - assert.EqualValues("stand", bres.CheckTx.Data) - assert.EqualValues("deliver", bres.DeliverTx.Data) + _, err = m.BroadcastTxCommit(ctx, badTx) + require.Error(t, err) + assert.Equal(t, "bad tx", err.Error()) + bres, err := m.BroadcastTxCommit(ctx, goodTx) + require.NoError(t, err, "%+v", err) + assert.EqualValues(t, 0, bres.CheckTx.Code) + assert.EqualValues(t, "stand", bres.CheckTx.Data) + assert.EqualValues(t, "deliver", bres.TxResult.Data) } func TestABCIRecorder(t *testing.T) { - assert, require := assert.New(t), require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() // This mock returns errors on everything but Query m := mock.ABCIMock{ @@ -93,107 +95,110 @@ func TestABCIRecorder(t *testing.T) { } r := mock.NewABCIRecorder(m) - require.Equal(0, len(r.Calls)) + require.Equal(t, 0, len(r.Calls)) - _, err := r.ABCIInfo(context.Background()) - assert.Nil(err, "expected no err on info") + _, err := r.ABCIInfo(ctx) + assert.NoError(t, err, "expected no err on info") _, err = r.ABCIQueryWithOptions( - context.Background(), + ctx, "path", bytes.HexBytes("data"), client.ABCIQueryOptions{Prove: false}, ) - assert.NotNil(err, "expected error on query") - require.Equal(2, len(r.Calls)) + assert.Error(t, err, "expected error on query") + require.Equal(t, 2, len(r.Calls)) info := r.Calls[0] - assert.Equal("abci_info", info.Name) - assert.Nil(info.Error) - assert.Nil(info.Args) - require.NotNil(info.Response) + assert.Equal(t, "abci_info", info.Name) + assert.Nil(t, info.Error) + assert.Nil(t, info.Args) + require.NotNil(t, info.Response) ir, ok := info.Response.(*coretypes.ResultABCIInfo) - require.True(ok) - assert.Equal("data", ir.Response.Data) - assert.Equal("v0.9.9", ir.Response.Version) + require.True(t, ok) + assert.Equal(t, "data", ir.Response.Data) + assert.Equal(t, "v0.9.9", ir.Response.Version) query := r.Calls[1] - assert.Equal("abci_query", query.Name) - assert.Nil(query.Response) - require.NotNil(query.Error) - assert.Equal("query", query.Error.Error()) - require.NotNil(query.Args) + assert.Equal(t, "abci_query", query.Name) + assert.Nil(t, query.Response) + require.NotNil(t, query.Error) + assert.Equal(t, "query", query.Error.Error()) + require.NotNil(t, query.Args) qa, ok := query.Args.(mock.QueryArgs) - require.True(ok) - assert.Equal("path", qa.Path) - assert.EqualValues("data", qa.Data) - assert.False(qa.Prove) + require.True(t, ok) + assert.Equal(t, "path", qa.Path) + assert.EqualValues(t, "data", qa.Data) + assert.False(t, qa.Prove) // now add some broadcasts (should all err) txs := []types.Tx{{1}, {2}, {3}} - _, err = r.BroadcastTxCommit(context.Background(), txs[0]) - assert.NotNil(err, "expected err on broadcast") - _, err = r.BroadcastTxSync(context.Background(), txs[1]) - assert.NotNil(err, "expected err on broadcast") - _, err = r.BroadcastTxAsync(context.Background(), txs[2]) - assert.NotNil(err, "expected err on broadcast") + _, err = r.BroadcastTxCommit(ctx, txs[0]) + assert.Error(t, err, "expected err on broadcast") + _, err = r.BroadcastTxSync(ctx, txs[1]) + assert.Error(t, err, "expected err on broadcast") + _, err = r.BroadcastTxAsync(ctx, txs[2]) + assert.Error(t, err, "expected err on 
broadcast") - require.Equal(5, len(r.Calls)) + require.Equal(t, 5, len(r.Calls)) bc := r.Calls[2] - assert.Equal("broadcast_tx_commit", bc.Name) - assert.Nil(bc.Response) - require.NotNil(bc.Error) - assert.EqualValues(bc.Args, txs[0]) + assert.Equal(t, "broadcast_tx_commit", bc.Name) + assert.Nil(t, bc.Response) + require.NotNil(t, bc.Error) + assert.EqualValues(t, bc.Args, txs[0]) bs := r.Calls[3] - assert.Equal("broadcast_tx_sync", bs.Name) - assert.Nil(bs.Response) - require.NotNil(bs.Error) - assert.EqualValues(bs.Args, txs[1]) + assert.Equal(t, "broadcast_tx_sync", bs.Name) + assert.Nil(t, bs.Response) + require.NotNil(t, bs.Error) + assert.EqualValues(t, bs.Args, txs[1]) ba := r.Calls[4] - assert.Equal("broadcast_tx_async", ba.Name) - assert.Nil(ba.Response) - require.NotNil(ba.Error) - assert.EqualValues(ba.Args, txs[2]) + assert.Equal(t, "broadcast_tx_async", ba.Name) + assert.Nil(t, ba.Response) + require.NotNil(t, ba.Error) + assert.EqualValues(t, ba.Args, txs[2]) } func TestABCIApp(t *testing.T) { - assert, require := assert.New(t), require.New(t) app := kvstore.NewApplication() m := mock.ABCIApp{app} + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // get some info - info, err := m.ABCIInfo(context.Background()) - require.Nil(err) - assert.Equal(`{"size":0}`, info.Response.GetData()) + info, err := m.ABCIInfo(ctx) + require.NoError(t, err) + assert.Equal(t, `{"size":0}`, info.Response.GetData()) // add a key key, value := "foo", "bar" tx := fmt.Sprintf("%s=%s", key, value) - res, err := m.BroadcastTxCommit(context.Background(), types.Tx(tx)) - require.Nil(err) - assert.True(res.CheckTx.IsOK()) - require.NotNil(res.DeliverTx) - assert.True(res.DeliverTx.IsOK()) + res, err := m.BroadcastTxCommit(ctx, types.Tx(tx)) + require.NoError(t, err) + assert.True(t, res.CheckTx.IsOK()) + require.NotNil(t, res.TxResult) + assert.True(t, res.TxResult.IsOK()) // commit // TODO: This may not be necessary in the future if res.Height == -1 { - m.App.Commit() + _, err := m.App.Commit(ctx) + require.NoError(t, err) } // check the key _qres, err := m.ABCIQueryWithOptions( - context.Background(), + ctx, "/key", bytes.HexBytes(key), client.ABCIQueryOptions{Prove: true}, ) qres := _qres.Response - require.Nil(err) - assert.EqualValues(value, qres.Value) + require.NoError(t, err) + assert.EqualValues(t, value, qres.Value) // XXX Check proof } diff --git a/rpc/client/mock/client.go b/rpc/client/mock/client.go index 12770a5b60..56bb83a664 100644 --- a/rpc/client/mock/client.go +++ b/rpc/client/mock/client.go @@ -22,7 +22,6 @@ import ( "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/rpc/client" "github.com/tendermint/tendermint/rpc/coretypes" - rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" ) @@ -76,11 +75,11 @@ func (c Call) GetResponse(args interface{}) (interface{}, error) { } func (c Client) Status(ctx context.Context) (*coretypes.ResultStatus, error) { - return c.env.Status(&rpctypes.Context{}) + return c.env.Status(ctx) } func (c Client) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) { - return c.env.ABCIInfo(&rpctypes.Context{}) + return c.env.ABCIInfo(ctx) } func (c Client) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*coretypes.ResultABCIQuery, error) { @@ -92,84 +91,79 @@ func (c Client) ABCIQueryWithOptions( path string, data bytes.HexBytes, opts client.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) { - return 
c.env.ABCIQuery(&rpctypes.Context{}, path, data, opts.Height, opts.Prove) + return c.env.ABCIQuery(ctx, &coretypes.RequestABCIQuery{ + Path: path, Data: data, Height: coretypes.Int64(opts.Height), Prove: opts.Prove, + }) } func (c Client) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) { - return c.env.BroadcastTxCommit(&rpctypes.Context{}, tx) + return c.env.BroadcastTxCommit(ctx, &coretypes.RequestBroadcastTx{Tx: tx}) } func (c Client) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { - return c.env.BroadcastTxAsync(&rpctypes.Context{}, tx) + return c.env.BroadcastTxAsync(ctx, &coretypes.RequestBroadcastTx{Tx: tx}) } func (c Client) BroadcastTxSync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { - return c.env.BroadcastTxSync(&rpctypes.Context{}, tx) + return c.env.BroadcastTxSync(ctx, &coretypes.RequestBroadcastTx{Tx: tx}) } func (c Client) CheckTx(ctx context.Context, tx types.Tx) (*coretypes.ResultCheckTx, error) { - return c.env.CheckTx(&rpctypes.Context{}, tx) + return c.env.CheckTx(ctx, &coretypes.RequestCheckTx{Tx: tx}) } func (c Client) NetInfo(ctx context.Context) (*coretypes.ResultNetInfo, error) { - return c.env.NetInfo(&rpctypes.Context{}) + return c.env.NetInfo(ctx) } func (c Client) ConsensusState(ctx context.Context) (*coretypes.ResultConsensusState, error) { - return c.env.GetConsensusState(&rpctypes.Context{}) + return c.env.GetConsensusState(ctx) } func (c Client) DumpConsensusState(ctx context.Context) (*coretypes.ResultDumpConsensusState, error) { - return c.env.DumpConsensusState(&rpctypes.Context{}) + return c.env.DumpConsensusState(ctx) } func (c Client) ConsensusParams(ctx context.Context, height *int64) (*coretypes.ResultConsensusParams, error) { - return c.env.ConsensusParams(&rpctypes.Context{}, height) + return c.env.ConsensusParams(ctx, &coretypes.RequestConsensusParams{Height: (*coretypes.Int64)(height)}) } func (c Client) Health(ctx context.Context) (*coretypes.ResultHealth, error) { - return c.env.Health(&rpctypes.Context{}) -} - -func (c Client) DialSeeds(ctx context.Context, seeds []string) (*coretypes.ResultDialSeeds, error) { - return c.env.UnsafeDialSeeds(&rpctypes.Context{}, seeds) -} - -func (c Client) DialPeers( - ctx context.Context, - peers []string, - persistent, - unconditional, - private bool, -) (*coretypes.ResultDialPeers, error) { - return c.env.UnsafeDialPeers(&rpctypes.Context{}, peers, persistent, unconditional, private) + return c.env.Health(ctx) } func (c Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { - return c.env.BlockchainInfo(&rpctypes.Context{}, minHeight, maxHeight) + return c.env.BlockchainInfo(ctx, &coretypes.RequestBlockchainInfo{ + MinHeight: coretypes.Int64(minHeight), + MaxHeight: coretypes.Int64(maxHeight), + }) } func (c Client) Genesis(ctx context.Context) (*coretypes.ResultGenesis, error) { - return c.env.Genesis(&rpctypes.Context{}) + return c.env.Genesis(ctx) } func (c Client) Block(ctx context.Context, height *int64) (*coretypes.ResultBlock, error) { - return c.env.Block(&rpctypes.Context{}, height) + return c.env.Block(ctx, &coretypes.RequestBlockInfo{Height: (*coretypes.Int64)(height)}) } func (c Client) BlockByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultBlock, error) { - return c.env.BlockByHash(&rpctypes.Context{}, hash) + return c.env.BlockByHash(ctx, &coretypes.RequestBlockByHash{Hash: hash}) } func (c Client) 
Commit(ctx context.Context, height *int64) (*coretypes.ResultCommit, error) { - return c.env.Commit(&rpctypes.Context{}, height) + return c.env.Commit(ctx, &coretypes.RequestBlockInfo{Height: (*coretypes.Int64)(height)}) } -func (c Client) Validators(ctx context.Context, height *int64, page, perPage *int, - requestQuorumInfo *bool) (*coretypes.ResultValidators, error) { - return c.env.Validators(&rpctypes.Context{}, height, page, perPage, requestQuorumInfo) +func (c Client) Validators(ctx context.Context, height *int64, page, perPage *int, requestQuorumInfo *bool) (*coretypes.ResultValidators, error) { + return c.env.Validators(ctx, &coretypes.RequestValidators{ + Height: (*coretypes.Int64)(height), + Page: coretypes.Int64Ptr(page), + PerPage: coretypes.Int64Ptr(perPage), + RequestQuorumInfo: requestQuorumInfo, + }) } func (c Client) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*coretypes.ResultBroadcastEvidence, error) { - return c.env.BroadcastEvidence(&rpctypes.Context{}, ev) + return c.env.BroadcastEvidence(ctx, &coretypes.RequestBroadcastEvidence{Evidence: ev}) } diff --git a/rpc/client/mock/status_test.go b/rpc/client/mock/status_test.go index 98655280e3..fb70ca9d93 100644 --- a/rpc/client/mock/status_test.go +++ b/rpc/client/mock/status_test.go @@ -14,7 +14,8 @@ import ( ) func TestStatus(t *testing.T) { - assert, require := assert.New(t), require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() m := &mock.StatusMock{ Call: mock.Call{ @@ -38,37 +39,37 @@ func TestStatus(t *testing.T) { } r := mock.NewStatusRecorder(m) - require.Equal(0, len(r.Calls)) + require.Equal(t, 0, len(r.Calls)) // make sure response works proper - status, err := r.Status(context.Background()) - require.Nil(err, "%+v", err) - assert.EqualValues("block", status.SyncInfo.LatestBlockHash) - assert.EqualValues(10, status.SyncInfo.LatestBlockHeight) - assert.EqualValues(20, status.SyncInfo.MaxPeerBlockHeight) - assert.EqualValues(time.Second, status.SyncInfo.TotalSyncedTime) - assert.EqualValues(time.Minute, status.SyncInfo.RemainingTime) + status, err := r.Status(ctx) + require.NoError(t, err) + assert.EqualValues(t, "block", status.SyncInfo.LatestBlockHash) + assert.EqualValues(t, 10, status.SyncInfo.LatestBlockHeight) + assert.EqualValues(t, 20, status.SyncInfo.MaxPeerBlockHeight) + assert.EqualValues(t, time.Second, status.SyncInfo.TotalSyncedTime) + assert.EqualValues(t, time.Minute, status.SyncInfo.RemainingTime) // make sure recorder works properly - require.Equal(1, len(r.Calls)) + require.Equal(t, 1, len(r.Calls)) rs := r.Calls[0] - assert.Equal("status", rs.Name) - assert.Nil(rs.Args) - assert.Nil(rs.Error) - require.NotNil(rs.Response) + assert.Equal(t, "status", rs.Name) + assert.Nil(t, rs.Args) + assert.Nil(t, rs.Error) + require.NotNil(t, rs.Response) st, ok := rs.Response.(*coretypes.ResultStatus) - require.True(ok) - assert.EqualValues("block", st.SyncInfo.LatestBlockHash) - assert.EqualValues(10, st.SyncInfo.LatestBlockHeight) - assert.EqualValues(20, st.SyncInfo.MaxPeerBlockHeight) - assert.EqualValues(time.Second, status.SyncInfo.TotalSyncedTime) - assert.EqualValues(time.Minute, status.SyncInfo.RemainingTime) + require.True(t, ok) + assert.EqualValues(t, "block", st.SyncInfo.LatestBlockHash) + assert.EqualValues(t, 10, st.SyncInfo.LatestBlockHeight) + assert.EqualValues(t, 20, st.SyncInfo.MaxPeerBlockHeight) + assert.EqualValues(t, time.Second, status.SyncInfo.TotalSyncedTime) + assert.EqualValues(t, time.Minute, status.SyncInfo.RemainingTime) - 
assert.EqualValues(10, st.SyncInfo.TotalSnapshots) - assert.EqualValues(time.Duration(10), st.SyncInfo.ChunkProcessAvgTime) - assert.EqualValues(10, st.SyncInfo.SnapshotHeight) - assert.EqualValues(9, status.SyncInfo.SnapshotChunksCount) - assert.EqualValues(10, status.SyncInfo.SnapshotChunksTotal) - assert.EqualValues(9, status.SyncInfo.BackFilledBlocks) - assert.EqualValues(10, status.SyncInfo.BackFillBlocksTotal) + assert.EqualValues(t, 10, st.SyncInfo.TotalSnapshots) + assert.EqualValues(t, time.Duration(10), st.SyncInfo.ChunkProcessAvgTime) + assert.EqualValues(t, 10, st.SyncInfo.SnapshotHeight) + assert.EqualValues(t, 9, status.SyncInfo.SnapshotChunksCount) + assert.EqualValues(t, 10, status.SyncInfo.SnapshotChunksTotal) + assert.EqualValues(t, 9, status.SyncInfo.BackFilledBlocks) + assert.EqualValues(t, 10, status.SyncInfo.BackFillBlocksTotal) } diff --git a/rpc/client/mocks/abci_client.go b/rpc/client/mocks/abci_client.go new file mode 100644 index 0000000000..07683bc261 --- /dev/null +++ b/rpc/client/mocks/abci_client.go @@ -0,0 +1,171 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + bytes "github.com/tendermint/tendermint/libs/bytes" + client "github.com/tendermint/tendermint/rpc/client" + + context "context" + + coretypes "github.com/tendermint/tendermint/rpc/coretypes" + + mock "github.com/stretchr/testify/mock" + + testing "testing" + + types "github.com/tendermint/tendermint/types" +) + +// ABCIClient is an autogenerated mock type for the ABCIClient type +type ABCIClient struct { + mock.Mock +} + +// ABCIInfo provides a mock function with given fields: _a0 +func (_m *ABCIClient) ABCIInfo(_a0 context.Context) (*coretypes.ResultABCIInfo, error) { + ret := _m.Called(_a0) + + var r0 *coretypes.ResultABCIInfo + if rf, ok := ret.Get(0).(func(context.Context) *coretypes.ResultABCIInfo); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultABCIInfo) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ABCIQuery provides a mock function with given fields: ctx, path, data +func (_m *ABCIClient) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*coretypes.ResultABCIQuery, error) { + ret := _m.Called(ctx, path, data) + + var r0 *coretypes.ResultABCIQuery + if rf, ok := ret.Get(0).(func(context.Context, string, bytes.HexBytes) *coretypes.ResultABCIQuery); ok { + r0 = rf(ctx, path, data) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultABCIQuery) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, bytes.HexBytes) error); ok { + r1 = rf(ctx, path, data) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ABCIQueryWithOptions provides a mock function with given fields: ctx, path, data, opts +func (_m *ABCIClient) ABCIQueryWithOptions(ctx context.Context, path string, data bytes.HexBytes, opts client.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) { + ret := _m.Called(ctx, path, data, opts) + + var r0 *coretypes.ResultABCIQuery + if rf, ok := ret.Get(0).(func(context.Context, string, bytes.HexBytes, client.ABCIQueryOptions) *coretypes.ResultABCIQuery); ok { + r0 = rf(ctx, path, data, opts) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultABCIQuery) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, bytes.HexBytes, client.ABCIQueryOptions) error); ok { + r1 = rf(ctx, path, data, 
opts) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BroadcastTxAsync provides a mock function with given fields: _a0, _a1 +func (_m *ABCIClient) BroadcastTxAsync(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultBroadcastTx, error) { + ret := _m.Called(_a0, _a1) + + var r0 *coretypes.ResultBroadcastTx + if rf, ok := ret.Get(0).(func(context.Context, types.Tx) *coretypes.ResultBroadcastTx); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultBroadcastTx) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.Tx) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BroadcastTxCommit provides a mock function with given fields: _a0, _a1 +func (_m *ABCIClient) BroadcastTxCommit(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultBroadcastTxCommit, error) { + ret := _m.Called(_a0, _a1) + + var r0 *coretypes.ResultBroadcastTxCommit + if rf, ok := ret.Get(0).(func(context.Context, types.Tx) *coretypes.ResultBroadcastTxCommit); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultBroadcastTxCommit) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.Tx) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BroadcastTxSync provides a mock function with given fields: _a0, _a1 +func (_m *ABCIClient) BroadcastTxSync(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultBroadcastTx, error) { + ret := _m.Called(_a0, _a1) + + var r0 *coretypes.ResultBroadcastTx + if rf, ok := ret.Get(0).(func(context.Context, types.Tx) *coretypes.ResultBroadcastTx); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultBroadcastTx) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.Tx) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewABCIClient creates a new instance of ABCIClient. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. 
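The generated constructor binds the mock to testing.TB and asserts expectations automatically at cleanup, so tests no longer call AssertExpectations by hand. A typical usage sketch with testify's On/Return; the returned values are placeholders.

```go
package mocks_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/rpc/client/mocks"
	"github.com/tendermint/tendermint/rpc/coretypes"
)

func TestWithGeneratedMock(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	m := mocks.NewABCIClient(t) // expectations auto-asserted at test cleanup
	m.On("ABCIInfo", mock.Anything).
		Return(&coretypes.ResultABCIInfo{}, nil) // placeholder response

	res, err := m.ABCIInfo(ctx)
	require.NoError(t, err)
	require.NotNil(t, res)
}
```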
+func NewABCIClient(t testing.TB) *ABCIClient { + mock := &ABCIClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/rpc/client/mocks/client.go b/rpc/client/mocks/client.go index 59333b8648..e16a9a8e82 100644 --- a/rpc/client/mocks/client.go +++ b/rpc/client/mocks/client.go @@ -12,6 +12,8 @@ import ( mock "github.com/stretchr/testify/mock" + testing "testing" + types "github.com/tendermint/tendermint/types" ) @@ -411,6 +413,29 @@ func (_m *Client) DumpConsensusState(_a0 context.Context) (*coretypes.ResultDump return r0, r1 } +// Events provides a mock function with given fields: ctx, req +func (_m *Client) Events(ctx context.Context, req *coretypes.RequestEvents) (*coretypes.ResultEvents, error) { + ret := _m.Called(ctx, req) + + var r0 *coretypes.ResultEvents + if rf, ok := ret.Get(0).(func(context.Context, *coretypes.RequestEvents) *coretypes.ResultEvents); ok { + r0 = rf(ctx, req) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultEvents) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *coretypes.RequestEvents) error); ok { + r1 = rf(ctx, req) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // Genesis provides a mock function with given fields: _a0 func (_m *Client) Genesis(_a0 context.Context) (*coretypes.ResultGenesis, error) { ret := _m.Called(_a0) @@ -526,20 +551,6 @@ func (_m *Client) Health(_a0 context.Context) (*coretypes.ResultHealth, error) { return r0, r1 } -// IsRunning provides a mock function with given fields: -func (_m *Client) IsRunning() bool { - ret := _m.Called() - - var r0 bool - if rf, ok := ret.Get(0).(func() bool); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(bool) - } - - return r0 -} - // NetInfo provides a mock function with given fields: _a0 func (_m *Client) NetInfo(_a0 context.Context) (*coretypes.ResultNetInfo, error) { ret := _m.Called(_a0) @@ -600,13 +611,13 @@ func (_m *Client) RemoveTx(_a0 context.Context, _a1 types.TxKey) error { return r0 } -// Start provides a mock function with given fields: -func (_m *Client) Start() error { - ret := _m.Called() +// Start provides a mock function with given fields: _a0 +func (_m *Client) Start(_a0 context.Context) error { + ret := _m.Called(_a0) var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) } else { r0 = ret.Error(0) } @@ -637,20 +648,6 @@ func (_m *Client) Status(_a0 context.Context) (*coretypes.ResultStatus, error) { return r0, r1 } -// Stop provides a mock function with given fields: -func (_m *Client) Stop() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - // Subscribe provides a mock function with given fields: ctx, subscriber, query, outCapacity func (_m *Client) Subscribe(ctx context.Context, subscriber string, query string, outCapacity ...int) (<-chan coretypes.ResultEvent, error) { _va := make([]interface{}, len(outCapacity)) @@ -727,13 +724,13 @@ func (_m *Client) TxSearch(ctx context.Context, query string, prove bool, page * return r0, r1 } -// UnconfirmedTxs provides a mock function with given fields: ctx, limit -func (_m *Client) UnconfirmedTxs(ctx context.Context, limit *int) (*coretypes.ResultUnconfirmedTxs, error) { - ret := _m.Called(ctx, limit) +// UnconfirmedTxs provides a mock function with given fields: ctx, page, perPage +func (_m *Client) UnconfirmedTxs(ctx 
context.Context, page *int, perPage *int) (*coretypes.ResultUnconfirmedTxs, error) { + ret := _m.Called(ctx, page, perPage) var r0 *coretypes.ResultUnconfirmedTxs - if rf, ok := ret.Get(0).(func(context.Context, *int) *coretypes.ResultUnconfirmedTxs); ok { - r0 = rf(ctx, limit) + if rf, ok := ret.Get(0).(func(context.Context, *int, *int) *coretypes.ResultUnconfirmedTxs); ok { + r0 = rf(ctx, page, perPage) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*coretypes.ResultUnconfirmedTxs) @@ -741,8 +738,8 @@ func (_m *Client) UnconfirmedTxs(ctx context.Context, limit *int) (*coretypes.Re } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *int) error); ok { - r1 = rf(ctx, limit) + if rf, ok := ret.Get(1).(func(context.Context, *int, *int) error); ok { + r1 = rf(ctx, page, perPage) } else { r1 = ret.Error(1) } @@ -800,3 +797,13 @@ func (_m *Client) Validators(ctx context.Context, height *int64, page *int, perP return r0, r1 } + +// NewClient creates a new instance of Client. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. +func NewClient(t testing.TB) *Client { + mock := &Client{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/rpc/client/mocks/events_client.go b/rpc/client/mocks/events_client.go new file mode 100644 index 0000000000..eba096284c --- /dev/null +++ b/rpc/client/mocks/events_client.go @@ -0,0 +1,50 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + coretypes "github.com/tendermint/tendermint/rpc/coretypes" + + testing "testing" +) + +// EventsClient is an autogenerated mock type for the EventsClient type +type EventsClient struct { + mock.Mock +} + +// Events provides a mock function with given fields: ctx, req +func (_m *EventsClient) Events(ctx context.Context, req *coretypes.RequestEvents) (*coretypes.ResultEvents, error) { + ret := _m.Called(ctx, req) + + var r0 *coretypes.ResultEvents + if rf, ok := ret.Get(0).(func(context.Context, *coretypes.RequestEvents) *coretypes.ResultEvents); ok { + r0 = rf(ctx, req) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultEvents) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *coretypes.RequestEvents) error); ok { + r1 = rf(ctx, req) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewEventsClient creates a new instance of EventsClient. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. +func NewEventsClient(t testing.TB) *EventsClient { + mock := &EventsClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/rpc/client/mocks/evidence_client.go b/rpc/client/mocks/evidence_client.go new file mode 100644 index 0000000000..7824a2ae4e --- /dev/null +++ b/rpc/client/mocks/evidence_client.go @@ -0,0 +1,52 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + coretypes "github.com/tendermint/tendermint/rpc/coretypes" + + testing "testing" + + types "github.com/tendermint/tendermint/types" +) + +// EvidenceClient is an autogenerated mock type for the EvidenceClient type +type EvidenceClient struct { + mock.Mock +} + +// BroadcastEvidence provides a mock function with given fields: _a0, _a1 +func (_m *EvidenceClient) BroadcastEvidence(_a0 context.Context, _a1 types.Evidence) (*coretypes.ResultBroadcastEvidence, error) { + ret := _m.Called(_a0, _a1) + + var r0 *coretypes.ResultBroadcastEvidence + if rf, ok := ret.Get(0).(func(context.Context, types.Evidence) *coretypes.ResultBroadcastEvidence); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultBroadcastEvidence) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.Evidence) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewEvidenceClient creates a new instance of EvidenceClient. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. +func NewEvidenceClient(t testing.TB) *EvidenceClient { + mock := &EvidenceClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/rpc/client/mocks/history_client.go b/rpc/client/mocks/history_client.go new file mode 100644 index 0000000000..ecd0190504 --- /dev/null +++ b/rpc/client/mocks/history_client.go @@ -0,0 +1,96 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + coretypes "github.com/tendermint/tendermint/rpc/coretypes" + + testing "testing" +) + +// HistoryClient is an autogenerated mock type for the HistoryClient type +type HistoryClient struct { + mock.Mock +} + +// BlockchainInfo provides a mock function with given fields: ctx, minHeight, maxHeight +func (_m *HistoryClient) BlockchainInfo(ctx context.Context, minHeight int64, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { + ret := _m.Called(ctx, minHeight, maxHeight) + + var r0 *coretypes.ResultBlockchainInfo + if rf, ok := ret.Get(0).(func(context.Context, int64, int64) *coretypes.ResultBlockchainInfo); ok { + r0 = rf(ctx, minHeight, maxHeight) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultBlockchainInfo) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, int64, int64) error); ok { + r1 = rf(ctx, minHeight, maxHeight) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Genesis provides a mock function with given fields: _a0 +func (_m *HistoryClient) Genesis(_a0 context.Context) (*coretypes.ResultGenesis, error) { + ret := _m.Called(_a0) + + var r0 *coretypes.ResultGenesis + if rf, ok := ret.Get(0).(func(context.Context) *coretypes.ResultGenesis); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultGenesis) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// GenesisChunked provides a mock function with given fields: _a0, _a1 +func (_m *HistoryClient) GenesisChunked(_a0 context.Context, _a1 uint) (*coretypes.ResultGenesisChunk, error) { + ret := _m.Called(_a0, _a1) + + var r0 *coretypes.ResultGenesisChunk + if rf, ok := ret.Get(0).(func(context.Context, uint) 
*coretypes.ResultGenesisChunk); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultGenesisChunk) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, uint) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewHistoryClient creates a new instance of HistoryClient. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. +func NewHistoryClient(t testing.TB) *HistoryClient { + mock := &HistoryClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/rpc/client/mocks/mempool_client.go b/rpc/client/mocks/mempool_client.go new file mode 100644 index 0000000000..0dfea703fe --- /dev/null +++ b/rpc/client/mocks/mempool_client.go @@ -0,0 +1,112 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + coretypes "github.com/tendermint/tendermint/rpc/coretypes" + + testing "testing" + + types "github.com/tendermint/tendermint/types" +) + +// MempoolClient is an autogenerated mock type for the MempoolClient type +type MempoolClient struct { + mock.Mock +} + +// CheckTx provides a mock function with given fields: _a0, _a1 +func (_m *MempoolClient) CheckTx(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultCheckTx, error) { + ret := _m.Called(_a0, _a1) + + var r0 *coretypes.ResultCheckTx + if rf, ok := ret.Get(0).(func(context.Context, types.Tx) *coretypes.ResultCheckTx); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultCheckTx) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, types.Tx) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NumUnconfirmedTxs provides a mock function with given fields: _a0 +func (_m *MempoolClient) NumUnconfirmedTxs(_a0 context.Context) (*coretypes.ResultUnconfirmedTxs, error) { + ret := _m.Called(_a0) + + var r0 *coretypes.ResultUnconfirmedTxs + if rf, ok := ret.Get(0).(func(context.Context) *coretypes.ResultUnconfirmedTxs); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultUnconfirmedTxs) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// RemoveTx provides a mock function with given fields: _a0, _a1 +func (_m *MempoolClient) RemoveTx(_a0 context.Context, _a1 types.TxKey) error { + ret := _m.Called(_a0, _a1) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, types.TxKey) error); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UnconfirmedTxs provides a mock function with given fields: ctx, page, perPage +func (_m *MempoolClient) UnconfirmedTxs(ctx context.Context, page *int, perPage *int) (*coretypes.ResultUnconfirmedTxs, error) { + ret := _m.Called(ctx, page, perPage) + + var r0 *coretypes.ResultUnconfirmedTxs + if rf, ok := ret.Get(0).(func(context.Context, *int, *int) *coretypes.ResultUnconfirmedTxs); ok { + r0 = rf(ctx, page, perPage) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultUnconfirmedTxs) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *int, *int) error); ok { + r1 = rf(ctx, page, perPage) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewMempoolClient creates a new instance of 
MempoolClient. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. +func NewMempoolClient(t testing.TB) *MempoolClient { + mock := &MempoolClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/rpc/client/mocks/network_client.go b/rpc/client/mocks/network_client.go new file mode 100644 index 0000000000..73bb11d612 --- /dev/null +++ b/rpc/client/mocks/network_client.go @@ -0,0 +1,142 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + coretypes "github.com/tendermint/tendermint/rpc/coretypes" + + testing "testing" +) + +// NetworkClient is an autogenerated mock type for the NetworkClient type +type NetworkClient struct { + mock.Mock +} + +// ConsensusParams provides a mock function with given fields: ctx, height +func (_m *NetworkClient) ConsensusParams(ctx context.Context, height *int64) (*coretypes.ResultConsensusParams, error) { + ret := _m.Called(ctx, height) + + var r0 *coretypes.ResultConsensusParams + if rf, ok := ret.Get(0).(func(context.Context, *int64) *coretypes.ResultConsensusParams); ok { + r0 = rf(ctx, height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultConsensusParams) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *int64) error); ok { + r1 = rf(ctx, height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// ConsensusState provides a mock function with given fields: _a0 +func (_m *NetworkClient) ConsensusState(_a0 context.Context) (*coretypes.ResultConsensusState, error) { + ret := _m.Called(_a0) + + var r0 *coretypes.ResultConsensusState + if rf, ok := ret.Get(0).(func(context.Context) *coretypes.ResultConsensusState); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultConsensusState) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DumpConsensusState provides a mock function with given fields: _a0 +func (_m *NetworkClient) DumpConsensusState(_a0 context.Context) (*coretypes.ResultDumpConsensusState, error) { + ret := _m.Called(_a0) + + var r0 *coretypes.ResultDumpConsensusState + if rf, ok := ret.Get(0).(func(context.Context) *coretypes.ResultDumpConsensusState); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultDumpConsensusState) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Health provides a mock function with given fields: _a0 +func (_m *NetworkClient) Health(_a0 context.Context) (*coretypes.ResultHealth, error) { + ret := _m.Called(_a0) + + var r0 *coretypes.ResultHealth + if rf, ok := ret.Get(0).(func(context.Context) *coretypes.ResultHealth); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultHealth) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NetInfo provides a mock function with given fields: _a0 +func (_m *NetworkClient) NetInfo(_a0 context.Context) (*coretypes.ResultNetInfo, error) { + ret := _m.Called(_a0) + + var r0 *coretypes.ResultNetInfo + if rf, ok := ret.Get(0).(func(context.Context) *coretypes.ResultNetInfo); ok { + 
r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultNetInfo) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewNetworkClient creates a new instance of NetworkClient. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. +func NewNetworkClient(t testing.TB) *NetworkClient { + mock := &NetworkClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/rpc/client/mocks/remote_client.go b/rpc/client/mocks/remote_client.go index c9adcc8aa9..b4271dceff 100644 --- a/rpc/client/mocks/remote_client.go +++ b/rpc/client/mocks/remote_client.go @@ -12,6 +12,8 @@ import ( mock "github.com/stretchr/testify/mock" + testing "testing" + types "github.com/tendermint/tendermint/types" ) @@ -411,6 +413,29 @@ func (_m *RemoteClient) DumpConsensusState(_a0 context.Context) (*coretypes.Resu return r0, r1 } +// Events provides a mock function with given fields: ctx, req +func (_m *RemoteClient) Events(ctx context.Context, req *coretypes.RequestEvents) (*coretypes.ResultEvents, error) { + ret := _m.Called(ctx, req) + + var r0 *coretypes.ResultEvents + if rf, ok := ret.Get(0).(func(context.Context, *coretypes.RequestEvents) *coretypes.ResultEvents); ok { + r0 = rf(ctx, req) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultEvents) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *coretypes.RequestEvents) error); ok { + r1 = rf(ctx, req) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // Genesis provides a mock function with given fields: _a0 func (_m *RemoteClient) Genesis(_a0 context.Context) (*coretypes.ResultGenesis, error) { ret := _m.Called(_a0) @@ -526,20 +551,6 @@ func (_m *RemoteClient) Health(_a0 context.Context) (*coretypes.ResultHealth, er return r0, r1 } -// IsRunning provides a mock function with given fields: -func (_m *RemoteClient) IsRunning() bool { - ret := _m.Called() - - var r0 bool - if rf, ok := ret.Get(0).(func() bool); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(bool) - } - - return r0 -} - // NetInfo provides a mock function with given fields: _a0 func (_m *RemoteClient) NetInfo(_a0 context.Context) (*coretypes.ResultNetInfo, error) { ret := _m.Called(_a0) @@ -614,13 +625,13 @@ func (_m *RemoteClient) RemoveTx(_a0 context.Context, _a1 types.TxKey) error { return r0 } -// Start provides a mock function with given fields: -func (_m *RemoteClient) Start() error { - ret := _m.Called() +// Start provides a mock function with given fields: _a0 +func (_m *RemoteClient) Start(_a0 context.Context) error { + ret := _m.Called(_a0) var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(_a0) } else { r0 = ret.Error(0) } @@ -651,20 +662,6 @@ func (_m *RemoteClient) Status(_a0 context.Context) (*coretypes.ResultStatus, er return r0, r1 } -// Stop provides a mock function with given fields: -func (_m *RemoteClient) Stop() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - // Subscribe provides a mock function with given fields: ctx, subscriber, query, outCapacity func (_m *RemoteClient) Subscribe(ctx context.Context, subscriber string, query string, outCapacity ...int) (<-chan 
coretypes.ResultEvent, error) { _va := make([]interface{}, len(outCapacity)) @@ -741,13 +738,13 @@ func (_m *RemoteClient) TxSearch(ctx context.Context, query string, prove bool, return r0, r1 } -// UnconfirmedTxs provides a mock function with given fields: ctx, limit -func (_m *RemoteClient) UnconfirmedTxs(ctx context.Context, limit *int) (*coretypes.ResultUnconfirmedTxs, error) { - ret := _m.Called(ctx, limit) +// UnconfirmedTxs provides a mock function with given fields: ctx, page, perPage +func (_m *RemoteClient) UnconfirmedTxs(ctx context.Context, page *int, perPage *int) (*coretypes.ResultUnconfirmedTxs, error) { + ret := _m.Called(ctx, page, perPage) var r0 *coretypes.ResultUnconfirmedTxs - if rf, ok := ret.Get(0).(func(context.Context, *int) *coretypes.ResultUnconfirmedTxs); ok { - r0 = rf(ctx, limit) + if rf, ok := ret.Get(0).(func(context.Context, *int, *int) *coretypes.ResultUnconfirmedTxs); ok { + r0 = rf(ctx, page, perPage) } else { if ret.Get(0) != nil { r0 = ret.Get(0).(*coretypes.ResultUnconfirmedTxs) @@ -755,8 +752,8 @@ func (_m *RemoteClient) UnconfirmedTxs(ctx context.Context, limit *int) (*corety } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *int) error); ok { - r1 = rf(ctx, limit) + if rf, ok := ret.Get(1).(func(context.Context, *int, *int) error); ok { + r1 = rf(ctx, page, perPage) } else { r1 = ret.Error(1) } @@ -814,3 +811,13 @@ func (_m *RemoteClient) Validators(ctx context.Context, height *int64, page *int return r0, r1 } + +// NewRemoteClient creates a new instance of RemoteClient. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. +func NewRemoteClient(t testing.TB) *RemoteClient { + mock := &RemoteClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/rpc/client/mocks/sign_client.go b/rpc/client/mocks/sign_client.go new file mode 100644 index 0000000000..6c1e674476 --- /dev/null +++ b/rpc/client/mocks/sign_client.go @@ -0,0 +1,260 @@ +// Code generated by mockery. DO NOT EDIT. 
+ +package mocks + +import ( + bytes "github.com/tendermint/tendermint/libs/bytes" + + context "context" + + coretypes "github.com/tendermint/tendermint/rpc/coretypes" + + mock "github.com/stretchr/testify/mock" + + testing "testing" +) + +// SignClient is an autogenerated mock type for the SignClient type +type SignClient struct { + mock.Mock +} + +// Block provides a mock function with given fields: ctx, height +func (_m *SignClient) Block(ctx context.Context, height *int64) (*coretypes.ResultBlock, error) { + ret := _m.Called(ctx, height) + + var r0 *coretypes.ResultBlock + if rf, ok := ret.Get(0).(func(context.Context, *int64) *coretypes.ResultBlock); ok { + r0 = rf(ctx, height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultBlock) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *int64) error); ok { + r1 = rf(ctx, height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BlockByHash provides a mock function with given fields: ctx, hash +func (_m *SignClient) BlockByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultBlock, error) { + ret := _m.Called(ctx, hash) + + var r0 *coretypes.ResultBlock + if rf, ok := ret.Get(0).(func(context.Context, bytes.HexBytes) *coretypes.ResultBlock); ok { + r0 = rf(ctx, hash) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultBlock) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, bytes.HexBytes) error); ok { + r1 = rf(ctx, hash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BlockResults provides a mock function with given fields: ctx, height +func (_m *SignClient) BlockResults(ctx context.Context, height *int64) (*coretypes.ResultBlockResults, error) { + ret := _m.Called(ctx, height) + + var r0 *coretypes.ResultBlockResults + if rf, ok := ret.Get(0).(func(context.Context, *int64) *coretypes.ResultBlockResults); ok { + r0 = rf(ctx, height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultBlockResults) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *int64) error); ok { + r1 = rf(ctx, height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// BlockSearch provides a mock function with given fields: ctx, query, page, perPage, orderBy +func (_m *SignClient) BlockSearch(ctx context.Context, query string, page *int, perPage *int, orderBy string) (*coretypes.ResultBlockSearch, error) { + ret := _m.Called(ctx, query, page, perPage, orderBy) + + var r0 *coretypes.ResultBlockSearch + if rf, ok := ret.Get(0).(func(context.Context, string, *int, *int, string) *coretypes.ResultBlockSearch); ok { + r0 = rf(ctx, query, page, perPage, orderBy) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultBlockSearch) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, *int, *int, string) error); ok { + r1 = rf(ctx, query, page, perPage, orderBy) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Commit provides a mock function with given fields: ctx, height +func (_m *SignClient) Commit(ctx context.Context, height *int64) (*coretypes.ResultCommit, error) { + ret := _m.Called(ctx, height) + + var r0 *coretypes.ResultCommit + if rf, ok := ret.Get(0).(func(context.Context, *int64) *coretypes.ResultCommit); ok { + r0 = rf(ctx, height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultCommit) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *int64) error); ok { + r1 = rf(ctx, height) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Header provides a mock function with given fields: ctx, height +func (_m *SignClient) Header(ctx context.Context, height *int64) (*coretypes.ResultHeader, error) { + ret := _m.Called(ctx, height) + + var r0 *coretypes.ResultHeader + if rf, ok := ret.Get(0).(func(context.Context, *int64) *coretypes.ResultHeader); ok { + r0 = rf(ctx, height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultHeader) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *int64) error); ok { + r1 = rf(ctx, height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// HeaderByHash provides a mock function with given fields: ctx, hash +func (_m *SignClient) HeaderByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultHeader, error) { + ret := _m.Called(ctx, hash) + + var r0 *coretypes.ResultHeader + if rf, ok := ret.Get(0).(func(context.Context, bytes.HexBytes) *coretypes.ResultHeader); ok { + r0 = rf(ctx, hash) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultHeader) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, bytes.HexBytes) error); ok { + r1 = rf(ctx, hash) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Tx provides a mock function with given fields: ctx, hash, prove +func (_m *SignClient) Tx(ctx context.Context, hash bytes.HexBytes, prove bool) (*coretypes.ResultTx, error) { + ret := _m.Called(ctx, hash, prove) + + var r0 *coretypes.ResultTx + if rf, ok := ret.Get(0).(func(context.Context, bytes.HexBytes, bool) *coretypes.ResultTx); ok { + r0 = rf(ctx, hash, prove) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultTx) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, bytes.HexBytes, bool) error); ok { + r1 = rf(ctx, hash, prove) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TxSearch provides a mock function with given fields: ctx, query, prove, page, perPage, orderBy +func (_m *SignClient) TxSearch(ctx context.Context, query string, prove bool, page *int, perPage *int, orderBy string) (*coretypes.ResultTxSearch, error) { + ret := _m.Called(ctx, query, prove, page, perPage, orderBy) + + var r0 *coretypes.ResultTxSearch + if rf, ok := ret.Get(0).(func(context.Context, string, bool, *int, *int, string) *coretypes.ResultTxSearch); ok { + r0 = rf(ctx, query, prove, page, perPage, orderBy) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultTxSearch) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, bool, *int, *int, string) error); ok { + r1 = rf(ctx, query, prove, page, perPage, orderBy) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Validators provides a mock function with given fields: ctx, height, page, perPage, requestQuorumInfo +func (_m *SignClient) Validators(ctx context.Context, height *int64, page *int, perPage *int, requestQuorumInfo *bool) (*coretypes.ResultValidators, error) { + ret := _m.Called(ctx, height, page, perPage, requestQuorumInfo) + + var r0 *coretypes.ResultValidators + if rf, ok := ret.Get(0).(func(context.Context, *int64, *int, *int, *bool) *coretypes.ResultValidators); ok { + r0 = rf(ctx, height, page, perPage, requestQuorumInfo) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultValidators) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *int64, *int, *int, *bool) error); ok { + r1 = rf(ctx, height, page, perPage, 
requestQuorumInfo) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewSignClient creates a new instance of SignClient. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. +func NewSignClient(t testing.TB) *SignClient { + mock := &SignClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/rpc/client/mocks/status_client.go b/rpc/client/mocks/status_client.go new file mode 100644 index 0000000000..eee3a471f6 --- /dev/null +++ b/rpc/client/mocks/status_client.go @@ -0,0 +1,50 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + coretypes "github.com/tendermint/tendermint/rpc/coretypes" + + testing "testing" +) + +// StatusClient is an autogenerated mock type for the StatusClient type +type StatusClient struct { + mock.Mock +} + +// Status provides a mock function with given fields: _a0 +func (_m *StatusClient) Status(_a0 context.Context) (*coretypes.ResultStatus, error) { + ret := _m.Called(_a0) + + var r0 *coretypes.ResultStatus + if rf, ok := ret.Get(0).(func(context.Context) *coretypes.ResultStatus); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.ResultStatus) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NewStatusClient creates a new instance of StatusClient. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. +func NewStatusClient(t testing.TB) *StatusClient { + mock := &StatusClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/rpc/client/mocks/subscription_client.go b/rpc/client/mocks/subscription_client.go new file mode 100644 index 0000000000..4a520063d5 --- /dev/null +++ b/rpc/client/mocks/subscription_client.go @@ -0,0 +1,85 @@ +// Code generated by mockery. DO NOT EDIT. + +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + coretypes "github.com/tendermint/tendermint/rpc/coretypes" + + testing "testing" +) + +// SubscriptionClient is an autogenerated mock type for the SubscriptionClient type +type SubscriptionClient struct { + mock.Mock +} + +// Subscribe provides a mock function with given fields: ctx, subscriber, query, outCapacity +func (_m *SubscriptionClient) Subscribe(ctx context.Context, subscriber string, query string, outCapacity ...int) (<-chan coretypes.ResultEvent, error) { + _va := make([]interface{}, len(outCapacity)) + for _i := range outCapacity { + _va[_i] = outCapacity[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, subscriber, query) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + var r0 <-chan coretypes.ResultEvent + if rf, ok := ret.Get(0).(func(context.Context, string, string, ...int) <-chan coretypes.ResultEvent); ok { + r0 = rf(ctx, subscriber, query, outCapacity...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan coretypes.ResultEvent) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string, string, ...int) error); ok { + r1 = rf(ctx, subscriber, query, outCapacity...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Unsubscribe provides a mock function with given fields: ctx, subscriber, query +func (_m *SubscriptionClient) Unsubscribe(ctx context.Context, subscriber string, query string) error { + ret := _m.Called(ctx, subscriber, query) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok { + r0 = rf(ctx, subscriber, query) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// UnsubscribeAll provides a mock function with given fields: ctx, subscriber +func (_m *SubscriptionClient) UnsubscribeAll(ctx context.Context, subscriber string) error { + ret := _m.Called(ctx, subscriber) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { + r0 = rf(ctx, subscriber) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// NewSubscriptionClient creates a new instance of SubscriptionClient. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. +func NewSubscriptionClient(t testing.TB) *SubscriptionClient { + mock := &SubscriptionClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/rpc/client/rpc_test.go b/rpc/client/rpc_test.go index f91e86433e..d1003791d7 100644 --- a/rpc/client/rpc_test.go +++ b/rpc/client/rpc_test.go @@ -1,8 +1,10 @@ package client_test import ( + "bytes" "context" "encoding/base64" + "encoding/json" "fmt" "math" "net/http" @@ -11,16 +13,18 @@ import ( "testing" "time" + "github.com/dashevo/dashd-go/btcjson" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/mempool" - tmjson "github.com/tendermint/tendermint/libs/json" + rpccore "github.com/tendermint/tendermint/internal/rpc/core" "github.com/tendermint/tendermint/libs/log" tmmath "github.com/tendermint/tendermint/libs/math" "github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/privval" "github.com/tendermint/tendermint/rpc/client" rpchttp "github.com/tendermint/tendermint/rpc/client/http" rpclocal "github.com/tendermint/tendermint/rpc/client/local" @@ -29,25 +33,40 @@ import ( "github.com/tendermint/tendermint/types" ) -func getHTTPClient(t *testing.T, conf *config.Config) *rpchttp.HTTP { +func getHTTPClient(t *testing.T, logger log.Logger, conf *config.Config) *rpchttp.HTTP { t.Helper() rpcAddr := conf.RPC.ListenAddress - c, err := rpchttp.New(rpcAddr) + c, err := rpchttp.NewWithClient(rpcAddr, http.DefaultClient) require.NoError(t, err) + ctx, cancel := context.WithCancel(context.Background()) + require.NoError(t, c.Start(ctx)) + + c.Logger = logger + t.Cleanup(func() { + cancel() + require.NoError(t, c.Stop()) + }) - c.SetLogger(log.TestingLogger()) return c } -func getHTTPClientWithTimeout(t *testing.T, conf *config.Config, timeout time.Duration) *rpchttp.HTTP { +func getHTTPClientWithTimeout(t *testing.T, logger log.Logger, conf *config.Config, timeout time.Duration) *rpchttp.HTTP { t.Helper() rpcAddr := conf.RPC.ListenAddress - c, err := rpchttp.NewWithTimeout(rpcAddr, timeout) + + tclient := &http.Client{Timeout: timeout} + c, err := rpchttp.NewWithClient(rpcAddr, tclient) require.NoError(t, err) + ctx, cancel := context.WithCancel(context.Background()) + require.NoError(t, c.Start(ctx)) - c.SetLogger(log.TestingLogger()) + c.Logger = logger + t.Cleanup(func() { + cancel() + 
require.NoError(t, c.Stop()) + }) return c } @@ -59,719 +78,784 @@ func GetClients(t *testing.T, ns service.Service, conf *config.Config) []client. node, ok := ns.(rpclocal.NodeService) require.True(t, ok) - ncl, err := rpclocal.New(node) + logger := log.NewTestingLogger(t) + ncl, err := rpclocal.New(logger, node) require.NoError(t, err) return []client.Client{ - getHTTPClient(t, conf), ncl, + getHTTPClient(t, logger, conf), } } -func TestNilCustomHTTPClient(t *testing.T) { - require.Panics(t, func() { - _, _ = rpchttp.NewWithClient("http://example.com", nil) - }) - require.Panics(t, func() { - _, _ = rpcclient.NewWithHTTPClient("http://example.com", nil) - }) -} - -func TestParseInvalidAddress(t *testing.T) { - _, conf := NodeSuite(t) - // should remove trailing / - invalidRemote := conf.RPC.ListenAddress + "/" - _, err := rpchttp.New(invalidRemote) - require.NoError(t, err) -} - -func TestCustomHTTPClient(t *testing.T) { - _, conf := NodeSuite(t) - remote := conf.RPC.ListenAddress - c, err := rpchttp.NewWithClient(remote, http.DefaultClient) - require.Nil(t, err) - status, err := c.Status(context.Background()) - require.NoError(t, err) - require.NotNil(t, status) -} - -func TestCorsEnabled(t *testing.T) { - _, conf := NodeSuite(t) - origin := conf.RPC.CORSAllowedOrigins[0] - remote := strings.ReplaceAll(conf.RPC.ListenAddress, "tcp", "http") - - req, err := http.NewRequest("GET", remote, nil) - require.Nil(t, err, "%+v", err) - req.Header.Set("Origin", origin) - c := &http.Client{} - resp, err := c.Do(req) - require.Nil(t, err, "%+v", err) - defer resp.Body.Close() - - assert.Equal(t, resp.Header.Get("Access-Control-Allow-Origin"), origin) -} - -// Make sure status is correct (we connect properly) -func TestStatus(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, conf := NodeSuite(t) - for i, c := range GetClients(t, n, conf) { - moniker := conf.Moniker - status, err := c.Status(ctx) - require.Nil(t, err, "%d: %+v", i, err) - assert.Equal(t, moniker, status.NodeInfo.Moniker) - } -} - -// Make sure info is correct (we connect properly) -func TestInfo(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - n, conf := NodeSuite(t) - - for i, c := range GetClients(t, n, conf) { - // status, err := c.Status() - // require.Nil(t, err, "%+v", err) - info, err := c.ABCIInfo(ctx) - require.Nil(t, err, "%d: %+v", i, err) - // TODO: this is not correct - fix merkleeyes! 
- // assert.EqualValues(t, status.SyncInfo.LatestBlockHeight, info.Response.LastBlockHeight) - assert.True(t, strings.Contains(info.Response.Data, "size")) - } -} - -func TestNetInfo(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, conf := NodeSuite(t) - for i, c := range GetClients(t, n, conf) { - nc, ok := c.(client.NetworkClient) - require.True(t, ok, "%d", i) - netinfo, err := nc.NetInfo(ctx) - require.Nil(t, err, "%d: %+v", i, err) - assert.True(t, netinfo.Listening) - assert.Equal(t, 0, len(netinfo.Peers)) - } -} - -func TestDumpConsensusState(t *testing.T) { +func TestClientOperations(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - n, conf := NodeSuite(t) - for i, c := range GetClients(t, n, conf) { - // FIXME: fix server so it doesn't panic on invalid input - nc, ok := c.(client.NetworkClient) - require.True(t, ok, "%d", i) - cons, err := nc.DumpConsensusState(ctx) - require.Nil(t, err, "%d: %+v", i, err) - assert.NotEmpty(t, cons.RoundState) - assert.Empty(t, cons.Peers) - } -} + logger := log.NewTestingLogger(t) -func TestConsensusState(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, conf := NodeSuite(t) - - for i, c := range GetClients(t, n, conf) { - // FIXME: fix server so it doesn't panic on invalid input - nc, ok := c.(client.NetworkClient) - require.True(t, ok, "%d", i) - cons, err := nc.ConsensusState(ctx) - require.Nil(t, err, "%d: %+v", i, err) - assert.NotEmpty(t, cons.RoundState) - } -} - -func TestHealth(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + _, conf := NodeSuite(ctx, t, logger) - n, conf := NodeSuite(t) + t.Run("NilCustomHTTPClient", func(t *testing.T) { + _, err := rpchttp.NewWithClient("http://example.com", nil) + require.Error(t, err) - for i, c := range GetClients(t, n, conf) { - nc, ok := c.(client.NetworkClient) - require.True(t, ok, "%d", i) - _, err := nc.Health(ctx) - require.Nil(t, err, "%d: %+v", i, err) - } -} - -func TestGenesisAndValidators(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, conf := NodeSuite(t) - for i, c := range GetClients(t, n, conf) { - - // make sure this is the right genesis file - gen, err := c.Genesis(ctx) - require.Nil(t, err, "%d: %+v", i, err) - // get the genesis validator - require.Equal(t, 1, len(gen.Genesis.Validators)) - gval := gen.Genesis.Validators[0] - - // get the current validators - h := int64(1) - vals, err := c.Validators(ctx, &h, nil, nil, nil) - require.Nil(t, err, "%d: %+v", i, err) - require.Equal(t, 1, len(vals.Validators)) - require.Equal(t, 1, vals.Count) - require.Equal(t, 1, vals.Total) - val := vals.Validators[0] - - // make sure the current set is also the genesis set - assert.Equal(t, gval.Power, val.VotingPower) - assert.Equal(t, gval.PubKey, val.PubKey) - } -} - -func TestGenesisChunked(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, conf := NodeSuite(t) - - for _, c := range GetClients(t, n, conf) { - first, err := c.GenesisChunked(ctx, 0) + _, err = rpcclient.NewWithHTTPClient("http://example.com", nil) + require.Error(t, err) + }) + t.Run("ParseInvalidAddress", func(t *testing.T) { + // should remove trailing / + invalidRemote := conf.RPC.ListenAddress + "/" + _, err := rpchttp.New(invalidRemote) require.NoError(t, err) - - decoded := make([]string, 0, first.TotalChunks) - for i := 0; i < first.TotalChunks; 
i++ { - chunk, err := c.GenesisChunked(ctx, uint(i)) + }) + t.Run("CustomHTTPClient", func(t *testing.T) { + remote := conf.RPC.ListenAddress + c, err := rpchttp.NewWithClient(remote, http.DefaultClient) + require.NoError(t, err) + status, err := c.Status(ctx) + require.NoError(t, err) + require.NotNil(t, status) + }) + t.Run("CorsEnabled", func(t *testing.T) { + origin := conf.RPC.CORSAllowedOrigins[0] + remote := strings.ReplaceAll(conf.RPC.ListenAddress, "tcp", "http") + + req, err := http.NewRequestWithContext(ctx, "GET", remote, nil) + require.NoError(t, err, "%+v", err) + req.Header.Set("Origin", origin) + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err, "%+v", err) + defer resp.Body.Close() + + assert.Equal(t, resp.Header.Get("Access-Control-Allow-Origin"), origin) + }) + t.Run("Batching", func(t *testing.T) { + t.Run("JSONRPCCalls", func(t *testing.T) { + logger := log.NewTestingLogger(t) + c := getHTTPClient(t, logger, conf) + testBatchedJSONRPCCalls(ctx, t, c) + }) + t.Run("JSONRPCCallsCancellation", func(t *testing.T) { + _, _, tx1 := MakeTxKV() + _, _, tx2 := MakeTxKV() + + logger := log.NewTestingLogger(t) + c := getHTTPClient(t, logger, conf) + batch := c.NewBatch() + _, err := batch.BroadcastTxCommit(ctx, tx1) require.NoError(t, err) - data, err := base64.StdEncoding.DecodeString(chunk.Data) + _, err = batch.BroadcastTxCommit(ctx, tx2) require.NoError(t, err) - decoded = append(decoded, string(data)) - - } - doc := []byte(strings.Join(decoded, "")) - - var out types.GenesisDoc - require.NoError(t, tmjson.Unmarshal(doc, &out), - "first: %+v, doc: %s", first, string(doc)) - } -} - -func TestABCIQuery(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, conf := NodeSuite(t) - - for i, c := range GetClients(t, n, conf) { - // write something - k, v, tx := MakeTxKV() - bres, err := c.BroadcastTxCommit(ctx, tx) - require.Nil(t, err, "%d: %+v", i, err) - apph := bres.Height + 1 // this is where the tx will be applied to the state - - // wait before querying - err = client.WaitForHeight(c, apph, nil) - require.NoError(t, err) - res, err := c.ABCIQuery(ctx, "/key", k) - qres := res.Response - if assert.Nil(t, err) && assert.True(t, qres.IsOK()) { - assert.EqualValues(t, v, qres.Value) - } - } + // we should have 2 requests waiting + require.Equal(t, 2, batch.Count()) + // we want to make sure we cleared 2 pending requests + require.Equal(t, 2, batch.Clear()) + // now there should be no batched requests + require.Equal(t, 0, batch.Count()) + }) + t.Run("SendingEmptyRequest", func(t *testing.T) { + logger := log.NewTestingLogger(t) + + c := getHTTPClient(t, logger, conf) + batch := c.NewBatch() + _, err := batch.Send(ctx) + require.Error(t, err, "sending an empty batch of JSON RPC requests should result in an error") + }) + t.Run("ClearingEmptyRequest", func(t *testing.T) { + logger := log.NewTestingLogger(t) + + c := getHTTPClient(t, logger, conf) + batch := c.NewBatch() + require.Zero(t, batch.Clear(), "clearing an empty batch of JSON RPC requests should result in a 0 result") + }) + t.Run("ConcurrentJSONRPC", func(t *testing.T) { + logger := log.NewTestingLogger(t) + + var wg sync.WaitGroup + c := getHTTPClient(t, logger, conf) + for i := 0; i < 50; i++ { + wg.Add(1) + go func() { + defer wg.Done() + testBatchedJSONRPCCalls(ctx, t, c) + }() + } + wg.Wait() + }) + }) } -// Make some app checks -func TestAppCalls(t *testing.T) { +// Make sure info is correct (we connect properly) +func TestClientMethodCalls(t *testing.T) { 
ctx, cancel := context.WithCancel(context.Background()) defer cancel() + logger := log.NewTestingLogger(t) - n, conf := NodeSuite(t) - - for i, c := range GetClients(t, n, conf) { - - // get an offset of height to avoid racing and guessing - s, err := c.Status(ctx) - require.NoError(t, err) - // sh is start height or status height - sh := s.SyncInfo.LatestBlockHeight - - // look for the future - h := sh + 20 - _, err = c.Block(ctx, &h) - require.Error(t, err) // no block yet + n, conf := NodeSuite(ctx, t, logger) - // write something - k, v, tx := MakeTxKV() - bres, err := c.BroadcastTxCommit(ctx, tx) - require.NoError(t, err) - require.True(t, bres.DeliverTx.IsOK()) - txh := bres.Height - apph := txh + 1 // this is where the tx will be applied to the state - - // wait before querying - err = client.WaitForHeight(c, apph, nil) - require.NoError(t, err) - - _qres, err := c.ABCIQueryWithOptions(ctx, "/key", k, client.ABCIQueryOptions{Prove: false}) - require.NoError(t, err) - qres := _qres.Response - if assert.True(t, qres.IsOK()) { - assert.Equal(t, k, qres.Key) - assert.EqualValues(t, v, qres.Value) - } - - // make sure we can lookup the tx with proof - ptx, err := c.Tx(ctx, bres.Hash, true) - require.NoError(t, err) - assert.EqualValues(t, txh, ptx.Height) - assert.EqualValues(t, tx, ptx.Tx) - - // and we can even check the block is added - block, err := c.Block(ctx, &apph) - require.NoError(t, err) - appHash := block.Block.Header.AppHash - assert.True(t, len(appHash) > 0) - assert.EqualValues(t, apph, block.Block.Header.Height) - - blockByHash, err := c.BlockByHash(ctx, block.BlockID.Hash) - require.NoError(t, err) - require.Equal(t, block, blockByHash) - - // check that the header matches the block hash - header, err := c.Header(ctx, &apph) - require.NoError(t, err) - require.Equal(t, block.Block.Header, *header.Header) - - headerByHash, err := c.HeaderByHash(ctx, block.BlockID.Hash) - require.NoError(t, err) - require.Equal(t, header, headerByHash) - - // now check the results - blockResults, err := c.BlockResults(ctx, &txh) - require.NoError(t, err, "%d: %+v", i, err) - assert.Equal(t, txh, blockResults.Height) - if assert.Equal(t, 1, len(blockResults.TxsResults)) { - // check success code - assert.EqualValues(t, 0, blockResults.TxsResults[0].Code) - } - - // check blockchain info, now that we know there is info - info, err := c.BlockchainInfo(ctx, apph, apph) - require.NoError(t, err) - assert.True(t, info.LastHeight >= apph) - if assert.Equal(t, 1, len(info.BlockMetas)) { - lastMeta := info.BlockMetas[0] - assert.EqualValues(t, apph, lastMeta.Header.Height) - blockData := block.Block - assert.Equal(t, blockData.Header.AppHash, lastMeta.Header.AppHash) - assert.Equal(t, block.BlockID, lastMeta.BlockID) - } - - // and get the corresponding commit with the same apphash - commit, err := c.Commit(ctx, &apph) - require.NoError(t, err) - cappHash := commit.Header.AppHash - assert.Equal(t, appHash, cappHash) - assert.NotNil(t, commit.Commit) - - // compare the commits (note Commit(2) has commit from Block(3)) - h = apph - 1 - commit2, err := c.Commit(ctx, &h) - require.NoError(t, err) - assert.Equal(t, block.Block.LastCommitHash, commit2.Commit.Hash()) - // and we got a proof that works! 
- _pres, err := c.ABCIQueryWithOptions(ctx, "/key", k, client.ABCIQueryOptions{Prove: true}) - require.NoError(t, err) - pres := _pres.Response - assert.True(t, pres.IsOK()) - - // XXX Test proof - } -} - -func TestBlockchainInfo(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + // for broadcast tx tests + pool := getMempool(t, n) - n, conf := NodeSuite(t) + // for evidence tests + pv, err := privval.LoadOrGenFilePV(conf.PrivValidator.KeyFile(), conf.PrivValidator.StateFile()) + require.NoError(t, err) + quorumHash, err := pv.GetFirstQuorumHash(ctx) + require.NoError(t, err) for i, c := range GetClients(t, n, conf) { - err := client.WaitForHeight(c, 10, nil) - require.NoError(t, err) - - res, err := c.BlockchainInfo(ctx, 0, 0) - require.Nil(t, err, "%d: %+v", i, err) - assert.True(t, res.LastHeight > 0) - assert.True(t, len(res.BlockMetas) > 0) - - res, err = c.BlockchainInfo(ctx, 1, 1) - require.Nil(t, err, "%d: %+v", i, err) - assert.True(t, res.LastHeight > 0) - assert.True(t, len(res.BlockMetas) == 1) - - res, err = c.BlockchainInfo(ctx, 1, 10000) - require.Nil(t, err, "%d: %+v", i, err) - assert.True(t, res.LastHeight > 0) - assert.True(t, len(res.BlockMetas) < 100) - for _, m := range res.BlockMetas { - assert.NotNil(t, m) - } + t.Run(fmt.Sprintf("%T", c), func(t *testing.T) { + t.Run("Status", func(t *testing.T) { + status, err := c.Status(ctx) + require.NoError(t, err, "%d: %+v", i, err) + assert.Equal(t, conf.Moniker, status.NodeInfo.Moniker) + }) + t.Run("Info", func(t *testing.T) { + info, err := c.ABCIInfo(ctx) + require.NoError(t, err) + + status, err := c.Status(ctx) + require.NoError(t, err) + + assert.GreaterOrEqual(t, status.SyncInfo.LatestBlockHeight, info.Response.LastBlockHeight) + assert.True(t, strings.Contains(info.Response.Data, "size")) + }) + t.Run("NetInfo", func(t *testing.T) { + nc, ok := c.(client.NetworkClient) + require.True(t, ok, "%d", i) + netinfo, err := nc.NetInfo(ctx) + require.NoError(t, err, "%d: %+v", i, err) + assert.True(t, netinfo.Listening) + assert.Equal(t, 0, len(netinfo.Peers)) + }) + t.Run("DumpConsensusState", func(t *testing.T) { + // FIXME: fix server so it doesn't panic on invalid input + nc, ok := c.(client.NetworkClient) + require.True(t, ok, "%d", i) + cons, err := nc.DumpConsensusState(ctx) + require.NoError(t, err, "%d: %+v", i, err) + assert.NotEmpty(t, cons.RoundState) + assert.Empty(t, cons.Peers) + }) + t.Run("ConsensusState", func(t *testing.T) { + // FIXME: fix server so it doesn't panic on invalid input + nc, ok := c.(client.NetworkClient) + require.True(t, ok, "%d", i) + cons, err := nc.ConsensusState(ctx) + require.NoError(t, err, "%d: %+v", i, err) + assert.NotEmpty(t, cons.RoundState) + }) + t.Run("Health", func(t *testing.T) { + nc, ok := c.(client.NetworkClient) + require.True(t, ok, "%d", i) + _, err := nc.Health(ctx) + require.NoError(t, err, "%d: %+v", i, err) + }) + t.Run("GenesisAndValidators", func(t *testing.T) { + // make sure this is the right genesis file + gen, err := c.Genesis(ctx) + require.NoError(t, err, "%d: %+v", i, err) + // get the genesis validator + require.Equal(t, 1, len(gen.Genesis.Validators)) + gval := gen.Genesis.Validators[0] + + // get the current validators + h := int64(1) + vals, err := c.Validators(ctx, &h, nil, nil, nil) + require.NoError(t, err, "%d: %+v", i, err) + require.Equal(t, 1, len(vals.Validators)) + require.Equal(t, 1, vals.Count) + require.Equal(t, 1, vals.Total) + val := vals.Validators[0] + + // make sure the current set is also 
the genesis set + assert.Equal(t, gval.Power, val.VotingPower) + assert.Equal(t, gval.PubKey, val.PubKey) + }) + t.Run("GenesisChunked", func(t *testing.T) { + first, err := c.GenesisChunked(ctx, 0) + require.NoError(t, err) + + decoded := make([]string, 0, first.TotalChunks) + for i := 0; i < first.TotalChunks; i++ { + chunk, err := c.GenesisChunked(ctx, uint(i)) + require.NoError(t, err) + data, err := base64.StdEncoding.DecodeString(chunk.Data) + require.NoError(t, err) + decoded = append(decoded, string(data)) - res, err = c.BlockchainInfo(ctx, 10000, 1) - require.NotNil(t, err) - assert.Nil(t, res) - assert.Contains(t, err.Error(), "can't be greater than max") - } -} + } + doc := []byte(strings.Join(decoded, "")) + + var out types.GenesisDoc + require.NoError(t, json.Unmarshal(doc, &out), + "first: %+v, doc: %s", first, string(doc)) + }) + t.Run("ABCIQuery", func(t *testing.T) { + // write something + k, v, tx := MakeTxKV() + status, err := c.Status(ctx) + require.NoError(t, err) + _, err = c.BroadcastTxSync(ctx, tx) + require.NoError(t, err, "%d: %+v", i, err) + apph := status.SyncInfo.LatestBlockHeight + 2 // this is where the tx will be applied to the state + + // wait before querying + err = client.WaitForHeight(ctx, c, apph, nil) + require.NoError(t, err) + res, err := c.ABCIQuery(ctx, "/key", k) + qres := res.Response + if assert.NoError(t, err) && assert.True(t, qres.IsOK()) { + assert.EqualValues(t, v, qres.Value) + } + }) + t.Run("AppCalls", func(t *testing.T) { + // get an offset of height to avoid racing and guessing + s, err := c.Status(ctx) + require.NoError(t, err) + // sh is start height or status height + sh := s.SyncInfo.LatestBlockHeight + + // look for the future + h := sh + 20 + _, err = c.Block(ctx, &h) + require.Error(t, err) // no block yet + + // write something + k, v, tx := MakeTxKV() + bres, err := c.BroadcastTxCommit(ctx, tx) + require.NoError(t, err) + require.True(t, bres.TxResult.IsOK()) + txh := bres.Height + apph := txh + 1 // this is where the tx will be applied to the state + + // wait before querying + err = client.WaitForHeight(ctx, c, apph, nil) + require.NoError(t, err) + + _qres, err := c.ABCIQueryWithOptions(ctx, "/key", k, client.ABCIQueryOptions{Prove: false}) + require.NoError(t, err) + qres := _qres.Response + if assert.True(t, qres.IsOK()) { + assert.Equal(t, k, qres.Key) + assert.EqualValues(t, v, qres.Value) + } -func TestBroadcastTxSync(t *testing.T) { - n, conf := NodeSuite(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + // make sure we can lookup the tx with proof + ptx, err := c.Tx(ctx, bres.Hash, true) + require.NoError(t, err) + assert.EqualValues(t, txh, ptx.Height) + assert.EqualValues(t, tx, ptx.Tx) - // TODO (melekes): use mempool which is set on RPC rather than getting it from node - pool := getMempool(t, n) - initMempoolSize := pool.Size() + // and we can even check the block is added + block, err := c.Block(ctx, &apph) + require.NoError(t, err) + appHash := block.Block.Header.AppHash + assert.True(t, len(appHash) > 0) + assert.EqualValues(t, apph, block.Block.Header.Height) + + blockByHash, err := c.BlockByHash(ctx, block.BlockID.Hash) + require.NoError(t, err) + require.Equal(t, block, blockByHash) + + // check that the header matches the block hash + header, err := c.Header(ctx, &apph) + require.NoError(t, err) + require.Equal(t, block.Block.Header, *header.Header) + + headerByHash, err := c.HeaderByHash(ctx, block.BlockID.Hash) + require.NoError(t, err) + require.Equal(t, header, 
headerByHash) + + // now check the results + blockResults, err := c.BlockResults(ctx, &txh) + require.NoError(t, err, "%d: %+v", i, err) + assert.Equal(t, txh, blockResults.Height) + if assert.Equal(t, 1, len(blockResults.TxsResults)) { + // check success code + assert.EqualValues(t, 0, blockResults.TxsResults[0].Code) + } - for i, c := range GetClients(t, n, conf) { - _, _, tx := MakeTxKV() - bres, err := c.BroadcastTxSync(ctx, tx) - require.Nil(t, err, "%d: %+v", i, err) - require.Equal(t, bres.Code, abci.CodeTypeOK) // FIXME + // check blockchain info, now that we know there is info + info, err := c.BlockchainInfo(ctx, apph, apph) + require.NoError(t, err) + assert.True(t, info.LastHeight >= apph) + if assert.Equal(t, 1, len(info.BlockMetas)) { + lastMeta := info.BlockMetas[0] + assert.EqualValues(t, apph, lastMeta.Header.Height) + blockData := block.Block + assert.Equal(t, blockData.Header.AppHash, lastMeta.Header.AppHash) + assert.Equal(t, block.BlockID, lastMeta.BlockID) + } - require.Equal(t, initMempoolSize+1, pool.Size()) + // and get the corresponding commit with the same apphash + commit, err := c.Commit(ctx, &apph) + require.NoError(t, err) + cappHash := commit.Header.AppHash + assert.Equal(t, appHash, cappHash) + assert.NotNil(t, commit.Commit) + + // compare the commits (note Commit(2) has commit from Block(3)) + h = apph - 1 + commit2, err := c.Commit(ctx, &h) + require.NoError(t, err) + assert.Equal(t, block.Block.LastCommitHash, commit2.Commit.Hash()) + + // and we got a proof that works! + _pres, err := c.ABCIQueryWithOptions(ctx, "/key", k, client.ABCIQueryOptions{Prove: true}) + require.NoError(t, err) + pres := _pres.Response + assert.True(t, pres.IsOK()) + + // XXX Test proof + }) + t.Run("BlockchainInfo", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + err := client.WaitForHeight(ctx, c, 10, nil) + require.NoError(t, err) + + res, err := c.BlockchainInfo(ctx, 0, 0) + require.NoError(t, err, "%d: %+v", i, err) + assert.True(t, res.LastHeight > 0) + assert.True(t, len(res.BlockMetas) > 0) + + res, err = c.BlockchainInfo(ctx, 1, 1) + require.NoError(t, err, "%d: %+v", i, err) + assert.True(t, res.LastHeight > 0) + assert.True(t, len(res.BlockMetas) == 1) + + res, err = c.BlockchainInfo(ctx, 1, 10000) + require.NoError(t, err, "%d: %+v", i, err) + assert.True(t, res.LastHeight > 0) + assert.True(t, len(res.BlockMetas) < 100) + for _, m := range res.BlockMetas { + assert.NotNil(t, m) + } - txs := pool.ReapMaxTxs(len(tx)) - require.EqualValues(t, tx, txs[0]) - pool.Flush() + res, err = c.BlockchainInfo(ctx, 10000, 1) + require.Error(t, err) + assert.Nil(t, res) + assert.Contains(t, err.Error(), "can't be greater than max") + }) + t.Run("BroadcastTxCommit", func(t *testing.T) { + _, _, tx := MakeTxKV() + bres, err := c.BroadcastTxCommit(ctx, tx) + require.NoError(t, err, "%d: %+v", i, err) + require.True(t, bres.CheckTx.IsOK()) + require.True(t, bres.TxResult.IsOK()) + + require.Equal(t, 0, pool.Size()) + }) + t.Run("BroadcastTxSync", func(t *testing.T) { + _, _, tx := MakeTxKV() + initMempoolSize := pool.Size() + bres, err := c.BroadcastTxSync(ctx, tx) + require.NoError(t, err, "%d: %+v", i, err) + require.Equal(t, bres.Code, abci.CodeTypeOK) // FIXME + + require.Equal(t, initMempoolSize+1, pool.Size()) + + txs := pool.ReapMaxTxs(len(tx)) + require.EqualValues(t, tx, txs[0]) + pool.Flush() + }) + t.Run("CheckTx", func(t *testing.T) { + _, _, tx := MakeTxKV() + + res, err := c.CheckTx(ctx, tx) + require.NoError(t, err) + 
assert.Equal(t, abci.CodeTypeOK, res.Code) + + assert.Equal(t, 0, pool.Size(), "mempool must be empty") + }) + t.Run("Events", func(t *testing.T) { + t.Run("Header", func(t *testing.T) { + ctx, cancel := context.WithTimeout(ctx, waitForEventTimeout) + defer cancel() + query := types.QueryForEvent(types.EventNewBlockHeaderValue).String() + evt, err := client.WaitForOneEvent(ctx, c, query) + require.NoError(t, err, "%d: %+v", i, err) + _, ok := evt.(types.EventDataNewBlockHeader) + require.True(t, ok, "%d: %#v", i, evt) + // TODO: more checks... + }) + t.Run("Block", func(t *testing.T) { + const subscriber = "TestBlockEvents" + + eventCh, err := c.Subscribe(ctx, subscriber, types.QueryForEvent(types.EventNewBlockValue).String()) + require.NoError(t, err) + t.Cleanup(func() { + if err := c.UnsubscribeAll(ctx, subscriber); err != nil { + t.Error(err) + } + }) + + var firstBlockHeight int64 + for i := int64(0); i < 3; i++ { + event := <-eventCh + blockEvent, ok := event.Data.(types.EventDataNewBlock) + require.True(t, ok) + + block := blockEvent.Block + + if firstBlockHeight == 0 { + firstBlockHeight = block.Header.Height + } + + require.Equal(t, firstBlockHeight+i, block.Header.Height) + } + }) + t.Run("BroadcastTxAsync", func(t *testing.T) { + testTxEventsSent(ctx, t, "async", c) + }) + t.Run("BroadcastTxSync", func(t *testing.T) { + testTxEventsSent(ctx, t, "sync", c) + }) + }) + t.Run("Evidence", func(t *testing.T) { + t.Run("BroadcastDuplicateVote", func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + chainID := conf.ChainID() + + // make sure that the node has produced enough blocks + waitForBlock(ctx, t, c, 2) + evidenceHeight := int64(1) + block, _ := c.Block(ctx, &evidenceHeight) + ts := block.Block.Time + correct, fakes := makeEvidences(t, pv, chainID, btcjson.LLMQType_5_60, quorumHash, ts) + + result, err := c.BroadcastEvidence(ctx, correct) + require.NoError(t, err, "BroadcastEvidence(%s) failed", correct) + assert.Equal(t, correct.Hash(), result.Hash, "expected result hash to match evidence hash") + + status, err := c.Status(ctx) + require.NoError(t, err) + err = client.WaitForHeight(ctx, c, status.SyncInfo.LatestBlockHeight+2, nil) + require.NoError(t, err) + + result2, err := c.ABCIQuery(ctx, "/val", pv.Key.ProTxHash) + require.NoError(t, err) + qres := result2.Response + require.True(t, qres.IsOK()) + + var v abci.ValidatorUpdate + err = abci.ReadMessage(bytes.NewReader(qres.Value), &v) + require.NoError(t, err, "Error reading query result, value %v", qres.Value) + + require.EqualValues(t, pv.Key.ProTxHash, v.ProTxHash, "Stored ProTxHash does not match expected, value %v", string(qres.Value)) + require.Equal(t, types.DefaultDashVotingPower, v.Power, "Stored Power does not match expected, value %v", string(qres.Value)) + + for _, fake := range fakes { + _, err := c.BroadcastEvidence(ctx, fake) + require.Error(t, err, "BroadcastEvidence(%s) succeeded, but the evidence was fake", fake) + } + }) + t.Run("BroadcastEmpty", func(t *testing.T) { + _, err := c.BroadcastEvidence(ctx, nil) + require.Error(t, err) + }) + }) + }) } } func getMempool(t *testing.T, srv service.Service) mempool.Mempool { t.Helper() n, ok := srv.(interface { - Mempool() mempool.Mempool + RPCEnvironment() *rpccore.Environment }) require.True(t, ok) - return n.Mempool() + return n.RPCEnvironment().Mempool } -func TestBroadcastTxCommit(t *testing.T) { +// these cases are roughly the same as TestClientMethodCalls, but +// they have to loop over their clients in the 
individual test cases, +// so making a separate suite makes more sense, though it isn't strictly +// speaking desirable. +func TestClientMethodCallsAdvanced(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - n, conf := NodeSuite(t) + logger := log.NewTestingLogger(t) + n, conf := NodeSuite(ctx, t, logger) pool := getMempool(t, n) - for i, c := range GetClients(t, n, conf) { - _, _, tx := MakeTxKV() - bres, err := c.BroadcastTxCommit(ctx, tx) - require.Nil(t, err, "%d: %+v", i, err) - require.True(t, bres.CheckTx.IsOK()) - require.True(t, bres.DeliverTx.IsOK()) - - require.Equal(t, 0, pool.Size()) - } -} - -func TestUnconfirmedTxs(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - _, _, tx := MakeTxKV() - ch := make(chan *abci.Response, 1) - - n, conf := NodeSuite(t) - pool := getMempool(t, n) - err := pool.CheckTx(ctx, tx, func(resp *abci.Response) { ch <- resp }, mempool.TxInfo{}) - - require.NoError(t, err) - - // wait for tx to arrive in mempoool. - select { - case <-ch: - case <-time.After(5 * time.Second): - t.Error("Timed out waiting for CheckTx callback") - } - - for _, c := range GetClients(t, n, conf) { - mc := c.(client.MempoolClient) - limit := 1 - res, err := mc.UnconfirmedTxs(ctx, &limit) - require.NoError(t, err) - - assert.Equal(t, 1, res.Count) - assert.Equal(t, 1, res.Total) - assert.Equal(t, pool.SizeBytes(), res.TotalBytes) - assert.Exactly(t, types.Txs{tx}, types.Txs(res.Txs)) - } - - pool.Flush() -} - -func TestNumUnconfirmedTxs(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - _, _, tx := MakeTxKV() - n, conf := NodeSuite(t) - ch := make(chan *abci.Response, 1) - pool := getMempool(t, n) + t.Run("UnconfirmedTxs", func(t *testing.T) { + // populate mempool with 5 tx + txs := make([]types.Tx, 5) + ch := make(chan error, 5) + for i := 0; i < 5; i++ { + _, _, tx := MakeTxKV() - err := pool.CheckTx(ctx, tx, func(resp *abci.Response) { ch <- resp }, mempool.TxInfo{}) - require.NoError(t, err) + txs[i] = tx + err := pool.CheckTx(ctx, tx, func(_ *abci.ResponseCheckTx) { ch <- nil }, mempool.TxInfo{}) - // wait for tx to arrive in mempoool. - select { - case <-ch: - case <-time.After(5 * time.Second): - t.Error("Timed out waiting for CheckTx callback") - } + require.NoError(t, err) + } + // wait for tx to arrive in mempool. 
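+ // each CheckTx callback above sends one value on the buffered channel, so + // draining five receipts below confirms all five txs were admitted before + // the pagination assertions run.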
+ for i := 0; i < 5; i++ { + select { + case <-ch: + case <-time.After(5 * time.Second): + t.Error("Timed out waiting for CheckTx callback") + } + } + close(ch) - mempoolSize := pool.Size() - for i, c := range GetClients(t, n, conf) { - mc, ok := c.(client.MempoolClient) - require.True(t, ok, "%d", i) - res, err := mc.NumUnconfirmedTxs(ctx) - require.Nil(t, err, "%d: %+v", i, err) - - assert.Equal(t, mempoolSize, res.Count) - assert.Equal(t, mempoolSize, res.Total) - assert.Equal(t, pool.SizeBytes(), res.TotalBytes) - } + for _, c := range GetClients(t, n, conf) { + for i := 1; i <= 2; i++ { + mc := c.(client.MempoolClient) + page, perPage := i, 3 + res, err := mc.UnconfirmedTxs(ctx, &page, &perPage) + require.NoError(t, err) - pool.Flush() -} + if i == 2 { + perPage = 2 + } + assert.Equal(t, perPage, res.Count) + assert.Equal(t, 5, res.Total) + assert.Equal(t, pool.SizeBytes(), res.TotalBytes) + for _, tx := range res.Txs { + assert.Contains(t, txs, tx) + } + } + } -func TestCheckTx(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + pool.Flush() + }) + t.Run("NumUnconfirmedTxs", func(t *testing.T) { + ch := make(chan struct{}) - n, conf := NodeSuite(t) - pool := getMempool(t, n) + pool := getMempool(t, n) - for _, c := range GetClients(t, n, conf) { _, _, tx := MakeTxKV() - res, err := c.CheckTx(ctx, tx) + err := pool.CheckTx(ctx, tx, func(_ *abci.ResponseCheckTx) { close(ch) }, mempool.TxInfo{}) require.NoError(t, err) - assert.Equal(t, abci.CodeTypeOK, res.Code) - assert.Equal(t, 0, pool.Size(), "mempool must be empty") - } -} - -func TestTx(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - n, conf := NodeSuite(t) - - c := getHTTPClient(t, conf) - - // first we broadcast a tx - _, _, tx := MakeTxKV() - bres, err := c.BroadcastTxCommit(ctx, tx) - require.Nil(t, err, "%+v", err) - - txHeight := bres.Height - txHash := bres.Hash - - anotherTxHash := types.Tx("a different tx").Hash() - - cases := []struct { - valid bool - prove bool - hash []byte - }{ - // only valid if correct hash provided - {true, false, txHash}, - {true, true, txHash}, - {false, false, anotherTxHash}, - {false, true, anotherTxHash}, - {false, false, nil}, - {false, true, nil}, - } - - for i, c := range GetClients(t, n, conf) { - for j, tc := range cases { - t.Logf("client %d, case %d", i, j) - - // now we query for the tx. - // since there's only one tx, we know index=0. - ptx, err := c.Tx(ctx, tc.hash, tc.prove) - - if !tc.valid { - require.NotNil(t, err) - } else { - require.Nil(t, err, "%+v", err) - assert.EqualValues(t, txHeight, ptx.Height) - assert.EqualValues(t, tx, ptx.Tx) - assert.Zero(t, ptx.Index) - assert.True(t, ptx.TxResult.IsOK()) - assert.EqualValues(t, txHash, ptx.Hash) - - // time to verify the proof - proof := ptx.Proof - if tc.prove && assert.EqualValues(t, tx, proof.Data) { - assert.NoError(t, proof.Proof.Verify(proof.RootHash, txHash)) - } - } + // wait for tx to arrive in mempool. 
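+ // here the CheckTx callback closes ch rather than sending on it, so a + // single successful receive below is enough; the five-second timeout + // guards against a hung mempool.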
+ select { + case <-ch: + case <-time.After(5 * time.Second): + t.Error("Timed out waiting for CheckTx callback") } - } -} -func TestTxSearchWithTimeout(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - _, conf := NodeSuite(t) - timeoutClient := getHTTPClientWithTimeout(t, conf, 10*time.Second) + mempoolSize := pool.Size() + for i, c := range GetClients(t, n, conf) { + mc, ok := c.(client.MempoolClient) + require.True(t, ok, "%d", i) + res, err := mc.NumUnconfirmedTxs(ctx) + require.NoError(t, err, "%d: %+v", i, err) - _, _, tx := MakeTxKV() - _, err := timeoutClient.BroadcastTxCommit(ctx, tx) - require.NoError(t, err) + assert.Equal(t, mempoolSize, res.Count) + assert.Equal(t, mempoolSize, res.Total) + assert.Equal(t, pool.SizeBytes(), res.TotalBytes) + } - // query using a compositeKey (see kvstore application) - result, err := timeoutClient.TxSearch(ctx, "app.creator='Cosmoshi Netowoko'", false, nil, nil, "asc") - require.Nil(t, err) - require.Greater(t, len(result.Txs), 0, "expected a lot of transactions") -} + pool.Flush() + }) + t.Run("Tx", func(t *testing.T) { + logger := log.NewTestingLogger(t) -func TestTxSearch(t *testing.T) { - n, conf := NodeSuite(t) - c := getHTTPClient(t, conf) + c := getHTTPClient(t, logger, conf) - // first we broadcast a few txs - for i := 0; i < 10; i++ { + // first we broadcast a tx _, _, tx := MakeTxKV() - _, err := c.BroadcastTxCommit(context.Background(), tx) - require.NoError(t, err) - } - - // since we're not using an isolated test server, we'll have lingering transactions - // from other tests as well - result, err := c.TxSearch(context.Background(), "tx.height >= 0", true, nil, nil, "asc") - require.NoError(t, err) - txCount := len(result.Txs) - - // pick out the last tx to have something to search for in tests - find := result.Txs[len(result.Txs)-1] - anotherTxHash := types.Tx("a different tx").Hash() + bres, err := c.BroadcastTxCommit(ctx, tx) + require.NoError(t, err, "%+v", err) + + txHeight := bres.Height + txHash := bres.Hash + + anotherTxHash := types.Tx("a different tx").Hash() + + cases := []struct { + valid bool + prove bool + hash []byte + }{ + // only valid if correct hash provided + {true, false, txHash}, + {true, true, txHash}, + {false, false, anotherTxHash}, + {false, true, anotherTxHash}, + {false, false, nil}, + {false, true, nil}, + } - for i, c := range GetClients(t, n, conf) { - t.Logf("client %d", i) - - // now we query for the tx. - result, err := c.TxSearch(context.Background(), fmt.Sprintf("tx.hash='%v'", find.Hash), true, nil, nil, "asc") - require.Nil(t, err) - require.Len(t, result.Txs, 1) - require.Equal(t, find.Hash, result.Txs[0].Hash) - - ptx := result.Txs[0] - assert.EqualValues(t, find.Height, ptx.Height) - assert.EqualValues(t, find.Tx, ptx.Tx) - assert.Zero(t, ptx.Index) - assert.True(t, ptx.TxResult.IsOK()) - assert.EqualValues(t, find.Hash, ptx.Hash) - - // time to verify the proof - if assert.EqualValues(t, find.Tx, ptx.Proof.Data) { - assert.NoError(t, ptx.Proof.Proof.Verify(ptx.Proof.RootHash, find.Hash)) + for _, c := range GetClients(t, n, conf) { + t.Run(fmt.Sprintf("%T", c), func(t *testing.T) { + for j, tc := range cases { + t.Run(fmt.Sprintf("Case%d", j), func(t *testing.T) { + // now we query for the tx. + // since there's only one tx, we know index=0. 
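+ // when tc.prove is set, the response should also carry a Merkle inclusion + // proof, which is verified against the committed tx hash further below.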
+ ptx, err := c.Tx(ctx, tc.hash, tc.prove) + + if !tc.valid { + require.Error(t, err) + } else { + require.NoError(t, err, "%+v", err) + assert.EqualValues(t, txHeight, ptx.Height) + assert.EqualValues(t, tx, ptx.Tx) + assert.Zero(t, ptx.Index) + assert.True(t, ptx.TxResult.IsOK()) + assert.EqualValues(t, txHash, ptx.Hash) + + // time to verify the proof + proof := ptx.Proof + if tc.prove && assert.EqualValues(t, tx, proof.Data) { + assert.NoError(t, proof.Proof.Verify(proof.RootHash, txHash)) + } + } + }) + } + }) } + }) + t.Run("TxSearchWithTimeout", func(t *testing.T) { + logger := log.NewTestingLogger(t) - // query by height - result, err = c.TxSearch(context.Background(), fmt.Sprintf("tx.height=%d", find.Height), true, nil, nil, "asc") - require.Nil(t, err) - require.Len(t, result.Txs, 1) + timeoutClient := getHTTPClientWithTimeout(t, logger, conf, 10*time.Second) - // query for non existing tx - result, err = c.TxSearch(context.Background(), fmt.Sprintf("tx.hash='%X'", anotherTxHash), false, nil, nil, "asc") - require.Nil(t, err) - require.Len(t, result.Txs, 0) + _, _, tx := MakeTxKV() + _, err := timeoutClient.BroadcastTxCommit(ctx, tx) + require.NoError(t, err) // query using a compositeKey (see kvstore application) - result, err = c.TxSearch(context.Background(), "app.creator='Cosmoshi Netowoko'", false, nil, nil, "asc") - require.Nil(t, err) - require.Greater(t, len(result.Txs), 0, "expected a lot of transactions") - - // query using an index key - result, err = c.TxSearch(context.Background(), "app.index_key='index is working'", false, nil, nil, "asc") - require.Nil(t, err) - require.Greater(t, len(result.Txs), 0, "expected a lot of transactions") - - // query using an noindex key - result, err = c.TxSearch(context.Background(), "app.noindex_key='index is working'", false, nil, nil, "asc") - require.Nil(t, err) - require.Equal(t, len(result.Txs), 0, "expected a lot of transactions") - - // query using a compositeKey (see kvstore application) and height - result, err = c.TxSearch(context.Background(), - "app.creator='Cosmoshi Netowoko' AND tx.height<10000", true, nil, nil, "asc") - require.Nil(t, err) + result, err := timeoutClient.TxSearch(ctx, "app.creator='Cosmoshi Netowoko'", false, nil, nil, "asc") + require.NoError(t, err) require.Greater(t, len(result.Txs), 0, "expected a lot of transactions") + }) + t.Run("TxSearch", func(t *testing.T) { + t.Skip("Test Asserts Non-Deterministic Results") + logger := log.NewTestingLogger(t) - // query a non existing tx with page 1 and txsPerPage 1 - perPage := 1 - result, err = c.TxSearch(context.Background(), "app.creator='Cosmoshi Neetowoko'", true, nil, &perPage, "asc") - require.Nil(t, err) - require.Len(t, result.Txs, 0) - - // check sorting - result, err = c.TxSearch(context.Background(), "tx.height >= 1", false, nil, nil, "asc") - require.Nil(t, err) - for k := 0; k < len(result.Txs)-1; k++ { - require.LessOrEqual(t, result.Txs[k].Height, result.Txs[k+1].Height) - require.LessOrEqual(t, result.Txs[k].Index, result.Txs[k+1].Index) - } + c := getHTTPClient(t, logger, conf) - result, err = c.TxSearch(context.Background(), "tx.height >= 1", false, nil, nil, "desc") - require.Nil(t, err) - for k := 0; k < len(result.Txs)-1; k++ { - require.GreaterOrEqual(t, result.Txs[k].Height, result.Txs[k+1].Height) - require.GreaterOrEqual(t, result.Txs[k].Index, result.Txs[k+1].Index) - } - // check pagination - perPage = 3 - var ( - seen = map[int64]bool{} - maxHeight int64 - pages = int(math.Ceil(float64(txCount) / float64(perPage))) - ) - - for 
page := 1; page <= pages; page++ { - page := page - result, err := c.TxSearch(context.Background(), "tx.height >= 1", false, &page, &perPage, "asc") + // first we broadcast a few txs + for i := 0; i < 10; i++ { + _, _, tx := MakeTxKV() + _, err := c.BroadcastTxSync(ctx, tx) require.NoError(t, err) - if page < pages { - require.Len(t, result.Txs, perPage) - } else { - require.LessOrEqual(t, len(result.Txs), perPage) - } - require.Equal(t, txCount, result.TotalCount) - for _, tx := range result.Txs { - require.False(t, seen[tx.Height], - "Found duplicate height %v in page %v", tx.Height, page) - require.Greater(t, tx.Height, maxHeight, - "Found decreasing height %v (max seen %v) in page %v", tx.Height, maxHeight, page) - seen[tx.Height] = true - maxHeight = tx.Height - } } - require.Len(t, seen, txCount) - } -} -func TestBatchedJSONRPCCalls(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + // since we're not using an isolated test server, we'll have lingering transactions + // from other tests as well + result, err := c.TxSearch(ctx, "tx.height >= 0", true, nil, nil, "asc") + require.NoError(t, err) + txCount := len(result.Txs) + + // pick out the last tx to have something to search for in tests + find := result.Txs[len(result.Txs)-1] + anotherTxHash := types.Tx("a different tx").Hash() + + for _, c := range GetClients(t, n, conf) { + t.Run(fmt.Sprintf("%T", c), func(t *testing.T) { + // now we query for the tx. + result, err := c.TxSearch(ctx, fmt.Sprintf("tx.hash='%v'", find.Hash), true, nil, nil, "asc") + require.NoError(t, err) + require.Len(t, result.Txs, 1) + require.Equal(t, find.Hash, result.Txs[0].Hash) + + ptx := result.Txs[0] + assert.EqualValues(t, find.Height, ptx.Height) + assert.EqualValues(t, find.Tx, ptx.Tx) + assert.Zero(t, ptx.Index) + assert.True(t, ptx.TxResult.IsOK()) + assert.EqualValues(t, find.Hash, ptx.Hash) - _, conf := NodeSuite(t) - c := getHTTPClient(t, conf) - testBatchedJSONRPCCalls(ctx, t, c) + // time to verify the proof + if assert.EqualValues(t, find.Tx, ptx.Proof.Data) { + assert.NoError(t, ptx.Proof.Proof.Verify(ptx.Proof.RootHash, find.Hash)) + } + + // query by height + result, err = c.TxSearch(ctx, fmt.Sprintf("tx.height=%d", find.Height), true, nil, nil, "asc") + require.NoError(t, err) + require.Len(t, result.Txs, 1) + + // query for non-existing tx + result, err = c.TxSearch(ctx, fmt.Sprintf("tx.hash='%X'", anotherTxHash), false, nil, nil, "asc") + require.NoError(t, err) + require.Len(t, result.Txs, 0) + + // query using a compositeKey (see kvstore application) + result, err = c.TxSearch(ctx, "app.creator='Cosmoshi Netowoko'", false, nil, nil, "asc") + require.NoError(t, err) + require.Greater(t, len(result.Txs), 0, "expected a lot of transactions") + + // query using an index key + result, err = c.TxSearch(ctx, "app.index_key='index is working'", false, nil, nil, "asc") + require.NoError(t, err) + require.Greater(t, len(result.Txs), 0, "expected a lot of transactions") + + // query using a noindex key + result, err = c.TxSearch(ctx, "app.noindex_key='index is working'", false, nil, nil, "asc") + require.NoError(t, err) + require.Equal(t, len(result.Txs), 0, "expected no transactions") + + // query using a compositeKey (see kvstore application) and height + result, err = c.TxSearch(ctx, + "app.creator='Cosmoshi Netowoko' AND tx.height<10000", true, nil, nil, "asc") + require.NoError(t, err) + require.Greater(t, len(result.Txs), 0, "expected a lot of transactions") + + // query a non-existing tx 
with page 1 and txsPerPage 1 + perPage := 1 + result, err = c.TxSearch(ctx, "app.creator='Cosmoshi Neetowoko'", true, nil, &perPage, "asc") + require.NoError(t, err) + require.Len(t, result.Txs, 0) + + // check sorting + result, err = c.TxSearch(ctx, "tx.height >= 1", false, nil, nil, "asc") + require.NoError(t, err) + for k := 0; k < len(result.Txs)-1; k++ { + require.LessOrEqual(t, result.Txs[k].Height, result.Txs[k+1].Height) + require.LessOrEqual(t, result.Txs[k].Index, result.Txs[k+1].Index) + } + + result, err = c.TxSearch(ctx, "tx.height >= 1", false, nil, nil, "desc") + require.NoError(t, err) + for k := 0; k < len(result.Txs)-1; k++ { + require.GreaterOrEqual(t, result.Txs[k].Height, result.Txs[k+1].Height) + require.GreaterOrEqual(t, result.Txs[k].Index, result.Txs[k+1].Index) + } + // check pagination + perPage = 3 + var ( + seen = map[int64]bool{} + maxHeight int64 + pages = int(math.Ceil(float64(txCount) / float64(perPage))) + ) + + for page := 1; page <= pages; page++ { + page := page + result, err := c.TxSearch(ctx, "tx.height >= 1", false, &page, &perPage, "asc") + require.NoError(t, err) + if page < pages { + require.Len(t, result.Txs, perPage) + } else { + require.LessOrEqual(t, len(result.Txs), perPage) + } + require.Equal(t, txCount, result.TotalCount) + for _, tx := range result.Txs { + require.False(t, seen[tx.Height], + "Found duplicate height %v in page %v", tx.Height, page) + require.Greater(t, tx.Height, maxHeight, + "Found decreasing height %v (max seen %v) in page %v", tx.Height, maxHeight, page) + seen[tx.Height] = true + maxHeight = tx.Height + } + } + require.Len(t, seen, txCount) + }) + } + }) } func testBatchedJSONRPCCalls(ctx context.Context, t *testing.T, c *rpchttp.HTTP) { @@ -797,7 +881,7 @@ func testBatchedJSONRPCCalls(ctx context.Context, t *testing.T, c *rpchttp.HTTP) require.Equal(t, *bresult2, *r2) apph := tmmath.MaxInt64(bresult1.Height, bresult2.Height) + 1 - err = client.WaitForHeight(c, apph, nil) + err = client.WaitForHeight(ctx, c, apph, nil) require.NoError(t, err) q1, err := batch.ABCIQuery(ctx, "/key", k1) @@ -822,60 +906,3 @@ func testBatchedJSONRPCCalls(ctx context.Context, t *testing.T, c *rpchttp.HTTP) require.Equal(t, qresult1.Response.Value, v1) require.Equal(t, qresult2.Response.Value, v2) } - -func TestBatchedJSONRPCCallsCancellation(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - _, conf := NodeSuite(t) - c := getHTTPClient(t, conf) - _, _, tx1 := MakeTxKV() - _, _, tx2 := MakeTxKV() - - batch := c.NewBatch() - _, err := batch.BroadcastTxCommit(ctx, tx1) - require.NoError(t, err) - _, err = batch.BroadcastTxCommit(ctx, tx2) - require.NoError(t, err) - // we should have 2 requests waiting - require.Equal(t, 2, batch.Count()) - // we want to make sure we cleared 2 pending requests - require.Equal(t, 2, batch.Clear()) - // now there should be no batched requests - require.Equal(t, 0, batch.Count()) -} - -func TestSendingEmptyRequestBatch(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - _, conf := NodeSuite(t) - c := getHTTPClient(t, conf) - batch := c.NewBatch() - _, err := batch.Send(ctx) - require.Error(t, err, "sending an empty batch of JSON RPC requests should result in an error") -} - -func TestClearingEmptyRequestBatch(t *testing.T) { - _, conf := NodeSuite(t) - c := getHTTPClient(t, conf) - batch := c.NewBatch() - require.Zero(t, batch.Clear(), "clearing an empty batch of JSON RPC requests should result in a 0 result") -} - -func 
TestConcurrentJSONRPCBatching(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - _, conf := NodeSuite(t) - var wg sync.WaitGroup - c := getHTTPClient(t, conf) - for i := 0; i < 50; i++ { - wg.Add(1) - go func() { - defer wg.Done() - testBatchedJSONRPCCalls(ctx, t, c) - }() - } - wg.Wait() -} diff --git a/rpc/coretypes/requests.go b/rpc/coretypes/requests.go new file mode 100644 index 0000000000..ed90a11655 --- /dev/null +++ b/rpc/coretypes/requests.go @@ -0,0 +1,190 @@ +package coretypes + +import ( + "encoding/json" + "strconv" + "time" + + "github.com/tendermint/tendermint/internal/jsontypes" + "github.com/tendermint/tendermint/libs/bytes" + "github.com/tendermint/tendermint/types" +) + +type RequestSubscribe struct { + Query string `json:"query"` +} + +type RequestUnsubscribe struct { + Query string `json:"query"` +} + +type RequestBlockchainInfo struct { + MinHeight Int64 `json:"minHeight"` + MaxHeight Int64 `json:"maxHeight"` +} + +type RequestGenesisChunked struct { + Chunk Int64 `json:"chunk"` +} + +type RequestBlockInfo struct { + Height *Int64 `json:"height"` +} + +type RequestBlockByHash struct { + Hash bytes.HexBytes `json:"hash"` +} + +type RequestCheckTx struct { + Tx types.Tx `json:"tx"` +} + +type RequestRemoveTx struct { + TxKey types.TxKey `json:"txkey"` +} + +type RequestTx struct { + Hash bytes.HexBytes `json:"hash"` + Prove bool `json:"prove"` +} + +type RequestTxSearch struct { + Query string `json:"query"` + Prove bool `json:"prove"` + Page *Int64 `json:"page"` + PerPage *Int64 `json:"per_page"` + OrderBy string `json:"order_by"` +} + +type RequestBlockSearch struct { + Query string `json:"query"` + Page *Int64 `json:"page"` + PerPage *Int64 `json:"per_page"` + OrderBy string `json:"order_by"` +} + +type RequestValidators struct { + Height *Int64 `json:"height"` + Page *Int64 `json:"page"` + PerPage *Int64 `json:"per_page"` + + RequestQuorumInfo *bool `json:"request_quorum_info"` +} + +type RequestConsensusParams struct { + Height *Int64 `json:"height"` +} + +type RequestUnconfirmedTxs struct { + Page *Int64 `json:"page"` + PerPage *Int64 `json:"per_page"` +} + +type RequestBroadcastTx struct { + Tx types.Tx `json:"tx"` +} + +type RequestABCIQuery struct { + Path string `json:"path"` + Data bytes.HexBytes `json:"data"` + Height Int64 `json:"height"` + Prove bool `json:"prove"` +} + +type RequestBroadcastEvidence struct { + Evidence types.Evidence +} + +type requestBroadcastEvidenceJSON struct { + Evidence json.RawMessage `json:"evidence"` +} + +func (r RequestBroadcastEvidence) MarshalJSON() ([]byte, error) { + ev, err := jsontypes.Marshal(r.Evidence) + if err != nil { + return nil, err + } + return json.Marshal(requestBroadcastEvidenceJSON{ + Evidence: ev, + }) +} + +func (r *RequestBroadcastEvidence) UnmarshalJSON(data []byte) error { + var val requestBroadcastEvidenceJSON + if err := json.Unmarshal(data, &val); err != nil { + return err + } + if err := jsontypes.Unmarshal(val.Evidence, &r.Evidence); err != nil { + return err + } + return nil +} + +// RequestEvents is the argument for the "/events" RPC endpoint. +type RequestEvents struct { + // Optional filter spec. If nil or empty, all items are eligible. + Filter *EventFilter `json:"filter"` + + // The maximum number of eligible items to return. + // If zero or negative, the server will report a default number. + MaxItems int `json:"maxItems"` + + // Return only items after this cursor. If empty, the limit is just + // before the beginning of the event log. 
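+ // Cursors are opaque strings; callers typically echo back cursor values + // obtained from an earlier ResultEvents reply (see its Oldest and Newest + // fields).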
+ After string `json:"after"` + + // Return only items before this cursor. If empty, the limit is just + // after the head of the event log. + Before string `json:"before"` + + // Wait for up to this long for events to be available. + WaitTime time.Duration `json:"waitTime"` +} + +// An EventFilter specifies which events are selected by an /events request. +type EventFilter struct { + Query string `json:"query"` +} + +// Int64 is a wrapper for int64 that encodes to JSON as a string and can be +// decoded from either a string or a number value. +type Int64 int64 + +func (z *Int64) UnmarshalJSON(data []byte) error { + var s string + if len(data) != 0 && data[0] == '"' { + if err := json.Unmarshal(data, &s); err != nil { + return err + } + } else { + s = string(data) + } + v, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return err + } + *z = Int64(v) + return nil +} + +func (z Int64) MarshalJSON() ([]byte, error) { + return []byte(strconv.FormatInt(int64(z), 10)), nil +} + +// IntPtr returns a pointer to the value of *z as an int, or nil if z == nil. +func (z *Int64) IntPtr() *int { + if z == nil { + return nil + } + v := int(*z) + return &v +} + +// Int64Ptr returns an *Int64 that points to the same value as v, or nil. +func Int64Ptr(v *int) *Int64 { + if v == nil { + return nil + } + z := Int64(*v) + return &z +} diff --git a/rpc/coretypes/responses.go b/rpc/coretypes/responses.go index 739884ba0c..8188986b71 100644 --- a/rpc/coretypes/responses.go +++ b/rpc/coretypes/responses.go @@ -9,6 +9,7 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/internal/jsontypes" "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/types" ) @@ -27,7 +28,7 @@ var ( // List of blocks type ResultBlockchainInfo struct { - LastHeight int64 `json:"last_height"` + LastHeight int64 `json:"last_height,string"` BlockMetas []*types.BlockMeta `json:"block_metas"` } @@ -41,8 +42,8 @@ type ResultGenesis struct { // document to JSON and then splitting the resulting payload into // 16 megabyte blocks and then base64 encoding each block. 
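// A rough client-side reassembly sketch, mirroring the GenesisChunked test above: // first, _ := c.GenesisChunked(ctx, 0) // parts := make([]string, 0, first.TotalChunks) // for i := 0; i < first.TotalChunks; i++ { // chunk, _ := c.GenesisChunked(ctx, uint(i)) // data, _ := base64.StdEncoding.DecodeString(chunk.Data) // parts = append(parts, string(data)) // } // doc := []byte(strings.Join(parts, "")) // err := json.Unmarshal(doc, &genesisDoc) // where genesisDoc is a types.GenesisDoc.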
type ResultGenesisChunk struct { - ChunkNumber int `json:"chunk"` - TotalChunks int `json:"total"` + ChunkNumber int `json:"chunk,string"` + TotalChunks int `json:"total,string"` Data string `json:"data"` } @@ -65,13 +66,13 @@ type ResultCommit struct { // ABCI results from a block type ResultBlockResults struct { - Height int64 `json:"height"` - TxsResults []*abci.ResponseDeliverTx `json:"txs_results"` - TotalGasUsed int64 `json:"total_gas_used"` - BeginBlockEvents []abci.Event `json:"begin_block_events"` - EndBlockEvents []abci.Event `json:"end_block_events"` - ValidatorSetUpdate *abci.ValidatorSetUpdate `json:"validator_set_updates"` - ConsensusParamUpdates *types.ConsensusParams `json:"consensus_param_updates"` + Height int64 `json:"height,string"` + TxsResults []*abci.ExecTxResult `json:"txs_results"` + TotalGasUsed int64 `json:"total_gas_used,string"` + FinalizeBlockEvents []abci.Event `json:"finalize_block_events"` + ValidatorUpdates []abci.ValidatorUpdate `json:"validator_updates"` + ValidatorSetUpdate *abci.ValidatorSetUpdate `json:"validator_set_updates"` + ConsensusParamUpdates *types.ConsensusParams `json:"consensus_param_updates"` } // NewResultCommit is a helper to initialize the ResultCommit with @@ -92,28 +93,32 @@ func NewResultCommit(header *types.Header, commit *types.Commit, type SyncInfo struct { LatestBlockHash bytes.HexBytes `json:"latest_block_hash"` LatestAppHash bytes.HexBytes `json:"latest_app_hash"` - LatestBlockHeight int64 `json:"latest_block_height"` + LatestBlockHeight int64 `json:"latest_block_height,string"` LatestBlockTime time.Time `json:"latest_block_time"` EarliestBlockHash bytes.HexBytes `json:"earliest_block_hash"` EarliestAppHash bytes.HexBytes `json:"earliest_app_hash"` - EarliestBlockHeight int64 `json:"earliest_block_height"` + EarliestBlockHeight int64 `json:"earliest_block_height,string"` EarliestBlockTime time.Time `json:"earliest_block_time"` - MaxPeerBlockHeight int64 `json:"max_peer_block_height"` + MaxPeerBlockHeight int64 `json:"max_peer_block_height,string"` CatchingUp bool `json:"catching_up"` - TotalSyncedTime time.Duration `json:"total_synced_time"` - RemainingTime time.Duration `json:"remaining_time"` + TotalSyncedTime time.Duration `json:"total_synced_time,string"` + RemainingTime time.Duration `json:"remaining_time,string"` - TotalSnapshots int64 `json:"total_snapshots"` - ChunkProcessAvgTime time.Duration `json:"chunk_process_avg_time"` - SnapshotHeight int64 `json:"snapshot_height"` - SnapshotChunksCount int64 `json:"snapshot_chunks_count"` - SnapshotChunksTotal int64 `json:"snapshot_chunks_total"` - BackFilledBlocks int64 `json:"backfilled_blocks"` - BackFillBlocksTotal int64 `json:"backfill_blocks_total"` + TotalSnapshots int64 `json:"total_snapshots,string"` + ChunkProcessAvgTime time.Duration `json:"chunk_process_avg_time,string"` + SnapshotHeight int64 `json:"snapshot_height,string"` + SnapshotChunksCount int64 `json:"snapshot_chunks_count,string"` + SnapshotChunksTotal int64 `json:"snapshot_chunks_total,string"` + BackFilledBlocks int64 `json:"backfilled_blocks,string"` + BackFillBlocksTotal int64 `json:"backfill_blocks_total,string"` +} + +type ApplicationInfo struct { + Version string `json:"version"` } // Info about the node's validator @@ -124,9 +129,11 @@ type ValidatorInfo struct { // Node Status type ResultStatus struct { - NodeInfo types.NodeInfo `json:"node_info"` - SyncInfo SyncInfo `json:"sync_info"` - ValidatorInfo ValidatorInfo `json:"validator_info"` + NodeInfo types.NodeInfo `json:"node_info"` + 
ApplicationInfo ApplicationInfo `json:"application_info,omitempty"` + SyncInfo SyncInfo `json:"sync_info"` + ValidatorInfo ValidatorInfo `json:"validator_info"` + LightClientInfo types.LightClientInfo `json:"light_client_info,omitempty"` } // Is TxIndexing enabled @@ -141,7 +148,7 @@ func (s *ResultStatus) TxIndexEnabled() bool { type ResultNetInfo struct { Listening bool `json:"listening"` Listeners []string `json:"listeners"` - NPeers int `json:"n_peers"` + NPeers int `json:"n_peers,string"` Peers []Peer `json:"peers"` } @@ -161,22 +168,78 @@ type Peer struct { URL string `json:"url"` } -// Validators for a height. +// ResultValidators for a height. type ResultValidators struct { - BlockHeight int64 `json:"block_height"` - Validators []*types.Validator `json:"validators"` - ThresholdPublicKey *crypto.PubKey `json:"threshold_public_key"` + BlockHeight int64 + Validators []*types.Validator + + Count int // Count of actual validators in this result + Total int // Total number of validators + + // dash fields + ThresholdPublicKey *crypto.PubKey + QuorumType btcjson.LLMQType + QuorumHash *crypto.QuorumHash +} + +type resultValidatorsJSON struct { + BlockHeight int64 `json:"block_height"` + Validators []*types.Validator `json:"validators"` + + Count int `json:"count,string"` // Count of actual validators in this result + Total int `json:"total,string"` // Total number of validators + + // dash fields + ThresholdPublicKey json.RawMessage `json:"threshold_public_key"` QuorumType btcjson.LLMQType `json:"quorum_type"` QuorumHash *crypto.QuorumHash `json:"quorum_hash"` - // Count of actual validators in this result - Count int `json:"count"` - // Total number of validators - Total int `json:"total"` +} + +func (r ResultValidators) MarshalJSON() ([]byte, error) { + res := resultValidatorsJSON{ + BlockHeight: r.BlockHeight, + Validators: r.Validators, + Count: r.Count, + Total: r.Total, + QuorumType: r.QuorumType, + QuorumHash: r.QuorumHash, + } + var err error + if r.ThresholdPublicKey != nil { + res.ThresholdPublicKey, err = jsontypes.Marshal(*r.ThresholdPublicKey) + if err != nil { + return nil, err + } + } + return json.Marshal(res) +} + +func (r *ResultValidators) UnmarshalJSON(data []byte) error { + var res resultValidatorsJSON + err := json.Unmarshal(data, &res) + if err != nil { + return err + } + r.Total = res.Total + r.Count = res.Count + r.BlockHeight = res.BlockHeight + r.Validators = res.Validators + r.QuorumType = res.QuorumType + r.QuorumHash = res.QuorumHash + var thresholdPubKey crypto.PubKey + if res.ThresholdPublicKey != nil { + err = jsontypes.Unmarshal(res.ThresholdPublicKey, &thresholdPubKey) + if err != nil { + return err + } + r.ThresholdPublicKey = &thresholdPubKey + } + return nil } // ConsensusParams for given height type ResultConsensusParams struct { - BlockHeight int64 `json:"block_height"` + BlockHeight int64 `json:"block_height,string"` ConsensusParams types.ConsensusParams `json:"consensus_params"` } @@ -205,16 +268,15 @@ type ResultBroadcastTx struct { Log string `json:"log"` Codespace string `json:"codespace"` MempoolError string `json:"mempool_error"` - - Hash bytes.HexBytes `json:"hash"` + Hash bytes.HexBytes `json:"hash"` } // CheckTx and DeliverTx results type ResultBroadcastTxCommit struct { - CheckTx abci.ResponseCheckTx `json:"check_tx"` - DeliverTx abci.ResponseDeliverTx `json:"deliver_tx"` - Hash bytes.HexBytes `json:"hash"` - Height int64 `json:"height"` + CheckTx abci.ResponseCheckTx `json:"check_tx"` + TxResult abci.ExecTxResult `json:"tx_result"` + Hash 
bytes.HexBytes `json:"hash"` + Height int64 `json:"height,string"` } // ResultCheckTx wraps abci.ResponseCheckTx. @@ -224,31 +286,31 @@ type ResultCheckTx struct { // Result of querying for a tx type ResultTx struct { - Hash bytes.HexBytes `json:"hash"` - Height int64 `json:"height"` - Index uint32 `json:"index"` - TxResult abci.ResponseDeliverTx `json:"tx_result"` - Tx types.Tx `json:"tx"` - Proof types.TxProof `json:"proof,omitempty"` + Hash bytes.HexBytes `json:"hash"` + Height int64 `json:"height,string"` + Index uint32 `json:"index"` + TxResult abci.ExecTxResult `json:"tx_result"` + Tx types.Tx `json:"tx"` + Proof types.TxProof `json:"proof,omitempty"` } // Result of searching for txs type ResultTxSearch struct { Txs []*ResultTx `json:"txs"` - TotalCount int `json:"total_count"` + TotalCount int `json:"total_count,string"` } // ResultBlockSearch defines the RPC response type for a block search by events. type ResultBlockSearch struct { Blocks []*ResultBlock `json:"blocks"` - TotalCount int `json:"total_count"` + TotalCount int `json:"total_count,string"` } // List of mempool txs type ResultUnconfirmedTxs struct { - Count int `json:"n_txs"` - Total int `json:"total"` - TotalBytes int64 `json:"total_bytes"` + Count int `json:"n_txs,string"` + Total int `json:"total,string"` + TotalBytes int64 `json:"total_bytes,string"` Txs []types.Tx `json:"txs"` } @@ -278,8 +340,89 @@ type ( // Event data from a subscription type ResultEvent struct { - SubscriptionID string `json:"subscription_id"` - Query string `json:"query"` - Data types.TMEventData `json:"data"` - Events []abci.Event `json:"events"` + SubscriptionID string + Query string + Data types.EventData + Events []abci.Event +} + +type resultEventJSON struct { + SubscriptionID string `json:"subscription_id"` + Query string `json:"query"` + Data json.RawMessage `json:"data"` + Events []abci.Event `json:"events"` +} + +func (r ResultEvent) MarshalJSON() ([]byte, error) { + evt, err := jsontypes.Marshal(r.Data) + if err != nil { + return nil, err + } + return json.Marshal(resultEventJSON{ + SubscriptionID: r.SubscriptionID, + Query: r.Query, + Data: evt, + Events: r.Events, + }) +} + +func (r *ResultEvent) UnmarshalJSON(data []byte) error { + var res resultEventJSON + if err := json.Unmarshal(data, &res); err != nil { + return err + } + if err := jsontypes.Unmarshal(res.Data, &r.Data); err != nil { + return err + } + r.SubscriptionID = res.SubscriptionID + r.Query = res.Query + r.Events = res.Events + return nil +} + +// Evidence is an argument wrapper for a types.Evidence value, that handles +// encoding and decoding through JSON. +type Evidence struct { + Value types.Evidence +} + +func (e Evidence) MarshalJSON() ([]byte, error) { return jsontypes.Marshal(e.Value) } +func (e *Evidence) UnmarshalJSON(data []byte) error { return jsontypes.Unmarshal(data, &e.Value) } + +// ResultEvents is the response from the "/events" RPC endpoint. +type ResultEvents struct { + // The items matching the request parameters, from newest + // to oldest, if any were available within the timeout. + Items []*EventItem `json:"items"` + + // This is true if there is at least one older matching item + // available in the log that was not returned. + More bool `json:"more"` + + // The cursor of the oldest item in the log at the time of this reply, + // or "" if the log is empty. + Oldest string `json:"oldest"` + + // The cursor of the newest item in the log at the time of this reply, + // or "" if the log is empty. 
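+ // (A poller can pass the last Newest value it has seen as RequestEvents.After, + // together with a WaitTime, to long-poll for only newer items.)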
+ Newest string `json:"newest"` +} + +type EventItem struct { + // The cursor of this item. + Cursor string `json:"cursor"` + + // The event label of this item (for example, "Vote"). + Event string `json:"event,omitempty"` + + // The encoded event data for this item. The content is a JSON object with + // the following structure: + // + // { + // "type": "type-tag", + // "value": <json-encoded-value> + // } + // + // The known type tags are defined by the tendermint/types package. + Data json.RawMessage `json:"data"` } diff --git a/rpc/coretypes/responses_test.go b/rpc/coretypes/responses_test.go index a85f3f7775..d4ced795a4 100644 --- a/rpc/coretypes/responses_test.go +++ b/rpc/coretypes/responses_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/tendermint/tendermint/types" ) diff --git a/rpc/grpc/api.go b/rpc/grpc/api.go deleted file mode 100644 index 4a4a136877..0000000000 --- a/rpc/grpc/api.go +++ /dev/null @@ -1,41 +0,0 @@ -package coregrpc - -import ( - "context" - - abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/internal/rpc/core" - rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" -) - -type broadcastAPI struct { - env *core.Environment -} - -func (bapi *broadcastAPI) Ping(ctx context.Context, req *RequestPing) (*ResponsePing, error) { - // kvstore so we can check if the server is up - return &ResponsePing{}, nil -} - -// Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36. -func (bapi *broadcastAPI) BroadcastTx(ctx context.Context, req *RequestBroadcastTx) (*ResponseBroadcastTx, error) { - // NOTE: there's no way to get client's remote address - // see https://stackoverflow.com/questions/33684570/session-and-remote-ip-address-in-grpc-go - res, err := bapi.env.BroadcastTxCommit(&rpctypes.Context{}, req.Tx) - if err != nil { - return nil, err - } - - return &ResponseBroadcastTx{ - CheckTx: &abci.ResponseCheckTx{ - Code: res.CheckTx.Code, - Data: res.CheckTx.Data, - Log: res.CheckTx.Log, - }, - DeliverTx: &abci.ResponseDeliverTx{ - Code: res.DeliverTx.Code, - Data: res.DeliverTx.Data, - Log: res.DeliverTx.Log, - }, - }, nil -} diff --git a/rpc/grpc/client_server.go b/rpc/grpc/client_server.go deleted file mode 100644 index 2b6ea2b91e..0000000000 --- a/rpc/grpc/client_server.go +++ /dev/null @@ -1,44 +0,0 @@ -package coregrpc - -import ( - "context" - "net" - - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - - "github.com/tendermint/tendermint/internal/rpc/core" - tmnet "github.com/tendermint/tendermint/libs/net" -) - -// Config is an gRPC server configuration. -type Config struct { - MaxOpenConnections int -} - -// StartGRPCServer starts a new gRPC BroadcastAPIServer using the given -// net.Listener. -// NOTE: This function blocks - you may want to call it in a go-routine. -// Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36 -func StartGRPCServer(env *core.Environment, ln net.Listener) error { - grpcServer := grpc.NewServer() - RegisterBroadcastAPIServer(grpcServer, &broadcastAPI{env: env}) - return grpcServer.Serve(ln) -} - -// StartGRPCClient dials the gRPC server using protoAddr and returns a new -// BroadcastAPIClient. 
-func StartGRPCClient(protoAddr string) BroadcastAPIClient { - conn, err := grpc.Dial(protoAddr, - grpc.WithTransportCredentials(insecure.NewCredentials()), - grpc.WithContextDialer(dialerFunc), - ) - if err != nil { - panic(err) - } - return NewBroadcastAPIClient(conn) -} - -func dialerFunc(ctx context.Context, addr string) (net.Conn, error) { - return tmnet.Connect(addr) -} diff --git a/rpc/grpc/grpc_test.go b/rpc/grpc/grpc_test.go deleted file mode 100644 index 97b4faaf96..0000000000 --- a/rpc/grpc/grpc_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package coregrpc_test - -import ( - "context" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/abci/example/kvstore" - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/libs/service" - coregrpc "github.com/tendermint/tendermint/rpc/grpc" - rpctest "github.com/tendermint/tendermint/rpc/test" -) - -func NodeSuite(t *testing.T) (service.Service, *config.Config) { - t.Helper() - - ctx, cancel := context.WithCancel(context.Background()) - - conf, err := rpctest.CreateConfig(t.Name()) - require.NoError(t, err) - - // start a tendermint node in the background to test against - app := kvstore.NewApplication() - - node, closer, err := rpctest.StartTendermint(ctx, conf, app) - require.NoError(t, err) - t.Cleanup(func() { - _ = closer(ctx) - cancel() - }) - return node, conf -} - -func TestBroadcastTx(t *testing.T) { - _, conf := NodeSuite(t) - - res, err := rpctest.GetGRPCClient(conf).BroadcastTx( - context.Background(), - &coregrpc.RequestBroadcastTx{Tx: []byte("this is a tx")}, - ) - require.NoError(t, err) - require.EqualValues(t, 0, res.CheckTx.Code) - require.EqualValues(t, 0, res.DeliverTx.Code) -} diff --git a/rpc/grpc/types.pb.go b/rpc/grpc/types.pb.go deleted file mode 100644 index b9cbee03fc..0000000000 --- a/rpc/grpc/types.pb.go +++ /dev/null @@ -1,924 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: tendermint/rpc/grpc/types.proto - -package coregrpc - -import ( - context "context" - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - types "github.com/tendermint/tendermint/abci/types" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type RequestPing struct { -} - -func (m *RequestPing) Reset() { *m = RequestPing{} } -func (m *RequestPing) String() string { return proto.CompactTextString(m) } -func (*RequestPing) ProtoMessage() {} -func (*RequestPing) Descriptor() ([]byte, []int) { - return fileDescriptor_0ffff5682c662b95, []int{0} -} -func (m *RequestPing) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RequestPing) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RequestPing.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RequestPing) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestPing.Merge(m, src) -} -func (m *RequestPing) XXX_Size() int { - return m.Size() -} -func (m *RequestPing) XXX_DiscardUnknown() { - xxx_messageInfo_RequestPing.DiscardUnknown(m) -} - -var xxx_messageInfo_RequestPing proto.InternalMessageInfo - -type RequestBroadcastTx struct { - Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` -} - -func (m *RequestBroadcastTx) Reset() { *m = RequestBroadcastTx{} } -func (m *RequestBroadcastTx) String() string { return proto.CompactTextString(m) } -func (*RequestBroadcastTx) ProtoMessage() {} -func (*RequestBroadcastTx) Descriptor() ([]byte, []int) { - return fileDescriptor_0ffff5682c662b95, []int{1} -} -func (m *RequestBroadcastTx) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RequestBroadcastTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RequestBroadcastTx.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RequestBroadcastTx) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestBroadcastTx.Merge(m, src) -} -func (m *RequestBroadcastTx) XXX_Size() int { - return m.Size() -} -func (m *RequestBroadcastTx) XXX_DiscardUnknown() { - xxx_messageInfo_RequestBroadcastTx.DiscardUnknown(m) -} - -var xxx_messageInfo_RequestBroadcastTx proto.InternalMessageInfo - -func (m *RequestBroadcastTx) GetTx() []byte { - if m != nil { - return m.Tx - } - return nil -} - -type ResponsePing struct { -} - -func (m *ResponsePing) Reset() { *m = ResponsePing{} } -func (m *ResponsePing) String() string { return proto.CompactTextString(m) } -func (*ResponsePing) ProtoMessage() {} -func (*ResponsePing) Descriptor() ([]byte, []int) { - return fileDescriptor_0ffff5682c662b95, []int{2} -} -func (m *ResponsePing) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResponsePing) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResponsePing.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResponsePing) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponsePing.Merge(m, src) -} -func (m *ResponsePing) XXX_Size() int { - return m.Size() -} -func (m *ResponsePing) XXX_DiscardUnknown() { - xxx_messageInfo_ResponsePing.DiscardUnknown(m) -} - -var xxx_messageInfo_ResponsePing proto.InternalMessageInfo - -type ResponseBroadcastTx struct { - CheckTx *types.ResponseCheckTx `protobuf:"bytes,1,opt,name=check_tx,json=checkTx,proto3" 
json:"check_tx,omitempty"` - DeliverTx *types.ResponseDeliverTx `protobuf:"bytes,2,opt,name=deliver_tx,json=deliverTx,proto3" json:"deliver_tx,omitempty"` -} - -func (m *ResponseBroadcastTx) Reset() { *m = ResponseBroadcastTx{} } -func (m *ResponseBroadcastTx) String() string { return proto.CompactTextString(m) } -func (*ResponseBroadcastTx) ProtoMessage() {} -func (*ResponseBroadcastTx) Descriptor() ([]byte, []int) { - return fileDescriptor_0ffff5682c662b95, []int{3} -} -func (m *ResponseBroadcastTx) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResponseBroadcastTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResponseBroadcastTx.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResponseBroadcastTx) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseBroadcastTx.Merge(m, src) -} -func (m *ResponseBroadcastTx) XXX_Size() int { - return m.Size() -} -func (m *ResponseBroadcastTx) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseBroadcastTx.DiscardUnknown(m) -} - -var xxx_messageInfo_ResponseBroadcastTx proto.InternalMessageInfo - -func (m *ResponseBroadcastTx) GetCheckTx() *types.ResponseCheckTx { - if m != nil { - return m.CheckTx - } - return nil -} - -func (m *ResponseBroadcastTx) GetDeliverTx() *types.ResponseDeliverTx { - if m != nil { - return m.DeliverTx - } - return nil -} - -func init() { - proto.RegisterType((*RequestPing)(nil), "tendermint.rpc.grpc.RequestPing") - proto.RegisterType((*RequestBroadcastTx)(nil), "tendermint.rpc.grpc.RequestBroadcastTx") - proto.RegisterType((*ResponsePing)(nil), "tendermint.rpc.grpc.ResponsePing") - proto.RegisterType((*ResponseBroadcastTx)(nil), "tendermint.rpc.grpc.ResponseBroadcastTx") -} - -func init() { proto.RegisterFile("tendermint/rpc/grpc/types.proto", fileDescriptor_0ffff5682c662b95) } - -var fileDescriptor_0ffff5682c662b95 = []byte{ - // 316 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x2f, 0x49, 0xcd, 0x4b, - 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x2f, 0x2a, 0x48, 0xd6, 0x4f, 0x07, 0x11, 0x25, 0x95, - 0x05, 0xa9, 0xc5, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0xc2, 0x08, 0x05, 0x7a, 0x45, 0x05, - 0xc9, 0x7a, 0x20, 0x05, 0x52, 0xd2, 0x48, 0xba, 0x12, 0x93, 0x92, 0x33, 0x91, 0x75, 0x28, 0xf1, - 0x72, 0x71, 0x07, 0xa5, 0x16, 0x96, 0xa6, 0x16, 0x97, 0x04, 0x64, 0xe6, 0xa5, 0x2b, 0xa9, 0x70, - 0x09, 0x41, 0xb9, 0x4e, 0x45, 0xf9, 0x89, 0x29, 0xc9, 0x89, 0xc5, 0x25, 0x21, 0x15, 0x42, 0x7c, - 0x5c, 0x4c, 0x25, 0x15, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x3c, 0x41, 0x4c, 0x25, 0x15, 0x4a, 0x7c, - 0x5c, 0x3c, 0x41, 0xa9, 0xc5, 0x05, 0xf9, 0x79, 0xc5, 0xa9, 0x60, 0x5d, 0x53, 0x19, 0xb9, 0x84, - 0x61, 0x02, 0xc8, 0xfa, 0xac, 0xb9, 0x38, 0x92, 0x33, 0x52, 0x93, 0xb3, 0xe3, 0xa1, 0xba, 0xb9, - 0x8d, 0x14, 0xf4, 0x90, 0x5c, 0x08, 0x72, 0x8c, 0x1e, 0x4c, 0x9f, 0x33, 0x48, 0x61, 0x48, 0x45, - 0x10, 0x7b, 0x32, 0x84, 0x21, 0xe4, 0xc8, 0xc5, 0x95, 0x92, 0x9a, 0x93, 0x59, 0x96, 0x5a, 0x04, - 0xd2, 0xce, 0x04, 0xd6, 0xae, 0x84, 0x53, 0xbb, 0x0b, 0x44, 0x69, 0x48, 0x45, 0x10, 0x67, 0x0a, - 0x8c, 0x69, 0xb4, 0x97, 0x91, 0x8b, 0x07, 0xee, 0x1e, 0xc7, 0x00, 0x4f, 0x21, 0x6f, 0x2e, 0x16, - 0x90, 0x83, 0x85, 0x50, 0x9c, 0x01, 0x0b, 0x28, 0x3d, 0xa4, 0x80, 0x90, 0x52, 0xc4, 0xa1, 0x02, - 0xe1, 0x6b, 0xa1, 0x04, 0x2e, 0x6e, 0x64, 0xcf, 0xaa, 0xe3, 0x33, 0x13, 0x49, 0xa1, 0x94, 0x06, - 0x5e, 
0xa3, 0x91, 0x54, 0x3a, 0xf9, 0x9c, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, - 0x47, 0x72, 0x8c, 0x13, 0x1e, 0xcb, 0x31, 0x5c, 0x78, 0x2c, 0xc7, 0x70, 0xe3, 0xb1, 0x1c, 0x43, - 0x94, 0x51, 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x3e, 0x52, 0xf4, 0x62, - 0x49, 0x1f, 0xd6, 0xc9, 0xf9, 0x45, 0xa9, 0x20, 0x46, 0x12, 0x1b, 0x38, 0xc6, 0x8d, 0x01, 0x01, - 0x00, 0x00, 0xff, 0xff, 0xf6, 0x4b, 0x02, 0xd8, 0x46, 0x02, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// BroadcastAPIClient is the client API for BroadcastAPI service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type BroadcastAPIClient interface { - Ping(ctx context.Context, in *RequestPing, opts ...grpc.CallOption) (*ResponsePing, error) - BroadcastTx(ctx context.Context, in *RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error) -} - -type broadcastAPIClient struct { - cc *grpc.ClientConn -} - -func NewBroadcastAPIClient(cc *grpc.ClientConn) BroadcastAPIClient { - return &broadcastAPIClient{cc} -} - -func (c *broadcastAPIClient) Ping(ctx context.Context, in *RequestPing, opts ...grpc.CallOption) (*ResponsePing, error) { - out := new(ResponsePing) - err := c.cc.Invoke(ctx, "/tendermint.rpc.grpc.BroadcastAPI/Ping", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *broadcastAPIClient) BroadcastTx(ctx context.Context, in *RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error) { - out := new(ResponseBroadcastTx) - err := c.cc.Invoke(ctx, "/tendermint.rpc.grpc.BroadcastAPI/BroadcastTx", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// BroadcastAPIServer is the server API for BroadcastAPI service. -type BroadcastAPIServer interface { - Ping(context.Context, *RequestPing) (*ResponsePing, error) - BroadcastTx(context.Context, *RequestBroadcastTx) (*ResponseBroadcastTx, error) -} - -// UnimplementedBroadcastAPIServer can be embedded to have forward compatible implementations. 
-type UnimplementedBroadcastAPIServer struct {
-}
-
-func (*UnimplementedBroadcastAPIServer) Ping(ctx context.Context, req *RequestPing) (*ResponsePing, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method Ping not implemented")
-}
-func (*UnimplementedBroadcastAPIServer) BroadcastTx(ctx context.Context, req *RequestBroadcastTx) (*ResponseBroadcastTx, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method BroadcastTx not implemented")
-}
-
-func RegisterBroadcastAPIServer(s *grpc.Server, srv BroadcastAPIServer) {
-	s.RegisterService(&_BroadcastAPI_serviceDesc, srv)
-}
-
-func _BroadcastAPI_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(RequestPing)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(BroadcastAPIServer).Ping(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/tendermint.rpc.grpc.BroadcastAPI/Ping",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(BroadcastAPIServer).Ping(ctx, req.(*RequestPing))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _BroadcastAPI_BroadcastTx_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(RequestBroadcastTx)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(BroadcastAPIServer).BroadcastTx(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/tendermint.rpc.grpc.BroadcastAPI/BroadcastTx",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(BroadcastAPIServer).BroadcastTx(ctx, req.(*RequestBroadcastTx))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-var _BroadcastAPI_serviceDesc = grpc.ServiceDesc{
-	ServiceName: "tendermint.rpc.grpc.BroadcastAPI",
-	HandlerType: (*BroadcastAPIServer)(nil),
-	Methods: []grpc.MethodDesc{
-		{
-			MethodName: "Ping",
-			Handler:    _BroadcastAPI_Ping_Handler,
-		},
-		{
-			MethodName: "BroadcastTx",
-			Handler:    _BroadcastAPI_BroadcastTx_Handler,
-		},
-	},
-	Streams:  []grpc.StreamDesc{},
-	Metadata: "tendermint/rpc/grpc/types.proto",
-}
-
-func (m *RequestPing) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *RequestPing) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *RequestPing) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	return len(dAtA) - i, nil
-}
-
-func (m *RequestBroadcastTx) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *RequestBroadcastTx) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *RequestBroadcastTx) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if len(m.Tx) > 0 {
-		i -= len(m.Tx)
-		copy(dAtA[i:], m.Tx)
-		i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *ResponsePing) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *ResponsePing) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ResponsePing) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	return len(dAtA) - i, nil
-}
-
-func (m *ResponseBroadcastTx) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *ResponseBroadcastTx) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ResponseBroadcastTx) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.DeliverTx != nil {
-		{
-			size, err := m.DeliverTx.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintTypes(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x12
-	}
-	if m.CheckTx != nil {
-		{
-			size, err := m.CheckTx.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintTypes(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func encodeVarintTypes(dAtA []byte, offset int, v uint64) int {
-	offset -= sovTypes(v)
-	base := offset
-	for v >= 1<<7 {
-		dAtA[offset] = uint8(v&0x7f | 0x80)
-		v >>= 7
-		offset++
-	}
-	dAtA[offset] = uint8(v)
-	return base
-}
-func (m *RequestPing) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	return n
-}
-
-func (m *RequestBroadcastTx) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Tx)
-	if l > 0 {
-		n += 1 + l + sovTypes(uint64(l))
-	}
-	return n
-}
-
-func (m *ResponsePing) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	return n
-}
-
-func (m *ResponseBroadcastTx) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.CheckTx != nil {
-		l = m.CheckTx.Size()
-		n += 1 + l + sovTypes(uint64(l))
-	}
-	if m.DeliverTx != nil {
-		l = m.DeliverTx.Size()
-		n += 1 + l + sovTypes(uint64(l))
-	}
-	return n
-}
-
-func sovTypes(x uint64) (n int) {
-	return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozTypes(x uint64) (n int) {
-	return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *RequestPing) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowTypes
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: RequestPing: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: RequestPing: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		default:
-			iNdEx = preIndex
-			skippy, err := skipTypes(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthTypes
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *RequestBroadcastTx) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowTypes
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: RequestBroadcastTx: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: RequestBroadcastTx: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Tx", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowTypes
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthTypes
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthTypes
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Tx = append(m.Tx[:0], dAtA[iNdEx:postIndex]...)
-			if m.Tx == nil {
-				m.Tx = []byte{}
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipTypes(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthTypes
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *ResponsePing) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowTypes
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: ResponsePing: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: ResponsePing: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		default:
-			iNdEx = preIndex
-			skippy, err := skipTypes(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthTypes
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *ResponseBroadcastTx) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowTypes
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: ResponseBroadcastTx: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: ResponseBroadcastTx: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field CheckTx", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowTypes
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthTypes
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthTypes
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.CheckTx == nil {
-				m.CheckTx = &types.ResponseCheckTx{}
-			}
-			if err := m.CheckTx.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field DeliverTx", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowTypes
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthTypes
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthTypes
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.DeliverTx == nil {
-				m.DeliverTx = &types.ResponseDeliverTx{}
-			}
-			if err := m.DeliverTx.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipTypes(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if (skippy < 0) || (iNdEx+skippy) < 0 {
-				return ErrInvalidLengthTypes
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func skipTypes(dAtA []byte) (n int, err error) {
-	l := len(dAtA)
-	iNdEx := 0
-	depth := 0
-	for iNdEx < l {
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return 0, ErrIntOverflowTypes
-			}
-			if iNdEx >= l {
-				return 0, io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		wireType := int(wire & 0x7)
-		switch wireType {
-		case 0:
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowTypes
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				iNdEx++
-				if dAtA[iNdEx-1] < 0x80 {
-					break
-				}
-			}
-		case 1:
-			iNdEx += 8
-		case 2:
-			var length int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowTypes
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				length |= (int(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if length < 0 {
-				return 0, ErrInvalidLengthTypes
-			}
-			iNdEx += length
-		case 3:
-			depth++
-		case 4:
-			if depth == 0 {
-				return 0, ErrUnexpectedEndOfGroupTypes
-			}
-			depth--
-		case 5:
-			iNdEx += 4
-		default:
-			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
-		}
-		if iNdEx < 0 {
-			return 0, ErrInvalidLengthTypes
-		}
-		if depth == 0 {
-			return iNdEx, nil
-		}
-	}
-	return 0, io.ErrUnexpectedEOF
-}
-
-var (
-	ErrInvalidLengthTypes        = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowTypes          = fmt.Errorf("proto: integer overflow")
-	ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group")
-)
diff --git a/rpc/jsonrpc/client/args_test.go b/rpc/jsonrpc/client/args_test.go
deleted file mode 100644
index 2506f30734..0000000000
--- a/rpc/jsonrpc/client/args_test.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package client
-
-import (
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-)
-
-type Tx []byte
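The deleted sovTypes/encodeVarintTypes helpers above implement protobuf's base-128 varint: sovTypes computes (bits.Len64(x|1)+6)/7, the byte count of x at seven payload bits per byte (x|1 makes zero occupy one byte), and encodeVarintTypes emits those bytes with the continuation bit set. A standalone sketch of the same encoding, standard library only and not part of this change:

package main

import (
	"fmt"
	"math/bits"
)

// varintLen reports how many bytes the base-128 varint encoding of x needs:
// one byte per 7 payload bits, rounding up; x|1 makes zero take one byte.
func varintLen(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

// putVarint writes x into buf in protobuf varint form and returns the length.
func putVarint(buf []byte, x uint64) int {
	n := 0
	for x >= 1<<7 {
		buf[n] = uint8(x&0x7f | 0x80) // low 7 bits, continuation bit set
		x >>= 7
		n++
	}
	buf[n] = uint8(x)
	return n + 1
}

func main() {
	buf := make([]byte, 10)
	n := putVarint(buf, 300)
	fmt.Println(n, varintLen(300), buf[:n]) // 2 2 [172 2]
}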
-
-type Foo struct {
-	Bar int
-	Baz string
-}
-
-func TestArgToJSON(t *testing.T) {
-	assert := assert.New(t)
-	require := require.New(t)
-
-	cases := []struct {
-		input    interface{}
-		expected string
-	}{
-		{[]byte("1234"), "0x31323334"},
-		{Tx("654"), "0x363534"},
-		{Foo{7, "hello"}, `{"Bar":"7","Baz":"hello"}`},
-	}
-
-	for i, tc := range cases {
-		args := map[string]interface{}{"data": tc.input}
-		err := argsToJSON(args)
-		require.Nil(err, "%d: %+v", i, err)
-		require.Equal(1, len(args), "%d", i)
-		data, ok := args["data"].(string)
-		require.True(ok, "%d: %#v", i, args["data"])
-		assert.Equal(tc.expected, data, "%d", i)
-	}
-}
diff --git a/rpc/jsonrpc/client/decode.go b/rpc/jsonrpc/client/decode.go
index f69926cb74..2babcf70c3 100644
--- a/rpc/jsonrpc/client/decode.go
+++ b/rpc/jsonrpc/client/decode.go
@@ -2,125 +2,69 @@ package client

 import (
 	"encoding/json"
-	"errors"
 	"fmt"

-	tmjson "github.com/tendermint/tendermint/libs/json"
 	rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
 )

-func unmarshalResponseBytes(
-	responseBytes []byte,
-	expectedID rpctypes.JSONRPCIntID,
-	result interface{},
-) (interface{}, error) {
-
+func unmarshalResponseBytes(responseBytes []byte, expectedID string, result interface{}) error {
 	// Read response. If rpc/core/types is imported, the result will unmarshal
 	// into the correct type.
-	response := &rpctypes.RPCResponse{}
-	if err := json.Unmarshal(responseBytes, response); err != nil {
-		return nil, fmt.Errorf("error unmarshaling: %w", err)
+	var response rpctypes.RPCResponse
+	if err := json.Unmarshal(responseBytes, &response); err != nil {
+		return fmt.Errorf("unmarshaling response: %w", err)
 	}

 	if response.Error != nil {
-		return nil, response.Error
+		return response.Error
 	}

-	if err := validateAndVerifyID(response, expectedID); err != nil {
-		return nil, fmt.Errorf("wrong ID: %w", err)
+	if got := response.ID(); got != expectedID {
+		return fmt.Errorf("got response ID %q, wanted %q", got, expectedID)
 	}

 	// Unmarshal the RawMessage into the result.
-	if err := tmjson.Unmarshal(response.Result, result); err != nil {
-		return nil, fmt.Errorf("error unmarshaling result: %w", err)
+	if err := json.Unmarshal(response.Result, result); err != nil {
+		return fmt.Errorf("error unmarshaling result: %w", err)
 	}
-
-	return result, nil
+	return nil
 }

-func unmarshalResponseBytesArray(
-	responseBytes []byte,
-	expectedIDs []rpctypes.JSONRPCIntID,
-	results []interface{},
-) ([]interface{}, error) {
-
-	var (
-		responses []rpctypes.RPCResponse
-	)
-
+func unmarshalResponseBytesArray(responseBytes []byte, expectedIDs []string, results []interface{}) error {
+	var responses []rpctypes.RPCResponse
 	if err := json.Unmarshal(responseBytes, &responses); err != nil {
-		return nil, fmt.Errorf("error unmarshaling: %w", err)
-	}
-
-	// No response error checking here as there may be a mixture of successful
-	// and unsuccessful responses.
-
-	if len(results) != len(responses) {
-		return nil, fmt.Errorf(
-			"expected %d result objects into which to inject responses, but got %d",
-			len(responses),
-			len(results),
-		)
+		return fmt.Errorf("unmarshaling responses: %w", err)
+	} else if len(responses) != len(results) {
+		return fmt.Errorf("got %d results, wanted %d", len(responses), len(results))
 	}

 	// Intersect IDs from responses with expectedIDs.
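For reference, the rewritten decoder above compares response IDs as plain strings instead of type-asserting a JSONRPCIntID. A minimal sketch of that round trip, using a simplified stand-in for the response type (illustrative only, not the rpctypes API):

package main

import (
	"encoding/json"
	"fmt"
)

// response is a pared-down stand-in for the RPCResponse type used above;
// the fields follow JSON-RPC 2.0 but this struct is illustrative only.
type response struct {
	ID     json.RawMessage `json:"id"`
	Result json.RawMessage `json:"result"`
}

func main() {
	raw := []byte(`{"jsonrpc":"2.0","id":7,"result":{"value":"acbd"}}`)
	var resp response
	if err := json.Unmarshal(raw, &resp); err != nil {
		panic(err)
	}
	// Compare IDs as text, as the rewritten decoder compares strings.
	if got, want := string(resp.ID), "7"; got != want {
		panic(fmt.Sprintf("got response ID %q, wanted %q", got, want))
	}
	var result struct {
		Value string `json:"value"`
	}
	if err := json.Unmarshal(resp.Result, &result); err != nil {
		panic(err)
	}
	fmt.Println(result.Value) // acbd
}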
-	ids := make([]rpctypes.JSONRPCIntID, len(responses))
-	var ok bool
+	ids := make([]string, len(responses))
 	for i, resp := range responses {
-		ids[i], ok = resp.ID.(rpctypes.JSONRPCIntID)
-		if !ok {
-			return nil, fmt.Errorf("expected JSONRPCIntID, got %T", resp.ID)
-		}
+		ids[i] = resp.ID()
 	}
 	if err := validateResponseIDs(ids, expectedIDs); err != nil {
-		return nil, fmt.Errorf("wrong IDs: %w", err)
+		return fmt.Errorf("wrong IDs: %w", err)
 	}

-	for i := 0; i < len(responses); i++ {
-		if err := tmjson.Unmarshal(responses[i].Result, results[i]); err != nil {
-			return nil, fmt.Errorf("error unmarshaling #%d result: %w", i, err)
+	for i, resp := range responses {
+		if err := json.Unmarshal(resp.Result, results[i]); err != nil {
+			return fmt.Errorf("unmarshaling result %d: %w", i, err)
 		}
 	}
-
-	return results, nil
+	return nil
 }

-func validateResponseIDs(ids, expectedIDs []rpctypes.JSONRPCIntID) error {
-	m := make(map[rpctypes.JSONRPCIntID]bool, len(expectedIDs))
-	for _, expectedID := range expectedIDs {
-		m[expectedID] = true
+func validateResponseIDs(ids, expectedIDs []string) error {
+	m := make(map[string]struct{}, len(expectedIDs))
+	for _, id := range expectedIDs {
+		m[id] = struct{}{}
 	}

 	for i, id := range ids {
-		if m[id] {
-			delete(m, id)
-		} else {
-			return fmt.Errorf("unsolicited ID #%d: %v", i, id)
+		if _, ok := m[id]; !ok {
+			return fmt.Errorf("unexpected response ID %d: %q", i, id)
 		}
 	}
-
-	return nil
-}
-
-// From the JSON-RPC 2.0 spec:
-// id: It MUST be the same as the value of the id member in the Request Object.
-func validateAndVerifyID(res *rpctypes.RPCResponse, expectedID rpctypes.JSONRPCIntID) error {
-	if err := validateResponseID(res.ID); err != nil {
-		return err
-	}
-	if expectedID != res.ID.(rpctypes.JSONRPCIntID) { // validateResponseID ensured res.ID has the right type
-		return fmt.Errorf("response ID (%d) does not match request ID (%d)", res.ID, expectedID)
-	}
-	return nil
-}
-
-func validateResponseID(id interface{}) error {
-	if id == nil {
-		return errors.New("no ID")
-	}
-	_, ok := id.(rpctypes.JSONRPCIntID)
-	if !ok {
-		return fmt.Errorf("expected JSONRPCIntID, but got: %T", id)
-	}
 	return nil
 }
diff --git a/rpc/jsonrpc/client/encode.go b/rpc/jsonrpc/client/encode.go
deleted file mode 100644
index e085f51a24..0000000000
--- a/rpc/jsonrpc/client/encode.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package client
-
-import (
-	"fmt"
-	"net/url"
-	"reflect"
-
-	tmjson "github.com/tendermint/tendermint/libs/json"
-)
-
-func argsToURLValues(args map[string]interface{}) (url.Values, error) {
-	values := make(url.Values)
-	if len(args) == 0 {
-		return values, nil
-	}
-
-	err := argsToJSON(args)
-	if err != nil {
-		return nil, err
-	}
-
-	for key, val := range args {
-		values.Set(key, val.(string))
-	}
-
-	return values, nil
-}
-
-func argsToJSON(args map[string]interface{}) error {
-	for k, v := range args {
-		rt := reflect.TypeOf(v)
-		isByteSlice := rt.Kind() == reflect.Slice && rt.Elem().Kind() == reflect.Uint8
-		if isByteSlice {
-			bytes := reflect.ValueOf(v).Bytes()
-			args[k] = fmt.Sprintf("0x%X", bytes)
-			continue
-		}
-
-		data, err := tmjson.Marshal(v)
-		if err != nil {
-			return err
-		}
-		args[k] = string(data)
-	}
-	return nil
-}
diff --git a/rpc/jsonrpc/client/http_json_client.go b/rpc/jsonrpc/client/http_json_client.go
index 9c73b8a8c7..c1cad7097e 100644
--- a/rpc/jsonrpc/client/http_json_client.go
+++ b/rpc/jsonrpc/client/http_json_client.go
@@ -4,15 +4,16 @@ import (
 	"bytes"
 	"context"
 	"encoding/json"
+	"errors"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net"
 	"net/http"
 	"net/url"
 	"strings"
+	"sync"
 	"time"

-	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
 	rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
 )

@@ -105,15 +106,15 @@ func (u parsedURL) GetTrimmedURL() string {

 //-------------------------------------------------------------

-// HTTPClient is a common interface for JSON-RPC HTTP clients.
-type HTTPClient interface {
-	// Call calls the given method with the params and returns a result.
-	Call(ctx context.Context, method string, params map[string]interface{}, result interface{}) (interface{}, error)
-}
-
-// Caller implementers can facilitate calling the JSON-RPC endpoint.
+// A Caller handles the round trip of a single JSON-RPC request. The
+// implementation is responsible for assigning request IDs, marshaling
+// parameters, and unmarshaling results.
 type Caller interface {
-	Call(ctx context.Context, method string, params map[string]interface{}, result interface{}) (interface{}, error)
+	// Call sends a new request for method to the server with the given
+	// parameters. If params == nil, the request has empty parameters.
+	// If result == nil, any result value must be discarded without error.
+	// Otherwise the concrete value of result must be a pointer.
+	Call(ctx context.Context, method string, params, result interface{}) error
 }

 //-------------------------------------------------------------

@@ -129,12 +130,10 @@ type Client struct {

 	client *http.Client

-	mtx       tmsync.Mutex
+	mtx       sync.Mutex
 	nextReqID int
 }

-var _ HTTPClient = (*Client)(nil)
-
 // Both Client and RequestBatch can facilitate calls to the JSON
 // RPC endpoint.
 var _ Caller = (*Client)(nil)

@@ -151,11 +150,11 @@ func New(remote string) (*Client, error) {
 }

 // NewWithHTTPClient returns a Client pointed at the given address using a
-// custom http client. An error is returned on invalid remote. The function
-// panics when client is nil.
+// custom HTTP client. It reports an error if c == nil or if remote is not a
+// valid URL.
 func NewWithHTTPClient(remote string, c *http.Client) (*Client, error) {
 	if c == nil {
-		panic("nil http.Client")
+		return nil, errors.New("nil client")
 	}

 	parsedURL, err := newParsedURL(remote)

@@ -181,28 +180,23 @@ func NewWithHTTPClient(remote string, c *http.Client) (*Client, error) {

 // Call issues a POST HTTP request. Requests are JSON encoded. Content-Type:
 // application/json.
-func (c *Client) Call(
-	ctx context.Context,
-	method string,
-	params map[string]interface{},
-	result interface{},
-) (interface{}, error) {
+func (c *Client) Call(ctx context.Context, method string, params, result interface{}) error {
 	id := c.nextRequestID()

-	request, err := rpctypes.MapToRequest(id, method, params)
-	if err != nil {
-		return nil, fmt.Errorf("failed to encode params: %w", err)
+	request := rpctypes.NewRequest(id)
+	if err := request.SetMethodAndParams(method, params); err != nil {
+		return fmt.Errorf("failed to encode params: %w", err)
 	}

 	requestBytes, err := json.Marshal(request)
 	if err != nil {
-		return nil, fmt.Errorf("failed to marshal request: %w", err)
+		return fmt.Errorf("failed to marshal request: %w", err)
 	}

 	requestBuf := bytes.NewBuffer(requestBytes)
 	httpRequest, err := http.NewRequestWithContext(ctx, http.MethodPost, c.address, requestBuf)
 	if err != nil {
-		return nil, fmt.Errorf("request setup failed: %w", err)
+		return fmt.Errorf("request setup failed: %w", err)
 	}

 	httpRequest.Header.Set("Content-Type", "application/json")

@@ -213,17 +207,16 @@ func (c *Client) Call(
 	httpResponse, err := c.client.Do(httpRequest)
 	if err != nil {
-		return nil, err
+		return err
 	}

-	defer httpResponse.Body.Close()
-
-	responseBytes, err := ioutil.ReadAll(httpResponse.Body)
+	responseBytes, err := io.ReadAll(httpResponse.Body)
+	httpResponse.Body.Close()
 	if err != nil {
-		return nil, fmt.Errorf("failed to read response body: %w", err)
+		return fmt.Errorf("reading response body: %w", err)
 	}

-	return unmarshalResponseBytes(responseBytes, id, result)
+	return unmarshalResponseBytes(responseBytes, request.ID(), result)
 }

 // NewRequestBatch starts a batch of requests for this client.

@@ -264,28 +257,30 @@ func (c *Client) sendBatch(ctx context.Context, requests []*jsonRPCBufferedReque
 		return nil, fmt.Errorf("post: %w", err)
 	}

-	defer httpResponse.Body.Close()
-
-	responseBytes, err := ioutil.ReadAll(httpResponse.Body)
+	responseBytes, err := io.ReadAll(httpResponse.Body)
+	httpResponse.Body.Close()
 	if err != nil {
-		return nil, fmt.Errorf("read response body: %w", err)
+		return nil, fmt.Errorf("reading response body: %w", err)
 	}

 	// collect ids to check responses IDs in unmarshalResponseBytesArray
-	ids := make([]rpctypes.JSONRPCIntID, len(requests))
+	ids := make([]string, len(requests))
 	for i, req := range requests {
-		ids[i] = req.request.ID.(rpctypes.JSONRPCIntID)
+		ids[i] = req.request.ID()
 	}

-	return unmarshalResponseBytesArray(responseBytes, ids, results)
+	if err := unmarshalResponseBytesArray(responseBytes, ids, results); err != nil {
+		return nil, err
+	}
+	return results, nil
 }

-func (c *Client) nextRequestID() rpctypes.JSONRPCIntID {
+func (c *Client) nextRequestID() int {
 	c.mtx.Lock()
+	defer c.mtx.Unlock()
 	id := c.nextReqID
 	c.nextReqID++
-	c.mtx.Unlock()
-	return rpctypes.JSONRPCIntID(id)
+	return id
 }

 //------------------------------------------------------------------------------------

@@ -303,7 +298,7 @@ type jsonRPCBufferedRequest struct {
 type RequestBatch struct {
 	client *Client

-	mtx      tmsync.Mutex
+	mtx      sync.Mutex
 	requests []*jsonRPCBufferedRequest
 }

@@ -347,19 +342,13 @@ func (b *RequestBatch) Send(ctx context.Context) ([]interface{}, error) {

 // Call enqueues a request to call the given RPC method with the specified
 // parameters, in the same way that the `Client.Call` function would.
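RequestBatch.Call (below) only buffers a request; Send then performs the single HTTP POST via sendBatch above and fills each result. A sketch of batched usage under those assumptions; the "echo" method is borrowed from the tests in this diff and is purely illustrative:

package client // sketch, same package assumption as above

import "context"

// batchEcho enqueues two calls and flushes them in one HTTP round trip.
func batchEcho(ctx context.Context, c *Client) (string, string, error) {
	batch := c.NewRequestBatch()

	var first, second struct {
		Value string `json:"value"`
	}
	// Call never hits the network here; it only buffers the request.
	if err := batch.Call(ctx, "echo", map[string]interface{}{"arg": "hello"}, &first); err != nil {
		return "", "", err
	}
	if err := batch.Call(ctx, "echo", map[string]interface{}{"arg": "world"}, &second); err != nil {
		return "", "", err
	}
	// Send posts the whole batch and unmarshals into first and second.
	if _, err := batch.Send(ctx); err != nil {
		return "", "", err
	}
	return first.Value, second.Value, nil
}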
-func (b *RequestBatch) Call(
-	_ context.Context,
-	method string,
-	params map[string]interface{},
-	result interface{},
-) (interface{}, error) {
-	id := b.client.nextRequestID()
-	request, err := rpctypes.MapToRequest(id, method, params)
-	if err != nil {
-		return nil, err
+func (b *RequestBatch) Call(_ context.Context, method string, params, result interface{}) error {
+	request := rpctypes.NewRequest(b.client.nextRequestID())
+	if err := request.SetMethodAndParams(method, params); err != nil {
+		return err
 	}
 	b.enqueue(&jsonRPCBufferedRequest{request: request, result: result})
-	return result, nil
+	return nil
 }

 //-------------------------------------------------------------
diff --git a/rpc/jsonrpc/client/http_json_client_test.go b/rpc/jsonrpc/client/http_json_client_test.go
index 5a03af5125..d6b455d63b 100644
--- a/rpc/jsonrpc/client/http_json_client_test.go
+++ b/rpc/jsonrpc/client/http_json_client_test.go
@@ -1,7 +1,7 @@
 package client

 import (
-	"io/ioutil"
+	"io"
 	"log"
 	"net/http"
 	"net/http/httptest"

@@ -21,13 +21,13 @@ func TestHTTPClientMakeHTTPDialer(t *testing.T) {
 	defer tsTLS.Close()
 	// This silences a TLS handshake error, caused by the dialer just immediately
 	// disconnecting, which we can just ignore.
-	tsTLS.Config.ErrorLog = log.New(ioutil.Discard, "", 0)
+	tsTLS.Config.ErrorLog = log.New(io.Discard, "", 0)

 	for _, testURL := range []string{ts.URL, tsTLS.URL} {
 		u, err := newParsedURL(testURL)
 		require.NoError(t, err)
 		dialFn, err := makeHTTPDialer(testURL)
-		require.Nil(t, err)
+		require.NoError(t, err)

 		addr, err := dialFn(u.Scheme, u.GetHostWithPath())
 		require.NoError(t, err)

@@ -92,7 +92,7 @@ func TestMakeHTTPDialerURL(t *testing.T) {
 		u, err := newParsedURL(remote)
 		require.NoError(t, err)
 		dialFn, err := makeHTTPDialer(remote)
-		require.Nil(t, err)
+		require.NoError(t, err)

 		addr, err := dialFn(u.Scheme, u.GetHostWithPath())
 		require.NoError(t, err)

@@ -105,7 +105,7 @@ func TestMakeHTTPDialerURL(t *testing.T) {
 	u, err := newParsedURL(errorURL)
 	require.NoError(t, err)
 	dialFn, err := makeHTTPDialer(errorURL)
-	require.Nil(t, err)
+	require.NoError(t, err)

 	addr, err := dialFn(u.Scheme, u.GetHostWithPath())
 	require.Error(t, err)
diff --git a/rpc/jsonrpc/client/http_uri_client.go b/rpc/jsonrpc/client/http_uri_client.go
deleted file mode 100644
index cd4ff06868..0000000000
--- a/rpc/jsonrpc/client/http_uri_client.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package client
-
-import (
-	"context"
-	"fmt"
-	"io/ioutil"
-	"net/http"
-	"strings"
-
-	rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
-)
-
-const (
-	// URIClientRequestID in a request ID used by URIClient
-	URIClientRequestID = rpctypes.JSONRPCIntID(-1)
-)
-
-// URIClient is a JSON-RPC client, which sends POST form HTTP requests to the
-// remote server.
-//
-// URIClient is safe for concurrent use by multiple goroutines.
-type URIClient struct {
-	address string
-	client  *http.Client
-}
-
-var _ HTTPClient = (*URIClient)(nil)
-
-// NewURI returns a new client.
-// An error is returned on invalid remote.
-// The function panics when remote is nil.
-func NewURI(remote string) (*URIClient, error) {
-	parsedURL, err := newParsedURL(remote)
-	if err != nil {
-		return nil, err
-	}
-
-	httpClient, err := DefaultHTTPClient(remote)
-	if err != nil {
-		return nil, err
-	}
-
-	parsedURL.SetDefaultSchemeHTTP()
-
-	uriClient := &URIClient{
-		address: parsedURL.GetTrimmedURL(),
-		client:  httpClient,
-	}
-
-	return uriClient, nil
-}
-
-// Call issues a POST form HTTP request.
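The deleted encode.go shown earlier supplied this URI client's parameter encoding: byte slices became 0x-prefixed hex, other values their JSON encoding, all sent as form fields. A small sketch reproducing that now-removed rule, for readers tracking the behavior change (not part of the new code):

package main

import (
	"fmt"
	"net/url"
)

// encodeArg mirrors the rule the deleted argsToJSON applied: []byte values
// become 0x-prefixed hex, everything else its JSON-ish quoted form.
func encodeArg(v interface{}) string {
	if b, ok := v.([]byte); ok {
		return fmt.Sprintf("0x%X", b)
	}
	return fmt.Sprintf("%q", v) // strings were sent JSON-quoted
}

func main() {
	values := make(url.Values)
	values.Set("tx", encodeArg([]byte("1234")))
	values.Set("name", encodeArg("abc"))
	fmt.Println(values.Encode()) // name=%22abc%22&tx=0x31323334
}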
-func (c *URIClient) Call(ctx context.Context, method string,
-	params map[string]interface{}, result interface{}) (interface{}, error) {
-
-	values, err := argsToURLValues(params)
-	if err != nil {
-		return nil, fmt.Errorf("failed to encode params: %w", err)
-	}
-
-	req, err := http.NewRequestWithContext(
-		ctx,
-		http.MethodPost,
-		c.address+"/"+method,
-		strings.NewReader(values.Encode()),
-	)
-	if err != nil {
-		return nil, fmt.Errorf("new request: %w", err)
-	}
-	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
-
-	resp, err := c.client.Do(req)
-	if err != nil {
-		return nil, fmt.Errorf("post: %w", err)
-	}
-	defer resp.Body.Close()
-
-	responseBytes, err := ioutil.ReadAll(resp.Body)
-	if err != nil {
-		return nil, fmt.Errorf("read response body: %w", err)
-	}
-
-	return unmarshalResponseBytes(responseBytes, URIClientRequestID, result)
-}
diff --git a/rpc/jsonrpc/client/integration_test.go b/rpc/jsonrpc/client/integration_test.go
index 26f24d2555..f53b28802d 100644
--- a/rpc/jsonrpc/client/integration_test.go
+++ b/rpc/jsonrpc/client/integration_test.go
@@ -8,6 +8,7 @@ package client

 import (
 	"bytes"
+	"context"
 	"errors"
 	"net"
 	"regexp"

@@ -15,45 +16,36 @@ import (
 	"time"

 	"github.com/stretchr/testify/require"
-
-	"github.com/tendermint/tendermint/libs/log"
 )

 func TestWSClientReconnectWithJitter(t *testing.T) {
-	n := 8
-	maxReconnectAttempts := 3
-	// Max wait time is ceil(1+0.999) + ceil(2+0.999) + ceil(4+0.999) + ceil(...) = 2 + 3 + 5 = 10s + ...
-	maxSleepTime := time.Second * time.Duration(((1< 0 { // ticker with a predefined period
@@ -369,10 +344,6 @@ func (c *WSClient) writeRoutine() {
 	defer func() {
 		ticker.Stop()
 		c.conn.Close()
-		// err != nil {
-		// 	ignore error; it will trigger in tests
-		// 	likely because it's closing an already closed connection
-		// }
 		c.wg.Done()
 	}()

@@ -402,13 +373,9 @@ func (c *WSClient) writeRoutine() {
 				c.reconnectAfter <- err
 				return
 			}
-			c.mtx.Lock()
-			c.sentLastPingAt = time.Now()
-			c.mtx.Unlock()
-			c.Logger.Debug("sent ping")
 		case <-c.readRoutineQuit:
 			return
-		case <-c.Quit():
+		case <-ctx.Done():
 			if err := c.conn.WriteMessage(
 				websocket.CloseMessage,
 				websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""),

@@ -422,27 +389,12 @@
 // The client ensures that there is at most one reader to a connection by
 // executing all reads from this goroutine.
-func (c *WSClient) readRoutine() {
+func (c *WSClient) readRoutine(ctx context.Context) {
 	defer func() {
 		c.conn.Close()
-		// err != nil {
-		// 	ignore error; it will trigger in tests
-		// 	likely because it's closing an already closed connection
-		// }
 		c.wg.Done()
 	}()

-	c.conn.SetPongHandler(func(string) error {
-		// gather latency stats
-		c.mtx.RLock()
-		t := c.sentLastPingAt
-		c.mtx.RUnlock()
-		c.PingPongLatencyTimer.UpdateSince(t)
-
-		c.Logger.Debug("got pong")
-		return nil
-	})
-
 	for {
 		// reset deadline for every message type (control or data)
 		if c.readWait > 0 {

@@ -469,24 +421,12 @@
 			continue
 		}

-		if err = validateResponseID(response.ID); err != nil {
-			c.Logger.Error("error in response ID", "id", response.ID, "err", err)
-			continue
-		}
-
 		// TODO: events resulting from /subscribe do not work with ->
 		// because they are implemented as responses with the subscribe request's
 		// ID. According to the spec, they should be notifications (requests
 		// without IDs).
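The reworked read/write routines above replace the BaseService Quit channel with context cancellation: every blocking select now carries a ctx.Done() case. A self-contained sketch of that shutdown pattern (shape only, not the WSClient code itself):

package main

import (
	"context"
	"fmt"
	"time"
)

// writeLoop mimics the shape of the reworked writeRoutine: a ticker drives
// periodic pings, and context cancellation replaces the old Quit channel.
func writeLoop(ctx context.Context, out chan<- string) {
	ticker := time.NewTicker(50 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			select {
			case out <- "ping": // send guarded by ctx so shutdown never blocks
			case <-ctx.Done():
				return
			}
		case <-ctx.Done():
			return // equivalent of the removed <-c.Quit() case
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	out := make(chan string)
	go writeLoop(ctx, out)
	for i := 0; i < 2; i++ {
		fmt.Println(<-out)
	}
	cancel() // stops the loop without any Quit channel bookkeeping
}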
 		// https://github.com/tendermint/tendermint/issues/2949
-		// c.mtx.Lock()
-		// if _, ok := c.sentIDs[response.ID.(types.JSONRPCIntID)]; !ok {
-		// 	c.Logger.Error("unsolicited response ID", "id", response.ID, "expected", c.sentIDs)
-		// 	c.mtx.Unlock()
-		// 	continue
-		// }
-		// delete(c.sentIDs, response.ID.(types.JSONRPCIntID))
-		// c.mtx.Unlock()
+		//
 		// Combine a non-blocking read on BaseService.Quit with a non-blocking write on ResponsesCh to avoid blocking
 		// c.wg.Wait() in c.Stop(). Note we rely on Quit being closed so that it sends unlimited Quit signals to stop
 		// both readRoutine and writeRoutine

@@ -494,7 +434,8 @@
 		c.Logger.Info("got response", "id", response.ID, "result", response.Result)

 		select {
-		case <-c.Quit():
+		case <-ctx.Done():
+			return
 		case c.ResponsesCh <- response:
 		}
 	}
diff --git a/rpc/jsonrpc/client/ws_client_test.go b/rpc/jsonrpc/client/ws_client_test.go
index 208313e794..5bbb5fc25a 100644
--- a/rpc/jsonrpc/client/ws_client_test.go
+++ b/rpc/jsonrpc/client/ws_client_test.go
@@ -9,19 +9,20 @@ import (
 	"testing"
 	"time"

+	"github.com/fortytw2/leaktest"
 	"github.com/gorilla/websocket"
+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"

-	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
-	"github.com/tendermint/tendermint/libs/log"
 	rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
 )

-var wsCallTimeout = 5 * time.Second
+const wsCallTimeout = 5 * time.Second

-type myHandler struct {
+type myTestHandler struct {
 	closeConnAfterRead bool
-	mtx                tmsync.RWMutex
+	mtx                sync.RWMutex
+	t                  *testing.T
 }

 var upgrader = websocket.Upgrader{
@@ -29,11 +30,10 @@
 	WriteBufferSize: 1024,
 }

-func (h *myHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+func (h *myTestHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	conn, err := upgrader.Upgrade(w, r, nil)
-	if err != nil {
-		panic(err)
-	}
+	require.NoError(h.t, err)
+
 	defer conn.Close()

 	for {
 		messageType, in, err := conn.ReadMessage()

@@ -43,20 +43,21 @@
 		var req rpctypes.RPCRequest
 		err = json.Unmarshal(in, &req)
-		if err != nil {
-			panic(err)
-		}
+		require.NoError(h.t, err)
+
+		func() {
+			h.mtx.RLock()
+			defer h.mtx.RUnlock()

-		h.mtx.RLock()
-		if h.closeConnAfterRead {
-			if err := conn.Close(); err != nil {
-				panic(err)
+			if h.closeConnAfterRead {
+				require.NoError(h.t, conn.Close())
 			}
-		}
-		h.mtx.RUnlock()
+		}()

 		res := json.RawMessage(`{}`)
-		emptyRespBytes, _ := json.Marshal(rpctypes.RPCResponse{Result: res, ID: req.ID})
+
+		emptyRespBytes, err := json.Marshal(req.MakeResponse(res))
+		require.NoError(h.t, err)
 		if err := conn.WriteMessage(messageType, emptyRespBytes); err != nil {
 			return
 		}

@@ -64,25 +65,26 @@
 }

 func TestWSClientReconnectsAfterReadFailure(t *testing.T) {
-	var wg sync.WaitGroup
+	t.Cleanup(leaktest.Check(t))

 	// start server
-	h := &myHandler{}
+	h := &myTestHandler{t: t}
 	s := httptest.NewServer(h)
 	defer s.Close()

-	c := startClient(t, "//"+s.Listener.Addr().String())
-	defer c.Stop() // nolint:errcheck // ignore for tests
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel()
+
+	c := startClient(ctx, t, "//"+s.Listener.Addr().String())

-	wg.Add(1)
-	go callWgDoneOnResult(t, c, &wg)
+	go handleResponses(ctx, t, c)

 	h.mtx.Lock()
 	h.closeConnAfterRead = true
 	h.mtx.Unlock()

 	// results in WS read error, no send retry because write succeeded
-	call(t, "a", c)
+	call(ctx, t, "a", c)

 	// expect to reconnect almost immediately
 	time.Sleep(10 * time.Millisecond)

@@ -91,23 +93,23 @@
 	h.mtx.Unlock()

 	// should succeed
-	call(t, "b", c)
-
-	wg.Wait()
+	call(ctx, t, "b", c)
 }

 func TestWSClientReconnectsAfterWriteFailure(t *testing.T) {
-	var wg sync.WaitGroup
+	t.Cleanup(leaktest.Check(t))

 	// start server
-	h := &myHandler{}
+	h := &myTestHandler{t: t}
 	s := httptest.NewServer(h)
+	defer s.Close()

-	c := startClient(t, "//"+s.Listener.Addr().String())
-	defer c.Stop() // nolint:errcheck // ignore for tests
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()

-	wg.Add(2)
-	go callWgDoneOnResult(t, c, &wg)
+	c := startClient(ctx, t, "//"+s.Listener.Addr().String())
+
+	go handleResponses(ctx, t, c)

 	// hacky way to abort the connection before write
 	if err := c.conn.Close(); err != nil {

@@ -115,30 +117,32 @@
 	}

 	// results in WS write error, the client should resend on reconnect
-	call(t, "a", c)
+	call(ctx, t, "a", c)

 	// expect to reconnect almost immediately
 	time.Sleep(10 * time.Millisecond)

 	// should succeed
-	call(t, "b", c)
-
-	wg.Wait()
+	call(ctx, t, "b", c)
 }

 func TestWSClientReconnectFailure(t *testing.T) {
+	t.Cleanup(leaktest.Check(t))
+
 	// start server
-	h := &myHandler{}
+	h := &myTestHandler{t: t}
 	s := httptest.NewServer(h)

-	c := startClient(t, "//"+s.Listener.Addr().String())
-	defer c.Stop() // nolint:errcheck // ignore for tests
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	c := startClient(ctx, t, "//"+s.Listener.Addr().String())

 	go func() {
 		for {
 			select {
 			case <-c.ResponsesCh:
-			case <-c.Quit():
+			case <-ctx.Done():
 				return
 			}
 		}

@@ -152,9 +156,9 @@
 	// results in WS write error
 	// provide timeout to avoid blocking
-	ctx, cancel := context.WithTimeout(context.Background(), wsCallTimeout)
+	cctx, cancel := context.WithTimeout(ctx, wsCallTimeout)
 	defer cancel()
-	if err := c.Call(ctx, "a", make(map[string]interface{})); err != nil {
+	if err := c.Call(cctx, "a", make(map[string]interface{})); err != nil {
 		t.Error(err)
 	}

@@ -164,7 +168,7 @@
 	done := make(chan struct{})
 	go func() {
 		// client should block on this
-		call(t, "b", c)
+		call(ctx, t, "b", c)
 		close(done)
 	}()

@@ -178,44 +182,55 @@
 }

 func TestNotBlockingOnStop(t *testing.T) {
-	timeout := 2 * time.Second
-	s := httptest.NewServer(&myHandler{})
-	c := startClient(t, "//"+s.Listener.Addr().String())
-	c.Call(context.Background(), "a", make(map[string]interface{})) // nolint:errcheck // ignore for tests
-	// Let the readRoutine get around to blocking
-	time.Sleep(time.Second)
-	passCh := make(chan struct{})
+	t.Cleanup(leaktest.Check(t))
+
+	s := httptest.NewServer(&myTestHandler{t: t})
+	defer s.Close()
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	c := startClient(ctx, t, "//"+s.Listener.Addr().String())
+	require.NoError(t, c.Call(ctx, "a", make(map[string]interface{})))
+
+	time.Sleep(200 * time.Millisecond) // give service routines time to start ⚠️
+	done := make(chan struct{})
 	go func() {
-		// Unless we have a non-blocking write to ResponsesCh from readRoutine
-		// this blocks forever ont the waitgroup
-		err := c.Stop()
-		require.NoError(t, err)
-		passCh <- struct{}{}
+		cancel()
+		if assert.NoError(t, c.Stop()) {
+			close(done)
+		}
 	}()

 	select {
-	case <-passCh:
-		// Pass
-	case <-time.After(timeout):
-		t.Fatalf("WSClient did failed to stop within %v seconds - is one of the read/write routines blocking?",
-			timeout.Seconds())
+	case <-done:
+		t.Log("Stopped client successfully")
+	case <-time.After(2 * time.Second):
+		t.Fatal("Timed out waiting for client to stop")
 	}
 }

-func startClient(t *testing.T, addr string) *WSClient {
+func startClient(ctx context.Context, t *testing.T, addr string) *WSClient {
+	t.Helper()
+
+	t.Cleanup(leaktest.Check(t))
+
 	c, err := NewWS(addr, "/websocket")
-	require.Nil(t, err)
-	err = c.Start()
-	require.Nil(t, err)
-	c.SetLogger(log.TestingLogger())
+	require.NoError(t, err)
+	require.NoError(t, c.Start(ctx))
 	return c
 }

-func call(t *testing.T, method string, c *WSClient) {
-	err := c.Call(context.Background(), method, make(map[string]interface{}))
-	require.NoError(t, err)
+func call(ctx context.Context, t *testing.T, method string, c *WSClient) {
+	t.Helper()
+
+	err := c.Call(ctx, method, make(map[string]interface{}))
+	if ctx.Err() == nil {
+		require.NoError(t, err)
+	}
 }

-func callWgDoneOnResult(t *testing.T, c *WSClient, wg *sync.WaitGroup) {
+func handleResponses(ctx context.Context, t *testing.T, c *WSClient) {
+	t.Helper()
+
 	for {
 		select {
 		case resp := <-c.ResponsesCh:

@@ -224,9 +239,9 @@
 				return
 			}
 			if resp.Result != nil {
-				wg.Done()
+				return
 			}
-		case <-c.Quit():
+		case <-ctx.Done():
 			return
 		}
 	}
diff --git a/rpc/jsonrpc/doc.go b/rpc/jsonrpc/doc.go
index 813f055f54..58b522861b 100644
--- a/rpc/jsonrpc/doc.go
+++ b/rpc/jsonrpc/doc.go
@@ -55,7 +55,7 @@
 // Define some routes
 //
 // var Routes = map[string]*rpcserver.RPCFunc{
-// 	"status": rpcserver.NewRPCFunc(Status, "arg", false),
+// 	"status": rpcserver.NewRPCFunc(Status),
 // }
 //
 // An rpc function:
diff --git a/rpc/jsonrpc/jsonrpc_test.go b/rpc/jsonrpc/jsonrpc_test.go
index 5013590b67..236db9b320 100644
--- a/rpc/jsonrpc/jsonrpc_test.go
+++ b/rpc/jsonrpc/jsonrpc_test.go
@@ -5,14 +5,14 @@ import (
 	"context"
 	crand "crypto/rand"
 	"encoding/json"
-	"fmt"
+	"errors"
 	mrand "math/rand"
 	"net/http"
-	"os"
 	"os/exec"
 	"testing"
 	"time"

+	"github.com/fortytw2/leaktest"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"

@@ -20,7 +20,6 @@
 	"github.com/tendermint/tendermint/libs/log"
 	"github.com/tendermint/tendermint/rpc/jsonrpc/client"
 	"github.com/tendermint/tendermint/rpc/jsonrpc/server"
-	rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
 )

 // Client and Server should work over tcp or unix sockets

@@ -35,182 +34,189 @@
 	testVal = "acbd"
 )

-var (
-	ctx = context.Background()
-)
+type RequestEcho struct {
+	Value string `json:"arg"`
+}

 type ResultEcho struct {
 	Value string `json:"value"`
 }

+type RequestEchoInt struct {
+	Value int `json:"arg"`
+}
+
 type ResultEchoInt struct {
 	Value int `json:"value"`
 }

+type RequestEchoBytes struct {
+	Value []byte `json:"arg"`
+}
+
 type ResultEchoBytes struct {
 	Value []byte `json:"value"`
 }

+type RequestEchoDataBytes struct {
+	Value tmbytes.HexBytes `json:"arg"`
+}
+
 type ResultEchoDataBytes struct {
 	Value tmbytes.HexBytes `json:"value"`
 }

 // Define some routes
 var Routes = map[string]*server.RPCFunc{
-	"echo":            server.NewRPCFunc(EchoResult, "arg", false),
-	"echo_ws":         server.NewWSRPCFunc(EchoWSResult, "arg"),
-	"echo_bytes":      server.NewRPCFunc(EchoBytesResult, "arg", false),
-	"echo_data_bytes": server.NewRPCFunc(EchoDataBytesResult, "arg", false),
-	"echo_int":        server.NewRPCFunc(EchoIntResult, "arg", false),
-}
-
-func EchoResult(ctx *rpctypes.Context, v string) (*ResultEcho, error) {
-	return &ResultEcho{v}, nil
+	"echo":            server.NewRPCFunc(EchoResult),
+	"echo_ws":         server.NewWSRPCFunc(EchoWSResult),
+	"echo_bytes":      server.NewRPCFunc(EchoBytesResult),
+	"echo_data_bytes": server.NewRPCFunc(EchoDataBytesResult),
+	"echo_int":        server.NewRPCFunc(EchoIntResult),
 }

-func EchoWSResult(ctx *rpctypes.Context, v string) (*ResultEcho, error) {
-	return &ResultEcho{v}, nil
+func EchoResult(ctx context.Context, v *RequestEcho) (*ResultEcho, error) {
+	return &ResultEcho{v.Value}, nil
 }

-func EchoIntResult(ctx *rpctypes.Context, v int) (*ResultEchoInt, error) {
-	return &ResultEchoInt{v}, nil
+func EchoWSResult(ctx context.Context, v *RequestEcho) (*ResultEcho, error) {
+	return &ResultEcho{v.Value}, nil
 }

-func EchoBytesResult(ctx *rpctypes.Context, v []byte) (*ResultEchoBytes, error) {
-	return &ResultEchoBytes{v}, nil
+func EchoIntResult(ctx context.Context, v *RequestEchoInt) (*ResultEchoInt, error) {
+	return &ResultEchoInt{v.Value}, nil
 }

-func EchoDataBytesResult(ctx *rpctypes.Context, v tmbytes.HexBytes) (*ResultEchoDataBytes, error) {
-	return &ResultEchoDataBytes{v}, nil
+func EchoBytesResult(ctx context.Context, v *RequestEchoBytes) (*ResultEchoBytes, error) {
+	return &ResultEchoBytes{v.Value}, nil
 }

-func TestMain(m *testing.M) {
-	setup()
-	code := m.Run()
-	os.Exit(code)
+func EchoDataBytesResult(ctx context.Context, v *RequestEchoDataBytes) (*ResultEchoDataBytes, error) {
+	return &ResultEchoDataBytes{v.Value}, nil
 }

 // launch unix and tcp servers
-func setup() {
-	logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
-
+func setup(ctx context.Context, t *testing.T, logger log.Logger) error {
 	cmd := exec.Command("rm", "-f", unixSocket)
 	err := cmd.Start()
 	if err != nil {
-		panic(err)
+		return err
 	}
 	if err = cmd.Wait(); err != nil {
-		panic(err)
+		return err
 	}

 	tcpLogger := logger.With("socket", "tcp")
 	mux := http.NewServeMux()
 	server.RegisterRPCFuncs(mux, Routes, tcpLogger)
-	wm := server.NewWebsocketManager(Routes, server.ReadWait(5*time.Second), server.PingPeriod(1*time.Second))
-	wm.SetLogger(tcpLogger)
+	wm := server.NewWebsocketManager(tcpLogger, Routes, server.ReadWait(5*time.Second), server.PingPeriod(1*time.Second))
 	mux.HandleFunc(websocketEndpoint, wm.WebsocketHandler)
 	config := server.DefaultConfig()
 	listener1, err := server.Listen(tcpAddr, config.MaxOpenConnections)
 	if err != nil {
-		panic(err)
+		return err
 	}
 	go func() {
-		if err := server.Serve(listener1, mux, tcpLogger, config); err != nil {
-			panic(err)
+		if err := server.Serve(ctx, listener1, mux, tcpLogger, config); err != nil {
+			if !errors.Is(err, http.ErrServerClosed) {
+				require.NoError(t, err)
+			}
 		}
 	}()

 	unixLogger := logger.With("socket", "unix")
 	mux2 := http.NewServeMux()
 	server.RegisterRPCFuncs(mux2, Routes, unixLogger)
-	wm = server.NewWebsocketManager(Routes)
-	wm.SetLogger(unixLogger)
+	wm = server.NewWebsocketManager(unixLogger, Routes)
 	mux2.HandleFunc(websocketEndpoint, wm.WebsocketHandler)
 	listener2, err := server.Listen(unixAddr, config.MaxOpenConnections)
 	if err != nil {
-		panic(err)
+		return err
 	}
 	go func() {
-		if err := server.Serve(listener2, mux2, unixLogger, config); err != nil {
-			panic(err)
+		if err := server.Serve(ctx, listener2, mux2, unixLogger, config); err != nil {
+			if !errors.Is(err, http.ErrServerClosed) {
+				require.NoError(t, err)
+			}
 		}
 	}()

 	// wait for servers to start
 	time.Sleep(time.Second * 2)
+
+	return nil
 }

-func echoViaHTTP(cl client.Caller, val string) (string, error) {
+func echoViaHTTP(ctx context.Context, cl client.Caller, val string) (string, error) {
 	params := map[string]interface{}{
 		"arg": val,
 	}
 	result := new(ResultEcho)
-	if _, err := cl.Call(ctx, "echo", params, result); err != nil {
+	if err := cl.Call(ctx, "echo", params, result); err != nil {
 		return "", err
 	}
 	return result.Value, nil
 }

-func echoIntViaHTTP(cl client.Caller, val int) (int, error) {
+func echoIntViaHTTP(ctx context.Context, cl client.Caller, val int) (int, error) {
 	params := map[string]interface{}{
 		"arg": val,
 	}
 	result := new(ResultEchoInt)
-	if _, err := cl.Call(ctx, "echo_int", params, result); err != nil {
+	if err := cl.Call(ctx, "echo_int", params, result); err != nil {
 		return 0, err
 	}
 	return result.Value, nil
 }

-func echoBytesViaHTTP(cl client.Caller, bytes []byte) ([]byte, error) {
+func echoBytesViaHTTP(ctx context.Context, cl client.Caller, bytes []byte) ([]byte, error) {
 	params := map[string]interface{}{
 		"arg": bytes,
 	}
 	result := new(ResultEchoBytes)
-	if _, err := cl.Call(ctx, "echo_bytes", params, result); err != nil {
+	if err := cl.Call(ctx, "echo_bytes", params, result); err != nil {
 		return []byte{}, err
 	}
 	return result.Value, nil
 }

-func echoDataBytesViaHTTP(cl client.Caller, bytes tmbytes.HexBytes) (tmbytes.HexBytes, error) {
+func echoDataBytesViaHTTP(ctx context.Context, cl client.Caller, bytes tmbytes.HexBytes) (tmbytes.HexBytes, error) {
 	params := map[string]interface{}{
 		"arg": bytes,
 	}
 	result := new(ResultEchoDataBytes)
-	if _, err := cl.Call(ctx, "echo_data_bytes", params, result); err != nil {
+	if err := cl.Call(ctx, "echo_data_bytes", params, result); err != nil {
 		return []byte{}, err
 	}
 	return result.Value, nil
 }

-func testWithHTTPClient(t *testing.T, cl client.HTTPClient) {
+func testWithHTTPClient(ctx context.Context, t *testing.T, cl client.Caller) {
 	val := testVal
-	got, err := echoViaHTTP(cl, val)
-	require.Nil(t, err)
+	got, err := echoViaHTTP(ctx, cl, val)
+	require.NoError(t, err)
 	assert.Equal(t, got, val)

 	val2 := randBytes(t)
-	got2, err := echoBytesViaHTTP(cl, val2)
-	require.Nil(t, err)
+	got2, err := echoBytesViaHTTP(ctx, cl, val2)
+	require.NoError(t, err)
 	assert.Equal(t, got2, val2)

 	val3 := tmbytes.HexBytes(randBytes(t))
-	got3, err := echoDataBytesViaHTTP(cl, val3)
-	require.Nil(t, err)
+	got3, err := echoDataBytesViaHTTP(ctx, cl, val3)
+	require.NoError(t, err)
 	assert.Equal(t, got3, val3)

 	val4 := mrand.Intn(10000)
-	got4, err := echoIntViaHTTP(cl, val4)
-	require.Nil(t, err)
+	got4, err := echoIntViaHTTP(ctx, cl, val4)
+	require.NoError(t, err)
 	assert.Equal(t, got4, val4)
 }

-func echoViaWS(cl *client.WSClient, val string) (string, error) {
+func echoViaWS(ctx context.Context, cl *client.WSClient, val string) (string, error) {
 	params := map[string]interface{}{
 		"arg": val,
 	}
-	err := cl.Call(context.Background(), "echo", params)
+	err := cl.Call(ctx, "echo", params)
 	if err != nil {
 		return "", err
 	}

@@ -228,11 +234,11 @@
 	return result.Value, nil
 }

-func echoBytesViaWS(cl *client.WSClient, bytes []byte) ([]byte, error) {
+func echoBytesViaWS(ctx context.Context, cl *client.WSClient, bytes []byte) ([]byte, error) {
 	params := map[string]interface{}{
 		"arg": bytes,
 	}
-	err := cl.Call(context.Background(), "echo_bytes", params)
+	err := cl.Call(ctx, "echo_bytes", params)
 	if err != nil {
 		return []byte{}, err
 	}

@@ -250,144 +256,116 @@
 	return result.Value, nil
 }

-func testWithWSClient(t *testing.T, cl *client.WSClient) {
+func testWithWSClient(ctx context.Context, t *testing.T, cl *client.WSClient) {
 	val := testVal
-	got, err := echoViaWS(cl, val)
-	require.Nil(t, err)
+	got, err := echoViaWS(ctx, cl, val)
+	require.NoError(t, err)
 	assert.Equal(t, got, val)

 	val2 := randBytes(t)
-	got2, err := echoBytesViaWS(cl, val2)
-	require.Nil(t, err)
+	got2, err := echoBytesViaWS(ctx, cl, val2)
+	require.NoError(t, err)
 	assert.Equal(t, got2, val2)
 }

 //-------------

-func TestServersAndClientsBasic(t *testing.T) {
-	serverAddrs := [...]string{tcpAddr, unixAddr}
-	for _, addr := range serverAddrs {
-		cl1, err := client.NewURI(addr)
-		require.Nil(t, err)
-		fmt.Printf("=== testing server on %s using URI client", addr)
-		testWithHTTPClient(t, cl1)
-
-		cl2, err := client.New(addr)
-		require.Nil(t, err)
-		fmt.Printf("=== testing server on %s using JSONRPC client", addr)
-		testWithHTTPClient(t, cl2)
-
-		cl3, err := client.NewWS(addr, websocketEndpoint)
-		require.Nil(t, err)
-		cl3.SetLogger(log.TestingLogger())
-		err = cl3.Start()
-		require.Nil(t, err)
-		fmt.Printf("=== testing server on %s using WS client", addr)
-		testWithWSClient(t, cl3)
-		err = cl3.Stop()
-		require.NoError(t, err)
-	}
-}
-
-func TestHexStringArg(t *testing.T) {
-	cl, err := client.NewURI(tcpAddr)
-	require.Nil(t, err)
-	// should NOT be handled as hex
-	val := "0xabc"
-	got, err := echoViaHTTP(cl, val)
-	require.Nil(t, err)
-	assert.Equal(t, got, val)
-}
-
-func TestQuotedStringArg(t *testing.T) {
-	cl, err := client.NewURI(tcpAddr)
-	require.Nil(t, err)
-	// should NOT be unquoted
-	val := "\"abc\""
-	got, err := echoViaHTTP(cl, val)
-	require.Nil(t, err)
-	assert.Equal(t, got, val)
-}
-
-func TestWSNewWSRPCFunc(t *testing.T) {
-	cl, err := client.NewWS(tcpAddr, websocketEndpoint)
-	require.Nil(t, err)
-	cl.SetLogger(log.TestingLogger())
-	err = cl.Start()
-	require.Nil(t, err)
-	t.Cleanup(func() {
-		if err := cl.Stop(); err != nil {
-			t.Error(err)
+func TestRPC(t *testing.T) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	logger := log.NewNopLogger()
+
+	t.Cleanup(leaktest.Check(t))
+	require.NoError(t, setup(ctx, t, logger))
+	t.Run("ServersAndClientsBasic", func(t *testing.T) {
+		serverAddrs := [...]string{tcpAddr, unixAddr}
+		for _, addr := range serverAddrs {
+			t.Run(addr, func(t *testing.T) {
+				tctx, tcancel := context.WithCancel(ctx)
+				defer tcancel()
+
+				logger := log.NewNopLogger()
+
+				cl2, err := client.New(addr)
+				require.NoError(t, err)
+				t.Logf("testing server with JSONRPC client")
+				testWithHTTPClient(tctx, t, cl2)
+
+				cl3, err := client.NewWS(addr, websocketEndpoint)
+				require.NoError(t, err)
+				cl3.Logger = logger
+				err = cl3.Start(tctx)
+				require.NoError(t, err)
+				t.Logf("testing server with WS client")
+				testWithWSClient(tctx, t, cl3)
+			})
 		}
 	})
+	t.Run("WSNewWSRPCFunc", func(t *testing.T) {
+		t.Cleanup(leaktest.CheckTimeout(t, 4*time.Second))

-	val := testVal
-	params := map[string]interface{}{
-		"arg": val,
-	}
-	err = cl.Call(context.Background(), "echo_ws", params)
-	require.Nil(t, err)
+		cl, err := client.NewWS(tcpAddr, websocketEndpoint)
+		require.NoError(t, err)
+		cl.Logger = log.NewNopLogger()
+		tctx, tcancel := context.WithCancel(ctx)
+		defer tcancel()
+
+		require.NoError(t, cl.Start(tctx))
+		t.Cleanup(func() {
+			if err := cl.Stop(); err != nil {
+				t.Error(err)
+			}
+		})
+
+		val := testVal
+		params := map[string]interface{}{
+			"arg": val,
+		}
+		err = cl.Call(tctx, "echo_ws", params)
+		require.NoError(t, err)

-	msg := <-cl.ResponsesCh
-	if msg.Error != nil {
-		t.Fatal(err)
-	}
-	result := new(ResultEcho)
-	err = json.Unmarshal(msg.Result, result)
-	require.Nil(t, err)
-	got := result.Value
-	assert.Equal(t, got, val)
-}
+		select {
+		case <-tctx.Done():
+			t.Fatal(tctx.Err())
+		case msg := <-cl.ResponsesCh:
+			if msg.Error != nil {
+				t.Fatal(err)
+			}
+			result := new(ResultEcho)
+			err = json.Unmarshal(msg.Result, result)
+			require.NoError(t, err)
+			got := result.Value
+			assert.Equal(t, got, val)

-func TestWSHandlesArrayParams(t *testing.T) {
-	cl, err := client.NewWS(tcpAddr, websocketEndpoint)
-	require.Nil(t, err)
-	cl.SetLogger(log.TestingLogger())
-	err = cl.Start()
-	require.Nil(t, err)
-	t.Cleanup(func() {
-		if err := cl.Stop(); err != nil {
-			t.Error(err)
 		}
 	})
+	t.Run("WSClientPingPong", func(t *testing.T) {
+		// TestWSClientPingPong checks that a client & server exchange pings
+		// & pongs so connection stays alive.
+		t.Cleanup(leaktest.CheckTimeout(t, 4*time.Second))

-	val := testVal
-	params := []interface{}{val}
-	err = cl.CallWithArrayParams(context.Background(), "echo_ws", params)
-	require.Nil(t, err)
+		cl, err := client.NewWS(tcpAddr, websocketEndpoint)
+		require.NoError(t, err)
+		cl.Logger = log.NewNopLogger()

-	msg := <-cl.ResponsesCh
-	if msg.Error != nil {
-		t.Fatalf("%+v", err)
-	}
-	result := new(ResultEcho)
-	err = json.Unmarshal(msg.Result, result)
-	require.Nil(t, err)
-	got := result.Value
-	assert.Equal(t, got, val)
-}
+		tctx, tcancel := context.WithCancel(ctx)
+		defer tcancel()

-// TestWSClientPingPong checks that a client & server exchange pings
-// & pongs so connection stays alive.
-func TestWSClientPingPong(t *testing.T) {
-	cl, err := client.NewWS(tcpAddr, websocketEndpoint)
-	require.Nil(t, err)
-	cl.SetLogger(log.TestingLogger())
-	err = cl.Start()
-	require.Nil(t, err)
-	t.Cleanup(func() {
-		if err := cl.Stop(); err != nil {
-			t.Error(err)
-		}
-	})
+		require.NoError(t, cl.Start(tctx))
+		t.Cleanup(func() {
+			if err := cl.Stop(); err != nil {
+				t.Error(err)
+			}
+		})

-	time.Sleep(6 * time.Second)
+		time.Sleep(6 * time.Second)
+	})
 }

 func randBytes(t *testing.T) []byte {
 	n := mrand.Intn(10) + 2
 	buf := make([]byte, n)
 	_, err := crand.Read(buf)
-	require.Nil(t, err)
+	require.NoError(t, err)
 	return bytes.ReplaceAll(buf, []byte("="), []byte{100})
 }
diff --git a/rpc/jsonrpc/server/http_json_handler.go b/rpc/jsonrpc/server/http_json_handler.go
index 311d632d39..2eeded2d72 100644
--- a/rpc/jsonrpc/server/http_json_handler.go
+++ b/rpc/jsonrpc/server/http_json_handler.go
@@ -1,18 +1,15 @@
 package server

 import (
+	"bytes"
 	"encoding/json"
-	"errors"
 	"fmt"
 	"html/template"
-	"io/ioutil"
+	"io"
 	"net/http"
-	"reflect"
 	"strings"

-	tmjson "github.com/tendermint/tendermint/libs/json"
 	"github.com/tendermint/tendermint/libs/log"
-	"github.com/tendermint/tendermint/rpc/coretypes"
 	rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
 )

@@ -20,130 +17,67 @@
 // jsonrpc calls grab the given method's function info and runs reflect.Call
 func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.HandlerFunc {
-	return func(w http.ResponseWriter, r *http.Request) {
-		b, err := ioutil.ReadAll(r.Body)
+	return func(w http.ResponseWriter, hreq *http.Request) {
+		// For POST requests, reject a non-root URL path. This should not happen
+		// in the standard configuration, since the wrapper checks the path.
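The rewritten handler below threads per-request metadata through the standard request context via rpctypes.WithCallInfo, instead of passing a custom Context argument to every RPC function. A generic sketch of that context-value pattern, with an illustrative key type and helpers (not the rpctypes implementation):

package main

import (
	"context"
	"fmt"
	"net/http"
)

// callInfo is a stand-in for the rpctypes.CallInfo carried on the request
// context in the rewritten handler; the key type and helpers are illustrative.
type callInfo struct {
	Method      string
	HTTPRequest *http.Request
}

type callInfoKey struct{}

func withCallInfo(ctx context.Context, ci *callInfo) context.Context {
	return context.WithValue(ctx, callInfoKey{}, ci)
}

func getCallInfo(ctx context.Context) *callInfo {
	ci, _ := ctx.Value(callInfoKey{}).(*callInfo)
	return ci
}

func main() {
	hreq, _ := http.NewRequest(http.MethodPost, "http://localhost/", nil)
	ctx := withCallInfo(hreq.Context(), &callInfo{Method: "echo", HTTPRequest: hreq})
	// An RPC handler can now recover transport details without extra arguments.
	fmt.Println(getCallInfo(ctx).Method) // echo
}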
+ if hreq.URL.Path != "/" { + writeRPCResponse(w, logger, rpctypes.RPCRequest{}.MakeErrorf( + rpctypes.CodeInvalidRequest, "invalid path: %q", hreq.URL.Path)) + return + } + + b, err := io.ReadAll(hreq.Body) if err != nil { - res := rpctypes.RPCInvalidRequestError(nil, - fmt.Errorf("error reading request body: %w", err), - ) - if wErr := WriteRPCResponseHTTPError(w, res); wErr != nil { - logger.Error("failed to write response", "res", res, "err", wErr) - } + writeRPCResponse(w, logger, rpctypes.RPCRequest{}.MakeErrorf( + rpctypes.CodeInvalidRequest, "reading request body: %v", err)) return } // if its an empty request (like from a browser), just display a list of // functions if len(b) == 0 { - writeListOfEndpoints(w, r, funcMap) + writeListOfEndpoints(w, hreq, funcMap) return } - // first try to unmarshal the incoming request as an array of RPC requests - var ( - requests []rpctypes.RPCRequest - responses []rpctypes.RPCResponse - ) - if err := json.Unmarshal(b, &requests); err != nil { - // next, try to unmarshal as a single request - var request rpctypes.RPCRequest - if err := json.Unmarshal(b, &request); err != nil { - res := rpctypes.RPCParseError(fmt.Errorf("error unmarshaling request: %w", err)) - if wErr := WriteRPCResponseHTTPError(w, res); wErr != nil { - logger.Error("failed to write response", "res", res, "err", wErr) - } - return - } - requests = []rpctypes.RPCRequest{request} + requests, err := parseRequests(b) + if err != nil { + writeRPCResponse(w, logger, rpctypes.RPCRequest{}.MakeErrorf( + rpctypes.CodeParseError, "decoding request: %v", err)) + return } - // Set the default response cache to true unless - // 1. Any RPC request error. - // 2. Any RPC request doesn't allow to be cached. - // 3. Any RPC request has the height argument and the value is 0 (the default). - var c = true - for _, request := range requests { - request := request - - // A Notification is a Request object without an "id" member. - // The Server MUST NOT reply to a Notification, including those that are within a batch request. - if request.ID == nil { - logger.Debug( - "HTTPJSONRPC received a notification, skipping... (please send a non-empty ID if you want to call a method)", - "req", request, - ) + var responses []rpctypes.RPCResponse + for _, req := range requests { + // Ignore notifications, which this service does not support. + if req.IsNotification() { + logger.Debug("Ignoring notification", "req", req) continue } - if len(r.URL.Path) > 1 { - responses = append( - responses, - rpctypes.RPCInvalidRequestError(request.ID, fmt.Errorf("path %s is invalid", r.URL.Path)), - ) - c = false - continue - } - rpcFunc, ok := funcMap[request.Method] + + rpcFunc, ok := funcMap[req.Method] if !ok || rpcFunc.ws { - responses = append(responses, rpctypes.RPCMethodNotFoundError(request.ID)) - c = false + responses = append(responses, req.MakeErrorf(rpctypes.CodeMethodNotFound, req.Method)) continue } - ctx := &rpctypes.Context{JSONReq: &request, HTTPReq: r} - args := []reflect.Value{reflect.ValueOf(ctx)} - if len(request.Params) > 0 { - fnArgs, err := jsonParamsToArgs(rpcFunc, request.Params) - if err != nil { - responses = append( - responses, - rpctypes.RPCInvalidParamsError(request.ID, fmt.Errorf("error converting json params to arguments: %w", err)), - ) - c = false - continue - } - args = append(args, fnArgs...) 
- } - - if hasDefaultHeight(request, args) { - c = false - } - - returns := rpcFunc.f.Call(args) - logger.Debug("HTTPJSONRPC", "method", request.Method, "args", args, "returns", returns) - result, err := unreflectResult(returns) - switch e := err.(type) { - // if no error then return a success response - case nil: - responses = append(responses, rpctypes.NewRPCSuccessResponse(request.ID, result)) - - // if this already of type RPC error then forward that error - case *rpctypes.RPCError: - responses = append(responses, rpctypes.NewRPCErrorResponse(request.ID, e.Code, e.Message, e.Data)) - c = false - default: // we need to unwrap the error and parse it accordingly - switch errors.Unwrap(err) { - // check if the error was due to an invald request - case coretypes.ErrZeroOrNegativeHeight, coretypes.ErrZeroOrNegativePerPage, - coretypes.ErrPageOutOfRange, coretypes.ErrInvalidRequest: - responses = append(responses, rpctypes.RPCInvalidRequestError(request.ID, err)) - c = false - // lastly default all remaining errors as internal errors - default: // includes ctypes.ErrHeightNotAvailable and ctypes.ErrHeightExceedsChainHead - responses = append(responses, rpctypes.RPCInternalError(request.ID, err)) - c = false - } - } - - if c && !rpcFunc.cache { - c = false + req := req + ctx := rpctypes.WithCallInfo(hreq.Context(), &rpctypes.CallInfo{ + RPCRequest: &req, + HTTPRequest: hreq, + }) + result, err := rpcFunc.Call(ctx, req.Params) + if err != nil { + responses = append(responses, req.MakeError(err)) + } else { + responses = append(responses, req.MakeResponse(result)) } } - if len(responses) > 0 { - if wErr := WriteRPCResponseHTTP(w, c, responses...); wErr != nil { - logger.Error("failed to write responses", "err", wErr) - } + if len(responses) == 0 { + return } + writeRPCResponse(w, logger, responses...) } } @@ -160,81 +94,22 @@ func handleInvalidJSONRPCPaths(next http.HandlerFunc) http.HandlerFunc { } } -func mapParamsToArgs( - rpcFunc *RPCFunc, - params map[string]json.RawMessage, - argsOffset int, -) ([]reflect.Value, error) { - - values := make([]reflect.Value, len(rpcFunc.argNames)) - for i, argName := range rpcFunc.argNames { - argType := rpcFunc.args[i+argsOffset] - - if p, ok := params[argName]; ok && p != nil && len(p) > 0 { - val := reflect.New(argType) - err := tmjson.Unmarshal(p, val.Interface()) - if err != nil { - return nil, err - } - values[i] = val.Elem() - } else { // use default for that type - values[i] = reflect.Zero(argType) - } - } - - return values, nil -} - -func arrayParamsToArgs( - rpcFunc *RPCFunc, - params []json.RawMessage, - argsOffset int, -) ([]reflect.Value, error) { - - if len(rpcFunc.argNames) != len(params) { - return nil, fmt.Errorf("expected %v parameters (%v), got %v (%v)", - len(rpcFunc.argNames), rpcFunc.argNames, len(params), params) +// parseRequests parses a JSON-RPC request or request batch from data. 
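+//
+// Both of the following bodies are accepted, for example (a sketch; the
+// method names are placeholders):
+//
+//	{"jsonrpc":"2.0","id":1,"method":"echo","params":{"s":"hi"}}
+//	[{"jsonrpc":"2.0","id":1,"method":"echo"},{"jsonrpc":"2.0","id":2,"method":"status"}]
+//
+// A single object is returned as a one-element slice.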
+func parseRequests(data []byte) ([]rpctypes.RPCRequest, error) { + var reqs []rpctypes.RPCRequest + var err error + + isArray := bytes.HasPrefix(bytes.TrimSpace(data), []byte("[")) + if isArray { + err = json.Unmarshal(data, &reqs) + } else { + reqs = append(reqs, rpctypes.RPCRequest{}) + err = json.Unmarshal(data, &reqs[0]) } - - values := make([]reflect.Value, len(params)) - for i, p := range params { - argType := rpcFunc.args[i+argsOffset] - val := reflect.New(argType) - err := tmjson.Unmarshal(p, val.Interface()) - if err != nil { - return nil, err - } - values[i] = val.Elem() + if err != nil { + return nil, err } - return values, nil -} - -// raw is unparsed json (from json.RawMessage) encoding either a map or an -// array. -// -// Example: -// rpcFunc.args = [rpctypes.Context string] -// rpcFunc.argNames = ["arg"] -func jsonParamsToArgs(rpcFunc *RPCFunc, raw []byte) ([]reflect.Value, error) { - const argsOffset = 1 - - // TODO: Make more efficient, perhaps by checking the first character for '{' or '['? - // First, try to get the map. - var m map[string]json.RawMessage - err := json.Unmarshal(raw, &m) - if err == nil { - return mapParamsToArgs(rpcFunc, m, argsOffset) - } - - // Otherwise, try an array. - var a []json.RawMessage - err = json.Unmarshal(raw, &a) - if err == nil { - return arrayParamsToArgs(rpcFunc, a, argsOffset) - } - - // Otherwise, bad format, we cannot parse - return nil, fmt.Errorf("unknown type for JSON params: %v. Expected map or array", err) + return reqs, nil } // writes a list of available rpc endpoints as an html page @@ -243,17 +118,15 @@ func writeListOfEndpoints(w http.ResponseWriter, r *http.Request, funcMap map[st noArgs := make(map[string]string) for name, rf := range funcMap { base := fmt.Sprintf("//%s/%s", r.Host, name) - // N.B. Check argNames, not args, since the type list includes the type - // of the leading context argument. - if len(rf.argNames) == 0 { + if len(rf.args) == 0 { noArgs[name] = base - } else { - query := append([]string(nil), rf.argNames...) - for i, arg := range query { - query[i] = arg + "=_" - } - hasArgs[name] = base + "?" + strings.Join(query, "&") + continue + } + var query []string + for _, arg := range rf.args { + query = append(query, arg.name+"=_") } + hasArgs[name] = base + "?" 
+ strings.Join(query, "&") } w.Header().Set("Content-Type", "text/html") _ = listOfEndpoints.Execute(w, map[string]map[string]string{ @@ -262,15 +135,6 @@ func writeListOfEndpoints(w http.ResponseWriter, r *http.Request, funcMap map[st }) } -func hasDefaultHeight(r rpctypes.RPCRequest, h []reflect.Value) bool { - switch r.Method { - case "block", "block_results", "commit", "consensus_params", "validators": - return len(h) < 2 || h[1].IsZero() - default: - return false - } -} - var listOfEndpoints = template.Must(template.New("list").Parse(` List of RPC Endpoints diff --git a/rpc/jsonrpc/server/http_json_handler_test.go b/rpc/jsonrpc/server/http_json_handler_test.go index 64e7597fd1..77c74ffbcf 100644 --- a/rpc/jsonrpc/server/http_json_handler_test.go +++ b/rpc/jsonrpc/server/http_json_handler_test.go @@ -1,8 +1,9 @@ package server import ( + "context" "encoding/json" - "io/ioutil" + "io" "net/http" "net/http/httptest" "strings" @@ -16,9 +17,16 @@ import ( ) func testMux() *http.ServeMux { + type testArgs struct { + S string `json:"s"` + I json.Number `json:"i"` + } + type blockArgs struct { + H json.Number `json:"h"` + } funcMap := map[string]*RPCFunc{ - "c": NewRPCFunc(func(ctx *rpctypes.Context, s string, i int) (string, error) { return "foo", nil }, "s,i", false), - "block": NewRPCFunc(func(ctx *rpctypes.Context, h int) (string, error) { return "block", nil }, "height", true), + "c": NewRPCFunc(func(ctx context.Context, arg *testArgs) (string, error) { return "foo", nil }), + "block": NewRPCFunc(func(ctx context.Context, arg *blockArgs) (string, error) { return "block", nil }), } mux := http.NewServeMux() logger := log.NewNopLogger() @@ -37,24 +45,24 @@ func TestRPCParams(t *testing.T) { tests := []struct { payload string wantErr string - expectedID interface{} + expectedID string }{ // bad - {`{"jsonrpc": "2.0", "id": "0"}`, "Method not found", rpctypes.JSONRPCStringID("0")}, - {`{"jsonrpc": "2.0", "method": "y", "id": "0"}`, "Method not found", rpctypes.JSONRPCStringID("0")}, + {`{"jsonrpc": "2.0", "id": "0"}`, "Method not found", `"0"`}, + {`{"jsonrpc": "2.0", "method": "y", "id": "0"}`, "Method not found", `"0"`}, // id not captured in JSON parsing failures - {`{"method": "c", "id": "0", "params": a}`, "invalid character", nil}, - {`{"method": "c", "id": "0", "params": ["a"]}`, "got 1", rpctypes.JSONRPCStringID("0")}, - {`{"method": "c", "id": "0", "params": ["a", "b"]}`, "invalid character", rpctypes.JSONRPCStringID("0")}, - {`{"method": "c", "id": "0", "params": [1, 1]}`, "of type string", rpctypes.JSONRPCStringID("0")}, + {`{"method": "c", "id": "0", "params": a}`, "invalid character", ""}, + {`{"method": "c", "id": "0", "params": ["a"]}`, "got 1", `"0"`}, + {`{"method": "c", "id": "0", "params": ["a", "b"]}`, "invalid number", `"0"`}, + {`{"method": "c", "id": "0", "params": [1, 1]}`, "of type string", `"0"`}, // no ID - notification // {`{"jsonrpc": "2.0", "method": "c", "params": ["a", "10"]}`, false, nil}, // good - {`{"jsonrpc": "2.0", "method": "c", "id": "0", "params": null}`, "", rpctypes.JSONRPCStringID("0")}, - {`{"method": "c", "id": "0", "params": {}}`, "", rpctypes.JSONRPCStringID("0")}, - {`{"method": "c", "id": "0", "params": ["a", "10"]}`, "", rpctypes.JSONRPCStringID("0")}, + {`{"jsonrpc": "2.0", "method": "c", "id": "0", "params": null}`, "", `"0"`}, + {`{"method": "c", "id": "0", "params": {}}`, "", `"0"`}, + {`{"method": "c", "id": "0", "params": ["a", "10"]}`, "", `"0"`}, } for i, tt := range tests { @@ -62,19 +70,17 @@ func TestRPCParams(t *testing.T) { rec 
:= httptest.NewRecorder() mux.ServeHTTP(rec, req) res := rec.Result() - defer res.Body.Close() + // Always expecting back a JSONRPCResponse assert.NotZero(t, res.StatusCode, "#%d: should always return code", i) - blob, err := ioutil.ReadAll(res.Body) - if err != nil { - t.Errorf("#%d: err reading body: %v", i, err) - continue - } + blob, err := io.ReadAll(res.Body) + require.NoError(t, err, "#%d: reading body", i) + require.NoError(t, res.Body.Close()) recv := new(rpctypes.RPCResponse) assert.Nil(t, json.Unmarshal(blob, recv), "#%d: expecting successful parsing of an RPCResponse:\nblob: %s", i, blob) assert.NotEqual(t, recv, new(rpctypes.RPCResponse), "#%d: not expecting a blank RPCResponse", i) - assert.Equal(t, tt.expectedID, recv.ID, "#%d: expected ID not matched in RPCResponse", i) + assert.Equal(t, tt.expectedID, recv.ID(), "#%d: expected ID not matched in RPCResponse", i) if tt.wantErr == "" { assert.Nil(t, recv.Error, "#%d: not expecting an error", i) } else { @@ -90,19 +96,19 @@ func TestJSONRPCID(t *testing.T) { tests := []struct { payload string wantErr bool - expectedID interface{} + expectedID string }{ // good id - {`{"jsonrpc": "2.0", "method": "c", "id": "0", "params": ["a", "10"]}`, false, rpctypes.JSONRPCStringID("0")}, - {`{"jsonrpc": "2.0", "method": "c", "id": "abc", "params": ["a", "10"]}`, false, rpctypes.JSONRPCStringID("abc")}, - {`{"jsonrpc": "2.0", "method": "c", "id": 0, "params": ["a", "10"]}`, false, rpctypes.JSONRPCIntID(0)}, - {`{"jsonrpc": "2.0", "method": "c", "id": 1, "params": ["a", "10"]}`, false, rpctypes.JSONRPCIntID(1)}, - {`{"jsonrpc": "2.0", "method": "c", "id": 1.3, "params": ["a", "10"]}`, false, rpctypes.JSONRPCIntID(1)}, - {`{"jsonrpc": "2.0", "method": "c", "id": -1, "params": ["a", "10"]}`, false, rpctypes.JSONRPCIntID(-1)}, + {`{"jsonrpc": "2.0", "method": "c", "id": "0", "params": ["a", "10"]}`, false, `"0"`}, + {`{"jsonrpc": "2.0", "method": "c", "id": "abc", "params": ["a", "10"]}`, false, `"abc"`}, + {`{"jsonrpc": "2.0", "method": "c", "id": 0, "params": ["a", "10"]}`, false, `0`}, + {`{"jsonrpc": "2.0", "method": "c", "id": 1, "params": ["a", "10"]}`, false, `1`}, + {`{"jsonrpc": "2.0", "method": "c", "id": -1, "params": ["a", "10"]}`, false, `-1`}, // bad id - {`{"jsonrpc": "2.0", "method": "c", "id": {}, "params": ["a", "10"]}`, true, nil}, - {`{"jsonrpc": "2.0", "method": "c", "id": [], "params": ["a", "10"]}`, true, nil}, + {`{"jsonrpc": "2.0", "method": "c", "id": {}, "params": ["a", "10"]}`, true, ""}, // object + {`{"jsonrpc": "2.0", "method": "c", "id": [], "params": ["a", "10"]}`, true, ""}, // array + {`{"jsonrpc": "2.0", "method": "c", "id": 1.3, "params": ["a", "10"]}`, true, ""}, // fractional } for i, tt := range tests { @@ -112,7 +118,7 @@ func TestJSONRPCID(t *testing.T) { res := rec.Result() // Always expecting back a JSONRPCResponse assert.NotZero(t, res.StatusCode, "#%d: should always return code", i) - blob, err := ioutil.ReadAll(res.Body) + blob, err := io.ReadAll(res.Body) if err != nil { t.Errorf("#%d: err reading body: %v", i, err) continue @@ -121,10 +127,10 @@ func TestJSONRPCID(t *testing.T) { recv := new(rpctypes.RPCResponse) err = json.Unmarshal(blob, recv) - assert.Nil(t, err, "#%d: expecting successful parsing of an RPCResponse:\nblob: %s", i, blob) + assert.NoError(t, err, "#%d: expecting successful parsing of an RPCResponse:\nblob: %s", i, blob) if !tt.wantErr { assert.NotEqual(t, recv, new(rpctypes.RPCResponse), "#%d: not expecting a blank RPCResponse", i) - assert.Equal(t, tt.expectedID, recv.ID, "#%d: 
expected ID not matched in RPCResponse", i) + assert.Equal(t, tt.expectedID, recv.ID(), "#%d: expected ID not matched in RPCResponse", i) assert.Nil(t, recv.Error, "#%d: not expecting an error", i) } else { assert.True(t, recv.Error.Code < 0, "#%d: not expecting a positive JSONRPC code", i) @@ -142,9 +148,9 @@ func TestRPCNotification(t *testing.T) { // Always expecting back a JSONRPCResponse require.True(t, statusOK(res.StatusCode), "should always return 2XX") - blob, err := ioutil.ReadAll(res.Body) + blob, err := io.ReadAll(res.Body) res.Body.Close() - require.Nil(t, err, "reading from the body should not give back an error") + require.NoError(t, err, "reading from the body should not give back an error") require.Equal(t, len(blob), 0, "a notification SHOULD NOT be responded to by the server") } @@ -178,7 +184,7 @@ func TestRPCNotificationInBatch(t *testing.T) { res := rec.Result() // Always expecting back a JSONRPCResponse assert.True(t, statusOK(res.StatusCode), "#%d: should always return 2XX", i) - blob, err := ioutil.ReadAll(res.Body) + blob, err := io.ReadAll(res.Body) if err != nil { t.Errorf("#%d: err reading body: %v", i, err) continue @@ -237,11 +243,11 @@ func TestRPCResponseCache(t *testing.T) { // Always expecting back a JSONRPCResponse require.True(t, statusOK(res.StatusCode), "should always return 2XX") - require.Equal(t, "max-age=31536000", res.Header.Get("Cache-control")) + require.Equal(t, "", res.Header.Get("Cache-control")) - _, err := ioutil.ReadAll(res.Body) + _, err := io.ReadAll(res.Body) res.Body.Close() - require.Nil(t, err, "reading from the body should not give back an error") + require.NoError(t, err, "reading from the body should not give back an error") // send a request with default height. body = strings.NewReader(`{"jsonrpc": "2.0","method":"block","id": 0, "params": ["0"]}`) @@ -254,7 +260,7 @@ func TestRPCResponseCache(t *testing.T) { require.True(t, statusOK(res.StatusCode), "should always return 2XX") require.Equal(t, "", res.Header.Get("Cache-control")) - _, err = ioutil.ReadAll(res.Body) + _, err = io.ReadAll(res.Body) res.Body.Close() - require.Nil(t, err, "reading from the body should not give back an error") + require.NoError(t, err, "reading from the body should not give back an error") } diff --git a/rpc/jsonrpc/server/http_server.go b/rpc/jsonrpc/server/http_server.go index 49e1e510ea..0b715835d0 100644 --- a/rpc/jsonrpc/server/http_server.go +++ b/rpc/jsonrpc/server/http_server.go @@ -2,13 +2,12 @@ package server import ( - "bufio" + "context" "encoding/json" "errors" "fmt" "net" "net/http" - "os" "runtime/debug" "strings" "time" @@ -46,217 +45,219 @@ func DefaultConfig() *Config { } // Serve creates a http.Server and calls Serve with the given listener. It -// wraps handler with RecoverAndLogHandler and a handler, which limits the max -// body size to config.MaxBodyBytes. -// -// NOTE: This function blocks - you may want to call it in a go-routine. -func Serve(listener net.Listener, handler http.Handler, logger log.Logger, config *Config) error { +// wraps handler to recover panics and limit the request body size. 
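+//
+// Serve blocks until the listener fails or ctx ends, so callers normally run
+// it in a goroutine. A minimal usage sketch (mux and logger are assumed to be
+// provided by the caller):
+//
+//	ln, err := Listen("tcp://127.0.0.1:26657", 0)
+//	if err != nil {
+//		return err
+//	}
+//	go func() { _ = Serve(ctx, ln, mux, logger, DefaultConfig()) }()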
+func Serve(ctx context.Context, listener net.Listener, handler http.Handler, logger log.Logger, config *Config) error {
 	logger.Info(fmt.Sprintf("Starting RPC HTTP server on %s", listener.Addr()))
+	h := recoverAndLogHandler(MaxBytesHandler(handler, config.MaxBodyBytes), logger)
 	s := &http.Server{
-		Handler:        RecoverAndLogHandler(maxBytesHandler{h: handler, n: config.MaxBodyBytes}, logger),
+		Handler:        h,
 		ReadTimeout:    config.ReadTimeout,
 		WriteTimeout:   config.WriteTimeout,
 		MaxHeaderBytes: config.MaxHeaderBytes,
 	}
-	err := s.Serve(listener)
-	logger.Info("RPC HTTP server stopped", "err", err)
-	return err
+	sig := make(chan struct{})
+	go func() {
+		select {
+		case <-ctx.Done():
+			sctx, cancel := context.WithTimeout(context.Background(), time.Second)
+			defer cancel()
+			_ = s.Shutdown(sctx)
+		case <-sig:
+		}
+	}()
+
+	if err := s.Serve(listener); err != nil {
+		logger.Info("RPC HTTP server stopped", "err", err)
+		close(sig)
+		return err
+	}
+	return nil
 }

-// Serve creates a http.Server and calls ServeTLS with the given listener,
-// certFile and keyFile. It wraps handler with RecoverAndLogHandler and a
-// handler, which limits the max body size to config.MaxBodyBytes.
-//
-// NOTE: This function blocks - you may want to call it in a go-routine.
-func ServeTLS(
-	listener net.Listener,
-	handler http.Handler,
-	certFile, keyFile string,
-	logger log.Logger,
-	config *Config,
-) error {
-	logger.Info(fmt.Sprintf("Starting RPC HTTPS server on %s (cert: %q, key: %q)",
-		listener.Addr(), certFile, keyFile))
+// ServeTLS creates a http.Server and calls ServeTLS with the given listener,
+// certFile and keyFile. It wraps handler to recover panics and limit the
+// request body size.
+func ServeTLS(ctx context.Context, listener net.Listener, handler http.Handler, certFile, keyFile string, logger log.Logger, config *Config) error {
+	logger.Info("Starting RPC HTTPS server",
+		"listenerAddr", listener.Addr(),
+		"certFile", certFile,
+		"keyFile", keyFile)
+
 	s := &http.Server{
-		Handler:        RecoverAndLogHandler(maxBytesHandler{h: handler, n: config.MaxBodyBytes}, logger),
+		Handler:        recoverAndLogHandler(MaxBytesHandler(handler, config.MaxBodyBytes), logger),
 		ReadTimeout:    config.ReadTimeout,
 		WriteTimeout:   config.WriteTimeout,
 		MaxHeaderBytes: config.MaxHeaderBytes,
 	}
-	err := s.ServeTLS(listener, certFile, keyFile)
+	sig := make(chan struct{})
+	go func() {
+		select {
+		case <-ctx.Done():
+			sctx, cancel := context.WithTimeout(context.Background(), time.Second)
+			defer cancel()
+			_ = s.Shutdown(sctx)
+		case <-sig:
+		}
+	}()

-	logger.Error("RPC HTTPS server stopped", "err", err)
-	return err
+	if err := s.ServeTLS(listener, certFile, keyFile); err != nil {
+		logger.Error("RPC HTTPS server stopped", "err", err)
+		close(sig)
+		return err
+	}
+	return nil
 }

-// WriteRPCResponseHTTPError marshals res as JSON (with indent) and writes it
-// to w.
-//
-// Maps JSON RPC error codes to HTTP Status codes as follows:
-//
-// HTTP Status	code	message
-// 500	-32700	Parse error.
-// 400	-32600	Invalid Request.
-// 404	-32601	Method not found.
-// 500	-32602	Invalid params.
-// 500	-32603	Internal error.
-// 500	-32099..-32000	Server error.
+// writeInternalError writes an internal server error (500) to w with the text
+// of err in the body. This is a fallback used when a handler is unable to
+// write the expected response.
+func writeInternalError(w http.ResponseWriter, err error) {
+	w.Header().Set("Content-Type", "text/plain")
+	w.WriteHeader(http.StatusInternalServerError)
+	fmt.Fprintln(w, err.Error())
+}
+
+// writeHTTPResponse writes a JSON-RPC response to w. If rsp encodes an error,
+// the response body is its error object; otherwise the body is the result.
 //
-// source: https://www.jsonrpc.org/historical/json-rpc-over-http.html
-func WriteRPCResponseHTTPError(
-	w http.ResponseWriter,
-	res rpctypes.RPCResponse,
-) error {
-	if res.Error == nil {
-		panic("tried to write http error response without RPC error")
+// Unless there is an error encoding the response, the status is 200 OK.
+func writeHTTPResponse(w http.ResponseWriter, log log.Logger, rsp rpctypes.RPCResponse) {
+	var body []byte
+	var err error
+	if rsp.Error != nil {
+		body, err = json.Marshal(rsp.Error)
+	} else {
+		body = rsp.Result
 	}
-
-	jsonBytes, err := json.MarshalIndent(res, "", "  ")
 	if err != nil {
-		return fmt.Errorf("json marshal: %w", err)
-	}
-
-	var httpCode int
-	switch res.Error.Code {
-	case -32600:
-		httpCode = http.StatusBadRequest
-	case -32601:
-		httpCode = http.StatusNotFound
-	default:
-		httpCode = http.StatusInternalServerError
+		log.Error("Error encoding RPC response", "err", err)
+		writeInternalError(w, err)
+		return
 	}
-
 	w.Header().Set("Content-Type", "application/json")
-	w.WriteHeader(httpCode)
-	_, err = w.Write(jsonBytes)
-	return err
+	w.WriteHeader(http.StatusOK)
+	_, _ = w.Write(body)
 }

-// WriteRPCResponseHTTP marshals res as JSON (with indent) and writes it to w.
-// If the rpc response can be cached, add cache-control to the response header.
-func WriteRPCResponseHTTP(w http.ResponseWriter, c bool, res ...rpctypes.RPCResponse) error {
-	var v interface{}
-	if len(res) == 1 {
-		v = res[0]
+// writeRPCResponse writes one or more JSON-RPC responses to w. A single
+// response is encoded as an object, otherwise the response is sent as a batch
+// (array) of response objects.
+//
+// Unless there is an error encoding the responses, the status is 200 OK.
+func writeRPCResponse(w http.ResponseWriter, log log.Logger, rsps ...rpctypes.RPCResponse) {
+	var body []byte
+	var err error
+	if len(rsps) == 1 {
+		body, err = json.Marshal(rsps[0])
 	} else {
-		v = res
+		body, err = json.Marshal(rsps)
 	}
-
-	jsonBytes, err := json.MarshalIndent(v, "", "  ")
 	if err != nil {
-		return fmt.Errorf("json marshal: %w", err)
+		log.Error("Error encoding RPC response", "err", err)
+		writeInternalError(w, err)
+		return
 	}
 	w.Header().Set("Content-Type", "application/json")
-	if c {
-		w.Header().Set("Cache-Control", "max-age=31536000") // expired after one year
-	}
-	w.WriteHeader(200)
-	_, err = w.Write(jsonBytes)
-	return err
+	w.WriteHeader(http.StatusOK)
+	_, _ = w.Write(body)
 }

 //-----------------------------------------------------------------------------

-// RecoverAndLogHandler wraps an HTTP handler, adding error logging.
-// If the inner function panics, the outer function recovers, logs, sends an
-// HTTP 500 error response.
-func RecoverAndLogHandler(handler http.Handler, logger log.Logger) http.Handler {
+// recoverAndLogHandler wraps an HTTP handler, adding error logging. If the
+// inner handler panics, the wrapper recovers, logs, and sends an HTTP 500
+// error response to the client.
+func recoverAndLogHandler(handler http.Handler, logger log.Logger) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		// Wrap the ResponseWriter to remember the status
-		rww := &responseWriterWrapper{-1, w}
-		begin := time.Now()
-
-		rww.Header().Set("X-Server-Time", fmt.Sprintf("%v", begin.Unix()))
+		// Capture the HTTP status written by the handler.
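+		// Note: if the handler never calls WriteHeader explicitly, httpStatus
+		// stays 0 here even though the client ultimately sees 200 OK; see the
+		// comment on statusWriter.WriteHeader below.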
+ var httpStatus int + rww := newStatusWriter(w, &httpStatus) + // Recover panics from inside handler and try to send the client + // 500 Internal server error. If the handler panicked after already + // sending a (partial) response, this is a no-op. defer func() { - // Handle any panics in the panic handler below. Does not use the logger, since we want - // to avoid any further panics. However, we try to return a 500, since it otherwise - // defaults to 200 and there is no other way to terminate the connection. If that - // should panic for whatever reason then the Go HTTP server will handle it and - // terminate the connection - panicing is the de-facto and only way to get the Go HTTP - // server to terminate the request and close the connection/stream: - // https://github.com/golang/go/issues/17790#issuecomment-258481416 - if e := recover(); e != nil { - fmt.Fprintf(os.Stderr, "Panic during RPC panic recovery: %v\n%v\n", e, string(debug.Stack())) - w.WriteHeader(500) + if v := recover(); v != nil { + var err error + switch e := v.(type) { + case error: + err = e + case string: + err = errors.New(e) + case fmt.Stringer: + err = errors.New(e.String()) + default: + err = fmt.Errorf("panic with value %v", v) + } + + logger.Error("Panic in RPC HTTP handler", + "err", err, "stack", string(debug.Stack())) + writeInternalError(rww, err) } }() + // Log timing and response information from the handler. + begin := time.Now() defer func() { - // Send a 500 error if a panic happens during a handler. - // Without this, Chrome & Firefox were retrying aborted ajax requests, - // at least to my localhost. - if e := recover(); e != nil { - - // If RPCResponse - if res, ok := e.(rpctypes.RPCResponse); ok { - if wErr := WriteRPCResponseHTTP(rww, false, res); wErr != nil { - logger.Error("failed to write response", "res", res, "err", wErr) - } - } else { - // Panics can contain anything, attempt to normalize it as an error. - var err error - switch e := e.(type) { - case error: - err = e - case string: - err = errors.New(e) - case fmt.Stringer: - err = errors.New(e.String()) - default: - } - - logger.Error("panic in RPC HTTP handler", "err", e, "stack", string(debug.Stack())) - - res := rpctypes.RPCInternalError(rpctypes.JSONRPCIntID(-1), err) - if wErr := WriteRPCResponseHTTPError(rww, res); wErr != nil { - logger.Error("failed to write response", "res", res, "err", wErr) - } - } - } - - // Finally, log. - durationMS := time.Since(begin).Nanoseconds() / 1000000 - if rww.Status == -1 { - rww.Status = 200 - } + elapsed := time.Since(begin) logger.Debug("served RPC HTTP response", "method", r.Method, "url", r.URL, - "status", rww.Status, - "duration", durationMS, + "status", httpStatus, + "duration-sec", elapsed.Seconds(), "remoteAddr", r.RemoteAddr, ) }() + rww.Header().Set("X-Server-Time", fmt.Sprintf("%v", begin.Unix())) handler.ServeHTTP(rww, r) }) } -// Remember the status for logging -type responseWriterWrapper struct { - Status int - http.ResponseWriter +// MaxBytesHandler wraps h in a handler that limits the size of the request +// body to at most maxBytes. If maxBytes <= 0, the request body is not limited. 
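+//
+// Usage sketch, wrapping an arbitrary mux before serving (the limit shown is
+// an arbitrary choice):
+//
+//	h := MaxBytesHandler(mux, 1<<20) // cap request bodies at 1 MiB
+//	_ = http.ListenAndServe("127.0.0.1:26657", h)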
+func MaxBytesHandler(h http.Handler, maxBytes int64) http.Handler { + if maxBytes <= 0 { + return h + } + return maxBytesHandler{handler: h, maxBytes: maxBytes} } -func (w *responseWriterWrapper) WriteHeader(status int) { - w.Status = status - w.ResponseWriter.WriteHeader(status) +type maxBytesHandler struct { + handler http.Handler + maxBytes int64 } -// implements http.Hijacker -func (w *responseWriterWrapper) Hijack() (net.Conn, *bufio.ReadWriter, error) { - return w.ResponseWriter.(http.Hijacker).Hijack() +func (h maxBytesHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + req.Body = http.MaxBytesReader(w, req.Body, h.maxBytes) + h.handler.ServeHTTP(w, req) } -type maxBytesHandler struct { - h http.Handler - n int64 +// newStatusWriter wraps an http.ResponseWriter to capture the HTTP status code +// in *code. +func newStatusWriter(w http.ResponseWriter, code *int) statusWriter { + return statusWriter{ + ResponseWriter: w, + Hijacker: w.(http.Hijacker), + code: code, + } } -func (h maxBytesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - r.Body = http.MaxBytesReader(w, r.Body, h.n) - h.h.ServeHTTP(w, r) +type statusWriter struct { + http.ResponseWriter + http.Hijacker // to support websocket upgrade + + code *int +} + +// WriteHeader implements part of http.ResponseWriter. It delegates to the +// wrapped writer, and as a side effect captures the written code. +// +// Note that if a request does not explicitly call WriteHeader, the code will +// not be updated. +func (w statusWriter) WriteHeader(code int) { + *w.code = code + w.ResponseWriter.WriteHeader(code) } // Listen starts a new net.Listener on the given address. diff --git a/rpc/jsonrpc/server/http_server_test.go b/rpc/jsonrpc/server/http_server_test.go index 39e7135655..838a2ef6ca 100644 --- a/rpc/jsonrpc/server/http_server_test.go +++ b/rpc/jsonrpc/server/http_server_test.go @@ -1,10 +1,10 @@ package server import ( + "context" "crypto/tls" - "errors" "fmt" - "io/ioutil" + "io" "net" "net/http" "net/http/httptest" @@ -13,6 +13,7 @@ import ( "testing" "time" + "github.com/fortytw2/leaktest" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -27,6 +28,13 @@ type sampleResult struct { func TestMaxOpenConnections(t *testing.T) { const max = 5 // max simultaneous connections + t.Cleanup(leaktest.Check(t)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + // Start the server. var open int32 mux := http.NewServeMux() @@ -42,7 +50,8 @@ func TestMaxOpenConnections(t *testing.T) { l, err := Listen("tcp://127.0.0.1:0", max) require.NoError(t, err) defer l.Close() - go Serve(l, mux, log.TestingLogger(), config) //nolint:errcheck // ignore for tests + + go Serve(ctx, l, mux, logger, config) //nolint:errcheck // ignore for tests // Make N GET calls to the server. 
attempts := max * 2 @@ -71,6 +80,8 @@ func TestMaxOpenConnections(t *testing.T) { } func TestServeTLS(t *testing.T) { + t.Cleanup(leaktest.Check(t)) + ln, err := net.Listen("tcp", "localhost:0") require.NoError(t, err) defer ln.Close() @@ -80,10 +91,17 @@ func TestServeTLS(t *testing.T) { fmt.Fprint(w, "some body") }) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + chErr := make(chan error, 1) go func() { - // FIXME This goroutine leaks - chErr <- ServeTLS(ln, mux, "test.crt", "test.key", log.TestingLogger(), DefaultConfig()) + select { + case chErr <- ServeTLS(ctx, ln, mux, "test.crt", "test.key", logger, DefaultConfig()): + case <-ctx.Done(): + } }() select { @@ -101,82 +119,54 @@ func TestServeTLS(t *testing.T) { defer res.Body.Close() assert.Equal(t, http.StatusOK, res.StatusCode) - body, err := ioutil.ReadAll(res.Body) + body, err := io.ReadAll(res.Body) require.NoError(t, err) assert.Equal(t, []byte("some body"), body) } -func TestWriteRPCResponseHTTP(t *testing.T) { - id := rpctypes.JSONRPCIntID(-1) +func TestWriteRPCResponse(t *testing.T) { + req := rpctypes.NewRequest(-1) // one argument w := httptest.NewRecorder() - err := WriteRPCResponseHTTP(w, true, rpctypes.NewRPCSuccessResponse(id, &sampleResult{"hello"})) - require.NoError(t, err) + logger := log.NewNopLogger() + writeRPCResponse(w, logger, req.MakeResponse(&sampleResult{"hello"})) resp := w.Result() - body, err := ioutil.ReadAll(resp.Body) - _ = resp.Body.Close() + body, err := io.ReadAll(resp.Body) + require.NoError(t, resp.Body.Close()) require.NoError(t, err) assert.Equal(t, 200, resp.StatusCode) assert.Equal(t, "application/json", resp.Header.Get("Content-Type")) - assert.Equal(t, "max-age=31536000", resp.Header.Get("Cache-control")) - assert.Equal(t, `{ - "jsonrpc": "2.0", - "id": -1, - "result": { - "value": "hello" - } -}`, string(body)) + assert.Equal(t, "", resp.Header.Get("Cache-control")) + assert.Equal(t, `{"jsonrpc":"2.0","id":-1,"result":{"value":"hello"}}`, string(body)) // multiple arguments w = httptest.NewRecorder() - err = WriteRPCResponseHTTP(w, - false, - rpctypes.NewRPCSuccessResponse(id, &sampleResult{"hello"}), - rpctypes.NewRPCSuccessResponse(id, &sampleResult{"world"})) - require.NoError(t, err) + writeRPCResponse(w, logger, + req.MakeResponse(&sampleResult{"hello"}), + req.MakeResponse(&sampleResult{"world"}), + ) resp = w.Result() - body, err = ioutil.ReadAll(resp.Body) - _ = resp.Body.Close() + body, err = io.ReadAll(resp.Body) + require.NoError(t, resp.Body.Close()) require.NoError(t, err) assert.Equal(t, 200, resp.StatusCode) assert.Equal(t, "application/json", resp.Header.Get("Content-Type")) - assert.Equal(t, `[ - { - "jsonrpc": "2.0", - "id": -1, - "result": { - "value": "hello" - } - }, - { - "jsonrpc": "2.0", - "id": -1, - "result": { - "value": "world" - } - } -]`, string(body)) + assert.Equal(t, `[{"jsonrpc":"2.0","id":-1,"result":{"value":"hello"}},`+ + `{"jsonrpc":"2.0","id":-1,"result":{"value":"world"}}]`, string(body)) } -func TestWriteRPCResponseHTTPError(t *testing.T) { +func TestWriteHTTPResponse(t *testing.T) { w := httptest.NewRecorder() - err := WriteRPCResponseHTTPError(w, rpctypes.RPCInternalError(rpctypes.JSONRPCIntID(-1), errors.New("foo"))) - require.NoError(t, err) + logger := log.NewNopLogger() + req := rpctypes.NewRequest(-1) + writeHTTPResponse(w, logger, req.MakeErrorf(rpctypes.CodeInternalError, "foo")) resp := w.Result() - body, err := ioutil.ReadAll(resp.Body) - _ = resp.Body.Close() + body, err := 
io.ReadAll(resp.Body) + require.NoError(t, resp.Body.Close()) require.NoError(t, err) - assert.Equal(t, http.StatusInternalServerError, resp.StatusCode) + assert.Equal(t, http.StatusOK, resp.StatusCode) assert.Equal(t, "application/json", resp.Header.Get("Content-Type")) - assert.Equal(t, `{ - "jsonrpc": "2.0", - "id": -1, - "error": { - "code": -32603, - "message": "Internal error", - "data": "foo" - } -}`, string(body)) + assert.Equal(t, `{"code":-32603,"message":"Internal error","data":"foo"}`, string(body)) } diff --git a/rpc/jsonrpc/server/http_uri_handler.go b/rpc/jsonrpc/server/http_uri_handler.go index 07b3616b44..c755bbaf13 100644 --- a/rpc/jsonrpc/server/http_uri_handler.go +++ b/rpc/jsonrpc/server/http_uri_handler.go @@ -2,233 +2,104 @@ package server import ( "encoding/hex" + "encoding/json" "errors" "fmt" "net/http" - "reflect" - "regexp" + "strconv" "strings" - tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) -// HTTP + URI handler - -var reInt = regexp.MustCompile(`^-?[0-9]+$`) +// uriReqID is a placeholder ID used for GET requests, which do not receive a +// JSON-RPC request ID from the caller. +const uriReqID = -1 // convert from a function name to the http handler func makeHTTPHandler(rpcFunc *RPCFunc, logger log.Logger) func(http.ResponseWriter, *http.Request) { - // Always return -1 as there's no ID here. - dummyID := rpctypes.JSONRPCIntID(-1) // URIClientRequestID - - // Exception for websocket endpoints - if rpcFunc.ws { - return func(w http.ResponseWriter, r *http.Request) { - res := rpctypes.RPCMethodNotFoundError(dummyID) - if wErr := WriteRPCResponseHTTPError(w, res); wErr != nil { - logger.Error("failed to write response", "res", res, "err", wErr) - } - } - } - - // All other endpoints - return func(w http.ResponseWriter, r *http.Request) { - logger.Debug("HTTP HANDLER", "req", r) - - ctx := &rpctypes.Context{HTTPReq: r} - args := []reflect.Value{reflect.ValueOf(ctx)} - - fnArgs, err := httpParamsToArgs(rpcFunc, r) + return func(w http.ResponseWriter, req *http.Request) { + ctx := rpctypes.WithCallInfo(req.Context(), &rpctypes.CallInfo{ + HTTPRequest: req, + }) + args, err := parseURLParams(rpcFunc.args, req) if err != nil { - res := rpctypes.RPCInvalidParamsError(dummyID, - fmt.Errorf("error converting http params to arguments: %w", err), - ) - if wErr := WriteRPCResponseHTTPError(w, res); wErr != nil { - logger.Error("failed to write response", "res", res, "err", wErr) - } + w.Header().Set("Content-Type", "text/plain") + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintln(w, err.Error()) return } - args = append(args, fnArgs...) - - returns := rpcFunc.f.Call(args) - - logger.Debug("HTTPRestRPC", "method", r.URL.Path, "args", args, "returns", returns) - result, err := unreflectResult(returns) - switch e := err.(type) { - // if no error then return a success response - case nil: - res := rpctypes.NewRPCSuccessResponse(dummyID, result) - if wErr := WriteRPCResponseHTTP(w, rpcFunc.cache, res); wErr != nil { - logger.Error("failed to write response", "res", res, "err", wErr) - } - - // if this already of type RPC error then forward that error. 
- case *rpctypes.RPCError: - res := rpctypes.NewRPCErrorResponse(dummyID, e.Code, e.Message, e.Data) - if wErr := WriteRPCResponseHTTPError(w, res); wErr != nil { - logger.Error("failed to write response", "res", res, "err", wErr) - } - - default: // we need to unwrap the error and parse it accordingly - var res rpctypes.RPCResponse - - switch errors.Unwrap(err) { - case coretypes.ErrZeroOrNegativeHeight, - coretypes.ErrZeroOrNegativePerPage, - coretypes.ErrPageOutOfRange, - coretypes.ErrInvalidRequest: - res = rpctypes.RPCInvalidRequestError(dummyID, err) - default: // ctypes.ErrHeightNotAvailable, ctypes.ErrHeightExceedsChainHead: - res = rpctypes.RPCInternalError(dummyID, err) - } - - if wErr := WriteRPCResponseHTTPError(w, res); wErr != nil { - logger.Error("failed to write response", "res", res, "err", wErr) - } + jreq := rpctypes.NewRequest(uriReqID) + result, err := rpcFunc.Call(ctx, args) + if err == nil { + writeHTTPResponse(w, logger, jreq.MakeResponse(result)) + } else { + writeHTTPResponse(w, logger, jreq.MakeError(err)) } - } } -// Covert an http query to a list of properly typed values. -// To be properly decoded the arg must be a concrete type from tendermint (if its an interface). -func httpParamsToArgs(rpcFunc *RPCFunc, r *http.Request) ([]reflect.Value, error) { - // skip types.Context - const argsOffset = 1 - - values := make([]reflect.Value, len(rpcFunc.argNames)) - - for i, name := range rpcFunc.argNames { - argType := rpcFunc.args[i+argsOffset] - - values[i] = reflect.Zero(argType) // set default for that type - - arg := getParam(r, name) - // log.Notice("param to arg", "argType", argType, "name", name, "arg", arg) - - if arg == "" { - continue - } - - v, ok, err := nonJSONStringToArg(argType, arg) - if err != nil { - return nil, err - } - if ok { - values[i] = v - continue - } - - values[i], err = jsonStringToArg(argType, arg) - if err != nil { - return nil, err - } - } - - return values, nil -} - -func jsonStringToArg(rt reflect.Type, arg string) (reflect.Value, error) { - rv := reflect.New(rt) - err := tmjson.Unmarshal([]byte(arg), rv.Interface()) - if err != nil { - return rv, err +func parseURLParams(args []argInfo, req *http.Request) ([]byte, error) { + if err := req.ParseForm(); err != nil { + return nil, fmt.Errorf("invalid HTTP request: %w", err) } - rv = rv.Elem() - return rv, nil -} - -func nonJSONStringToArg(rt reflect.Type, arg string) (reflect.Value, bool, error) { - if rt.Kind() == reflect.Ptr { - rv1, ok, err := nonJSONStringToArg(rt.Elem(), arg) - switch { - case err != nil: - return reflect.Value{}, false, err - case ok: - rv := reflect.New(rt.Elem()) - rv.Elem().Set(rv1) - return rv, true, nil - default: - return reflect.Value{}, false, nil + getArg := func(name string) (string, bool) { + if req.Form.Has(name) { + return req.Form.Get(name), true } - } else { - return _nonJSONStringToArg(rt, arg) - } -} - -// NOTE: rt.Kind() isn't a pointer. 
-func _nonJSONStringToArg(rt reflect.Type, arg string) (reflect.Value, bool, error) { - isIntString := reInt.Match([]byte(arg)) - isQuotedString := strings.HasPrefix(arg, `"`) && strings.HasSuffix(arg, `"`) - isHexString := strings.HasPrefix(strings.ToLower(arg), "0x") - - var expectingString, expectingByteSlice, expectingInt bool - switch rt.Kind() { - case reflect.Int, - reflect.Uint, - reflect.Int8, - reflect.Uint8, - reflect.Int16, - reflect.Uint16, - reflect.Int32, - reflect.Uint32, - reflect.Int64, - reflect.Uint64: - expectingInt = true - case reflect.String: - expectingString = true - case reflect.Slice: - expectingByteSlice = rt.Elem().Kind() == reflect.Uint8 + return "", false } - if isIntString && expectingInt { - qarg := `"` + arg + `"` - rv, err := jsonStringToArg(rt, qarg) - if err != nil { - return rv, false, err - } - - return rv, true, nil - } - - if isHexString { - if !expectingString && !expectingByteSlice { - err := fmt.Errorf("got a hex string arg, but expected '%s'", - rt.Kind().String()) - return reflect.ValueOf(nil), false, err - } - - var value []byte - value, err := hex.DecodeString(arg[2:]) - if err != nil { - return reflect.ValueOf(nil), false, err - } - if rt.Kind() == reflect.String { - return reflect.ValueOf(string(value)), true, nil + params := make(map[string]interface{}) + for _, arg := range args { + v, ok := getArg(arg.name) + if !ok { + continue } - return reflect.ValueOf(value), true, nil - } - - if isQuotedString && expectingByteSlice { - v := reflect.New(reflect.TypeOf("")) - err := tmjson.Unmarshal([]byte(arg), v.Interface()) - if err != nil { - return reflect.ValueOf(nil), false, err + if z, err := decodeInteger(v); err == nil { + params[arg.name] = z + } else if b, err := strconv.ParseBool(v); err == nil { + params[arg.name] = b + } else if lc := strings.ToLower(v); strings.HasPrefix(lc, "0x") { + dec, err := hex.DecodeString(lc[2:]) + if err != nil { + return nil, fmt.Errorf("invalid hex string: %w", err) + } else if len(dec) == 0 { + return nil, errors.New("invalid empty hex string") + } + if arg.isBinary { + params[arg.name] = dec + } else { + params[arg.name] = string(dec) + } + } else if isQuotedString(v) { + var dec string + if err := json.Unmarshal([]byte(v), &dec); err != nil { + return nil, fmt.Errorf("invalid quoted string: %w", err) + } + if arg.isBinary { + params[arg.name] = []byte(dec) + } else { + params[arg.name] = dec + } + } else { + params[arg.name] = v } - v = v.Elem() - return reflect.ValueOf([]byte(v.String())), true, nil } + return json.Marshal(params) +} - return reflect.ValueOf(nil), false, nil +// isQuotedString reports whether s is enclosed in double quotes. +func isQuotedString(s string) bool { + return len(s) >= 2 && strings.HasPrefix(s, `"`) && strings.HasSuffix(s, `"`) } -func getParam(r *http.Request, param string) string { - s := r.URL.Query().Get(param) - if s == "" { - s = r.FormValue(param) +// decodeInteger decodes s into an int64. If s is "double quoted" the quotes +// are removed; otherwise s must be a base-10 digit string. 
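+//
+// For example, both 25 and "25" decode to 25. A value like 0x19 is not
+// handled here; hex strings are decoded separately in parseURLParams.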
+func decodeInteger(s string) (int64, error) { + if isQuotedString(s) { + s = s[1 : len(s)-1] } - return s + return strconv.ParseInt(s, 10, 64) } diff --git a/rpc/jsonrpc/server/parse_test.go b/rpc/jsonrpc/server/parse_test.go index 92ea6f2c0f..4a0e92ad13 100644 --- a/rpc/jsonrpc/server/parse_test.go +++ b/rpc/jsonrpc/server/parse_test.go @@ -1,8 +1,8 @@ package server import ( + "context" "encoding/json" - "fmt" "net/http" "strconv" "testing" @@ -10,7 +10,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/tendermint/tendermint/libs/bytes" - rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) func TestParseJSONMap(t *testing.T) { @@ -19,7 +18,7 @@ func TestParseJSONMap(t *testing.T) { // naive is float,string var p1 map[string]interface{} err := json.Unmarshal(input, &p1) - if assert.Nil(t, err) { + if assert.NoError(t, err) { h, ok := p1["height"].(float64) if assert.True(t, ok, "%#v", p1["height"]) { assert.EqualValues(t, 22, h) @@ -37,7 +36,7 @@ func TestParseJSONMap(t *testing.T) { "height": &tmp, } err = json.Unmarshal(input, &p2) - if assert.Nil(t, err) { + if assert.NoError(t, err) { h, ok := p2["height"].(float64) if assert.True(t, ok, "%#v", p2["height"]) { assert.EqualValues(t, 22, h) @@ -59,7 +58,7 @@ func TestParseJSONMap(t *testing.T) { Value: &bytes.HexBytes{}, } err = json.Unmarshal(input, &p3) - if assert.Nil(t, err) { + if assert.NoError(t, err) { h, ok := p3.Height.(*int) if assert.True(t, ok, "%#v", p3.Height) { assert.Equal(t, 22, *h) @@ -76,7 +75,7 @@ func TestParseJSONMap(t *testing.T) { Height int `json:"height"` }{} err = json.Unmarshal(input, &p4) - if assert.Nil(t, err) { + if assert.NoError(t, err) { assert.EqualValues(t, 22, p4.Height) assert.EqualValues(t, []byte{0x12, 0x34}, p4.Value) } @@ -85,16 +84,16 @@ func TestParseJSONMap(t *testing.T) { // dynamic keys on map, and we can deserialize to the desired types var p5 map[string]*json.RawMessage err = json.Unmarshal(input, &p5) - if assert.Nil(t, err) { + if assert.NoError(t, err) { var h int err = json.Unmarshal(*p5["height"], &h) - if assert.Nil(t, err) { + if assert.NoError(t, err) { assert.Equal(t, 22, h) } var v bytes.HexBytes err = json.Unmarshal(*p5["value"], &v) - if assert.Nil(t, err) { + if assert.NoError(t, err) { assert.Equal(t, bytes.HexBytes{0x12, 0x34}, v) } } @@ -106,7 +105,7 @@ func TestParseJSONArray(t *testing.T) { // naive is float,string var p1 []interface{} err := json.Unmarshal(input, &p1) - if assert.Nil(t, err) { + if assert.NoError(t, err) { v, ok := p1[0].(string) if assert.True(t, ok, "%#v", p1[0]) { assert.EqualValues(t, "1234", v) @@ -121,7 +120,7 @@ func TestParseJSONArray(t *testing.T) { tmp := 0 p2 := []interface{}{&bytes.HexBytes{}, &tmp} err = json.Unmarshal(input, &p2) - if assert.Nil(t, err) { + if assert.NoError(t, err) { v, ok := p2[0].(*bytes.HexBytes) if assert.True(t, ok, "%#v", p2[0]) { assert.EqualValues(t, []byte{0x12, 0x34}, *v) @@ -134,8 +133,12 @@ func TestParseJSONArray(t *testing.T) { } func TestParseJSONRPC(t *testing.T) { - demo := func(ctx *rpctypes.Context, height int, name string) {} - call := NewRPCFunc(demo, "height,name", false) + type demoArgs struct { + Height int `json:"height,string"` + Name string `json:"name"` + } + demo := func(ctx context.Context, _ *demoArgs) error { return nil } + rfunc := NewRPCFunc(demo) cases := []struct { raw string @@ -153,17 +156,19 @@ func TestParseJSONRPC(t *testing.T) { {`[7,"flew",100]`, 0, "", true}, {`{"name": -12, "height": "fred"}`, 0, "", true}, } + ctx := context.Background() for idx, tc 
:= range cases { i := strconv.Itoa(idx) - data := []byte(tc.raw) - vals, err := jsonParamsToArgs(call, data) + vals, err := rfunc.parseParams(ctx, []byte(tc.raw)) if tc.fail { - assert.NotNil(t, err, i) + assert.Error(t, err, i) } else { - assert.Nil(t, err, "%s: %+v", i, err) - if assert.Equal(t, 2, len(vals), i) { - assert.Equal(t, tc.height, vals[0].Int(), i) - assert.Equal(t, tc.name, vals[1].String(), i) + assert.NoError(t, err, "%s: %+v", i, err) + assert.Equal(t, 2, len(vals), i) + p, ok := vals[1].Interface().(*demoArgs) + if assert.True(t, ok) { + assert.Equal(t, tc.height, int64(p.Height), i) + assert.Equal(t, tc.name, p.Name, i) } } @@ -171,43 +176,153 @@ func TestParseJSONRPC(t *testing.T) { } func TestParseURI(t *testing.T) { - demo := func(ctx *rpctypes.Context, height int, name string) {} - call := NewRPCFunc(demo, "height,name", false) + // URI parameter parsing happens in two phases: + // + // Phase 1 swizzles the query parameters into JSON. The result of this + // phase must be valid JSON, but may fail the second stage. + // + // Phase 2 decodes the JSON to obtain the actual arguments. A failure at + // this stage means the JSON is not compatible with the target. - cases := []struct { - raw []string - height int64 - name string - fail bool - }{ - // can parse numbers unquoted and strings quoted - {[]string{"7", `"flew"`}, 7, "flew", false}, - {[]string{"22", `"john"`}, 22, "john", false}, - {[]string{"-10", `"bob"`}, -10, "bob", false}, - // can parse numbers quoted, too - {[]string{`"7"`, `"flew"`}, 7, "flew", false}, - {[]string{`"-10"`, `"bob"`}, -10, "bob", false}, - // cant parse strings uquoted - {[]string{`"-10"`, `bob`}, -10, "bob", true}, - } - for idx, tc := range cases { - i := strconv.Itoa(idx) - // data := []byte(tc.raw) - url := fmt.Sprintf( - "test.com/method?height=%v&name=%v", - tc.raw[0], tc.raw[1]) - req, err := http.NewRequest("GET", url, nil) - assert.NoError(t, err) - vals, err := httpParamsToArgs(call, req) - if tc.fail { - assert.NotNil(t, err, i) - } else { - assert.Nil(t, err, "%s: %+v", i, err) - if assert.Equal(t, 2, len(vals), i) { - assert.Equal(t, tc.height, vals[0].Int(), i) - assert.Equal(t, tc.name, vals[1].String(), i) - } + t.Run("Swizzle", func(t *testing.T) { + tests := []struct { + name string + url string + args []argInfo + want string + fail bool + }{ + { + name: "quoted numbers and strings", + url: `http://localhost?num="7"&str="flew"&neg="-10"`, + args: []argInfo{{name: "neg"}, {name: "num"}, {name: "str"}, {name: "other"}}, + want: `{"neg":-10,"num":7,"str":"flew"}`, + }, + { + name: "unquoted numbers and strings", + url: `http://localhost?num1=7&str1=cabbage&num2=-199&str2=hey+you`, + args: []argInfo{{name: "num1"}, {name: "num2"}, {name: "str1"}, {name: "str2"}, {name: "other"}}, + want: `{"num1":7,"num2":-199,"str1":"cabbage","str2":"hey you"}`, + }, + { + name: "quoted byte strings", + url: `http://localhost?left="Fahrvergnügen"&right="Applesauce"`, + args: []argInfo{{name: "left", isBinary: true}, {name: "right", isBinary: false}}, + want: `{"left":"RmFocnZlcmduw7xnZW4=","right":"Applesauce"}`, + }, + { + name: "hexadecimal byte strings", + url: `http://localhost?lower=0x626f62&upper=0X646F7567`, + args: []argInfo{{name: "upper", isBinary: true}, {name: "lower", isBinary: false}, {name: "other"}}, + want: `{"lower":"bob","upper":"ZG91Zw=="}`, + }, + { + name: "invalid hex odd length", + url: `http://localhost?bad=0xa`, + args: []argInfo{{name: "bad"}, {name: "superbad"}}, + fail: true, + }, + { + name: "invalid hex empty", 
+ url: `http://localhost?bad=0x`, + args: []argInfo{{name: "bad"}}, + fail: true, + }, + { + name: "invalid quoted string", + url: `http://localhost?bad="double""`, + args: []argInfo{{name: "bad"}}, + fail: true, + }, } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + hreq, err := http.NewRequest("GET", test.url, nil) + if err != nil { + t.Fatalf("NewRequest for %q: %v", test.url, err) + } - } + bits, err := parseURLParams(test.args, hreq) + if err != nil && !test.fail { + t.Fatalf("Parse %q: unexpected error: %v", test.url, err) + } else if err == nil && test.fail { + t.Fatalf("Parse %q: got %#q, wanted error", test.url, string(bits)) + } + if got := string(bits); got != test.want { + t.Errorf("Parse %q: got %#q, want %#q", test.url, got, test.want) + } + }) + } + }) + + t.Run("Decode", func(t *testing.T) { + type argValue struct { + Height json.Number `json:"height"` + Name string `json:"name"` + Flag bool `json:"flag"` + } + + echo := NewRPCFunc(func(_ context.Context, arg *argValue) (*argValue, error) { + return arg, nil + }) + + tests := []struct { + name string + url string + fail string + want interface{} + }{ + { + name: "valid all args", + url: `http://localhost?height=235&flag=true&name="bogart"`, + want: &argValue{ + Height: "235", + Flag: true, + Name: "bogart", + }, + }, + { + name: "valid partial args", + url: `http://localhost?height="1987"&name=free+willy`, + want: &argValue{ + Height: "1987", + Name: "free willy", + }, + }, + { + name: "invalid quoted number", + url: `http://localhost?height="-xx"`, + fail: "invalid number literal", + }, + { + name: "invalid unquoted number", + url: `http://localhost?height=25*q`, + fail: "invalid number literal", + }, + { + name: "invalid boolean", + url: `http://localhost?flag="garbage"`, + fail: "flag of type bool", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + hreq, err := http.NewRequest("GET", test.url, nil) + if err != nil { + t.Fatalf("NewRequest for %q: %v", test.url, err) + } + bits, err := parseURLParams(echo.args, hreq) + if err != nil { + t.Fatalf("Parse %#q: unexpected error: %v", test.url, err) + } + rsp, err := echo.Call(context.Background(), bits) + if test.want != nil { + assert.Equal(t, test.want, rsp) + } + if test.fail != "" { + assert.ErrorContains(t, err, test.fail) + } + }) + } + }) } diff --git a/rpc/jsonrpc/server/rpc_func.go b/rpc/jsonrpc/server/rpc_func.go index 24f3c89761..8eba287283 100644 --- a/rpc/jsonrpc/server/rpc_func.go +++ b/rpc/jsonrpc/server/rpc_func.go @@ -1,100 +1,247 @@ package server import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" "net/http" "reflect" "strings" "github.com/tendermint/tendermint/libs/log" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) -// RegisterRPCFuncs adds a route for each function in the funcMap, as well as -// general jsonrpc and websocket handlers for all functions. "result" is the -// interface on which the result objects are registered, and is popualted with -// every RPCResponse +// RegisterRPCFuncs adds a route to mux for each non-websocket function in the +// funcMap, and also a root JSON-RPC POST handler. 
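+//
+// A typical setup registers a function map on a fresh mux and serves it
+// (a sketch; the "echo" method, echoArgs type, and logger are assumed):
+//
+//	funcs := map[string]*RPCFunc{
+//		"echo": NewRPCFunc(func(ctx context.Context, arg *echoArgs) (string, error) {
+//			return arg.Msg, nil
+//		}),
+//	}
+//	mux := http.NewServeMux()
+//	RegisterRPCFuncs(mux, funcs, logger)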
func RegisterRPCFuncs(mux *http.ServeMux, funcMap map[string]*RPCFunc, logger log.Logger) { - // HTTP endpoints - for funcName, rpcFunc := range funcMap { - mux.HandleFunc("/"+funcName, makeHTTPHandler(rpcFunc, logger)) + for name, fn := range funcMap { + if fn.ws { + continue // skip websocket endpoints, not usable via GET calls + } + mux.HandleFunc("/"+name, makeHTTPHandler(fn, logger)) } - // JSONRPC endpoints + // Endpoints for POST. mux.HandleFunc("/", handleInvalidJSONRPCPaths(makeJSONRPCHandler(funcMap, logger))) } // Function introspection -// RPCFunc contains the introspected type information for a function +// RPCFunc contains the introspected type information for a function. type RPCFunc struct { - f reflect.Value // underlying rpc function - args []reflect.Type // type of each function arg - returns []reflect.Type // type of each return arg - argNames []string // name of each argument - ws bool // websocket only - cache bool // allow the RPC response can be cached by the proxy cache server + f reflect.Value // underlying rpc function + param reflect.Type // the parameter struct, or nil + result reflect.Type // the non-error result type, or nil + args []argInfo // names and type information (for URL decoding) + ws bool // websocket only } -// NewRPCFunc wraps a function for introspection. -// f is the function, args are comma separated argument names -// cache is a bool value to allow the client proxy server to cache the RPC results -func NewRPCFunc(f interface{}, args string, cache bool) *RPCFunc { - return newRPCFunc(f, args, false, cache) +// argInfo records the name of a field, along with a bit to tell whether the +// value of the field requires binary data, having underlying type []byte. The +// flag is needed when decoding URL parameters, where we permit quoted strings +// to be passed for either argument type. +type argInfo struct { + name string + isBinary bool // value wants binary data } -// NewWSRPCFunc wraps a function for introspection and use in the websockets. -func NewWSRPCFunc(f interface{}, args string) *RPCFunc { - return newRPCFunc(f, args, true, false) +// Call parses the given JSON parameters and calls the function wrapped by rf +// with the resulting argument value. It reports an error if parameter parsing +// fails, otherwise it returns the result from the wrapped function. +func (rf *RPCFunc) Call(ctx context.Context, params json.RawMessage) (interface{}, error) { + args, err := rf.parseParams(ctx, params) + if err != nil { + return nil, err + } + returns := rf.f.Call(args) + + // Case 1: There is no non-error result type. + if rf.result == nil { + if oerr := returns[0].Interface(); oerr != nil { + return nil, oerr.(error) + } + return nil, nil + } + + // Case 2: There is a non-error result. + if oerr := returns[1].Interface(); oerr != nil { + // In case of error, report the error and ignore the result. + return nil, oerr.(error) + } + return returns[0].Interface(), nil } -func newRPCFunc(f interface{}, args string, ws bool, c bool) *RPCFunc { - var argNames []string - if args != "" { - argNames = strings.Split(args, ",") +// parseParams parses the parameters of a JSON-RPC request and returns the +// corresponding argument values. On success, the first argument value will be +// the value of ctx. +func (rf *RPCFunc) parseParams(ctx context.Context, params json.RawMessage) ([]reflect.Value, error) { + // If rf does not accept parameters, there is no decoding to do, but verify + // that no parameters were passed. 
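+	// Here "no parameters" is interpreted loosely: an empty message, null, {},
+	// and [] are all accepted (see isNullOrEmpty); anything else reports an
+	// invalid-params error.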
+ if rf.param == nil { + if !isNullOrEmpty(params) { + return nil, invalidParamsError("no parameters accepted for this method") + } + return []reflect.Value{reflect.ValueOf(ctx)}, nil } - return &RPCFunc{ - f: reflect.ValueOf(f), - args: funcArgTypes(f), - returns: funcReturnTypes(f), - argNames: argNames, - ws: ws, - cache: c, + bits, err := rf.adjustParams(params) + if err != nil { + return nil, invalidParamsError(err.Error()) + } + arg := reflect.New(rf.param) + if err := json.Unmarshal(bits, arg.Interface()); err != nil { + return nil, invalidParamsError(err.Error()) } + return []reflect.Value{reflect.ValueOf(ctx), arg}, nil } -// return a function's argument types -func funcArgTypes(f interface{}) []reflect.Type { - t := reflect.TypeOf(f) - n := t.NumIn() - typez := make([]reflect.Type, n) - for i := 0; i < n; i++ { - typez[i] = t.In(i) +// adjustParams checks whether data is encoded as a JSON array, and if so +// adjusts the values to match the corresponding parameter names. +func (rf *RPCFunc) adjustParams(data []byte) (json.RawMessage, error) { + base := bytes.TrimSpace(data) + if bytes.HasPrefix(base, []byte("[")) { + var args []json.RawMessage + if err := json.Unmarshal(base, &args); err != nil { + return nil, err + } else if len(args) != len(rf.args) { + return nil, fmt.Errorf("got %d arguments, want %d", len(args), len(rf.args)) + } + m := make(map[string]json.RawMessage) + for i, arg := range args { + m[rf.args[i].name] = arg + } + return json.Marshal(m) + } else if bytes.HasPrefix(base, []byte("{")) || bytes.Equal(base, []byte("null")) { + return base, nil } - return typez + return nil, errors.New("parameters must be an object or an array") + } -// return a function's return types -func funcReturnTypes(f interface{}) []reflect.Type { - t := reflect.TypeOf(f) - n := t.NumOut() - typez := make([]reflect.Type, n) - for i := 0; i < n; i++ { - typez[i] = t.Out(i) +// NewRPCFunc constructs an RPCFunc for f, which must be a function whose type +// signature matches one of these schemes: +// +// func(context.Context) error +// func(context.Context) (R, error) +// func(context.Context, *T) error +// func(context.Context, *T) (R, error) +// +// for an arbitrary struct type T and type R. NewRPCFunc will panic if f does +// not have one of these forms. +func NewRPCFunc(f interface{}) *RPCFunc { + rf, err := newRPCFunc(f) + if err != nil { + panic("invalid RPC function: " + err.Error()) } - return typez + return rf } -//------------------------------------------------------------- +// NewWSRPCFunc behaves as NewRPCFunc, but marks the resulting function for use +// via websocket. +func NewWSRPCFunc(f interface{}) *RPCFunc { + rf := NewRPCFunc(f) + rf.ws = true + return rf +} -// NOTE: assume returns is result struct and error. If error is not nil, return it -func unreflectResult(returns []reflect.Value) (interface{}, error) { - errV := returns[1] - if err, ok := errV.Interface().(error); ok && err != nil { - return nil, err +var ( + ctxType = reflect.TypeOf((*context.Context)(nil)).Elem() + errType = reflect.TypeOf((*error)(nil)).Elem() +) + +// newRPCFunc constructs an RPCFunc for f. See the comment at NewRPCFunc. +func newRPCFunc(f interface{}) (*RPCFunc, error) { + if f == nil { + return nil, errors.New("nil function") + } + + // Check the type and signature of f. 
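+	// For example, f = func(ctx context.Context, arg *T) (R, error) yields
+	// param type T and result type R; the one-parameter and error-only forms
+	// leave the corresponding fields nil.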
+ fv := reflect.ValueOf(f) + if fv.Kind() != reflect.Func { + return nil, errors.New("not a function") } - rv := returns[0] - // the result is a registered interface, - // we need a pointer to it so we can marshal with type byte - rvp := reflect.New(rv.Type()) - rvp.Elem().Set(rv) - return rvp.Interface(), nil + + var ptype reflect.Type + ft := fv.Type() + if np := ft.NumIn(); np == 0 || np > 2 { + return nil, errors.New("wrong number of parameters") + } else if ft.In(0) != ctxType { + return nil, errors.New("first parameter is not context.Context") + } else if np == 2 { + ptype = ft.In(1) + if ptype.Kind() != reflect.Ptr { + return nil, errors.New("parameter type is not a pointer") + } + ptype = ptype.Elem() + if ptype.Kind() != reflect.Struct { + return nil, errors.New("parameter type is not a struct") + } + } + + var rtype reflect.Type + if no := ft.NumOut(); no < 1 || no > 2 { + return nil, errors.New("wrong number of results") + } else if ft.Out(no-1) != errType { + return nil, errors.New("last result is not error") + } else if no == 2 { + rtype = ft.Out(0) + } + + var args []argInfo + if ptype != nil { + for i := 0; i < ptype.NumField(); i++ { + field := ptype.Field(i) + if tag := strings.SplitN(field.Tag.Get("json"), ",", 2)[0]; tag != "" && tag != "-" { + args = append(args, argInfo{ + name: tag, + isBinary: isByteArray(field.Type), + }) + } else if tag == "-" { + // If the tag is "-" the field should explicitly be ignored, even + // if it is otherwise eligible. + } else if field.IsExported() && !field.Anonymous { + // Examples: Name → name, MaxEffort → maxEffort. + // Note that this is an aesthetic choice; the standard decoder will + // match without regard to case anyway. + name := strings.ToLower(field.Name[:1]) + field.Name[1:] + args = append(args, argInfo{ + name: name, + isBinary: isByteArray(field.Type), + }) + } + } + } + + return &RPCFunc{ + f: fv, + param: ptype, + result: rtype, + args: args, + }, nil +} + +// invalidParamsError returns an RPC invalid parameters error with the given +// detail message. +func invalidParamsError(msg string, args ...interface{}) error { + return &rpctypes.RPCError{ + Code: int(rpctypes.CodeInvalidParams), + Message: rpctypes.CodeInvalidParams.String(), + Data: fmt.Sprintf(msg, args...), + } +} + +// isNullOrEmpty reports whether params is either itself empty or represents an +// empty parameter (null, empty object, or empty array). +func isNullOrEmpty(params json.RawMessage) bool { + return len(params) == 0 || + bytes.Equal(params, []byte("null")) || + bytes.Equal(params, []byte("{}")) || + bytes.Equal(params, []byte("[]")) +} + +// isByteArray reports whether t is (equivalent to) []byte. 
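
The signature and field-name rules enforced above can be summarized with a small example; the types, tags, and handler names here are illustrative only:

```go
package example

import "context"

type DropArgs struct {
	Key string `json:"key"`
}

type EchoArgs struct {
	Text     string `json:"text"` // wire name taken from the json tag
	MaxCount int    // untagged exported field: name becomes "maxCount"
	Skip     string `json:"-"` // explicitly excluded from argument decoding
}

// The four shapes NewRPCFunc accepts:
func Ping(ctx context.Context) error                           { return nil }
func Time(ctx context.Context) (string, error)                 { return "now", nil }
func Drop(ctx context.Context, args *DropArgs) error           { return nil }
func Echo(ctx context.Context, args *EchoArgs) (string, error) { return args.Text, nil }
```

Anything else, such as extra parameters, a non-pointer or non-struct argument, or a missing trailing error result, makes `NewRPCFunc` panic, per the wrapper above.
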
+func isByteArray(t reflect.Type) bool { + return t.Kind() == reflect.Slice && t.Elem().Kind() == reflect.Uint8 } diff --git a/rpc/jsonrpc/server/ws_handler.go b/rpc/jsonrpc/server/ws_handler.go index 2271d03f86..3a259757b9 100644 --- a/rpc/jsonrpc/server/ws_handler.go +++ b/rpc/jsonrpc/server/ws_handler.go @@ -3,18 +3,14 @@ package server import ( "context" "encoding/json" - "errors" "fmt" "net/http" - "reflect" "runtime/debug" "time" "github.com/gorilla/websocket" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/rpc/client" - "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) @@ -40,10 +36,7 @@ type WebsocketManager struct { // NewWebsocketManager returns a new WebsocketManager that passes a map of // functions, connection options and logger to new WS connections. -func NewWebsocketManager( - funcMap map[string]*RPCFunc, - wsConnOptions ...func(*wsConnection), -) *WebsocketManager { +func NewWebsocketManager(logger log.Logger, funcMap map[string]*RPCFunc, wsConnOptions ...func(*wsConnection)) *WebsocketManager { return &WebsocketManager{ funcMap: funcMap, Upgrader: websocket.Upgrader{ @@ -60,22 +53,18 @@ func NewWebsocketManager( return true }, }, - logger: log.NewNopLogger(), + logger: logger, wsConnOptions: wsConnOptions, } } -// SetLogger sets the logger. -func (wm *WebsocketManager) SetLogger(l log.Logger) { - wm.logger = l -} - // WebsocketHandler upgrades the request/response (via http.Hijack) and starts // the wsConnection. func (wm *WebsocketManager) WebsocketHandler(w http.ResponseWriter, r *http.Request) { wsConn, err := wm.Upgrade(w, r, nil) if err != nil { - // TODO - return http error + // The upgrader has already reported an HTTP error to the client, so we + // need only log it. wm.logger.Error("Failed to upgrade connection", "err", err) return } @@ -87,14 +76,17 @@ func (wm *WebsocketManager) WebsocketHandler(w http.ResponseWriter, r *http.Requ // register connection logger := wm.logger.With("remote", wsConn.RemoteAddr()) - con := newWSConnection(wsConn, wm.funcMap, logger, wm.wsConnOptions...) - wm.logger.Info("New websocket connection", "remote", con.remoteAddr) - err = con.Start() // BLOCKING - if err != nil { + conn := newWSConnection(wsConn, wm.funcMap, logger, wm.wsConnOptions...) + wm.logger.Info("New websocket connection", "remote", conn.remoteAddr) + + // starting the conn is blocking + if err = conn.Start(r.Context()); err != nil { wm.logger.Error("Failed to start connection", "err", err) + writeInternalError(w, err) return } - if err := con.Stop(); err != nil { + + if err := conn.Stop(); err != nil { wm.logger.Error("error while stopping connection", "error", err) } } @@ -106,7 +98,7 @@ func (wm *WebsocketManager) WebsocketHandler(w http.ResponseWriter, r *http.Requ // // In case of an error, the connection is stopped. type wsConnection struct { - *client.RunState + Logger log.Logger remoteAddr string baseConn *websocket.Conn @@ -119,12 +111,6 @@ type wsConnection struct { funcMap map[string]*RPCFunc - // write channel capacity - writeChanCapacity int - - // each write times out after this. - writeWait time.Duration - // Connection times out if we haven't received *anything* in this long, not even pings. readWait time.Duration @@ -147,22 +133,15 @@ type wsConnection struct { // description of how to configure ping period and pong wait time. NOTE: if the // write buffer is full, pongs may be dropped, which may cause clients to // disconnect. 
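
For context, a minimal sketch of wiring the reworked constructor, which now takes the logger up front instead of a separate `SetLogger` call; the route table is left empty and the listen address is an assumption:

```go
package main

import (
	"net/http"

	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/rpc/jsonrpc/server"
)

func main() {
	logger := log.NewNopLogger()

	// An empty route table keeps the sketch small; real servers register
	// RPCFunc values here (see NewRPCFunc / NewWSRPCFunc above).
	funcMap := map[string]*server.RPCFunc{}

	wm := server.NewWebsocketManager(logger, funcMap)
	mux := http.NewServeMux()
	mux.HandleFunc("/websocket", wm.WebsocketHandler)
	_ = http.ListenAndServe("localhost:26657", mux)
}
```
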
see https://github.com/gorilla/websocket/issues/97 -func newWSConnection( - baseConn *websocket.Conn, - funcMap map[string]*RPCFunc, - logger log.Logger, - options ...func(*wsConnection), -) *wsConnection { +func newWSConnection(baseConn *websocket.Conn, funcMap map[string]*RPCFunc, logger log.Logger, options ...func(*wsConnection)) *wsConnection { wsc := &wsConnection{ - RunState: client.NewRunState("wsConnection", logger), - remoteAddr: baseConn.RemoteAddr().String(), - baseConn: baseConn, - funcMap: funcMap, - writeWait: defaultWSWriteWait, - writeChanCapacity: defaultWSWriteChanCapacity, - readWait: defaultWSReadWait, - pingPeriod: defaultWSPingPeriod, - readRoutineQuit: make(chan struct{}), + Logger: logger, + remoteAddr: baseConn.RemoteAddr().String(), + baseConn: baseConn, + funcMap: funcMap, + readWait: defaultWSReadWait, + pingPeriod: defaultWSPingPeriod, + readRoutineQuit: make(chan struct{}), } for _, option := range options { option(wsc) @@ -179,22 +158,6 @@ func OnDisconnect(onDisconnect func(remoteAddr string)) func(*wsConnection) { } } -// WriteWait sets the amount of time to wait before a websocket write times out. -// It should only be used in the constructor - not Goroutine-safe. -func WriteWait(writeWait time.Duration) func(*wsConnection) { - return func(wsc *wsConnection) { - wsc.writeWait = writeWait - } -} - -// WriteChanCapacity sets the capacity of the websocket write channel. -// It should only be used in the constructor - not Goroutine-safe. -func WriteChanCapacity(cap int) func(*wsConnection) { - return func(wsc *wsConnection) { - wsc.writeChanCapacity = cap - } -} - // ReadWait sets the amount of time to wait before a websocket read times out. // It should only be used in the constructor - not Goroutine-safe. func ReadWait(readWait time.Duration) func(*wsConnection) { @@ -220,25 +183,19 @@ func ReadLimit(readLimit int64) func(*wsConnection) { } // Start starts the client service routines and blocks until there is an error. -func (wsc *wsConnection) Start() error { - if err := wsc.RunState.Start(); err != nil { - return err - } - wsc.writeChan = make(chan rpctypes.RPCResponse, wsc.writeChanCapacity) +func (wsc *wsConnection) Start(ctx context.Context) error { + wsc.writeChan = make(chan rpctypes.RPCResponse, defaultWSWriteChanCapacity) // Read subscriptions/unsubscriptions to events - go wsc.readRoutine() + go wsc.readRoutine(ctx) // Write responses, BLOCKING. - wsc.writeRoutine() + wsc.writeRoutine(ctx) return nil } // Stop unsubscribes the remote from all subscriptions. func (wsc *wsConnection) Stop() error { - if err := wsc.RunState.Stop(); err != nil { - return err - } if wsc.onDisconnect != nil { wsc.onDisconnect(wsc.remoteAddr) } @@ -259,8 +216,6 @@ func (wsc *wsConnection) GetRemoteAddr() string { // It implements WSRPCConnection. It is Goroutine-safe. func (wsc *wsConnection) WriteRPCResponse(ctx context.Context, resp rpctypes.RPCResponse) error { select { - case <-wsc.Quit(): - return errors.New("connection was stopped") case <-ctx.Done(): return ctx.Err() case wsc.writeChan <- resp: @@ -271,9 +226,9 @@ func (wsc *wsConnection) WriteRPCResponse(ctx context.Context, resp rpctypes.RPC // TryWriteRPCResponse attempts to push a response to the writeChan, but does // not block. // It implements WSRPCConnection. 
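
The surviving functional options can still be threaded through the manager; a hedged sketch, with tuning values chosen purely for illustration:

```go
package example

import (
	"time"

	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/rpc/jsonrpc/server"
)

func newManager(logger log.Logger, funcMap map[string]*server.RPCFunc) *server.WebsocketManager {
	// WriteWait and WriteChanCapacity are gone; what remains is read-side
	// tuning plus a disconnect hook.
	return server.NewWebsocketManager(logger, funcMap,
		server.ReadWait(45*time.Second),
		server.ReadLimit(1<<20), // maximum message size, in bytes
		server.OnDisconnect(func(remote string) {
			logger.Info("websocket client disconnected", "remote", remote)
		}),
	)
}
```
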
It is Goroutine-safe -func (wsc *wsConnection) TryWriteRPCResponse(resp rpctypes.RPCResponse) bool { +func (wsc *wsConnection) TryWriteRPCResponse(ctx context.Context, resp rpctypes.RPCResponse) bool { select { - case <-wsc.Quit(): + case <-ctx.Done(): return false case wsc.writeChan <- resp: return true @@ -293,7 +248,7 @@ func (wsc *wsConnection) Context() context.Context { } // Read from the socket and subscribe to or unsubscribe from events -func (wsc *wsConnection) readRoutine() { +func (wsc *wsConnection) readRoutine(ctx context.Context) { // readRoutine will block until response is written or WS connection is closed writeCtx := context.Background() @@ -303,11 +258,13 @@ func (wsc *wsConnection) readRoutine() { if !ok { err = fmt.Errorf("WSJSONRPC: %v", r) } + req := rpctypes.NewRequest(uriReqID) wsc.Logger.Error("Panic in WSJSONRPC handler", "err", err, "stack", string(debug.Stack())) - if err := wsc.WriteRPCResponse(writeCtx, rpctypes.RPCInternalError(rpctypes.JSONRPCIntID(-1), err)); err != nil { - wsc.Logger.Error("Error writing RPC response", "err", err) + if err := wsc.WriteRPCResponse(writeCtx, + req.MakeErrorf(rpctypes.CodeInternalError, "Panic in handler: %v", err)); err != nil { + wsc.Logger.Error("error writing RPC response", "err", err) } - go wsc.readRoutine() + go wsc.readRoutine(ctx) } }() @@ -317,7 +274,7 @@ func (wsc *wsConnection) readRoutine() { for { select { - case <-wsc.Quit(): + case <-ctx.Done(): return default: // reset deadline for every type of message (control or data) @@ -333,7 +290,7 @@ func (wsc *wsConnection) readRoutine() { wsc.Logger.Error("Failed to read request", "err", err) } if err := wsc.Stop(); err != nil { - wsc.Logger.Error("Error closing websocket connection", "err", err) + wsc.Logger.Error("error closing websocket connection", "err", err) } close(wsc.readRoutineQuit) return @@ -344,15 +301,15 @@ func (wsc *wsConnection) readRoutine() { err = dec.Decode(&request) if err != nil { if err := wsc.WriteRPCResponse(writeCtx, - rpctypes.RPCParseError(fmt.Errorf("error unmarshaling request: %w", err))); err != nil { - wsc.Logger.Error("Error writing RPC response", "err", err) + request.MakeErrorf(rpctypes.CodeParseError, "unmarshaling request: %v", err)); err != nil { + wsc.Logger.Error("error writing RPC response", "err", err) } continue } // A Notification is a Request object without an "id" member. // The Server MUST NOT reply to a Notification, including those that are within a batch request. - if request.ID == nil { + if request.IsNotification() { wsc.Logger.Debug( "WSJSONRPC received a notification, skipping... (please send a non-empty ID if you want to call a method)", "req", request, @@ -363,66 +320,33 @@ func (wsc *wsConnection) readRoutine() { // Now, fetch the RPCFunc and execute it. 
rpcFunc := wsc.funcMap[request.Method] if rpcFunc == nil { - if err := wsc.WriteRPCResponse(writeCtx, rpctypes.RPCMethodNotFoundError(request.ID)); err != nil { - wsc.Logger.Error("Error writing RPC response", "err", err) + if err := wsc.WriteRPCResponse(writeCtx, + request.MakeErrorf(rpctypes.CodeMethodNotFound, request.Method)); err != nil { + wsc.Logger.Error("error writing RPC response", "err", err) } continue } - ctx := &rpctypes.Context{JSONReq: &request, WSConn: wsc} - args := []reflect.Value{reflect.ValueOf(ctx)} - if len(request.Params) > 0 { - fnArgs, err := jsonParamsToArgs(rpcFunc, request.Params) - if err != nil { - if err := wsc.WriteRPCResponse(writeCtx, - rpctypes.RPCInvalidParamsError(request.ID, fmt.Errorf("error converting json params to arguments: %w", err)), - ); err != nil { - wsc.Logger.Error("Error writing RPC response", "err", err) - } - continue - } - args = append(args, fnArgs...) - } - - returns := rpcFunc.f.Call(args) - - // TODO: Need to encode args/returns to string if we want to log them - wsc.Logger.Info("WSJSONRPC", "method", request.Method) - + fctx := rpctypes.WithCallInfo(wsc.Context(), &rpctypes.CallInfo{ + RPCRequest: &request, + WSConn: wsc, + }) var resp rpctypes.RPCResponse - result, err := unreflectResult(returns) - switch e := err.(type) { - // if no error then return a success response - case nil: - resp = rpctypes.NewRPCSuccessResponse(request.ID, result) - - // if this already of type RPC error then forward that error - case *rpctypes.RPCError: - resp = rpctypes.NewRPCErrorResponse(request.ID, e.Code, e.Message, e.Data) - - default: // we need to unwrap the error and parse it accordingly - switch errors.Unwrap(err) { - // check if the error was due to an invald request - case coretypes.ErrZeroOrNegativeHeight, coretypes.ErrZeroOrNegativePerPage, - coretypes.ErrPageOutOfRange, coretypes.ErrInvalidRequest: - resp = rpctypes.RPCInvalidRequestError(request.ID, err) - - // lastly default all remaining errors as internal errors - default: // includes ctypes.ErrHeightNotAvailable and ctypes.ErrHeightExceedsChainHead - resp = rpctypes.RPCInternalError(request.ID, err) - } + result, err := rpcFunc.Call(fctx, request.Params) + if err == nil { + resp = request.MakeResponse(result) + } else { + resp = request.MakeError(err) } - if err := wsc.WriteRPCResponse(writeCtx, resp); err != nil { - wsc.Logger.Error("Error writing RPC response", "err", err) + wsc.Logger.Error("error writing RPC response", "err", err) } - } } } // receives on a write channel and writes out on the socket -func (wsc *wsConnection) writeRoutine() { +func (wsc *wsConnection) writeRoutine(ctx context.Context) { pingTicker := time.NewTicker(wsc.pingPeriod) defer pingTicker.Stop() @@ -438,7 +362,7 @@ func (wsc *wsConnection) writeRoutine() { for { select { - case <-wsc.Quit(): + case <-ctx.Done(): return case <-wsc.readRoutineQuit: // error in readRoutine return @@ -454,13 +378,13 @@ func (wsc *wsConnection) writeRoutine() { return } case msg := <-wsc.writeChan: - jsonBytes, err := json.MarshalIndent(msg, "", " ") + data, err := json.Marshal(msg) if err != nil { - wsc.Logger.Error("Failed to marshal RPCResponse to JSON", "err", err) + wsc.Logger.Error("Failed to marshal RPCResponse to JSON", "msg", msg, "err", err) continue } - if err = wsc.writeMessageWithDeadline(websocket.TextMessage, jsonBytes); err != nil { - wsc.Logger.Error("Failed to write response", "err", err, "msg", msg) + if err = wsc.writeMessageWithDeadline(websocket.TextMessage, data); err != nil { + wsc.Logger.Error("Failed 
to write response", "msg", msg, "err", err) return } } @@ -471,7 +395,7 @@ func (wsc *wsConnection) writeRoutine() { // If some writes don't set it while others do, they may timeout incorrectly // (https://github.com/tendermint/tendermint/issues/553) func (wsc *wsConnection) writeMessageWithDeadline(msgType int, msg []byte) error { - if err := wsc.baseConn.SetWriteDeadline(time.Now().Add(wsc.writeWait)); err != nil { + if err := wsc.baseConn.SetWriteDeadline(time.Now().Add(defaultWSWriteWait)); err != nil { return err } return wsc.baseConn.WriteMessage(msgType, msg) diff --git a/rpc/jsonrpc/server/ws_handler_test.go b/rpc/jsonrpc/server/ws_handler_test.go index b691172a40..ce1bcd9737 100644 --- a/rpc/jsonrpc/server/ws_handler_test.go +++ b/rpc/jsonrpc/server/ws_handler_test.go @@ -1,10 +1,13 @@ package server import ( + "context" + "encoding/json" "net/http" "net/http/httptest" "testing" + "github.com/fortytw2/leaktest" "github.com/gorilla/websocket" "github.com/stretchr/testify/require" @@ -13,9 +16,13 @@ import ( ) func TestWebsocketManagerHandler(t *testing.T) { - s := newWSServer() + logger := log.NewNopLogger() + + s := newWSServer(t, logger) defer s.Close() + t.Cleanup(leaktest.Check(t)) + // check upgrader works d := websocket.Dialer{} c, dialResp, err := d.Dial("ws://"+s.Listener.Addr().String()+"/websocket", nil) @@ -26,14 +33,9 @@ func TestWebsocketManagerHandler(t *testing.T) { } // check basic functionality works - req, err := rpctypes.MapToRequest( - rpctypes.JSONRPCStringID("TestWebsocketManager"), - "c", - map[string]interface{}{"s": "a", "i": 10}, - ) - require.NoError(t, err) - err = c.WriteJSON(req) - require.NoError(t, err) + req := rpctypes.NewRequest(1001) + require.NoError(t, req.SetMethodAndParams("c", map[string]interface{}{"s": "a", "i": 10})) + require.NoError(t, c.WriteJSON(req)) var resp rpctypes.RPCResponse err = c.ReadJSON(&resp) @@ -42,15 +44,22 @@ func TestWebsocketManagerHandler(t *testing.T) { dialResp.Body.Close() } -func newWSServer() *httptest.Server { +func newWSServer(t *testing.T, logger log.Logger) *httptest.Server { + type args struct { + S string `json:"s"` + I json.Number `json:"i"` + } funcMap := map[string]*RPCFunc{ - "c": NewWSRPCFunc(func(ctx *rpctypes.Context, s string, i int) (string, error) { return "foo", nil }, "s,i"), + "c": NewWSRPCFunc(func(context.Context, *args) (string, error) { return "foo", nil }), } - wm := NewWebsocketManager(funcMap) - wm.SetLogger(log.TestingLogger()) + wm := NewWebsocketManager(logger, funcMap) mux := http.NewServeMux() mux.HandleFunc("/websocket", wm.WebsocketHandler) - return httptest.NewServer(mux) + srv := httptest.NewServer(mux) + + t.Cleanup(srv.Close) + + return srv } diff --git a/rpc/jsonrpc/test/main.go b/rpc/jsonrpc/test/main.go index d348e1639c..2ed013c177 100644 --- a/rpc/jsonrpc/test/main.go +++ b/rpc/jsonrpc/test/main.go @@ -1,20 +1,23 @@ package main import ( + "context" "fmt" + stdlog "log" "net/http" + "os" + "os/signal" + "syscall" "github.com/tendermint/tendermint/libs/log" - tmos "github.com/tendermint/tendermint/libs/os" rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server" - rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) var routes = map[string]*rpcserver.RPCFunc{ - "hello_world": rpcserver.NewRPCFunc(HelloWorld, "name,num", false), + "hello_world": rpcserver.NewRPCFunc(HelloWorld), } -func HelloWorld(ctx *rpctypes.Context, name string, num int) (Result, error) { +func HelloWorld(ctx context.Context, name string, num int) (Result, error) { return 
Result{fmt.Sprintf("hi %s %d", name, num)}, nil } @@ -23,22 +26,24 @@ type Result struct { } func main() { - var ( - mux = http.NewServeMux() - logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false) - ) + mux := http.NewServeMux() - // Stop upon receiving SIGTERM or CTRL-C. - tmos.TrapSignal(logger, func() {}) + logger, err := log.NewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo) + if err != nil { + stdlog.Fatalf("configuring logger: %v", err) + } + + ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM) + defer cancel() rpcserver.RegisterRPCFuncs(mux, routes, logger) config := rpcserver.DefaultConfig() listener, err := rpcserver.Listen("tcp://127.0.0.1:8008", config.MaxOpenConnections) if err != nil { - tmos.Exit(err.Error()) + stdlog.Fatalf("rpc listening: %v", err) } - if err = rpcserver.Serve(listener, mux, logger, config); err != nil { - tmos.Exit(err.Error()) + if err = rpcserver.Serve(ctx, listener, mux, logger, config); err != nil { + logger.Error("rpc serve: %v", err) } } diff --git a/rpc/jsonrpc/types/types.go b/rpc/jsonrpc/types/types.go index 4435c8c5d5..0c0500bf0b 100644 --- a/rpc/jsonrpc/types/types.go +++ b/rpc/jsonrpc/types/types.go @@ -1,137 +1,176 @@ package types import ( + "bytes" "context" "encoding/json" + "errors" "fmt" "net/http" - "reflect" + "regexp" + "strconv" "strings" - tmjson "github.com/tendermint/tendermint/libs/json" + "github.com/tendermint/tendermint/rpc/coretypes" ) -// a wrapper to emulate a sum type: jsonrpcid = string | int -// TODO: refactor when Go 2.0 arrives https://github.com/golang/go/issues/19412 -type jsonrpcid interface { - isJSONRPCID() -} +// ErrorCode is the type of JSON-RPC error codes. +type ErrorCode int -// JSONRPCStringID a wrapper for JSON-RPC string IDs -type JSONRPCStringID string - -func (JSONRPCStringID) isJSONRPCID() {} -func (id JSONRPCStringID) String() string { return string(id) } - -// JSONRPCIntID a wrapper for JSON-RPC integer IDs -type JSONRPCIntID int - -func (JSONRPCIntID) isJSONRPCID() {} -func (id JSONRPCIntID) String() string { return fmt.Sprintf("%d", id) } - -func idFromInterface(idInterface interface{}) (jsonrpcid, error) { - switch id := idInterface.(type) { - case string: - return JSONRPCStringID(id), nil - case float64: - // json.Unmarshal uses float64 for all numbers - // (https://golang.org/pkg/encoding/json/#Unmarshal), - // but the JSONRPC2.0 spec says the id SHOULD NOT contain - // decimals - so we truncate the decimals here. - return JSONRPCIntID(int(id)), nil - default: - typ := reflect.TypeOf(id) - return nil, fmt.Errorf("json-rpc ID (%v) is of unknown type (%v)", id, typ) +func (e ErrorCode) String() string { + if s, ok := errorCodeString[e]; ok { + return s } + return fmt.Sprintf("server error: code %d", e) +} + +// Constants defining the standard JSON-RPC error codes. 
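
Under the `NewRPCFunc` contract shown earlier in this diff, handlers take at most a single pointer-to-struct parameter, so a route like `hello_world` would gather its arguments into a struct. A hedged sketch; the args struct, its tags, and the `Result` field are assumptions:

```go
package example

import (
	"context"
	"fmt"
)

// Result mirrors the response type in the test server above.
type Result struct {
	Result string `json:"result"`
}

type helloArgs struct {
	Name string `json:"name"`
	Num  int    `json:"num"`
}

// HelloWorld in the single-pointer-parameter form that NewRPCFunc accepts.
func HelloWorld(ctx context.Context, args *helloArgs) (Result, error) {
	return Result{fmt.Sprintf("hi %s %d", args.Name, args.Num)}, nil
}
```
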
+const ( + CodeParseError ErrorCode = -32700 // Invalid JSON received by the server + CodeInvalidRequest ErrorCode = -32600 // The JSON sent is not a valid request object + CodeMethodNotFound ErrorCode = -32601 // The method does not exist or is unavailable + CodeInvalidParams ErrorCode = -32602 // Invalid method parameters + CodeInternalError ErrorCode = -32603 // Internal JSON-RPC error +) + +var errorCodeString = map[ErrorCode]string{ + CodeParseError: "Parse error", + CodeInvalidRequest: "Invalid request", + CodeMethodNotFound: "Method not found", + CodeInvalidParams: "Invalid params", + CodeInternalError: "Internal error", } //---------------------------------------- // REQUEST type RPCRequest struct { - JSONRPC string `json:"jsonrpc"` - ID jsonrpcid `json:"id,omitempty"` - Method string `json:"method"` - Params json.RawMessage `json:"params"` // must be map[string]interface{} or []interface{} + id json.RawMessage + + Method string + Params json.RawMessage } -// UnmarshalJSON custom JSON unmarshaling due to jsonrpcid being string or int -func (req *RPCRequest) UnmarshalJSON(data []byte) error { - unsafeReq := struct { - JSONRPC string `json:"jsonrpc"` - ID interface{} `json:"id,omitempty"` - Method string `json:"method"` - Params json.RawMessage `json:"params"` // must be map[string]interface{} or []interface{} - }{} - - err := json.Unmarshal(data, &unsafeReq) - if err != nil { - return err - } +// NewRequest returns an empty request with the specified ID. +func NewRequest(id int) RPCRequest { + return RPCRequest{id: []byte(strconv.Itoa(id))} +} - if unsafeReq.ID == nil { // notification - return nil - } +// ID returns a string representation of the request ID. +func (req RPCRequest) ID() string { return string(req.id) } - req.JSONRPC = unsafeReq.JSONRPC - req.Method = unsafeReq.Method - req.Params = unsafeReq.Params - id, err := idFromInterface(unsafeReq.ID) - if err != nil { +// IsNotification reports whether req is a notification (has an empty ID). +func (req RPCRequest) IsNotification() bool { return len(req.id) == 0 } + +type rpcRequestJSON struct { + V string `json:"jsonrpc"` // must be "2.0" + ID json.RawMessage `json:"id,omitempty"` + M string `json:"method"` + P json.RawMessage `json:"params"` +} + +// isNullOrEmpty reports whether data is empty or the JSON "null" value. +func isNullOrEmpty(data json.RawMessage) bool { + return len(data) == 0 || bytes.Equal(data, []byte("null")) +} + +// validID matches the text of a JSON value that is allowed to serve as a +// JSON-RPC request ID. Precondition: Target value is legal JSON. +var validID = regexp.MustCompile(`^(?:".*"|-?\d+)$`) + +// UnmarshalJSON decodes a request from a JSON-RPC 2.0 request object. +func (req *RPCRequest) UnmarshalJSON(data []byte) error { + var wrapper rpcRequestJSON + if err := json.Unmarshal(data, &wrapper); err != nil { return err + } else if wrapper.V != "" && wrapper.V != "2.0" { + return fmt.Errorf("invalid version: %q", wrapper.V) } - req.ID = id + if !isNullOrEmpty(wrapper.ID) { + if !validID.Match(wrapper.ID) { + return fmt.Errorf("invalid request ID: %q", string(wrapper.ID)) + } + req.id = wrapper.ID + } + req.Method = wrapper.M + req.Params = wrapper.P return nil } -func NewRPCRequest(id jsonrpcid, method string, params json.RawMessage) RPCRequest { - return RPCRequest{ - JSONRPC: "2.0", - ID: id, - Method: method, - Params: params, - } +// MarshalJSON marshals a request with the appropriate version tag. 
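
A short sketch of the decoding rules above, covering notification detection and ID validation:

```go
package main

import (
	"encoding/json"
	"fmt"

	rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
)

func main() {
	// A request without an "id" member is a notification: the server must
	// not reply to it.
	var req rpctypes.RPCRequest
	raw := []byte(`{"jsonrpc":"2.0","method":"status","params":{}}`)
	if err := json.Unmarshal(raw, &req); err != nil {
		panic(err)
	}
	fmt.Println(req.IsNotification()) // true

	// IDs must be strings or integers; anything else fails validID.
	err := json.Unmarshal([]byte(`{"jsonrpc":"2.0","id":true,"method":"x"}`), &req)
	fmt.Println(err != nil) // true
}
```
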
+func (req RPCRequest) MarshalJSON() ([]byte, error) { + return json.Marshal(rpcRequestJSON{ + V: "2.0", + ID: req.id, + M: req.Method, + P: req.Params, + }) } func (req RPCRequest) String() string { - return fmt.Sprintf("RPCRequest{%s %s/%X}", req.ID, req.Method, req.Params) + return fmt.Sprintf("RPCRequest{%s %s/%X}", req.ID(), req.Method, req.Params) } -func MapToRequest(id jsonrpcid, method string, params map[string]interface{}) (RPCRequest, error) { - var paramsMap = make(map[string]json.RawMessage, len(params)) - for name, value := range params { - valueJSON, err := tmjson.Marshal(value) - if err != nil { - return RPCRequest{}, err - } - paramsMap[name] = valueJSON - } - - payload, err := json.Marshal(paramsMap) +// MakeResponse constructs a success response to req with the given result. If +// there is an error marshaling result to JSON, it returns an error response. +func (req RPCRequest) MakeResponse(result interface{}) RPCResponse { + data, err := json.Marshal(result) if err != nil { - return RPCRequest{}, err + return req.MakeErrorf(CodeInternalError, "marshaling result: %v", err) } + return RPCResponse{id: req.id, Result: data} +} - return NewRPCRequest(id, method, payload), nil +// MakeErrorf constructs an error response to req with the given code and a +// message constructed by formatting msg with args. +func (req RPCRequest) MakeErrorf(code ErrorCode, msg string, args ...interface{}) RPCResponse { + return RPCResponse{ + id: req.id, + Error: &RPCError{ + Code: int(code), + Message: code.String(), + Data: fmt.Sprintf(msg, args...), + }, + } } -func ArrayToRequest(id jsonrpcid, method string, params []interface{}) (RPCRequest, error) { - var paramsMap = make([]json.RawMessage, len(params)) - for i, value := range params { - valueJSON, err := tmjson.Marshal(value) - if err != nil { - return RPCRequest{}, err - } - paramsMap[i] = valueJSON +// MakeError constructs an error response to req from the given error value. +// This function will panic if err == nil. +func (req RPCRequest) MakeError(err error) RPCResponse { + if err == nil { + panic("cannot construct an error response for nil") + } + if e, ok := err.(*RPCError); ok { + return RPCResponse{id: req.id, Error: e} + } + if errors.Is(err, coretypes.ErrZeroOrNegativeHeight) || + errors.Is(err, coretypes.ErrZeroOrNegativePerPage) || + errors.Is(err, coretypes.ErrPageOutOfRange) || + errors.Is(err, coretypes.ErrInvalidRequest) { + return RPCResponse{id: req.id, Error: &RPCError{ + Code: int(CodeInvalidRequest), + Message: CodeInvalidRequest.String(), + Data: err.Error(), + }} } + return RPCResponse{id: req.id, Error: &RPCError{ + Code: int(CodeInternalError), + Message: CodeInternalError.String(), + Data: err.Error(), + }} +} - payload, err := json.Marshal(paramsMap) +// SetMethodAndParams updates the method and parameters of req with the given +// values, leaving the ID unchanged. 
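
Concretely, the response constructors produce wire forms like the following; the JSON in the comments follows the marshaling rules above and the expectations in the types tests later in this diff:

```go
package main

import (
	"encoding/json"
	"fmt"

	rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
)

func main() {
	req := rpctypes.NewRequest(1)

	ok, _ := json.Marshal(req.MakeResponse(map[string]string{"value": "hello"}))
	fmt.Println(string(ok))
	// {"jsonrpc":"2.0","id":1,"result":{"value":"hello"}}

	bad, _ := json.Marshal(req.MakeErrorf(rpctypes.CodeInvalidParams, "got %d args, want %d", 1, 2))
	fmt.Println(string(bad))
	// {"jsonrpc":"2.0","id":1,"error":{"code":-32602,"message":"Invalid params","data":"got 1 args, want 2"}}
}
```

Because the error message is always the canonical string for the code, handlers put human-readable detail in the `data` field rather than overriding `message`.
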
+func (req *RPCRequest) SetMethodAndParams(method string, params interface{}) error { + payload, err := json.Marshal(params) if err != nil { - return RPCRequest{}, err + return err } - - return NewRPCRequest(id, method, payload), nil + req.Method = method + req.Params = payload + return nil } //---------------------------------------- @@ -152,96 +191,57 @@ func (err RPCError) Error() string { } type RPCResponse struct { - JSONRPC string `json:"jsonrpc"` - ID jsonrpcid `json:"id,omitempty"` - Result json.RawMessage `json:"result,omitempty"` - Error *RPCError `json:"error,omitempty"` + id json.RawMessage + + Result json.RawMessage + Error *RPCError } -// UnmarshalJSON custom JSON unmarshaling due to jsonrpcid being string or int +// ID returns a representation of the response ID. +func (resp RPCResponse) ID() string { return string(resp.id) } + +type rpcResponseJSON struct { + V string `json:"jsonrpc"` // must be "2.0" + ID json.RawMessage `json:"id,omitempty"` + R json.RawMessage `json:"result,omitempty"` + E *RPCError `json:"error,omitempty"` +} + +// UnmarshalJSON decodes a response from a JSON-RPC 2.0 response object. func (resp *RPCResponse) UnmarshalJSON(data []byte) error { - unsafeResp := &struct { - JSONRPC string `json:"jsonrpc"` - ID interface{} `json:"id,omitempty"` - Result json.RawMessage `json:"result,omitempty"` - Error *RPCError `json:"error,omitempty"` - }{} - err := json.Unmarshal(data, &unsafeResp) - if err != nil { - return err - } - resp.JSONRPC = unsafeResp.JSONRPC - resp.Error = unsafeResp.Error - resp.Result = unsafeResp.Result - if unsafeResp.ID == nil { - return nil - } - id, err := idFromInterface(unsafeResp.ID) - if err != nil { + var wrapper rpcResponseJSON + if err := json.Unmarshal(data, &wrapper); err != nil { return err + } else if wrapper.V != "" && wrapper.V != "2.0" { + return fmt.Errorf("invalid version: %q", wrapper.V) } - resp.ID = id - return nil -} - -func NewRPCSuccessResponse(id jsonrpcid, res interface{}) RPCResponse { - var rawMsg json.RawMessage - if res != nil { - var js []byte - js, err := tmjson.Marshal(res) - if err != nil { - return RPCInternalError(id, fmt.Errorf("error marshaling response: %w", err)) + if !isNullOrEmpty(wrapper.ID) { + if !validID.Match(wrapper.ID) { + return fmt.Errorf("invalid response ID: %q", string(wrapper.ID)) } - rawMsg = json.RawMessage(js) + resp.id = wrapper.ID } - - return RPCResponse{JSONRPC: "2.0", ID: id, Result: rawMsg} + resp.Error = wrapper.E + resp.Result = wrapper.R + return nil } -func NewRPCErrorResponse(id jsonrpcid, code int, msg string, data string) RPCResponse { - return RPCResponse{ - JSONRPC: "2.0", - ID: id, - Error: &RPCError{Code: code, Message: msg, Data: data}, - } +// MarshalJSON marshals a response with the appropriate version tag. +func (resp RPCResponse) MarshalJSON() ([]byte, error) { + return json.Marshal(rpcResponseJSON{ + V: "2.0", + ID: resp.id, + R: resp.Result, + E: resp.Error, + }) } func (resp RPCResponse) String() string { if resp.Error == nil { - return fmt.Sprintf("RPCResponse{%s %X}", resp.ID, resp.Result) + return fmt.Sprintf("RPCResponse{%s %X}", resp.ID(), resp.Result) } - return fmt.Sprintf("RPCResponse{%s %v}", resp.ID, resp.Error) -} - -// From the JSON-RPC 2.0 spec: -// If there was an error in detecting the id in the Request object (e.g. Parse -// error/Invalid Request), it MUST be Null. 
-func RPCParseError(err error) RPCResponse { - return NewRPCErrorResponse(nil, -32700, "Parse error", err.Error()) -} - -// From the JSON-RPC 2.0 spec: -// If there was an error in detecting the id in the Request object (e.g. Parse -// error/Invalid Request), it MUST be Null. -func RPCInvalidRequestError(id jsonrpcid, err error) RPCResponse { - return NewRPCErrorResponse(id, -32600, "Invalid Request", err.Error()) -} - -func RPCMethodNotFoundError(id jsonrpcid) RPCResponse { - return NewRPCErrorResponse(id, -32601, "Method not found", "") -} - -func RPCInvalidParamsError(id jsonrpcid, err error) RPCResponse { - return NewRPCErrorResponse(id, -32602, "Invalid params", err.Error()) -} - -func RPCInternalError(id jsonrpcid, err error) RPCResponse { - return NewRPCErrorResponse(id, -32603, "Internal error", err.Error()) -} - -func RPCServerError(id jsonrpcid, err error) RPCResponse { - return NewRPCErrorResponse(id, -32000, "Server error", err.Error()) + return fmt.Sprintf("RPCResponse{%s %v}", resp.ID(), resp.Error) } //---------------------------------------- @@ -253,55 +253,49 @@ type WSRPCConnection interface { // WriteRPCResponse writes the response onto connection (BLOCKING). WriteRPCResponse(context.Context, RPCResponse) error // TryWriteRPCResponse tries to write the response onto connection (NON-BLOCKING). - TryWriteRPCResponse(RPCResponse) bool + TryWriteRPCResponse(context.Context, RPCResponse) bool // Context returns the connection's context. Context() context.Context } -// Context is the first parameter for all functions. It carries a json-rpc -// request, http request and websocket connection. -// -// - JSONReq is non-nil when JSONRPC is called over websocket or HTTP. -// - WSConn is non-nil when we're connected via a websocket. -// - HTTPReq is non-nil when URI or JSONRPC is called over HTTP. -type Context struct { - // json-rpc request - JSONReq *RPCRequest - // websocket connection - WSConn WSRPCConnection - // http request - HTTPReq *http.Request +// CallInfo carries JSON-RPC request metadata for RPC functions invoked via +// JSON-RPC. It can be recovered from the context with GetCallInfo. +type CallInfo struct { + RPCRequest *RPCRequest // non-nil for requests via HTTP or websocket + HTTPRequest *http.Request // non-nil for requests via HTTP + WSConn WSRPCConnection // non-nil for requests via websocket +} + +type callInfoKey struct{} + +// WithCallInfo returns a child context of ctx with the ci attached. +func WithCallInfo(ctx context.Context, ci *CallInfo) context.Context { + return context.WithValue(ctx, callInfoKey{}, ci) } -// RemoteAddr returns the remote address (usually a string "IP:port"). -// If neither HTTPReq nor WSConn is set, an empty string is returned. -// HTTP: -// http.Request#RemoteAddr -// WS: -// result of GetRemoteAddr -func (ctx *Context) RemoteAddr() string { - if ctx.HTTPReq != nil { - return ctx.HTTPReq.RemoteAddr - } else if ctx.WSConn != nil { - return ctx.WSConn.GetRemoteAddr() +// GetCallInfo returns the CallInfo record attached to ctx, or nil if ctx does +// not contain a call record. +func GetCallInfo(ctx context.Context) *CallInfo { + if v := ctx.Value(callInfoKey{}); v != nil { + return v.(*CallInfo) } - return "" + return nil } -// Context returns the request's context. -// The returned context is always non-nil; it defaults to the background context. -// HTTP: -// The context is canceled when the client's connection closes, the request -// is canceled (with HTTP/2), or when the ServeHTTP method returns. 
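
A handler now recovers this metadata from its context rather than from a dedicated `Context` parameter; a hypothetical example:

```go
package handlers

import (
	"context"
	"errors"

	rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
)

// WhoAmI is a hypothetical no-parameter handler that reads the per-call
// metadata the server attaches via WithCallInfo.
func WhoAmI(ctx context.Context) (string, error) {
	ci := rpctypes.GetCallInfo(ctx)
	if ci == nil {
		return "", errors.New("no call metadata in context")
	}
	// Empty when neither an HTTP request nor a websocket is attached.
	return ci.RemoteAddr(), nil
}
```
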
-// WS: -// The context is canceled when the client's connections closes. -func (ctx *Context) Context() context.Context { - if ctx.HTTPReq != nil { - return ctx.HTTPReq.Context() - } else if ctx.WSConn != nil { - return ctx.WSConn.Context() +// RemoteAddr returns the remote address (usually a string "IP:port"). If +// neither HTTPRequest nor WSConn is set, an empty string is returned. +// +// For HTTP requests, this reports the request's RemoteAddr. +// For websocket requests, this reports the connection's GetRemoteAddr. +func (ci *CallInfo) RemoteAddr() string { + if ci == nil { + return "" + } else if ci.HTTPRequest != nil { + return ci.HTTPRequest.RemoteAddr + } else if ci.WSConn != nil { + return ci.WSConn.GetRemoteAddr() } - return context.Background() + return "" } //---------------------------------------- diff --git a/rpc/jsonrpc/types/types_test.go b/rpc/jsonrpc/types/types_test.go index d57a0403d9..d5be2f74dd 100644 --- a/rpc/jsonrpc/types/types_test.go +++ b/rpc/jsonrpc/types/types_test.go @@ -2,69 +2,59 @@ package types import ( "encoding/json" - "errors" "fmt" "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) type SampleResult struct { Value string } -type responseTest struct { - id jsonrpcid - expected string -} - -var responseTests = []responseTest{ - {JSONRPCStringID("1"), `"1"`}, - {JSONRPCStringID("alphabet"), `"alphabet"`}, - {JSONRPCStringID(""), `""`}, - {JSONRPCStringID("àáâ"), `"àáâ"`}, - {JSONRPCIntID(-1), "-1"}, - {JSONRPCIntID(0), "0"}, - {JSONRPCIntID(1), "1"}, - {JSONRPCIntID(100), "100"}, +// Valid JSON identifier texts. +var testIDs = []string{ + `"1"`, `"alphabet"`, `""`, `"àáâ"`, "-1", "0", "1", "100", } func TestResponses(t *testing.T) { - assert := assert.New(t) - for _, tt := range responseTests { - jsonid := tt.id - a := NewRPCSuccessResponse(jsonid, &SampleResult{"hello"}) - b, _ := json.Marshal(a) - s := fmt.Sprintf(`{"jsonrpc":"2.0","id":%v,"result":{"Value":"hello"}}`, tt.expected) - assert.Equal(s, string(b)) + for _, id := range testIDs { + req := RPCRequest{id: json.RawMessage(id)} - d := RPCParseError(errors.New("hello world")) - e, _ := json.Marshal(d) - f := `{"jsonrpc":"2.0","error":{"code":-32700,"message":"Parse error","data":"hello world"}}` - assert.Equal(f, string(e)) + a := req.MakeResponse(&SampleResult{"hello"}) + b, err := json.Marshal(a) + require.NoError(t, err, "input id: %q", id) + s := fmt.Sprintf(`{"jsonrpc":"2.0","id":%v,"result":{"Value":"hello"}}`, id) + assert.Equal(t, s, string(b)) - g := RPCMethodNotFoundError(jsonid) - h, _ := json.Marshal(g) - i := fmt.Sprintf(`{"jsonrpc":"2.0","id":%v,"error":{"code":-32601,"message":"Method not found"}}`, tt.expected) - assert.Equal(string(h), i) + d := req.MakeErrorf(CodeParseError, "hello world") + e, err := json.Marshal(d) + require.NoError(t, err) + f := fmt.Sprintf(`{"jsonrpc":"2.0","id":%v,"error":{"code":-32700,"message":"Parse error","data":"hello world"}}`, id) + assert.Equal(t, f, string(e)) + + g := req.MakeErrorf(CodeMethodNotFound, "foo") + h, err := json.Marshal(g) + require.NoError(t, err) + i := fmt.Sprintf(`{"jsonrpc":"2.0","id":%v,"error":{"code":-32601,"message":"Method not found","data":"foo"}}`, id) + assert.Equal(t, string(h), i) } } func TestUnmarshallResponses(t *testing.T) { - assert := assert.New(t) - for _, tt := range responseTests { + for _, id := range testIDs { response := &RPCResponse{} - err := json.Unmarshal( - []byte(fmt.Sprintf(`{"jsonrpc":"2.0","id":%v,"result":{"Value":"hello"}}`, tt.expected)), - response, - 
) - assert.Nil(err) - a := NewRPCSuccessResponse(tt.id, &SampleResult{"hello"}) - assert.Equal(*response, a) + input := fmt.Sprintf(`{"jsonrpc":"2.0","id":%v,"result":{"Value":"hello"}}`, id) + require.NoError(t, json.Unmarshal([]byte(input), &response)) + + req := RPCRequest{id: json.RawMessage(id)} + a := req.MakeResponse(&SampleResult{"hello"}) + assert.Equal(t, *response, a) } - response := &RPCResponse{} - err := json.Unmarshal([]byte(`{"jsonrpc":"2.0","id":true,"result":{"Value":"hello"}}`), response) - assert.NotNil(err) + var response RPCResponse + const input = `{"jsonrpc":"2.0","id":true,"result":{"Value":"hello"}}` + require.Error(t, json.Unmarshal([]byte(input), &response)) } func TestRPCError(t *testing.T) { diff --git a/rpc/openapi/openapi.yaml b/rpc/openapi/openapi.yaml index 73217838f1..f9e70dfca5 100644 --- a/rpc/openapi/openapi.yaml +++ b/rpc/openapi/openapi.yaml @@ -8,8 +8,8 @@ info: Tendermint supports the following RPC protocols: * URI over HTTP - * JSONRPC over HTTP - * JSONRPC over websockets + * JSON-RPC 2.0 over HTTP + * JSON-RPC 2.0 over websockets (deprecated) ## Configuration @@ -30,7 +30,7 @@ info: ## URI/HTTP - A REST like interface. + A GET request with arguments encoded as query parameters: curl localhost:26657/block?height=5 @@ -42,10 +42,13 @@ info: ## JSONRPC/websockets - JSONRPC requests can be also made via websocket. + In Tendermint v0.35 and earlier, JSONRPC requests can be also made via + websocket. The websocket interface is deprecated in Tendermint v0.36, and + will be removed in v0.37. + The websocket endpoint is at `/websocket`, e.g. `localhost:26657/websocket`. - Asynchronous RPC functions like event `subscribe` and `unsubscribe` are - only available via websockets. + The RPC methods for event subscription (`subscribe`, `unsubscribe`, and + `unsubscribe_all`) are only available via websockets. Example using https://github.com/hashrocket/ws: @@ -69,6 +72,8 @@ tags: description: Transactions broadcast APIs - name: ABCI description: ABCI APIs + - name: Events + description: Event subscription APIs - name: Evidence description: Evidence APIs - name: Unsafe @@ -81,21 +86,13 @@ paths: - Tx operationId: broadcast_tx_sync description: | - If you want to be sure that the transaction is included in a block, you can - subscribe for the result using JSONRPC via a websocket. See + This method blocks until CheckTx returns and reports its result, but + does not wait for the transaction to be included in a block. To know + when the transaction is included in a block, check for the transaction + event using JSON-RPC. See https://docs.tendermint.com/master/app-dev/subscribing-to-events-via-websocket.html - If you haven't received anything after a couple of blocks, resend it. If the - same happens again, send it to some other node. A few reasons why it could - happen: - - 1. malicious node can drop or pretend it had committed your tx - 2. malicious proposer (not necessary the one you're communicating with) can - drop transactions, which might become valid in the future - (https://github.com/tendermint/tendermint/issues/3322) - - Please refer to - https://docs.tendermint.com/master/tendermint-core/using-tendermint.html#formatting + See https://docs.tendermint.com/master/tendermint-core/using-tendermint.html#formatting for formatting/encoding rules. 
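
The curl examples in the OpenAPI description translate directly to code; a minimal Go sketch of one JSON-RPC 2.0 call over HTTP POST, where the local node address and the method parameters are assumptions:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
)

func main() {
	// One JSON-RPC 2.0 call against a node's default local RPC listener.
	body := strings.NewReader(`{"jsonrpc":"2.0","id":1,"method":"block","params":{"height":"5"}}`)
	resp, err := http.Post("http://localhost:26657", "application/json", body)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, _ := io.ReadAll(resp.Body)
	fmt.Println(string(out))
}
```
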
parameters: - in: query @@ -125,21 +122,13 @@ paths: - Tx operationId: broadcast_tx_async description: | - If you want to be sure that the transaction is included in a block, you can - subscribe for the result using JSONRPC via a websocket. See + This method submits the transaction and returns immediately without + waiting for the transaction to be checked (CheckTx) or committed. To + know when the transaction is included in a block, you can check for the + transaction event using JSON-RPC. See https://docs.tendermint.com/master/app-dev/subscribing-to-events-via-websocket.html - If you haven't received anything after a couple of blocks, resend it. If the - same happens again, send it to some other node. A few reasons why it could - happen: - 1. malicious node can drop or pretend it had committed your tx - 2. malicious proposer (not necessary the one you're communicating with) can - drop transactions, which might become valid in the future - (https://github.com/tendermint/tendermint/issues/3322) - 3. node can be offline - - Please refer to - https://docs.tendermint.com/master/tendermint-core/using-tendermint.html#formatting + See https://docs.tendermint.com/master/tendermint-core/using-tendermint.html#formatting for formatting/encoding rules. parameters: - in: query @@ -169,19 +158,20 @@ paths: - Tx operationId: broadcast_tx_commit description: | - IMPORTANT: use only for testing and development. In production, use - BroadcastTxSync or BroadcastTxAsync. You can subscribe for the transaction - result using JSONRPC via a websocket. See - https://docs.tendermint.com/master/app-dev/subscribing-to-events-via-websocket.html + This method waits for the transaction to be checked (CheckTx) and makes + a best effort to wait for it to be committed into a block before + returning. It will report an error if the request times out before the + transaction commits. If CheckTx or DeliverTx fails, the RPC will + succeed and report the failing (non-zero) ABCI result code. - CONTRACT: only returns error if mempool.CheckTx() errs or if we timeout - waiting for tx to commit. + WARNING: Use this only for testing and development. For production use, + call broadcast_tx_sync or broadcast_tx_async. - If CheckTx or DeliverTx fail, no error will be returned, but the returned result - will contain a non-OK ABCI code. + To know when a transaction is included in a block, check for the + transaction event using JSON-RPC. See + https://docs.tendermint.com/master/app-dev/subscribing-to-events-via-websocket.html - Please refer to - https://docs.tendermint.com/master/tendermint-core/using-tendermint.html#formatting + See https://docs.tendermint.com/master/tendermint-core/using-tendermint.html#formatting for formatting/encoding rules. parameters: - in: query @@ -211,7 +201,7 @@ paths: - Tx operationId: check_tx description: | - The transaction won't be added to the mempool. + The transaction won\'t be added to the mempool. Please refer to https://docs.tendermint.com/master/tendermint-core/using-tendermint.html#formatting @@ -262,10 +252,103 @@ paths: schema: $ref: "#/components/schemas/ErrorResponse" + /events: + get: + summary: Fetch events posted by the consensus node. + tags: + - Events + operationId: events + description: | + Fetch a batch of events posted by the consensus node and matching a + specified query. + + The query grammar is defined in + https://godoc.org/github.com/tendermint/tendermint/internal/pubsub/query/syntax.
+ An empty query matches all events; otherwise a query comprises one or + more terms comparing event metadata to target values. For example, to + select new block events: + + tm.event = 'NewBlock' + + Multiple terms can be combined with AND, for example to match the + transaction event with a given hash, use: + + tm.event = 'Tx' AND tx.hash = 'EA7B33F' + + The comparison operators include "=", "<", "<=", ">", ">=", and + "CONTAINS". Operands may be strings (in single quotes), numbers, dates, + or timestamps. In addition, the "EXISTS" operator allows you to check + for the presence of an attribute regardless of its value. + + Tendermint defines a tm.event attribute for all events. Transactions + are also assigned tx.hash and tx.height attributes. Other attributes + are provided by the application as ABCI Event records. The name of the + event in the query is formed by combining the type and attribute key + with a period. For example, given: + + []abci.Event{{ + Type: "reward", + Attributes: []abci.EventAttribute{ + {Key: "address", Value: "cosmos1xyz012pdq"}, + {Key: "amount", Value: "45.62"}, + {Key: "balance", Value: "100.390001"}, + }, + }} + + the query may refer to the names "reward.address", "reward.amount", and + "reward.balance", as in: + + reward.address EXISTS AND reward.balance > 45 + + The node maintains a log of all events within an operator-defined time + window. The /events method returns the most recent items from the log + that match the query. Each item returned includes a cursor that marks + its location in the log. Cursors can be passed via the "before" and + "after" parameters to fetch events earlier in the log. + parameters: + - in: query + name: filter + schema: + $ref: "#/components/schemas/EventFilter" + - in: query + name: maxItems + schema: + type: integer + example: 10 + - in: query + name: after + schema: + type: string + example: "0005d7c09065e9a7-01cf" + - in: query + name: before + schema: + type: string + example: "0005d7c09065e9a7-01cf" + - in: query + name: waitTime + schema: + type: integer + example: 5000000000 + responses: + "200": + description: Reports a batch of matching events + content: + application/json: + schema: + $ref: "#/components/schemas/EventsResponse" + "500": + description: Reports an error + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorResponse" + /subscribe: get: summary: Subscribe for events via WebSocket. 
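
Putting the /events parameters together, a heavily hedged sketch of cursor-based polling over JSON-RPC: the parameter names follow the spec above, the nested filter object follows the EventFilter schema declared under components, and the response decoding is a simplifying assumption:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"strings"
)

func main() {
	newest := ""
	for i := 0; i < 3; i++ {
		// An empty "after" fetches the most recent items; subsequent rounds
		// resume after the newest cursor already seen.
		req := fmt.Sprintf(`{"jsonrpc":"2.0","id":1,"method":"events","params":`+
			`{"filter":{"filter":"tm.event = 'Tx'"},"maxItems":10,"after":%q,"waitTime":5000000000}}`, newest)
		resp, err := http.Post("http://localhost:26657", "application/json", strings.NewReader(req))
		if err != nil {
			panic(err)
		}
		var body struct {
			Result struct {
				Newest string `json:"newest"`
			} `json:"result"`
		}
		if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
			panic(err)
		}
		resp.Body.Close()
		newest = body.Result.Newest
	}
}
```
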
tags: + - Events - Websocket operationId: subscribe description: | @@ -422,6 +505,7 @@ paths: get: summary: Unsubscribe from event on Websocket tags: + - Events - Websocket operationId: unsubscribe description: | @@ -472,6 +556,7 @@ paths: get: summary: Unsubscribe from all events via WebSocket tags: + - Events - Websocket operationId: unsubscribe_all description: | @@ -1062,13 +1147,21 @@ paths: operationId: unconfirmed_txs parameters: - in: query - name: limit - description: Maximum number of unconfirmed transactions to return (max 100) + name: page + description: "Page number (1-based)" required: false schema: type: integer - default: 30 + default: 1 example: 1 + - in: query + name: per_page + description: "Number of entries per page (max: 100)" + required: false + schema: + type: integer + example: 100 + default: 30 tags: - Info description: | @@ -1395,6 +1488,47 @@ components: error: type: string example: "Description of failure" + EventItem: + description: Event item metadata + type: object + properties: + cursor: + type: string + example: "0005d7c09065e9a7-01cf" + data: + type: object + properties: + type: + type: string + example: "tendermint/event/Tx" + value: + type: string + format: json + EventFilter: + description: Event filter query + type: object + properties: + filter: + type: string + example: "tm.event = 'Tx'" + EventsResponse: + description: Events response + allOf: + - $ref: "#/components/schemas/JSONRPC" + - type: object + properties: + items: + type: array + items: + $ref: "#/components/schemas/EventItem" + more: + type: boolean + oldest: + type: string + example: "0005d7c09065e9a7-01cf" + newest: + type: string + example: "0005d7c090660099-0200" ProtocolVersion: type: object properties: diff --git a/rpc/test/helpers.go b/rpc/test/helpers.go index 3523198529..f26ec95648 100644 --- a/rpc/test/helpers.go +++ b/rpc/test/helpers.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "os" + "testing" "time" abciclient "github.com/tendermint/tendermint/abci/client" @@ -14,7 +15,6 @@ import ( "github.com/tendermint/tendermint/libs/service" "github.com/tendermint/tendermint/node" "github.com/tendermint/tendermint/rpc/coretypes" - coregrpc "github.com/tendermint/tendermint/rpc/grpc" rpcclient "github.com/tendermint/tendermint/rpc/jsonrpc/client" ) @@ -24,6 +24,7 @@ type Options struct { suppressStdout bool } +// waitForRPC connects to the RPC service and blocks until a /status call succeeds. func waitForRPC(ctx context.Context, conf *config.Config) { laddr := conf.RPC.ListenAddress client, err := rpcclient.New(laddr) @@ -32,7 +33,7 @@ func waitForRPC(ctx context.Context, conf *config.Config) { } result := new(coretypes.ResultStatus) for { - _, err := client.Call(ctx, "status", map[string]interface{}{}, result) + err := client.Call(ctx, "status", map[string]interface{}{}, result) if err == nil { return } @@ -42,16 +43,6 @@ func waitForRPC(ctx context.Context, conf *config.Config) { } } -func waitForGRPC(ctx context.Context, conf *config.Config) { - client := GetGRPCClient(conf) - for { - _, err := client.Ping(ctx, &coregrpc.RequestPing{}) - if err == nil { - return - } - } -} - func randPort() int { port, err := tmnet.GetFreePort() if err != nil { @@ -60,38 +51,37 @@ func randPort() int { return port } -func makeAddrs() (string, string, string) { - return fmt.Sprintf("tcp://127.0.0.1:%d", randPort()), - fmt.Sprintf("tcp://127.0.0.1:%d", randPort()), - fmt.Sprintf("tcp://127.0.0.1:%d", randPort()) +// makeAddrs constructs local listener addresses for node services. 
This +// implementation uses random ports so test instances can run concurrently. +func makeAddrs() (p2pAddr, rpcAddr string) { + const addrTemplate = "tcp://127.0.0.1:%d" + return fmt.Sprintf(addrTemplate, randPort()), fmt.Sprintf(addrTemplate, randPort()) } -func CreateConfig(testName string) (*config.Config, error) { - c, err := config.ResetTestRoot(testName) +func CreateConfig(t *testing.T, testName string) (*config.Config, error) { + c, err := config.ResetTestRoot(t.TempDir(), testName) if err != nil { return nil, err } - // and we use random ports to run in parallel - tm, rpc, grpc := makeAddrs() - c.P2P.ListenAddress = tm - c.RPC.ListenAddress = rpc + p2pAddr, rpcAddr := makeAddrs() + c.P2P.ListenAddress = p2pAddr + c.RPC.ListenAddress = rpcAddr + c.RPC.EventLogWindowSize = 5 * time.Minute + c.Consensus.WalPath = "rpc-test" c.RPC.CORSAllowedOrigins = []string{"https://tendermint.com/"} - c.RPC.GRPCListenAddress = grpc return c, nil } -func GetGRPCClient(conf *config.Config) coregrpc.BroadcastAPIClient { - grpcAddr := conf.RPC.GRPCListenAddress - return coregrpc.StartGRPCClient(grpcAddr) -} - type ServiceCloser func(context.Context) error -func StartTendermint(ctx context.Context, +func StartTendermint( + ctx context.Context, conf *config.Config, app abci.Application, - opts ...func(*Options)) (service.Service, ServiceCloser, error) { + opts ...func(*Options), +) (service.Service, ServiceCloser, error) { + ctx, cancel := context.WithCancel(ctx) nodeOpts := &Options{} for _, opt := range opts { @@ -101,33 +91,31 @@ func StartTendermint(ctx context.Context, if nodeOpts.suppressStdout { logger = log.NewNopLogger() } else { - logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false) + var err error + logger, err = log.NewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo) + if err != nil { + return nil, func(_ context.Context) error { cancel(); return nil }, err + } } - papp := abciclient.NewLocalCreator(app) - //signer, err := privval.NewDashCoreSignerClient() - - tmNode, err := node.New(conf, logger, papp, nil, nil) + papp := abciclient.NewLocalClient(logger, app) + tmNode, err := node.New(ctx, conf, logger, papp, nil) if err != nil { - return nil, func(_ context.Context) error { return nil }, err + return nil, func(_ context.Context) error { cancel(); return nil }, err } - err = tmNode.Start() + err = tmNode.Start(ctx) if err != nil { - return nil, func(_ context.Context) error { return nil }, err + return nil, func(_ context.Context) error { cancel(); return nil }, err } - // wait for rpc waitForRPC(ctx, conf) - waitForGRPC(ctx, conf) if !nodeOpts.suppressStdout { fmt.Println("Tendermint running!") } return tmNode, func(ctx context.Context) error { - if err := tmNode.Stop(); err != nil { - logger.Error("Error when trying to stop node", "err", err) - } + cancel() tmNode.Wait() os.RemoveAll(conf.RootDir) return nil diff --git a/scripts/authors.sh b/scripts/authors.sh index 7aafb0127e..49251242ee 100755 --- a/scripts/authors.sh +++ b/scripts/authors.sh @@ -1,16 +1,16 @@ #! /bin/bash -# Usage: -# `./authors.sh` -# Print a list of all authors who have committed to develop since master. -# -# `./authors.sh ` -# Lookup the email address on Github and print the associated username +set -euo pipefail -author=$1 +ref=$1 -if [[ "$author" == "" ]]; then - git log master..develop | grep Author | sort | uniq +if [[ ! 
-z "$ref" ]]; then + git log master..$ref | grep Author | sort | uniq else - curl -s "https://api.github.com/search/users?q=$author+in%3Aemail&type=Users&utf8=%E2%9C%93" | jq .items[0].login +cat << EOF +Usage: + ./authors.sh + Print a list of all authors who have committed to the codebase since the supplied commit ref. +EOF fi + diff --git a/scripts/confix/condiff/condiff.go b/scripts/confix/condiff/condiff.go new file mode 100644 index 0000000000..6b11e4e2cb --- /dev/null +++ b/scripts/confix/condiff/condiff.go @@ -0,0 +1,152 @@ +// Program condiff performs a keyspace diff on two TOML documents. +package main + +import ( + "context" + "flag" + "fmt" + "io" + "log" + "os" + "path/filepath" + "sort" + "strings" + + "github.com/creachadair/tomledit" + "github.com/creachadair/tomledit/parser" + "github.com/creachadair/tomledit/transform" +) + +var ( + doDesnake = flag.Bool("desnake", false, "Convert snake_case to kebab-case before comparing") +) + +func init() { + flag.Usage = func() { + fmt.Fprintf(os.Stderr, `Usage: %[1]s [options] f1 f2 + +Diff the keyspaces of the TOML documents in files f1 and f2. +The output prints one line per key that differs: + + -S name -- section exists in f1 but not f2 + +S name -- section exists in f2 but not f1 + -M name -- mapping exists in f1 but not f2 + +M name -- mapping exists in f2 but not f1 + +Comments, order, and values are ignored for comparison purposes. + +Options: +`, filepath.Base(os.Args[0])) + flag.PrintDefaults() + } +} + +func main() { + flag.Parse() + + if flag.NArg() != 2 { + log.Fatalf("Usage: %[1]s ", filepath.Base(os.Args[0])) + } + lhs := mustParse(flag.Arg(0)) + rhs := mustParse(flag.Arg(1)) + if *doDesnake { + log.Printf("Converting all names from snake_case to kebab-case") + fix := transform.SnakeToKebab() + _ = fix(context.Background(), lhs) + _ = fix(context.Background(), rhs) + } + diffDocs(os.Stdout, lhs, rhs) +} + +func mustParse(path string) *tomledit.Document { + f, err := os.Open(path) + if err != nil { + log.Fatalf("Opening TOML input: %v", err) + } + defer f.Close() + doc, err := tomledit.Parse(f) + if err != nil { + log.Fatalf("Parsing %q: %v", path, err) + } + return doc +} + +func allKeys(s *tomledit.Section) []string { + var keys []string + s.Scan(func(key parser.Key, _ *tomledit.Entry) bool { + keys = append(keys, key.String()) + return true + }) + return keys +} + +const ( + delSection = "-S" + delMapping = "-M" + addSection = "+S" + addMapping = "+M" + + delMapSep = "\n" + delMapping + " " + addMapSep = "\n" + addMapping + " " +) + +func diffDocs(w io.Writer, lhs, rhs *tomledit.Document) { + diffSections(w, lhs.Global, rhs.Global) + lsec, rsec := lhs.Sections, rhs.Sections + transform.SortSectionsByName(lsec) + transform.SortSectionsByName(rsec) + + i, j := 0, 0 + for i < len(lsec) && j < len(rsec) { + if lsec[i].Name.Before(rsec[j].Name) { + fmt.Fprintln(w, delSection, lsec[i].Name) + fmt.Fprintln(w, delMapping, strings.Join(allKeys(lsec[i]), delMapSep)) + i++ + } else if rsec[j].Name.Before(lsec[i].Name) { + fmt.Fprintln(w, addSection, rsec[j].Name) + fmt.Fprintln(w, addMapping, strings.Join(allKeys(rsec[j]), addMapSep)) + j++ + } else { + diffSections(w, lsec[i], rsec[j]) + i++ + j++ + } + } + for ; i < len(lsec); i++ { + fmt.Fprintln(w, delSection, lsec[i].Name) + fmt.Fprintln(w, delMapping, strings.Join(allKeys(lsec[i]), delMapSep)) + } + for ; j < len(rsec); j++ { + fmt.Fprintln(w, addSection, rsec[j].Name) + fmt.Fprintln(w, addMapping, strings.Join(allKeys(rsec[j]), addMapSep)) + } +} + +func diffSections(w 
io.Writer, lhs, rhs *tomledit.Section) { + diffKeys(w, allKeys(lhs), allKeys(rhs)) +} + +func diffKeys(w io.Writer, lhs, rhs []string) { + sort.Strings(lhs) + sort.Strings(rhs) + + i, j := 0, 0 + for i < len(lhs) && j < len(rhs) { + if lhs[i] < rhs[j] { + fmt.Fprintln(w, delMapping, lhs[i]) + i++ + } else if lhs[i] > rhs[j] { + fmt.Fprintln(w, addMapping, rhs[j]) + j++ + } else { + i++ + j++ + } + } + for ; i < len(lhs); i++ { + fmt.Fprintln(w, delMapping, lhs[i]) + } + for ; j < len(rhs); j++ { + fmt.Fprintln(w, addMapping, rhs[j]) + } +} diff --git a/scripts/confix/confix.go b/scripts/confix/confix.go new file mode 100644 index 0000000000..6677f0b49a --- /dev/null +++ b/scripts/confix/confix.go @@ -0,0 +1,163 @@ +// Program confix applies fixes to a Tendermint TOML configuration file to +// update a file created with an older version of Tendermint to a compatible +// format for a newer version. +package main + +import ( + "bytes" + "context" + "errors" + "flag" + "fmt" + "log" + "os" + "path/filepath" + + "github.com/creachadair/atomicfile" + "github.com/creachadair/tomledit" + "github.com/creachadair/tomledit/transform" + "github.com/spf13/viper" + "github.com/tendermint/tendermint/config" +) + +func init() { + flag.Usage = func() { + fmt.Fprintf(os.Stderr, `Usage: %[1]s -config [-out ] + +Modify the contents of the specified -config TOML file to update the names, +locations, and values of configuration settings to the current configuration +layout. The output is written to -out, or to stdout. + +It is valid to set -config and -out to the same path. In that case, the file will +be modified in-place. In case of any error in updating the file, no output is +written. + +Options: +`, filepath.Base(os.Args[0])) + flag.PrintDefaults() + } +} + +var ( + configPath = flag.String("config", "", "Config file path (required)") + outPath = flag.String("out", "", "Output file path (default stdout)") +) + +func main() { + flag.Parse() + if *configPath == "" { + log.Fatal("You must specify a non-empty -config path") + } + + doc, err := LoadConfig(*configPath) + if err != nil { + log.Fatalf("Loading config: %v", err) + } + + ctx := transform.WithLogWriter(context.Background(), os.Stderr) + if err := ApplyFixes(ctx, doc); err != nil { + log.Fatalf("Updating %q: %v", *configPath, err) + } + + var buf bytes.Buffer + if err := tomledit.Format(&buf, doc); err != nil { + log.Fatalf("Formatting config: %v", err) + } + + // Verify that Tendermint can parse the results after our edits. + if err := CheckValid(buf.Bytes()); err != nil { + log.Fatalf("Updated config is invalid: %v", err) + } + + if *outPath == "" { + os.Stdout.Write(buf.Bytes()) + } else if err := atomicfile.WriteData(*outPath, buf.Bytes(), 0600); err != nil { + log.Fatalf("Writing output: %v", err) + } +} + +// ApplyFixes transforms doc and reports whether it succeeded. +func ApplyFixes(ctx context.Context, doc *tomledit.Document) error { + // Check what version of Tendermint might have created this config file, as + // a safety check for the updates we are about to make. + tmVersion := GuessConfigVersion(doc) + if tmVersion == vUnknown { + return errors.New("cannot tell what Tendermint version created this config") + } else if tmVersion < v34 || tmVersion > v36 { + // TODO(creachadair): Add in rewrites for older versions. This will + // require some digging to discover what the changes were. The upgrade + // instructions do not give specifics. 
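+	//
+	// Note: the version constants are plain strings of the form "vX.YY", so
+	// this range check relies on their lexicographic ordering as strings
+	// (v34 < v35 < v36).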
+ return fmt.Errorf("unable to update version %s config", tmVersion) + } + return plan.Apply(ctx, doc) +} + +// LoadConfig loads and parses the TOML document from path. +func LoadConfig(path string) (*tomledit.Document, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + return tomledit.Parse(f) +} + +const ( + vUnknown = "" + v32 = "v0.32" + v33 = "v0.33" + v34 = "v0.34" + v35 = "v0.35" + v36 = "v0.36" +) + +// GuessConfigVersion attempts to figure out which version of Tendermint +// created the specified config document. It returns "" if the creating version +// cannot be determined, otherwise a string of the form "vX.YY". +func GuessConfigVersion(doc *tomledit.Document) string { + hasDisableWS := doc.First("rpc", "experimental-disable-websocket") != nil + hasUseLegacy := doc.First("p2p", "use-legacy") != nil // v0.35 only + if hasDisableWS && !hasUseLegacy { + return v36 + } + + hasBlockSync := transform.FindTable(doc, "blocksync") != nil // add: v0.35 + hasStateSync := transform.FindTable(doc, "statesync") != nil // add: v0.34 + if hasBlockSync && hasStateSync { + return v35 + } else if hasStateSync { + return v34 + } + + hasIndexKeys := doc.First("tx_index", "index_keys") != nil // add: v0.33 + hasIndexTags := doc.First("tx_index", "index_tags") != nil // rem: v0.33 + if hasIndexKeys && !hasIndexTags { + return v33 + } + + hasFastSync := transform.FindTable(doc, "fastsync") != nil // add: v0.32 + if hasIndexTags && hasFastSync { + return v32 + } + + // Something older, probably. + return vUnknown +} + +// CheckValid checks whether the specified config appears to be a valid +// Tendermint config file. This emulates how the node loads the config. +func CheckValid(data []byte) error { + v := viper.New() + v.SetConfigType("toml") + + if err := v.ReadConfig(bytes.NewReader(data)); err != nil { + return fmt.Errorf("reading config: %w", err) + } + + var cfg config.Config + if err := v.Unmarshal(&cfg); err != nil { + return fmt.Errorf("decoding config: %w", err) + } + + return cfg.ValidateBasic() +} diff --git a/scripts/confix/confix_test.go b/scripts/confix/confix_test.go new file mode 100644 index 0000000000..ec258f4ca6 --- /dev/null +++ b/scripts/confix/confix_test.go @@ -0,0 +1,99 @@ +package main_test + +import ( + "bytes" + "context" + "strings" + "testing" + + "github.com/creachadair/tomledit" + "github.com/google/go-cmp/cmp" + + confix "github.com/tendermint/tendermint/scripts/confix" +) + +func mustParseConfig(t *testing.T, path string) *tomledit.Document { + doc, err := confix.LoadConfig(path) + if err != nil { + t.Fatalf("Loading config: %v", err) + } + return doc +} + +func TestGuessConfigVersion(t *testing.T) { + tests := []struct { + path, want string + }{ + {"testdata/non-config.toml", ""}, + {"testdata/v30-config.toml", ""}, + {"testdata/v31-config.toml", ""}, + {"testdata/v32-config.toml", "v0.32"}, + {"testdata/v33-config.toml", "v0.33"}, + {"testdata/v34-config.toml", "v0.34"}, + {"testdata/v35-config.toml", "v0.35"}, + {"testdata/v36-config.toml", "v0.36"}, + } + for _, test := range tests { + t.Run(test.path, func(t *testing.T) { + got := confix.GuessConfigVersion(mustParseConfig(t, test.path)) + if got != test.want { + t.Errorf("Wrong version: got %q, want %q", got, test.want) + } + }) + } +} + +func TestApplyFixes(t *testing.T) { + ctx := context.Background() + + t.Run("Unknown", func(t *testing.T) { + err := confix.ApplyFixes(ctx, mustParseConfig(t, "testdata/v31-config.toml")) + if err == nil || !strings.Contains(err.Error(), 
"cannot tell what Tendermint version") { + t.Error("ApplyFixes succeeded, but should have failed for an unknown version") + } + }) + t.Run("TooOld", func(t *testing.T) { + err := confix.ApplyFixes(ctx, mustParseConfig(t, "testdata/v33-config.toml")) + if err == nil || !strings.Contains(err.Error(), "unable to update version v0.33 config") { + t.Errorf("ApplyFixes: got %v, want version error", err) + } + }) + t.Run("OK", func(t *testing.T) { + doc := mustParseConfig(t, "testdata/v34-config.toml") + if err := confix.ApplyFixes(ctx, doc); err != nil { + t.Fatalf("ApplyFixes: unexpected error: %v", err) + } + + t.Run("Fixpoint", func(t *testing.T) { + // Verify that reapplying fixes to the same config succeeds, and does not + // make any additional changes. + var before bytes.Buffer + if err := tomledit.Format(&before, doc); err != nil { + t.Fatalf("Formatting document: %v", err) + } + if err := confix.CheckValid(before.Bytes()); err != nil { + t.Fatalf("Validating output: %v", err) + } + want := before.String() + + // Re-parse the output from the first round of transformations. + doc2, err := tomledit.Parse(&before) + if err != nil { + t.Fatalf("Parsing fixed output: %v", err) + } + if err := confix.ApplyFixes(ctx, doc2); err != nil { + t.Fatalf("ApplyFixes: unexpected error: %v", err) + } + + var after bytes.Buffer + if err := tomledit.Format(&after, doc2); err != nil { + t.Fatalf("Formatting document: %v", err) + } + got := after.String() + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("Reapplied fixes changed something: (-want, +got)\n%s", diff) + } + }) + }) +} diff --git a/scripts/confix/plan.go b/scripts/confix/plan.go new file mode 100644 index 0000000000..653bca9fd2 --- /dev/null +++ b/scripts/confix/plan.go @@ -0,0 +1,225 @@ +package main + +import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/creachadair/tomledit" + "github.com/creachadair/tomledit/parser" + "github.com/creachadair/tomledit/transform" +) + +// The plan is the sequence of transformation steps that should be applied, in +// the given order, to convert a configuration file to be compatible with the +// current version of the config grammar. +// +// Transformation steps are specific to the target config version. For this +// reason, you must exercise caution when backporting changes to this script +// into older releases. +var plan = transform.Plan{ + { + // Since https://github.com/tendermint/tendermint/pull/5777. + Desc: "Rename everything from snake_case to kebab-case", + T: transform.SnakeToKebab(), + }, + { + // [fastsync] renamed in https://github.com/tendermint/tendermint/pull/6896. + // [blocksync] removed in https://github.com/tendermint/tendermint/pull/7159. + Desc: "Remove [fastsync] and [blocksync] sections", + T: transform.Func(func(_ context.Context, doc *tomledit.Document) error { + doc.First("fast-sync").Remove() + transform.FindTable(doc, "fastsync").Remove() + transform.FindTable(doc, "blocksync").Remove() + return nil + }), + ErrorOK: true, + }, + { + // Since https://github.com/tendermint/tendermint/pull/6241. + Desc: `Add top-level mode setting (default "full")`, + T: transform.EnsureKey(nil, &parser.KeyValue{ + Block: parser.Comments{"Mode of Node: full | validator | seed"}, + Name: parser.Key{"mode"}, + Value: parser.MustValue(`"full"`), + }), + ErrorOK: true, + }, + { + // Since https://github.com/tendermint/tendermint/pull/7121. 
+ Desc: "Remove gRPC settings from the [rpc] section", + T: transform.Func(func(_ context.Context, doc *tomledit.Document) error { + doc.First("rpc", "grpc-laddr").Remove() + doc.First("rpc", "grpc-max-open-connections").Remove() + return nil + }), + }, + { + // Since https://github.com/tendermint/tendermint/pull/8217. + Desc: "Remove per-node consensus timeouts (converted to consensus parameters)", + T: transform.Remove( + parser.Key{"consensus", "skip-timeout-commit"}, + parser.Key{"consensus", "timeout-commit"}, + parser.Key{"consensus", "timeout-precommit"}, + parser.Key{"consensus", "timeout-precommit-delta"}, + parser.Key{"consensus", "timeout-prevote"}, + parser.Key{"consensus", "timeout-prevote-delta"}, + parser.Key{"consensus", "timeout-propose"}, + parser.Key{"consensus", "timeout-propose-delta"}, + ), + ErrorOK: true, + }, + { + // Removed wal-dir: https://github.com/tendermint/tendermint/pull/6396. + // Removed version: https://github.com/tendermint/tendermint/pull/7171. + Desc: "Remove vestigial mempool.wal-dir settings", + T: transform.Remove( + parser.Key{"mempool", "wal-dir"}, + parser.Key{"mempool", "version"}, + ), + ErrorOK: true, + }, + { + // Since https://github.com/tendermint/tendermint/pull/6323. + Desc: "Add new [p2p] queue-type setting", + T: transform.EnsureKey(parser.Key{"p2p"}, &parser.KeyValue{ + Block: parser.Comments{"Select the p2p internal queue"}, + Name: parser.Key{"queue-type"}, + Value: parser.MustValue(`"priority"`), + }), + ErrorOK: true, + }, + { + // Since https://github.com/tendermint/tendermint/pull/6353. + Desc: "Add [p2p] connection count and rate limit settings", + T: transform.Func(func(_ context.Context, doc *tomledit.Document) error { + tab := transform.FindTable(doc, "p2p") + if tab == nil { + return errors.New("p2p table not found") + } + transform.InsertMapping(tab.Section, &parser.KeyValue{ + Block: parser.Comments{"Maximum number of connections (inbound and outbound)."}, + Name: parser.Key{"max-connections"}, + Value: parser.MustValue("64"), + }, false) + transform.InsertMapping(tab.Section, &parser.KeyValue{ + Block: parser.Comments{ + "Rate limits the number of incoming connection attempts per IP address.", + }, + Name: parser.Key{"max-incoming-connection-attempts"}, + Value: parser.MustValue("100"), + }, false) + return nil + }), + }, + { + // Added "chunk-fetchers" https://github.com/tendermint/tendermint/pull/6566. + // This value was backported into v0.34.11 (modulo casing). + // Renamed to "fetchers" https://github.com/tendermint/tendermint/pull/6587. + Desc: "Rename statesync.chunk-fetchers to statesync.fetchers", + T: transform.Func(func(ctx context.Context, doc *tomledit.Document) error { + // If the key already exists, rename it preserving its value. + if found := doc.First("statesync", "chunk-fetchers"); found != nil { + found.KeyValue.Name = parser.Key{"fetchers"} + return nil + } + + // Otherwise, add it. + return transform.EnsureKey(parser.Key{"statesync"}, &parser.KeyValue{ + Block: parser.Comments{ + "The number of concurrent chunk and block fetchers to run (default: 4).", + }, + Name: parser.Key{"fetchers"}, + Value: parser.MustValue("4"), + })(ctx, doc) + }), + }, + { + // Since https://github.com/tendermint/tendermint/pull/6807. + // Backported into v0.34.13 (modulo casing). + Desc: "Add statesync.use-p2p setting", + T: transform.EnsureKey(parser.Key{"statesync"}, &parser.KeyValue{ + Block: parser.Comments{ + "# State sync uses light client verification to verify state. 
This can be done either through the", + "# P2P layer or RPC layer. Set this to true to use the P2P layer. If false (default), RPC layer", + "# will be used.", + }, + Name: parser.Key{"use-p2p"}, + Value: parser.MustValue("false"), + }), + }, + { + // Since https://github.com/tendermint/tendermint/pull/6462. + Desc: "Move priv-validator settings under [priv-validator]", + T: transform.Func(func(_ context.Context, doc *tomledit.Document) error { + const pvPrefix = "priv-validator-" + + var found []*tomledit.Entry + doc.Global.Scan(func(key parser.Key, e *tomledit.Entry) bool { + if len(key) == 1 && strings.HasPrefix(key[0], pvPrefix) { + found = append(found, e) + } + return true + }) + if len(found) == 0 { + return nil // nothing to do + } + + // Now that we know we have work to do, find the target table. + var sec *tomledit.Section + if dst := transform.FindTable(doc, "priv-validator"); dst == nil { + // If the table doesn't exist, create it. Old config files + // probably will not have it, so plug in the comment too. + sec = &tomledit.Section{ + Heading: &parser.Heading{ + Block: parser.Comments{ + "#######################################################", + "### Priv Validator Configuration ###", + "#######################################################", + }, + Name: parser.Key{"priv-validator"}, + }, + } + doc.Sections = append(doc.Sections, sec) + } else { + sec = dst.Section + } + + for _, e := range found { + e.Remove() + e.Name = parser.Key{strings.TrimPrefix(e.Name[0], pvPrefix)} + sec.Items = append(sec.Items, e.KeyValue) + } + return nil + }), + }, + { + // Since https://github.com/tendermint/tendermint/pull/6411. + Desc: "Convert tx-index.indexer from a string to a list of strings", + T: transform.Func(func(ctx context.Context, doc *tomledit.Document) error { + idx := doc.First("tx-index", "indexer") + if idx == nil { + // No previous indexer setting: Default to ["null"] per #8222. + return transform.EnsureKey(parser.Key{"tx-index"}, &parser.KeyValue{ + Block: parser.Comments{"The backend database list to back the indexer."}, + Name: parser.Key{"indexer"}, + Value: parser.MustValue(`["null"]`), + })(ctx, doc) + } + + // Versions prior to v0.35 had a string value here, v0.35 and onward + // use an array of strings. + switch idx.KeyValue.Value.X.(type) { + case parser.Array: + // OK, this is already up-to-date. + return nil + case parser.Token: + // Wrap the value in a single-element array. + idx.KeyValue.Value.X = parser.Array{idx.KeyValue.Value} + return nil + } + return fmt.Errorf("unrecognized value: %v", idx.KeyValue) + }), + }, +} diff --git a/scripts/confix/testdata/README.md b/scripts/confix/testdata/README.md new file mode 100644 index 0000000000..5bbfa795f3 --- /dev/null +++ b/scripts/confix/testdata/README.md @@ -0,0 +1,52 @@ +# Test data for `confix` and `condiff` + +The files in this directory are stock Tendermint configuration files generated +by the last point release of each version series from v0.26 to present, along +with diffs between consecutive versions. + +## Config Samples + +The files named `vXX-config.toml` were generated by checking out and building +the corresponding version of Tendermint v0.xx.y and initializing a new node in +an empty home directory. The resulting `config.toml` file was copied here. +The exact build instructions vary a bit, but a general repro looks like: + +```shell +# This example uses v0.31, substitute the version of your choice. +# Note that the branch names and tags may differ. +# Versions prior to v0.26 may not build. 
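+
+# Check out the release tag and clear any leftover build state.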
+git checkout v0.31.9 +git clean -fdx + +# Versions prior to v0.32 do not have Go module files. +# Those that do may need some dependencies manually updated. +go mod init github.com/tendermint/tendermint +go mod tidy +go get golang.org/x/sys + +# Once you sort out the dependencies, this should usually work. +make build + +# Confirm you got the version you expected, and generate the file. +./build/tendermint --home=tmhome version +./build/tendermint --home=tmhome init + +# Copy the file out. +cp ./tmhome/config/config.toml v31-config.toml +``` + +## Version Diffs + +The files named `diff-XX-YY.txt` were generated using the `condiff` tool on +the config samples for versions v0.XX and v0.YY: + +```shell +go run ./scripts/confix/condiff -desnake vXX-config.toml vYY-config.toml > diff-XX-YY.txt +``` + +The `baseline.txt` was computed in the same way, but using an empty starting +file so that we capture all the settings in the target: + +```shell +go run ./scripts/confix/condiff -desnake /dev/null v26-config.toml > baseline.txt +``` diff --git a/scripts/confix/testdata/baseline.txt b/scripts/confix/testdata/baseline.txt new file mode 100644 index 0000000000..4213437044 --- /dev/null +++ b/scripts/confix/testdata/baseline.txt @@ -0,0 +1,73 @@ ++M abci ++M db-backend ++M db-dir ++M fast-sync ++M filter-peers ++M genesis-file ++M log-format ++M log-level ++M moniker ++M node-key-file ++M priv-validator-file ++M priv-validator-laddr ++M prof-laddr ++M proxy-app ++S consensus ++M consensus.wal-file ++M consensus.timeout-propose ++M consensus.timeout-propose-delta ++M consensus.timeout-prevote ++M consensus.timeout-prevote-delta ++M consensus.timeout-precommit ++M consensus.timeout-precommit-delta ++M consensus.timeout-commit ++M consensus.skip-timeout-commit ++M consensus.create-empty-blocks ++M consensus.create-empty-blocks-interval ++M consensus.peer-gossip-sleep-duration ++M consensus.peer-query-maj23-sleep-duration ++M consensus.blocktime-iota ++S instrumentation ++M instrumentation.prometheus ++M instrumentation.prometheus-listen-addr ++M instrumentation.max-open-connections ++M instrumentation.namespace ++S mempool ++M mempool.recheck ++M mempool.broadcast ++M mempool.wal-dir ++M mempool.size ++M mempool.cache-size ++S p2p ++M p2p.laddr ++M p2p.external-address ++M p2p.seeds ++M p2p.persistent-peers ++M p2p.upnp ++M p2p.addr-book-file ++M p2p.addr-book-strict ++M p2p.max-num-inbound-peers ++M p2p.max-num-outbound-peers ++M p2p.flush-throttle-timeout ++M p2p.max-packet-msg-payload-size ++M p2p.send-rate ++M p2p.recv-rate ++M p2p.pex ++M p2p.seed-mode ++M p2p.private-peer-ids ++M p2p.allow-duplicate-ip ++M p2p.handshake-timeout ++M p2p.dial-timeout ++S rpc ++M rpc.laddr ++M rpc.cors-allowed-origins ++M rpc.cors-allowed-methods ++M rpc.cors-allowed-headers ++M rpc.grpc-laddr ++M rpc.grpc-max-open-connections ++M rpc.unsafe ++M rpc.max-open-connections ++S tx-index ++M tx-index.indexer ++M tx-index.index-tags ++M tx-index.index-all-tags diff --git a/test/fuzz/rpc/jsonrpc/server/testdata/cases/empty b/scripts/confix/testdata/diff-26-27.txt similarity index 100% rename from test/fuzz/rpc/jsonrpc/server/testdata/cases/empty rename to scripts/confix/testdata/diff-26-27.txt diff --git a/scripts/confix/testdata/diff-27-28.txt b/scripts/confix/testdata/diff-27-28.txt new file mode 100644 index 0000000000..c5c00449d0 --- /dev/null +++ b/scripts/confix/testdata/diff-27-28.txt @@ -0,0 +1,3 @@ +-M priv-validator-file ++M priv-validator-key-file ++M priv-validator-state-file diff --git 
a/test/fuzz/p2p/secretconnection/testdata/cases/empty b/scripts/confix/testdata/diff-28-29.txt similarity index 100% rename from test/fuzz/p2p/secretconnection/testdata/cases/empty rename to scripts/confix/testdata/diff-28-29.txt diff --git a/test/fuzz/p2p/pex/testdata/cases/empty b/scripts/confix/testdata/diff-29-30.txt similarity index 100% rename from test/fuzz/p2p/pex/testdata/cases/empty rename to scripts/confix/testdata/diff-29-30.txt diff --git a/scripts/confix/testdata/diff-30-31.txt b/scripts/confix/testdata/diff-30-31.txt new file mode 100644 index 0000000000..0f93b761ef --- /dev/null +++ b/scripts/confix/testdata/diff-30-31.txt @@ -0,0 +1,7 @@ +-M consensus.blocktime-iota ++M mempool.max-txs-bytes ++M rpc.max-subscription-clients ++M rpc.max-subscriptions-per-client ++M rpc.timeout-broadcast-tx-commit ++M rpc.tls-cert-file ++M rpc.tls-key-file diff --git a/scripts/confix/testdata/diff-31-32.txt b/scripts/confix/testdata/diff-31-32.txt new file mode 100644 index 0000000000..98855badef --- /dev/null +++ b/scripts/confix/testdata/diff-31-32.txt @@ -0,0 +1,5 @@ ++S fastsync ++M fastsync.version ++M mempool.max-tx-bytes ++M rpc.max-body-bytes ++M rpc.max-header-bytes diff --git a/scripts/confix/testdata/diff-32-33.txt b/scripts/confix/testdata/diff-32-33.txt new file mode 100644 index 0000000000..7aa61856a3 --- /dev/null +++ b/scripts/confix/testdata/diff-32-33.txt @@ -0,0 +1,6 @@ ++M p2p.persistent-peers-max-dial-period ++M p2p.unconditional-peer-ids ++M tx-index.index-all-keys +-M tx-index.index-all-tags ++M tx-index.index-keys +-M tx-index.index-tags diff --git a/scripts/confix/testdata/diff-33-34.txt b/scripts/confix/testdata/diff-33-34.txt new file mode 100644 index 0000000000..a0ac7a98da --- /dev/null +++ b/scripts/confix/testdata/diff-33-34.txt @@ -0,0 +1,20 @@ +-M prof-laddr ++M consensus.double-sign-check-height ++M mempool.keep-invalid-txs-in-cache ++M mempool.max-batch-bytes ++M rpc.experimental-close-on-slow-client ++M rpc.experimental-subscription-buffer-size ++M rpc.experimental-websocket-write-buffer-size ++M rpc.pprof-laddr ++S statesync ++M statesync.enable ++M statesync.rpc-servers ++M statesync.trust-height ++M statesync.trust-hash ++M statesync.trust-period ++M statesync.discovery-time ++M statesync.temp-dir ++M statesync.chunk-request-timeout ++M statesync.chunk-fetchers +-M tx-index.index-all-keys +-M tx-index.index-keys diff --git a/scripts/confix/testdata/diff-34-35.txt b/scripts/confix/testdata/diff-34-35.txt new file mode 100644 index 0000000000..13a4432a0e --- /dev/null +++ b/scripts/confix/testdata/diff-34-35.txt @@ -0,0 +1,31 @@ +-M fast-sync ++M mode +-M priv-validator-key-file +-M priv-validator-laddr +-M priv-validator-state-file ++S blocksync ++M blocksync.enable ++M blocksync.version +-S fastsync +-M fastsync.version ++M mempool.ttl-duration ++M mempool.ttl-num-blocks ++M mempool.version +-M mempool.wal-dir ++M p2p.bootstrap-peers ++M p2p.max-connections ++M p2p.max-incoming-connection-attempts ++M p2p.queue-type +-M p2p.seed-mode ++M p2p.use-legacy ++S priv-validator ++M priv-validator.key-file ++M priv-validator.state-file ++M priv-validator.laddr ++M priv-validator.client-certificate-file ++M priv-validator.client-key-file ++M priv-validator.root-ca-file +-M statesync.chunk-fetchers ++M statesync.fetchers ++M statesync.use-p2p ++M tx-index.psql-conn diff --git a/scripts/confix/testdata/diff-35-36.txt b/scripts/confix/testdata/diff-35-36.txt new file mode 100644 index 0000000000..13fd268af2 --- /dev/null +++ b/scripts/confix/testdata/diff-35-36.txt 
@@ -0,0 +1,27 @@ +-S blocksync +-M blocksync.enable +-M blocksync.version +-M consensus.skip-timeout-commit +-M consensus.timeout-commit +-M consensus.timeout-precommit +-M consensus.timeout-precommit-delta +-M consensus.timeout-prevote +-M consensus.timeout-prevote-delta +-M consensus.timeout-propose +-M consensus.timeout-propose-delta +-M mempool.version +-M p2p.addr-book-file +-M p2p.addr-book-strict +-M p2p.max-num-inbound-peers +-M p2p.max-num-outbound-peers +-M p2p.persistent-peers-max-dial-period +-M p2p.unconditional-peer-ids +-M p2p.use-legacy ++M rpc.event-log-max-items ++M rpc.event-log-window-size +-M rpc.experimental-close-on-slow-client ++M rpc.experimental-disable-websocket +-M rpc.experimental-subscription-buffer-size +-M rpc.experimental-websocket-write-buffer-size +-M rpc.grpc-laddr +-M rpc.grpc-max-open-connections diff --git a/scripts/confix/testdata/non-config.toml b/scripts/confix/testdata/non-config.toml new file mode 100644 index 0000000000..abfd486673 --- /dev/null +++ b/scripts/confix/testdata/non-config.toml @@ -0,0 +1,6 @@ +# This is not a Tendermint config file. + +[ test ] +key = 'value' + +# Nothing to see here, move along. diff --git a/scripts/confix/testdata/v26-config.toml b/scripts/confix/testdata/v26-config.toml new file mode 100644 index 0000000000..a9237056be --- /dev/null +++ b/scripts/confix/testdata/v26-config.toml @@ -0,0 +1,249 @@ +# This is a TOML config file. +# For more information, see https://github.com/toml-lang/toml + +##### main base config options ##### + +# TCP or UNIX socket address of the ABCI application, +# or the name of an ABCI application compiled in with the Tendermint binary +proxy_app = "tcp://127.0.0.1:26658" + +# A custom human readable name for this node +moniker = "localhost" + +# If this node is many blocks behind the tip of the chain, FastSync +# allows them to catchup quickly by downloading blocks in parallel +# and verifying their commits +fast_sync = true + +# Database backend: leveldb | memdb | cleveldb +db_backend = "leveldb" + +# Database directory +db_dir = "data" + +# Output level for logging, including package level options +log_level = "main:info,state:info,*:error" + +# Output format: 'plain' (colored text) or 'json' +log_format = "plain" + +##### additional base config options ##### + +# Path to the JSON file containing the initial validator set and other meta data +genesis_file = "config/genesis.json" + +# Path to the JSON file containing the private key to use as a validator in the consensus protocol +priv_validator_file = "config/priv_validator.json" + +# TCP or UNIX socket address for Tendermint to listen on for +# connections from an external PrivValidator process +priv_validator_laddr = "" + +# Path to the JSON file containing the private key to use for node authentication in the p2p protocol +node_key_file = "config/node_key.json" + +# Mechanism to connect to the ABCI application: socket | grpc +abci = "socket" + +# TCP or UNIX socket address for the profiling server to listen on +prof_laddr = "" + +# If true, query the ABCI app on connecting to a new peer +# so the app can decide if we should keep the connection or not +filter_peers = false + +##### advanced configuration options ##### + +##### rpc server configuration options ##### +[rpc] + +# TCP or UNIX socket address for the RPC server to listen on +laddr = "tcp://0.0.0.0:26657" + +# A list of origins a cross-domain request can be executed from +# Default value '[]' disables cors support +# Use '["*"]' to allow any origin +cors_allowed_origins = 
"[]" + +# A list of methods the client is allowed to use with cross-domain requests +cors_allowed_methods = "[HEAD GET POST]" + +# A list of non simple headers the client is allowed to use with cross-domain requests +cors_allowed_headers = "[Origin Accept Content-Type X-Requested-With X-Server-Time]" + +# TCP or UNIX socket address for the gRPC server to listen on +# NOTE: This server only supports /broadcast_tx_commit +grpc_laddr = "" + +# Maximum number of simultaneous connections. +# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections +# If you want to accept more significant number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} +# 1024 - 40 - 10 - 50 = 924 = ~900 +grpc_max_open_connections = 900 + +# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool +unsafe = false + +# Maximum number of simultaneous connections (including WebSocket). +# Does not include gRPC connections. See grpc_max_open_connections +# If you want to accept more significant number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} +# 1024 - 40 - 10 - 50 = 924 = ~900 +max_open_connections = 900 + +##### peer to peer configuration options ##### +[p2p] + +# Address to listen for incoming connections +laddr = "tcp://0.0.0.0:26656" + +# Address to advertise to peers for them to dial +# If empty, will use the same port as the laddr, +# and will introspect on the listener or use UPnP +# to figure out the address. +external_address = "" + +# Comma separated list of seed nodes to connect to +seeds = "" + +# Comma separated list of nodes to keep persistent connections to +persistent_peers = "" + +# UPNP port forwarding +upnp = false + +# Path to address book +addr_book_file = "config/addrbook.json" + +# Set true for strict address routability rules +# Set false for private or local networks +addr_book_strict = true + +# Maximum number of inbound peers +max_num_inbound_peers = 40 + +# Maximum number of outbound peers to connect to, excluding persistent peers +max_num_outbound_peers = 10 + +# Time to wait before flushing messages out on the connection +flush_throttle_timeout = "100ms" + +# Maximum size of a message packet payload, in bytes +max_packet_msg_payload_size = 1024 + +# Rate at which packets can be sent, in bytes/second +send_rate = 5120000 + +# Rate at which packets can be received, in bytes/second +recv_rate = 5120000 + +# Set true to enable the peer-exchange reactor +pex = true + +# Seed mode, in which node constantly crawls the network and looks for +# peers. If another node asks it for addresses, it responds and disconnects. +# +# Does not work if the peer-exchange reactor is disabled. +seed_mode = false + +# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) +private_peer_ids = "" + +# Toggle to disable guard against peers connecting from the same ip. +allow_duplicate_ip = true + +# Peer connection configuration. 
+handshake_timeout = "20s" +dial_timeout = "3s" + +##### mempool configuration options ##### +[mempool] + +recheck = true +broadcast = true +wal_dir = "" + +# size of the mempool +size = 5000 + +# size of the cache (used to filter transactions we saw earlier) +cache_size = 10000 + +##### consensus configuration options ##### +[consensus] + +wal_file = "data/cs.wal/wal" + +timeout_propose = "3s" +timeout_propose_delta = "500ms" +timeout_prevote = "1s" +timeout_prevote_delta = "500ms" +timeout_precommit = "1s" +timeout_precommit_delta = "500ms" +timeout_commit = "1s" + +# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) +skip_timeout_commit = false + +# EmptyBlocks mode and possible interval between empty blocks +create_empty_blocks = true +create_empty_blocks_interval = "0s" + +# Reactor sleep duration parameters +peer_gossip_sleep_duration = "100ms" +peer_query_maj23_sleep_duration = "2s" + +# Block time parameters. Corresponds to the minimum time increment between consecutive blocks. +blocktime_iota = "1s" + +##### transactions indexer configuration options ##### +[tx_index] + +# What indexer to use for transactions +# +# Options: +# 1) "null" (default) +# 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). +indexer = "kv" + +# Comma-separated list of tags to index (by default the only tag is "tx.hash") +# +# You can also index transactions by height by adding "tx.height" tag here. +# +# It's recommended to index only a subset of tags due to possible memory +# bloat. This is, of course, depends on the indexer's DB and the volume of +# transactions. +index_tags = "" + +# When set to true, tells indexer to index all tags (predefined tags: +# "tx.hash", "tx.height" and all tags from DeliverTx responses). +# +# Note this may be not desirable (see the comment above). IndexTags has a +# precedence over IndexAllTags (i.e. when given both, IndexTags will be +# indexed). +index_all_tags = false + +##### instrumentation configuration options ##### +[instrumentation] + +# When true, Prometheus metrics are served under /metrics on +# PrometheusListenAddr. +# Check out the documentation for the list of available metrics. +prometheus = false + +# Address to listen for Prometheus collector(s) connections +prometheus_listen_addr = ":26660" + +# Maximum number of simultaneous connections. +# If you want to accept more significant number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +max_open_connections = 3 + +# Instrumentation namespace +namespace = "tendermint" diff --git a/scripts/confix/testdata/v27-config.toml b/scripts/confix/testdata/v27-config.toml new file mode 100644 index 0000000000..25e3b582f8 --- /dev/null +++ b/scripts/confix/testdata/v27-config.toml @@ -0,0 +1,249 @@ +# This is a TOML config file. 
+# For more information, see https://github.com/toml-lang/toml + +##### main base config options ##### + +# TCP or UNIX socket address of the ABCI application, +# or the name of an ABCI application compiled in with the Tendermint binary +proxy_app = "tcp://127.0.0.1:26658" + +# A custom human readable name for this node +moniker = "localhost" + +# If this node is many blocks behind the tip of the chain, FastSync +# allows them to catchup quickly by downloading blocks in parallel +# and verifying their commits +fast_sync = true + +# Database backend: leveldb | memdb | cleveldb +db_backend = "leveldb" + +# Database directory +db_dir = "data" + +# Output level for logging, including package level options +log_level = "main:info,state:info,*:error" + +# Output format: 'plain' (colored text) or 'json' +log_format = "plain" + +##### additional base config options ##### + +# Path to the JSON file containing the initial validator set and other meta data +genesis_file = "config/genesis.json" + +# Path to the JSON file containing the private key to use as a validator in the consensus protocol +priv_validator_file = "config/priv_validator.json" + +# TCP or UNIX socket address for Tendermint to listen on for +# connections from an external PrivValidator process +priv_validator_laddr = "" + +# Path to the JSON file containing the private key to use for node authentication in the p2p protocol +node_key_file = "config/node_key.json" + +# Mechanism to connect to the ABCI application: socket | grpc +abci = "socket" + +# TCP or UNIX socket address for the profiling server to listen on +prof_laddr = "" + +# If true, query the ABCI app on connecting to a new peer +# so the app can decide if we should keep the connection or not +filter_peers = false + +##### advanced configuration options ##### + +##### rpc server configuration options ##### +[rpc] + +# TCP or UNIX socket address for the RPC server to listen on +laddr = "tcp://0.0.0.0:26657" + +# A list of origins a cross-domain request can be executed from +# Default value '[]' disables cors support +# Use '["*"]' to allow any origin +cors_allowed_origins = [] + +# A list of methods the client is allowed to use with cross-domain requests +cors_allowed_methods = ["HEAD", "GET", "POST", ] + +# A list of non simple headers the client is allowed to use with cross-domain requests +cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] + +# TCP or UNIX socket address for the gRPC server to listen on +# NOTE: This server only supports /broadcast_tx_commit +grpc_laddr = "" + +# Maximum number of simultaneous connections. +# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} +# 1024 - 40 - 10 - 50 = 924 = ~900 +grpc_max_open_connections = 900 + +# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool +unsafe = false + +# Maximum number of simultaneous connections (including WebSocket). +# Does not include gRPC connections. See grpc_max_open_connections +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. 
+# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} +# 1024 - 40 - 10 - 50 = 924 = ~900 +max_open_connections = 900 + +##### peer to peer configuration options ##### +[p2p] + +# Address to listen for incoming connections +laddr = "tcp://0.0.0.0:26656" + +# Address to advertise to peers for them to dial +# If empty, will use the same port as the laddr, +# and will introspect on the listener or use UPnP +# to figure out the address. +external_address = "" + +# Comma separated list of seed nodes to connect to +seeds = "" + +# Comma separated list of nodes to keep persistent connections to +persistent_peers = "" + +# UPNP port forwarding +upnp = false + +# Path to address book +addr_book_file = "config/addrbook.json" + +# Set true for strict address routability rules +# Set false for private or local networks +addr_book_strict = true + +# Maximum number of inbound peers +max_num_inbound_peers = 40 + +# Maximum number of outbound peers to connect to, excluding persistent peers +max_num_outbound_peers = 10 + +# Time to wait before flushing messages out on the connection +flush_throttle_timeout = "100ms" + +# Maximum size of a message packet payload, in bytes +max_packet_msg_payload_size = 1024 + +# Rate at which packets can be sent, in bytes/second +send_rate = 5120000 + +# Rate at which packets can be received, in bytes/second +recv_rate = 5120000 + +# Set true to enable the peer-exchange reactor +pex = true + +# Seed mode, in which node constantly crawls the network and looks for +# peers. If another node asks it for addresses, it responds and disconnects. +# +# Does not work if the peer-exchange reactor is disabled. +seed_mode = false + +# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) +private_peer_ids = "" + +# Toggle to disable guard against peers connecting from the same ip. +allow_duplicate_ip = true + +# Peer connection configuration. +handshake_timeout = "20s" +dial_timeout = "3s" + +##### mempool configuration options ##### +[mempool] + +recheck = true +broadcast = true +wal_dir = "" + +# size of the mempool +size = 5000 + +# size of the cache (used to filter transactions we saw earlier) +cache_size = 10000 + +##### consensus configuration options ##### +[consensus] + +wal_file = "data/cs.wal/wal" + +timeout_propose = "3s" +timeout_propose_delta = "500ms" +timeout_prevote = "1s" +timeout_prevote_delta = "500ms" +timeout_precommit = "1s" +timeout_precommit_delta = "500ms" +timeout_commit = "1s" + +# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) +skip_timeout_commit = false + +# EmptyBlocks mode and possible interval between empty blocks +create_empty_blocks = true +create_empty_blocks_interval = "0s" + +# Reactor sleep duration parameters +peer_gossip_sleep_duration = "100ms" +peer_query_maj23_sleep_duration = "2s" + +# Block time parameters. Corresponds to the minimum time increment between consecutive blocks. +blocktime_iota = "1s" + +##### transactions indexer configuration options ##### +[tx_index] + +# What indexer to use for transactions +# +# Options: +# 1) "null" +# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). +indexer = "kv" + +# Comma-separated list of tags to index (by default the only tag is "tx.hash") +# +# You can also index transactions by height by adding "tx.height" tag here. +# +# It's recommended to index only a subset of tags due to possible memory +# bloat. 
This is, of course, depends on the indexer's DB and the volume of +# transactions. +index_tags = "" + +# When set to true, tells indexer to index all tags (predefined tags: +# "tx.hash", "tx.height" and all tags from DeliverTx responses). +# +# Note this may be not desirable (see the comment above). IndexTags has a +# precedence over IndexAllTags (i.e. when given both, IndexTags will be +# indexed). +index_all_tags = false + +##### instrumentation configuration options ##### +[instrumentation] + +# When true, Prometheus metrics are served under /metrics on +# PrometheusListenAddr. +# Check out the documentation for the list of available metrics. +prometheus = false + +# Address to listen for Prometheus collector(s) connections +prometheus_listen_addr = ":26660" + +# Maximum number of simultaneous connections. +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +max_open_connections = 3 + +# Instrumentation namespace +namespace = "tendermint" diff --git a/scripts/confix/testdata/v28-config.toml b/scripts/confix/testdata/v28-config.toml new file mode 100644 index 0000000000..b4aaa5aaee --- /dev/null +++ b/scripts/confix/testdata/v28-config.toml @@ -0,0 +1,252 @@ +# This is a TOML config file. +# For more information, see https://github.com/toml-lang/toml + +##### main base config options ##### + +# TCP or UNIX socket address of the ABCI application, +# or the name of an ABCI application compiled in with the Tendermint binary +proxy_app = "tcp://127.0.0.1:26658" + +# A custom human readable name for this node +moniker = "localhost" + +# If this node is many blocks behind the tip of the chain, FastSync +# allows them to catchup quickly by downloading blocks in parallel +# and verifying their commits +fast_sync = true + +# Database backend: leveldb | memdb | cleveldb +db_backend = "leveldb" + +# Database directory +db_dir = "data" + +# Output level for logging, including package level options +log_level = "main:info,state:info,*:error" + +# Output format: 'plain' (colored text) or 'json' +log_format = "plain" + +##### additional base config options ##### + +# Path to the JSON file containing the initial validator set and other meta data +genesis_file = "config/genesis.json" + +# Path to the JSON file containing the private key to use as a validator in the consensus protocol +priv_validator_key_file = "config/priv_validator_key.json" + +# Path to the JSON file containing the last sign state of a validator +priv_validator_state_file = "data/priv_validator_state.json" + +# TCP or UNIX socket address for Tendermint to listen on for +# connections from an external PrivValidator process +priv_validator_laddr = "" + +# Path to the JSON file containing the private key to use for node authentication in the p2p protocol +node_key_file = "config/node_key.json" + +# Mechanism to connect to the ABCI application: socket | grpc +abci = "socket" + +# TCP or UNIX socket address for the profiling server to listen on +prof_laddr = "" + +# If true, query the ABCI app on connecting to a new peer +# so the app can decide if we should keep the connection or not +filter_peers = false + +##### advanced configuration options ##### + +##### rpc server configuration options ##### +[rpc] + +# TCP or UNIX socket address for the RPC server to listen on +laddr = "tcp://0.0.0.0:26657" + +# A list of origins a cross-domain request can be executed from +# Default value '[]' disables cors support +# Use '["*"]' to allow any origin +cors_allowed_origins = [] + +# A 
list of methods the client is allowed to use with cross-domain requests +cors_allowed_methods = ["HEAD", "GET", "POST", ] + +# A list of non simple headers the client is allowed to use with cross-domain requests +cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] + +# TCP or UNIX socket address for the gRPC server to listen on +# NOTE: This server only supports /broadcast_tx_commit +grpc_laddr = "" + +# Maximum number of simultaneous connections. +# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} +# 1024 - 40 - 10 - 50 = 924 = ~900 +grpc_max_open_connections = 900 + +# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool +unsafe = false + +# Maximum number of simultaneous connections (including WebSocket). +# Does not include gRPC connections. See grpc_max_open_connections +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} +# 1024 - 40 - 10 - 50 = 924 = ~900 +max_open_connections = 900 + +##### peer to peer configuration options ##### +[p2p] + +# Address to listen for incoming connections +laddr = "tcp://0.0.0.0:26656" + +# Address to advertise to peers for them to dial +# If empty, will use the same port as the laddr, +# and will introspect on the listener or use UPnP +# to figure out the address. +external_address = "" + +# Comma separated list of seed nodes to connect to +seeds = "" + +# Comma separated list of nodes to keep persistent connections to +persistent_peers = "" + +# UPNP port forwarding +upnp = false + +# Path to address book +addr_book_file = "config/addrbook.json" + +# Set true for strict address routability rules +# Set false for private or local networks +addr_book_strict = true + +# Maximum number of inbound peers +max_num_inbound_peers = 40 + +# Maximum number of outbound peers to connect to, excluding persistent peers +max_num_outbound_peers = 10 + +# Time to wait before flushing messages out on the connection +flush_throttle_timeout = "100ms" + +# Maximum size of a message packet payload, in bytes +max_packet_msg_payload_size = 1024 + +# Rate at which packets can be sent, in bytes/second +send_rate = 5120000 + +# Rate at which packets can be received, in bytes/second +recv_rate = 5120000 + +# Set true to enable the peer-exchange reactor +pex = true + +# Seed mode, in which node constantly crawls the network and looks for +# peers. If another node asks it for addresses, it responds and disconnects. +# +# Does not work if the peer-exchange reactor is disabled. +seed_mode = false + +# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) +private_peer_ids = "" + +# Toggle to disable guard against peers connecting from the same ip. +allow_duplicate_ip = false + +# Peer connection configuration. 
+handshake_timeout = "20s" +dial_timeout = "3s" + +##### mempool configuration options ##### +[mempool] + +recheck = true +broadcast = true +wal_dir = "" + +# size of the mempool +size = 5000 + +# size of the cache (used to filter transactions we saw earlier) +cache_size = 10000 + +##### consensus configuration options ##### +[consensus] + +wal_file = "data/cs.wal/wal" + +timeout_propose = "3s" +timeout_propose_delta = "500ms" +timeout_prevote = "1s" +timeout_prevote_delta = "500ms" +timeout_precommit = "1s" +timeout_precommit_delta = "500ms" +timeout_commit = "1s" + +# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) +skip_timeout_commit = false + +# EmptyBlocks mode and possible interval between empty blocks +create_empty_blocks = true +create_empty_blocks_interval = "0s" + +# Reactor sleep duration parameters +peer_gossip_sleep_duration = "100ms" +peer_query_maj23_sleep_duration = "2s" + +# Block time parameters. Corresponds to the minimum time increment between consecutive blocks. +blocktime_iota = "1s" + +##### transactions indexer configuration options ##### +[tx_index] + +# What indexer to use for transactions +# +# Options: +# 1) "null" +# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). +indexer = "kv" + +# Comma-separated list of tags to index (by default the only tag is "tx.hash") +# +# You can also index transactions by height by adding "tx.height" tag here. +# +# It's recommended to index only a subset of tags due to possible memory +# bloat. This is, of course, depends on the indexer's DB and the volume of +# transactions. +index_tags = "" + +# When set to true, tells indexer to index all tags (predefined tags: +# "tx.hash", "tx.height" and all tags from DeliverTx responses). +# +# Note this may be not desirable (see the comment above). IndexTags has a +# precedence over IndexAllTags (i.e. when given both, IndexTags will be +# indexed). +index_all_tags = false + +##### instrumentation configuration options ##### +[instrumentation] + +# When true, Prometheus metrics are served under /metrics on +# PrometheusListenAddr. +# Check out the documentation for the list of available metrics. +prometheus = false + +# Address to listen for Prometheus collector(s) connections +prometheus_listen_addr = ":26660" + +# Maximum number of simultaneous connections. +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +max_open_connections = 3 + +# Instrumentation namespace +namespace = "tendermint" diff --git a/scripts/confix/testdata/v29-config.toml b/scripts/confix/testdata/v29-config.toml new file mode 100644 index 0000000000..b4aaa5aaee --- /dev/null +++ b/scripts/confix/testdata/v29-config.toml @@ -0,0 +1,252 @@ +# This is a TOML config file. 
+# For more information, see https://github.com/toml-lang/toml + +##### main base config options ##### + +# TCP or UNIX socket address of the ABCI application, +# or the name of an ABCI application compiled in with the Tendermint binary +proxy_app = "tcp://127.0.0.1:26658" + +# A custom human readable name for this node +moniker = "localhost" + +# If this node is many blocks behind the tip of the chain, FastSync +# allows them to catchup quickly by downloading blocks in parallel +# and verifying their commits +fast_sync = true + +# Database backend: leveldb | memdb | cleveldb +db_backend = "leveldb" + +# Database directory +db_dir = "data" + +# Output level for logging, including package level options +log_level = "main:info,state:info,*:error" + +# Output format: 'plain' (colored text) or 'json' +log_format = "plain" + +##### additional base config options ##### + +# Path to the JSON file containing the initial validator set and other meta data +genesis_file = "config/genesis.json" + +# Path to the JSON file containing the private key to use as a validator in the consensus protocol +priv_validator_key_file = "config/priv_validator_key.json" + +# Path to the JSON file containing the last sign state of a validator +priv_validator_state_file = "data/priv_validator_state.json" + +# TCP or UNIX socket address for Tendermint to listen on for +# connections from an external PrivValidator process +priv_validator_laddr = "" + +# Path to the JSON file containing the private key to use for node authentication in the p2p protocol +node_key_file = "config/node_key.json" + +# Mechanism to connect to the ABCI application: socket | grpc +abci = "socket" + +# TCP or UNIX socket address for the profiling server to listen on +prof_laddr = "" + +# If true, query the ABCI app on connecting to a new peer +# so the app can decide if we should keep the connection or not +filter_peers = false + +##### advanced configuration options ##### + +##### rpc server configuration options ##### +[rpc] + +# TCP or UNIX socket address for the RPC server to listen on +laddr = "tcp://0.0.0.0:26657" + +# A list of origins a cross-domain request can be executed from +# Default value '[]' disables cors support +# Use '["*"]' to allow any origin +cors_allowed_origins = [] + +# A list of methods the client is allowed to use with cross-domain requests +cors_allowed_methods = ["HEAD", "GET", "POST", ] + +# A list of non simple headers the client is allowed to use with cross-domain requests +cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] + +# TCP or UNIX socket address for the gRPC server to listen on +# NOTE: This server only supports /broadcast_tx_commit +grpc_laddr = "" + +# Maximum number of simultaneous connections. +# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} +# 1024 - 40 - 10 - 50 = 924 = ~900 +grpc_max_open_connections = 900 + +# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool +unsafe = false + +# Maximum number of simultaneous connections (including WebSocket). +# Does not include gRPC connections. See grpc_max_open_connections +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. 
+# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} +# 1024 - 40 - 10 - 50 = 924 = ~900 +max_open_connections = 900 + +##### peer to peer configuration options ##### +[p2p] + +# Address to listen for incoming connections +laddr = "tcp://0.0.0.0:26656" + +# Address to advertise to peers for them to dial +# If empty, will use the same port as the laddr, +# and will introspect on the listener or use UPnP +# to figure out the address. +external_address = "" + +# Comma separated list of seed nodes to connect to +seeds = "" + +# Comma separated list of nodes to keep persistent connections to +persistent_peers = "" + +# UPNP port forwarding +upnp = false + +# Path to address book +addr_book_file = "config/addrbook.json" + +# Set true for strict address routability rules +# Set false for private or local networks +addr_book_strict = true + +# Maximum number of inbound peers +max_num_inbound_peers = 40 + +# Maximum number of outbound peers to connect to, excluding persistent peers +max_num_outbound_peers = 10 + +# Time to wait before flushing messages out on the connection +flush_throttle_timeout = "100ms" + +# Maximum size of a message packet payload, in bytes +max_packet_msg_payload_size = 1024 + +# Rate at which packets can be sent, in bytes/second +send_rate = 5120000 + +# Rate at which packets can be received, in bytes/second +recv_rate = 5120000 + +# Set true to enable the peer-exchange reactor +pex = true + +# Seed mode, in which node constantly crawls the network and looks for +# peers. If another node asks it for addresses, it responds and disconnects. +# +# Does not work if the peer-exchange reactor is disabled. +seed_mode = false + +# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) +private_peer_ids = "" + +# Toggle to disable guard against peers connecting from the same ip. +allow_duplicate_ip = false + +# Peer connection configuration. +handshake_timeout = "20s" +dial_timeout = "3s" + +##### mempool configuration options ##### +[mempool] + +recheck = true +broadcast = true +wal_dir = "" + +# size of the mempool +size = 5000 + +# size of the cache (used to filter transactions we saw earlier) +cache_size = 10000 + +##### consensus configuration options ##### +[consensus] + +wal_file = "data/cs.wal/wal" + +timeout_propose = "3s" +timeout_propose_delta = "500ms" +timeout_prevote = "1s" +timeout_prevote_delta = "500ms" +timeout_precommit = "1s" +timeout_precommit_delta = "500ms" +timeout_commit = "1s" + +# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) +skip_timeout_commit = false + +# EmptyBlocks mode and possible interval between empty blocks +create_empty_blocks = true +create_empty_blocks_interval = "0s" + +# Reactor sleep duration parameters +peer_gossip_sleep_duration = "100ms" +peer_query_maj23_sleep_duration = "2s" + +# Block time parameters. Corresponds to the minimum time increment between consecutive blocks. +blocktime_iota = "1s" + +##### transactions indexer configuration options ##### +[tx_index] + +# What indexer to use for transactions +# +# Options: +# 1) "null" +# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). +indexer = "kv" + +# Comma-separated list of tags to index (by default the only tag is "tx.hash") +# +# You can also index transactions by height by adding "tx.height" tag here. +# +# It's recommended to index only a subset of tags due to possible memory +# bloat. 
This, of course, depends on the indexer's DB and the volume of
+# transactions.
+index_tags = ""
+
+# When set to true, tells indexer to index all tags (predefined tags:
+# "tx.hash", "tx.height" and all tags from DeliverTx responses).
+#
+# Note this may not be desirable (see the comment above). IndexTags takes
+# precedence over IndexAllTags (i.e. when given both, IndexTags will be
+# indexed).
+index_all_tags = false
+
+##### instrumentation configuration options #####
+[instrumentation]
+
+# When true, Prometheus metrics are served under /metrics on
+# PrometheusListenAddr.
+# Check out the documentation for the list of available metrics.
+prometheus = false
+
+# Address to listen for Prometheus collector(s) connections
+prometheus_listen_addr = ":26660"
+
+# Maximum number of simultaneous connections.
+# If you want to accept a larger number than the default, make sure
+# you increase your OS limits.
+# 0 - unlimited.
+max_open_connections = 3
+
+# Instrumentation namespace
+namespace = "tendermint"
diff --git a/scripts/confix/testdata/v30-config.toml b/scripts/confix/testdata/v30-config.toml
new file mode 100644
index 0000000000..b4aaa5aaee
--- /dev/null
+++ b/scripts/confix/testdata/v30-config.toml
@@ -0,0 +1,252 @@
+# This is a TOML config file.
+# For more information, see https://github.com/toml-lang/toml
+
+##### main base config options #####
+
+# TCP or UNIX socket address of the ABCI application,
+# or the name of an ABCI application compiled in with the Tendermint binary
+proxy_app = "tcp://127.0.0.1:26658"
+
+# A custom human readable name for this node
+moniker = "localhost"
+
+# If this node is many blocks behind the tip of the chain, FastSync
+# allows them to catch up quickly by downloading blocks in parallel
+# and verifying their commits
+fast_sync = true
+
+# Database backend: leveldb | memdb | cleveldb
+db_backend = "leveldb"
+
+# Database directory
+db_dir = "data"
+
+# Output level for logging, including package level options
+log_level = "main:info,state:info,*:error"
+
+# Output format: 'plain' (colored text) or 'json'
+log_format = "plain"
+
+##### additional base config options #####
+
+# Path to the JSON file containing the initial validator set and other meta data
+genesis_file = "config/genesis.json"
+
+# Path to the JSON file containing the private key to use as a validator in the consensus protocol
+priv_validator_key_file = "config/priv_validator_key.json"
+
+# Path to the JSON file containing the last sign state of a validator
+priv_validator_state_file = "data/priv_validator_state.json"
+
+# TCP or UNIX socket address for Tendermint to listen on for
+# connections from an external PrivValidator process
+priv_validator_laddr = ""
+
+# Path to the JSON file containing the private key to use for node authentication in the p2p protocol
+node_key_file = "config/node_key.json"
+
+# Mechanism to connect to the ABCI application: socket | grpc
+abci = "socket"
+
+# TCP or UNIX socket address for the profiling server to listen on
+prof_laddr = ""
+
+# If true, query the ABCI app on connecting to a new peer
+# so the app can decide if we should keep the connection or not
+filter_peers = false
+
+##### advanced configuration options #####
+
+##### rpc server configuration options #####
+[rpc]
+
+# TCP or UNIX socket address for the RPC server to listen on
+laddr = "tcp://0.0.0.0:26657"
+
+# A list of origins a cross-domain request can be executed from
+# Default value '[]' disables cors support
+# Use '["*"]' to allow any origin
+cors_allowed_origins = []
+
+# A 
list of methods the client is allowed to use with cross-domain requests +cors_allowed_methods = ["HEAD", "GET", "POST", ] + +# A list of non simple headers the client is allowed to use with cross-domain requests +cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ] + +# TCP or UNIX socket address for the gRPC server to listen on +# NOTE: This server only supports /broadcast_tx_commit +grpc_laddr = "" + +# Maximum number of simultaneous connections. +# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} +# 1024 - 40 - 10 - 50 = 924 = ~900 +grpc_max_open_connections = 900 + +# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool +unsafe = false + +# Maximum number of simultaneous connections (including WebSocket). +# Does not include gRPC connections. See grpc_max_open_connections +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} +# 1024 - 40 - 10 - 50 = 924 = ~900 +max_open_connections = 900 + +##### peer to peer configuration options ##### +[p2p] + +# Address to listen for incoming connections +laddr = "tcp://0.0.0.0:26656" + +# Address to advertise to peers for them to dial +# If empty, will use the same port as the laddr, +# and will introspect on the listener or use UPnP +# to figure out the address. +external_address = "" + +# Comma separated list of seed nodes to connect to +seeds = "" + +# Comma separated list of nodes to keep persistent connections to +persistent_peers = "" + +# UPNP port forwarding +upnp = false + +# Path to address book +addr_book_file = "config/addrbook.json" + +# Set true for strict address routability rules +# Set false for private or local networks +addr_book_strict = true + +# Maximum number of inbound peers +max_num_inbound_peers = 40 + +# Maximum number of outbound peers to connect to, excluding persistent peers +max_num_outbound_peers = 10 + +# Time to wait before flushing messages out on the connection +flush_throttle_timeout = "100ms" + +# Maximum size of a message packet payload, in bytes +max_packet_msg_payload_size = 1024 + +# Rate at which packets can be sent, in bytes/second +send_rate = 5120000 + +# Rate at which packets can be received, in bytes/second +recv_rate = 5120000 + +# Set true to enable the peer-exchange reactor +pex = true + +# Seed mode, in which node constantly crawls the network and looks for +# peers. If another node asks it for addresses, it responds and disconnects. +# +# Does not work if the peer-exchange reactor is disabled. +seed_mode = false + +# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) +private_peer_ids = "" + +# Toggle to disable guard against peers connecting from the same ip. +allow_duplicate_ip = false + +# Peer connection configuration. 
+handshake_timeout = "20s"
+dial_timeout = "3s"
+
+##### mempool configuration options #####
+[mempool]
+
+recheck = true
+broadcast = true
+wal_dir = ""
+
+# size of the mempool
+size = 5000
+
+# size of the cache (used to filter transactions we saw earlier)
+cache_size = 10000
+
+##### consensus configuration options #####
+[consensus]
+
+wal_file = "data/cs.wal/wal"
+
+timeout_propose = "3s"
+timeout_propose_delta = "500ms"
+timeout_prevote = "1s"
+timeout_prevote_delta = "500ms"
+timeout_precommit = "1s"
+timeout_precommit_delta = "500ms"
+timeout_commit = "1s"
+
+# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
+skip_timeout_commit = false
+
+# EmptyBlocks mode and possible interval between empty blocks
+create_empty_blocks = true
+create_empty_blocks_interval = "0s"
+
+# Reactor sleep duration parameters
+peer_gossip_sleep_duration = "100ms"
+peer_query_maj23_sleep_duration = "2s"
+
+# Block time parameters. Corresponds to the minimum time increment between consecutive blocks.
+blocktime_iota = "1s"
+
+##### transactions indexer configuration options #####
+[tx_index]
+
+# What indexer to use for transactions
+#
+# Options:
+# 1) "null"
+# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
+indexer = "kv"
+
+# Comma-separated list of tags to index (by default the only tag is "tx.hash")
+#
+# You can also index transactions by height by adding "tx.height" tag here.
+#
+# It's recommended to index only a subset of tags due to possible memory
+# bloat. This, of course, depends on the indexer's DB and the volume of
+# transactions.
+index_tags = ""
+
+# When set to true, tells indexer to index all tags (predefined tags:
+# "tx.hash", "tx.height" and all tags from DeliverTx responses).
+#
+# Note this may not be desirable (see the comment above). IndexTags takes
+# precedence over IndexAllTags (i.e. when given both, IndexTags will be
+# indexed).
+index_all_tags = false
+
+##### instrumentation configuration options #####
+[instrumentation]
+
+# When true, Prometheus metrics are served under /metrics on
+# PrometheusListenAddr.
+# Check out the documentation for the list of available metrics.
+prometheus = false
+
+# Address to listen for Prometheus collector(s) connections
+prometheus_listen_addr = ":26660"
+
+# Maximum number of simultaneous connections.
+# If you want to accept a larger number than the default, make sure
+# you increase your OS limits.
+# 0 - unlimited.
+max_open_connections = 3
+
+# Instrumentation namespace
+namespace = "tendermint"
diff --git a/scripts/confix/testdata/v31-config.toml b/scripts/confix/testdata/v31-config.toml
new file mode 100644
index 0000000000..247d2da2e6
--- /dev/null
+++ b/scripts/confix/testdata/v31-config.toml
@@ -0,0 +1,292 @@
+# This is a TOML config file.
+# For more information, see https://github.com/toml-lang/toml
+
+##### main base config options #####
+
+# TCP or UNIX socket address of the ABCI application,
+# or the name of an ABCI application compiled in with the Tendermint binary
+proxy_app = "tcp://127.0.0.1:26658"
+
+# A custom human readable name for this node
+moniker = "localhost"
+
+# If this node is many blocks behind the tip of the chain, FastSync
+# allows them to catch up quickly by downloading blocks in parallel
+# and verifying their commits
+fast_sync = true
+
+# Database backend: goleveldb | cleveldb | boltdb
+# * goleveldb (github.com/syndtr/goleveldb - most popular implementation)
+# - pure go
+# - stable
+# * cleveldb (uses levigo wrapper)
+# - fast
+# - requires gcc
+# - use cleveldb build tag (go build -tags cleveldb)
+# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt)
+# - EXPERIMENTAL
+# - may be faster in some use-cases (random reads - indexer)
+# - use boltdb build tag (go build -tags boltdb)
+db_backend = "goleveldb"
+
+# Database directory
+db_dir = "data"
+
+# Output level for logging, including package level options
+log_level = "main:info,state:info,*:error"
+
+# Output format: 'plain' (colored text) or 'json'
+log_format = "plain"
+
+##### additional base config options #####
+
+# Path to the JSON file containing the initial validator set and other meta data
+genesis_file = "config/genesis.json"
+
+# Path to the JSON file containing the private key to use as a validator in the consensus protocol
+priv_validator_key_file = "config/priv_validator_key.json"
+
+# Path to the JSON file containing the last sign state of a validator
+priv_validator_state_file = "data/priv_validator_state.json"
+
+# TCP or UNIX socket address for Tendermint to listen on for
+# connections from an external PrivValidator process
+priv_validator_laddr = ""
+
+# Path to the JSON file containing the private key to use for node authentication in the p2p protocol
+node_key_file = "config/node_key.json"
+
+# Mechanism to connect to the ABCI application: socket | grpc
+abci = "socket"
+
+# TCP or UNIX socket address for the profiling server to listen on
+prof_laddr = ""
+
+# If true, query the ABCI app on connecting to a new peer
+# so the app can decide if we should keep the connection or not
+filter_peers = false
+
+##### advanced configuration options #####
+
+##### rpc server configuration options #####
+[rpc]
+
+# TCP or UNIX socket address for the RPC server to listen on
+laddr = "tcp://0.0.0.0:26657"
+
+# A list of origins a cross-domain request can be executed from
+# Default value '[]' disables cors support
+# Use '["*"]' to allow any origin
+cors_allowed_origins = []
+
+# A list of methods the client is allowed to use with cross-domain requests
+cors_allowed_methods = ["HEAD", "GET", "POST", ]
+
+# A list of non simple headers the client is allowed to use with cross-domain requests
+cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ]
+
+# TCP or UNIX socket address for the gRPC server to listen on
+# NOTE: This server only supports /broadcast_tx_commit
+grpc_laddr = ""
+
+# Maximum number of simultaneous connections.
+# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections
+# If you want to accept a larger number than the default, make sure
+# you increase your OS limits.
+# 0 - unlimited.
+# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
+# 1024 - 40 - 10 - 50 = 924 = ~900
+grpc_max_open_connections = 900
+
+# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
+unsafe = false
+
+# Maximum number of simultaneous connections (including WebSocket).
+# Does not include gRPC connections. See grpc_max_open_connections
+# If you want to accept a larger number than the default, make sure
+# you increase your OS limits.
+# 0 - unlimited.
+# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
+# 1024 - 40 - 10 - 50 = 924 = ~900
+max_open_connections = 900
+
+# Maximum number of unique clientIDs that can /subscribe
+# If you're using /broadcast_tx_commit, set to the estimated maximum number
+# of broadcast_tx_commit calls per block.
+max_subscription_clients = 100
+
+# Maximum number of unique queries a given client can /subscribe to
+# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to
+# the estimated maximum number of broadcast_tx_commit calls per block.
+max_subscriptions_per_client = 5
+
+# How long to wait for a tx to be committed during /broadcast_tx_commit.
+# WARNING: Using a value larger than 10s will result in increasing the
+# global HTTP write timeout, which applies to all connections and endpoints.
+# See https://github.com/tendermint/tendermint/issues/3435
+timeout_broadcast_tx_commit = "10s"
+
+# The name of a file containing the certificate that is used to create the HTTPS server.
+# If the certificate is signed by a certificate authority,
+# the certFile should be the concatenation of the server's certificate, any intermediates,
+# and the CA's certificate.
+# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run.
+tls_cert_file = ""
+
+# The name of a file containing the matching private key that is used to create the HTTPS server.
+# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run.
+tls_key_file = ""
+
+##### peer to peer configuration options #####
+[p2p]
+
+# Address to listen for incoming connections
+laddr = "tcp://0.0.0.0:26656"
+
+# Address to advertise to peers for them to dial
+# If empty, will use the same port as the laddr,
+# and will introspect on the listener or use UPnP
+# to figure out the address.
+external_address = ""
+
+# Comma separated list of seed nodes to connect to
+seeds = ""
+
+# Comma separated list of nodes to keep persistent connections to
+persistent_peers = ""
+
+# UPNP port forwarding
+upnp = false
+
+# Path to address book
+addr_book_file = "config/addrbook.json"
+
+# Set true for strict address routability rules
+# Set false for private or local networks
+addr_book_strict = true
+
+# Maximum number of inbound peers
+max_num_inbound_peers = 40
+
+# Maximum number of outbound peers to connect to, excluding persistent peers
+max_num_outbound_peers = 10
+
+# Time to wait before flushing messages out on the connection
+flush_throttle_timeout = "100ms"
+
+# Maximum size of a message packet payload, in bytes
+max_packet_msg_payload_size = 1024
+
+# Rate at which packets can be sent, in bytes/second
+send_rate = 5120000
+
+# Rate at which packets can be received, in bytes/second
+recv_rate = 5120000
+
+# Set true to enable the peer-exchange reactor
+pex = true
+
+# Seed mode, in which node constantly crawls the network and looks for
+# peers. If another node asks it for addresses, it responds and disconnects.
+#
+# Does not work if the peer-exchange reactor is disabled.
+seed_mode = false
+
+# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
+private_peer_ids = ""
+
+# Toggle to disable guard against peers connecting from the same ip.
+allow_duplicate_ip = false
+
+# Peer connection configuration.
+handshake_timeout = "20s"
+dial_timeout = "3s"
+
+##### mempool configuration options #####
+[mempool]
+
+recheck = true
+broadcast = true
+wal_dir = ""
+
+# Maximum number of transactions in the mempool
+size = 5000
+
+# Limit the total size of all txs in the mempool.
+# This only accounts for raw transactions (e.g. given 1MB transactions and
+# max_txs_bytes=5MB, mempool will only accept 5 transactions).
+max_txs_bytes = 1073741824
+
+# Size of the cache (used to filter transactions we saw earlier) in transactions
+cache_size = 10000
+
+##### consensus configuration options #####
+[consensus]
+
+wal_file = "data/cs.wal/wal"
+
+timeout_propose = "3s"
+timeout_propose_delta = "500ms"
+timeout_prevote = "1s"
+timeout_prevote_delta = "500ms"
+timeout_precommit = "1s"
+timeout_precommit_delta = "500ms"
+timeout_commit = "1s"
+
+# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
+skip_timeout_commit = false
+
+# EmptyBlocks mode and possible interval between empty blocks
+create_empty_blocks = true
+create_empty_blocks_interval = "0s"
+
+# Reactor sleep duration parameters
+peer_gossip_sleep_duration = "100ms"
+peer_query_maj23_sleep_duration = "2s"
+
+##### transactions indexer configuration options #####
+[tx_index]
+
+# What indexer to use for transactions
+#
+# Options:
+# 1) "null"
+# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
+indexer = "kv"
+
+# Comma-separated list of tags to index (by default the only tag is "tx.hash")
+#
+# You can also index transactions by height by adding "tx.height" tag here.
+#
+# It's recommended to index only a subset of tags due to possible memory
+# bloat. This, of course, depends on the indexer's DB and the volume of
+# transactions.
+index_tags = ""
+
+# When set to true, tells indexer to index all tags (predefined tags:
+# "tx.hash", "tx.height" and all tags from DeliverTx responses).
+#
+# Note this may not be desirable (see the comment above). IndexTags takes
+# precedence over IndexAllTags (i.e. 
when given both, IndexTags will be
+# indexed).
+index_all_tags = false
+
+##### instrumentation configuration options #####
+[instrumentation]
+
+# When true, Prometheus metrics are served under /metrics on
+# PrometheusListenAddr.
+# Check out the documentation for the list of available metrics.
+prometheus = false
+
+# Address to listen for Prometheus collector(s) connections
+prometheus_listen_addr = ":26660"
+
+# Maximum number of simultaneous connections.
+# If you want to accept a larger number than the default, make sure
+# you increase your OS limits.
+# 0 - unlimited.
+max_open_connections = 3
+
+# Instrumentation namespace
+namespace = "tendermint"
diff --git a/scripts/confix/testdata/v32-config.toml b/scripts/confix/testdata/v32-config.toml
new file mode 100644
index 0000000000..e0b897525b
--- /dev/null
+++ b/scripts/confix/testdata/v32-config.toml
@@ -0,0 +1,319 @@
+# This is a TOML config file.
+# For more information, see https://github.com/toml-lang/toml
+
+# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or
+# relative to the home directory (e.g. "data"). The home directory is
+# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable
+# or --home cmd flag.
+
+##### main base config options #####
+
+# TCP or UNIX socket address of the ABCI application,
+# or the name of an ABCI application compiled in with the Tendermint binary
+proxy_app = "tcp://127.0.0.1:26658"
+
+# A custom human readable name for this node
+moniker = "localhost"
+
+# If this node is many blocks behind the tip of the chain, FastSync
+# allows them to catch up quickly by downloading blocks in parallel
+# and verifying their commits
+fast_sync = true
+
+# Database backend: goleveldb | cleveldb | boltdb
+# * goleveldb (github.com/syndtr/goleveldb - most popular implementation)
+# - pure go
+# - stable
+# * cleveldb (uses levigo wrapper)
+# - fast
+# - requires gcc
+# - use cleveldb build tag (go build -tags cleveldb)
+# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt)
+# - EXPERIMENTAL
+# - may be faster in some use-cases (random reads - indexer)
+# - use boltdb build tag (go build -tags boltdb)
+db_backend = "goleveldb"
+
+# Database directory
+db_dir = "data"
+
+# Output level for logging, including package level options
+log_level = "main:info,state:info,*:error"
+
+# Output format: 'plain' (colored text) or 'json'
+log_format = "plain"
+
+##### additional base config options #####
+
+# Path to the JSON file containing the initial validator set and other meta data
+genesis_file = "config/genesis.json"
+
+# Path to the JSON file containing the private key to use as a validator in the consensus protocol
+priv_validator_key_file = "config/priv_validator_key.json"
+
+# Path to the JSON file containing the last sign state of a validator
+priv_validator_state_file = "data/priv_validator_state.json"
+
+# TCP or UNIX socket address for Tendermint to listen on for
+# connections from an external PrivValidator process
+priv_validator_laddr = ""
+
+# Path to the JSON file containing the private key to use for node authentication in the p2p protocol
+node_key_file = "config/node_key.json"
+
+# Mechanism to connect to the ABCI application: socket | grpc
+abci = "socket"
+
+# TCP or UNIX socket address for the profiling server to listen on
+prof_laddr = ""
+
+# If true, query the ABCI app on connecting to a new peer
+# so the app can decide if we should keep the connection or not
+filter_peers = false
+
+##### advanced configuration options #####
+
+##### rpc 
server configuration options #####
+[rpc]
+
+# TCP or UNIX socket address for the RPC server to listen on
+laddr = "tcp://127.0.0.1:26657"
+
+# A list of origins a cross-domain request can be executed from
+# Default value '[]' disables cors support
+# Use '["*"]' to allow any origin
+cors_allowed_origins = []
+
+# A list of methods the client is allowed to use with cross-domain requests
+cors_allowed_methods = ["HEAD", "GET", "POST", ]
+
+# A list of non simple headers the client is allowed to use with cross-domain requests
+cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ]
+
+# TCP or UNIX socket address for the gRPC server to listen on
+# NOTE: This server only supports /broadcast_tx_commit
+grpc_laddr = ""
+
+# Maximum number of simultaneous connections.
+# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections
+# If you want to accept a larger number than the default, make sure
+# you increase your OS limits.
+# 0 - unlimited.
+# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
+# 1024 - 40 - 10 - 50 = 924 = ~900
+grpc_max_open_connections = 900
+
+# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
+unsafe = false
+
+# Maximum number of simultaneous connections (including WebSocket).
+# Does not include gRPC connections. See grpc_max_open_connections
+# If you want to accept a larger number than the default, make sure
+# you increase your OS limits.
+# 0 - unlimited.
+# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
+# 1024 - 40 - 10 - 50 = 924 = ~900
+max_open_connections = 900
+
+# Maximum number of unique clientIDs that can /subscribe
+# If you're using /broadcast_tx_commit, set to the estimated maximum number
+# of broadcast_tx_commit calls per block.
+max_subscription_clients = 100
+
+# Maximum number of unique queries a given client can /subscribe to
+# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to
+# the estimated maximum number of broadcast_tx_commit calls per block.
+max_subscriptions_per_client = 5
+
+# How long to wait for a tx to be committed during /broadcast_tx_commit.
+# WARNING: Using a value larger than 10s will result in increasing the
+# global HTTP write timeout, which applies to all connections and endpoints.
+# See https://github.com/tendermint/tendermint/issues/3435
+timeout_broadcast_tx_commit = "10s"
+
+# Maximum size of request body, in bytes
+max_body_bytes = 1000000
+
+# Maximum size of request header, in bytes
+max_header_bytes = 1048576
+
+# The path to a file containing the certificate that is used to create the HTTPS server.
+# Might be either an absolute path or a path relative to tendermint's config directory.
+# If the certificate is signed by a certificate authority,
+# the certFile should be the concatenation of the server's certificate, any intermediates,
+# and the CA's certificate.
+# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server.
+# Otherwise, HTTP server is run.
+tls_cert_file = ""
+
+# The path to a file containing the matching private key that is used to create the HTTPS server.
+# Might be either an absolute path or a path relative to tendermint's config directory.
+# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server.
+# Otherwise, HTTP server is run.
+tls_key_file = "" + +##### peer to peer configuration options ##### +[p2p] + +# Address to listen for incoming connections +laddr = "tcp://0.0.0.0:26656" + +# Address to advertise to peers for them to dial +# If empty, will use the same port as the laddr, +# and will introspect on the listener or use UPnP +# to figure out the address. +external_address = "" + +# Comma separated list of seed nodes to connect to +seeds = "" + +# Comma separated list of nodes to keep persistent connections to +persistent_peers = "" + +# UPNP port forwarding +upnp = false + +# Path to address book +addr_book_file = "config/addrbook.json" + +# Set true for strict address routability rules +# Set false for private or local networks +addr_book_strict = true + +# Maximum number of inbound peers +max_num_inbound_peers = 40 + +# Maximum number of outbound peers to connect to, excluding persistent peers +max_num_outbound_peers = 10 + +# Time to wait before flushing messages out on the connection +flush_throttle_timeout = "100ms" + +# Maximum size of a message packet payload, in bytes +max_packet_msg_payload_size = 1024 + +# Rate at which packets can be sent, in bytes/second +send_rate = 5120000 + +# Rate at which packets can be received, in bytes/second +recv_rate = 5120000 + +# Set true to enable the peer-exchange reactor +pex = true + +# Seed mode, in which node constantly crawls the network and looks for +# peers. If another node asks it for addresses, it responds and disconnects. +# +# Does not work if the peer-exchange reactor is disabled. +seed_mode = false + +# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) +private_peer_ids = "" + +# Toggle to disable guard against peers connecting from the same ip. +allow_duplicate_ip = false + +# Peer connection configuration. +handshake_timeout = "20s" +dial_timeout = "3s" + +##### mempool configuration options ##### +[mempool] + +recheck = true +broadcast = true +wal_dir = "" + +# Maximum number of transactions in the mempool +size = 5000 + +# Limit the total size of all txs in the mempool. +# This only accounts for raw transactions (e.g. given 1MB transactions and +# max_txs_bytes=5MB, mempool will only accept 5 transactions). +max_txs_bytes = 1073741824 + +# Size of the cache (used to filter transactions we saw earlier) in transactions +cache_size = 10000 + +# Maximum size of a single transaction. +# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes} + {amino overhead}. 
+max_tx_bytes = 1048576
+
+##### fast sync configuration options #####
+[fastsync]
+
+# Fast Sync version to use:
+# 1) "v0" (default) - the legacy fast sync implementation
+# 2) "v1" - refactor of v0 version for better testability
+version = "v0"
+
+##### consensus configuration options #####
+[consensus]
+
+wal_file = "data/cs.wal/wal"
+
+timeout_propose = "3s"
+timeout_propose_delta = "500ms"
+timeout_prevote = "1s"
+timeout_prevote_delta = "500ms"
+timeout_precommit = "1s"
+timeout_precommit_delta = "500ms"
+timeout_commit = "1s"
+
+# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
+skip_timeout_commit = false
+
+# EmptyBlocks mode and possible interval between empty blocks
+create_empty_blocks = true
+create_empty_blocks_interval = "0s"
+
+# Reactor sleep duration parameters
+peer_gossip_sleep_duration = "100ms"
+peer_query_maj23_sleep_duration = "2s"
+
+##### transactions indexer configuration options #####
+[tx_index]
+
+# What indexer to use for transactions
+#
+# Options:
+# 1) "null"
+# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
+indexer = "kv"
+
+# Comma-separated list of tags to index (by default the only tag is "tx.hash")
+#
+# You can also index transactions by height by adding "tx.height" tag here.
+#
+# It's recommended to index only a subset of tags due to possible memory
+# bloat. This, of course, depends on the indexer's DB and the volume of
+# transactions.
+index_tags = ""
+
+# When set to true, tells indexer to index all tags (predefined tags:
+# "tx.hash", "tx.height" and all tags from DeliverTx responses).
+#
+# Note this may not be desirable (see the comment above). IndexTags takes
+# precedence over IndexAllTags (i.e. when given both, IndexTags will be
+# indexed).
+index_all_tags = false
+
+##### instrumentation configuration options #####
+[instrumentation]
+
+# When true, Prometheus metrics are served under /metrics on
+# PrometheusListenAddr.
+# Check out the documentation for the list of available metrics.
+prometheus = false
+
+# Address to listen for Prometheus collector(s) connections
+prometheus_listen_addr = ":26660"
+
+# Maximum number of simultaneous connections.
+# If you want to accept a larger number than the default, make sure
+# you increase your OS limits.
+# 0 - unlimited.
+max_open_connections = 3
+
+# Instrumentation namespace
+namespace = "tendermint"
diff --git a/scripts/confix/testdata/v33-config.toml b/scripts/confix/testdata/v33-config.toml
new file mode 100644
index 0000000000..b728f08a13
--- /dev/null
+++ b/scripts/confix/testdata/v33-config.toml
@@ -0,0 +1,335 @@
+# This is a TOML config file.
+# For more information, see https://github.com/toml-lang/toml
+
+# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or
+# relative to the home directory (e.g. "data"). The home directory is
+# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable
+# or --home cmd flag.
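The "Should be < {ulimit -Sn} - ..." guidance repeated in the [rpc] comments of these fixtures is just arithmetic over the process's file-descriptor budget. A minimal Go sketch of that calculation (not part of this diff; the 40/10/50 constants mirror the peer defaults and the comments' rough estimate for WAL, DB, and other open files):

```go
package main

import (
	"fmt"
	"syscall"
)

func main() {
	// Read the soft open-file limit (the value `ulimit -Sn` reports).
	var rl syscall.Rlimit
	if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rl); err != nil {
		panic(err)
	}
	const (
		maxInboundPeers  = 40 // max_num_inbound_peers default
		maxOutboundPeers = 10 // max_num_outbound_peers default
		otherOpenFiles   = 50 // rough estimate for WAL, DB, and other files
	)
	// With the common soft limit of 1024: 1024 - 40 - 10 - 50 = 924 ≈ 900,
	// which is where the max_open_connections = 900 default comes from.
	suggested := int64(rl.Cur) - maxInboundPeers - maxOutboundPeers - otherOpenFiles
	fmt.Printf("suggested max_open_connections: %d\n", suggested)
}
```
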
+
+##### main base config options #####
+
+# TCP or UNIX socket address of the ABCI application,
+# or the name of an ABCI application compiled in with the Tendermint binary
+proxy_app = "tcp://127.0.0.1:26658"
+
+# A custom human readable name for this node
+moniker = "localhost"
+
+# If this node is many blocks behind the tip of the chain, FastSync
+# allows them to catch up quickly by downloading blocks in parallel
+# and verifying their commits
+fast_sync = true
+
+# Database backend: goleveldb | cleveldb | boltdb | rocksdb
+# * goleveldb (github.com/syndtr/goleveldb - most popular implementation)
+# - pure go
+# - stable
+# * cleveldb (uses levigo wrapper)
+# - fast
+# - requires gcc
+# - use cleveldb build tag (go build -tags cleveldb)
+# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt)
+# - EXPERIMENTAL
+# - may be faster in some use-cases (random reads - indexer)
+# - use boltdb build tag (go build -tags boltdb)
+# * rocksdb (uses github.com/tecbot/gorocksdb)
+# - EXPERIMENTAL
+# - requires gcc
+# - use rocksdb build tag (go build -tags rocksdb)
+db_backend = "goleveldb"
+
+# Database directory
+db_dir = "data"
+
+# Output level for logging, including package level options
+log_level = "main:info,state:info,*:error"
+
+# Output format: 'plain' (colored text) or 'json'
+log_format = "plain"
+
+##### additional base config options #####
+
+# Path to the JSON file containing the initial validator set and other meta data
+genesis_file = "config/genesis.json"
+
+# Path to the JSON file containing the private key to use as a validator in the consensus protocol
+priv_validator_key_file = "config/priv_validator_key.json"
+
+# Path to the JSON file containing the last sign state of a validator
+priv_validator_state_file = "data/priv_validator_state.json"
+
+# TCP or UNIX socket address for Tendermint to listen on for
+# connections from an external PrivValidator process
+priv_validator_laddr = ""
+
+# Path to the JSON file containing the private key to use for node authentication in the p2p protocol
+node_key_file = "config/node_key.json"
+
+# Mechanism to connect to the ABCI application: socket | grpc
+abci = "socket"
+
+# TCP or UNIX socket address for the profiling server to listen on
+prof_laddr = ""
+
+# If true, query the ABCI app on connecting to a new peer
+# so the app can decide if we should keep the connection or not
+filter_peers = false
+
+##### advanced configuration options #####
+
+##### rpc server configuration options #####
+[rpc]
+
+# TCP or UNIX socket address for the RPC server to listen on
+laddr = "tcp://127.0.0.1:26657"
+
+# A list of origins a cross-domain request can be executed from
+# Default value '[]' disables cors support
+# Use '["*"]' to allow any origin
+cors_allowed_origins = []
+
+# A list of methods the client is allowed to use with cross-domain requests
+cors_allowed_methods = ["HEAD", "GET", "POST", ]
+
+# A list of non simple headers the client is allowed to use with cross-domain requests
+cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ]
+
+# TCP or UNIX socket address for the gRPC server to listen on
+# NOTE: This server only supports /broadcast_tx_commit
+grpc_laddr = ""
+
+# Maximum number of simultaneous connections.
+# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections
+# If you want to accept a larger number than the default, make sure
+# you increase your OS limits.
+# 0 - unlimited.
+# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
+# 1024 - 40 - 10 - 50 = 924 = ~900
+grpc_max_open_connections = 900
+
+# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
+unsafe = false
+
+# Maximum number of simultaneous connections (including WebSocket).
+# Does not include gRPC connections. See grpc_max_open_connections
+# If you want to accept a larger number than the default, make sure
+# you increase your OS limits.
+# 0 - unlimited.
+# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
+# 1024 - 40 - 10 - 50 = 924 = ~900
+max_open_connections = 900
+
+# Maximum number of unique clientIDs that can /subscribe
+# If you're using /broadcast_tx_commit, set to the estimated maximum number
+# of broadcast_tx_commit calls per block.
+max_subscription_clients = 100
+
+# Maximum number of unique queries a given client can /subscribe to
+# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to
+# the estimated maximum number of broadcast_tx_commit calls per block.
+max_subscriptions_per_client = 5
+
+# How long to wait for a tx to be committed during /broadcast_tx_commit.
+# WARNING: Using a value larger than 10s will result in increasing the
+# global HTTP write timeout, which applies to all connections and endpoints.
+# See https://github.com/tendermint/tendermint/issues/3435
+timeout_broadcast_tx_commit = "10s"
+
+# Maximum size of request body, in bytes
+max_body_bytes = 1000000
+
+# Maximum size of request header, in bytes
+max_header_bytes = 1048576
+
+# The path to a file containing the certificate that is used to create the HTTPS server.
+# Might be either an absolute path or a path relative to tendermint's config directory.
+# If the certificate is signed by a certificate authority,
+# the certFile should be the concatenation of the server's certificate, any intermediates,
+# and the CA's certificate.
+# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server.
+# Otherwise, HTTP server is run.
+tls_cert_file = ""
+
+# The path to a file containing the matching private key that is used to create the HTTPS server.
+# Might be either an absolute path or a path relative to tendermint's config directory.
+# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server.
+# Otherwise, HTTP server is run.
+tls_key_file = ""
+
+##### peer to peer configuration options #####
+[p2p]
+
+# Address to listen for incoming connections
+laddr = "tcp://0.0.0.0:26656"
+
+# Address to advertise to peers for them to dial
+# If empty, will use the same port as the laddr,
+# and will introspect on the listener or use UPnP
+# to figure out the address.
+external_address = "" + +# Comma separated list of seed nodes to connect to +seeds = "" + +# Comma separated list of nodes to keep persistent connections to +persistent_peers = "" + +# UPNP port forwarding +upnp = false + +# Path to address book +addr_book_file = "config/addrbook.json" + +# Set true for strict address routability rules +# Set false for private or local networks +addr_book_strict = true + +# Maximum number of inbound peers +max_num_inbound_peers = 40 + +# Maximum number of outbound peers to connect to, excluding persistent peers +max_num_outbound_peers = 10 + +# List of node IDs, to which a connection will be (re)established ignoring any existing limits +unconditional_peer_ids = "" + +# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) +persistent_peers_max_dial_period = "0s" + +# Time to wait before flushing messages out on the connection +flush_throttle_timeout = "100ms" + +# Maximum size of a message packet payload, in bytes +max_packet_msg_payload_size = 1024 + +# Rate at which packets can be sent, in bytes/second +send_rate = 5120000 + +# Rate at which packets can be received, in bytes/second +recv_rate = 5120000 + +# Set true to enable the peer-exchange reactor +pex = true + +# Seed mode, in which node constantly crawls the network and looks for +# peers. If another node asks it for addresses, it responds and disconnects. +# +# Does not work if the peer-exchange reactor is disabled. +seed_mode = false + +# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) +private_peer_ids = "" + +# Toggle to disable guard against peers connecting from the same ip. +allow_duplicate_ip = false + +# Peer connection configuration. +handshake_timeout = "20s" +dial_timeout = "3s" + +##### mempool configuration options ##### +[mempool] + +recheck = true +broadcast = true +wal_dir = "" + +# Maximum number of transactions in the mempool +size = 5000 + +# Limit the total size of all txs in the mempool. +# This only accounts for raw transactions (e.g. given 1MB transactions and +# max_txs_bytes=5MB, mempool will only accept 5 transactions). +max_txs_bytes = 1073741824 + +# Size of the cache (used to filter transactions we saw earlier) in transactions +cache_size = 10000 + +# Maximum size of a single transaction. +# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes} + {amino overhead}. 
+max_tx_bytes = 1048576
+
+##### fast sync configuration options #####
+[fastsync]
+
+# Fast Sync version to use:
+# 1) "v0" (default) - the legacy fast sync implementation
+# 2) "v1" - refactor of v0 version for better testability
+# 3) "v2" - refactor of v1 version for better usability
+version = "v0"
+
+##### consensus configuration options #####
+[consensus]
+
+wal_file = "data/cs.wal/wal"
+
+timeout_propose = "3s"
+timeout_propose_delta = "500ms"
+timeout_prevote = "1s"
+timeout_prevote_delta = "500ms"
+timeout_precommit = "1s"
+timeout_precommit_delta = "500ms"
+timeout_commit = "1s"
+
+# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
+skip_timeout_commit = false
+
+# EmptyBlocks mode and possible interval between empty blocks
+create_empty_blocks = true
+create_empty_blocks_interval = "0s"
+
+# Reactor sleep duration parameters
+peer_gossip_sleep_duration = "100ms"
+peer_query_maj23_sleep_duration = "2s"
+
+##### transactions indexer configuration options #####
+[tx_index]
+
+# What indexer to use for transactions
+#
+# Options:
+# 1) "null"
+# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
+indexer = "kv"
+
+# Comma-separated list of compositeKeys to index (by default the only key is "tx.hash")
+# Remember that Event has the following structure: type.key
+# type: [
+# key: value,
+# ...
+# ]
+#
+# You can also index transactions by height by adding "tx.height" key here.
+#
+# It's recommended to index only a subset of keys due to possible memory
+# bloat. This, of course, depends on the indexer's DB and the volume of
+# transactions.
+index_keys = ""
+
+# When set to true, tells indexer to index all compositeKeys (predefined keys:
+# "tx.hash", "tx.height" and all keys from DeliverTx responses).
+#
+# Note this may not be desirable (see the comment above). IndexKeys takes
+# precedence over IndexAllKeys (i.e. when given both, IndexKeys will be
+# indexed).
+index_all_keys = false
+
+##### instrumentation configuration options #####
+[instrumentation]
+
+# When true, Prometheus metrics are served under /metrics on
+# PrometheusListenAddr.
+# Check out the documentation for the list of available metrics.
+prometheus = false
+
+# Address to listen for Prometheus collector(s) connections
+prometheus_listen_addr = ":26660"
+
+# Maximum number of simultaneous connections.
+# If you want to accept a larger number than the default, make sure
+# you increase your OS limits.
+# 0 - unlimited.
+max_open_connections = 3
+
+# Instrumentation namespace
+namespace = "tendermint"
diff --git a/scripts/confix/testdata/v34-config.toml b/scripts/confix/testdata/v34-config.toml
new file mode 100644
index 0000000000..6bcffd9541
--- /dev/null
+++ b/scripts/confix/testdata/v34-config.toml
@@ -0,0 +1,430 @@
+# This is a TOML config file.
+# For more information, see https://github.com/toml-lang/toml
+
+# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or
+# relative to the home directory (e.g. "data"). The home directory is
+# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable
+# or --home cmd flag.
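The IndexKeys/IndexAllKeys precedence rule spelled out in the [tx_index] comments above reduces to a small decision function. A hypothetical Go sketch (the function name and the "*" sentinel are illustrative, not taken from the repo):

```go
// keysToIndex mirrors the documented precedence: an explicit key list wins
// over the index-all flag; with neither set, only "tx.hash" is indexed.
func keysToIndex(indexKeys []string, indexAllKeys bool) []string {
	if len(indexKeys) > 0 {
		return indexKeys // IndexKeys takes precedence over IndexAllKeys
	}
	if indexAllKeys {
		return []string{"*"} // sentinel meaning "every composite key"
	}
	return []string{"tx.hash"} // default
}
```

For example, keysToIndex([]string{"tx.height"}, true) returns only "tx.height", matching the comment that an explicit list is used even when the index-all flag is also set.
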
+
+#######################################################################
+### Main Base Config Options ###
+#######################################################################
+
+# TCP or UNIX socket address of the ABCI application,
+# or the name of an ABCI application compiled in with the Tendermint binary
+proxy_app = "tcp://127.0.0.1:26658"
+
+# A custom human readable name for this node
+moniker = "localhost"
+
+# If this node is many blocks behind the tip of the chain, FastSync
+# allows them to catch up quickly by downloading blocks in parallel
+# and verifying their commits
+fast_sync = true
+
+# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb
+# * goleveldb (github.com/syndtr/goleveldb - most popular implementation)
+# - pure go
+# - stable
+# * cleveldb (uses levigo wrapper)
+# - fast
+# - requires gcc
+# - use cleveldb build tag (go build -tags cleveldb)
+# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt)
+# - EXPERIMENTAL
+# - may be faster in some use-cases (random reads - indexer)
+# - use boltdb build tag (go build -tags boltdb)
+# * rocksdb (uses github.com/tecbot/gorocksdb)
+# - EXPERIMENTAL
+# - requires gcc
+# - use rocksdb build tag (go build -tags rocksdb)
+# * badgerdb (uses github.com/dgraph-io/badger)
+# - EXPERIMENTAL
+# - use badgerdb build tag (go build -tags badgerdb)
+db_backend = "goleveldb"
+
+# Database directory
+db_dir = "data"
+
+# Output level for logging, including package level options
+log_level = "info"
+
+# Output format: 'plain' (colored text) or 'json'
+log_format = "plain"
+
+##### additional base config options #####
+
+# Path to the JSON file containing the initial validator set and other meta data
+genesis_file = "config/genesis.json"
+
+# Path to the JSON file containing the private key to use as a validator in the consensus protocol
+priv_validator_key_file = "config/priv_validator_key.json"
+
+# Path to the JSON file containing the last sign state of a validator
+priv_validator_state_file = "data/priv_validator_state.json"
+
+# TCP or UNIX socket address for Tendermint to listen on for
+# connections from an external PrivValidator process
+priv_validator_laddr = ""
+
+# Path to the JSON file containing the private key to use for node authentication in the p2p protocol
+node_key_file = "config/node_key.json"
+
+# Mechanism to connect to the ABCI application: socket | grpc
+abci = "socket"
+
+# If true, query the ABCI app on connecting to a new peer
+# so the app can decide if we should keep the connection or not
+filter_peers = false
+
+
+#######################################################################
+### Advanced Configuration Options ###
+#######################################################################
+
+#######################################################
+### RPC Server Configuration Options ###
+#######################################################
+[rpc]
+
+# TCP or UNIX socket address for the RPC server to listen on
+laddr = "tcp://127.0.0.1:26657"
+
+# A list of origins a cross-domain request can be executed from
+# Default value '[]' disables cors support
+# Use '["*"]' to allow any origin
+cors_allowed_origins = []
+
+# A list of methods the client is allowed to use with cross-domain requests
+cors_allowed_methods = ["HEAD", "GET", "POST", ]
+
+# A list of non simple headers the client is allowed to use with cross-domain requests
+cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ]
+
+# TCP or UNIX socket address for the 
gRPC server to listen on
+# NOTE: This server only supports /broadcast_tx_commit
+grpc_laddr = ""
+
+# Maximum number of simultaneous connections.
+# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections
+# If you want to accept a larger number than the default, make sure
+# you increase your OS limits.
+# 0 - unlimited.
+# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
+# 1024 - 40 - 10 - 50 = 924 = ~900
+grpc_max_open_connections = 900
+
+# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
+unsafe = false
+
+# Maximum number of simultaneous connections (including WebSocket).
+# Does not include gRPC connections. See grpc_max_open_connections
+# If you want to accept a larger number than the default, make sure
+# you increase your OS limits.
+# 0 - unlimited.
+# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
+# 1024 - 40 - 10 - 50 = 924 = ~900
+max_open_connections = 900
+
+# Maximum number of unique clientIDs that can /subscribe
+# If you're using /broadcast_tx_commit, set to the estimated maximum number
+# of broadcast_tx_commit calls per block.
+max_subscription_clients = 100
+
+# Maximum number of unique queries a given client can /subscribe to
+# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to
+# the estimated maximum number of broadcast_tx_commit calls per block.
+max_subscriptions_per_client = 5
+
+# Experimental parameter to specify the maximum number of events a node will
+# buffer, per subscription, before returning an error and closing the
+# subscription. Must be set to at least 100, but higher values will accommodate
+# higher event throughput rates (and will use more memory).
+experimental_subscription_buffer_size = 200
+
+# Experimental parameter to specify the maximum number of RPC responses that
+# can be buffered per WebSocket client. If clients cannot read from the
+# WebSocket endpoint fast enough, they will be disconnected, so increasing this
+# parameter may reduce the chances of them being disconnected (but will cause
+# the node to use more memory).
+#
+# Must be at least the same as "experimental_subscription_buffer_size",
+# otherwise connections could be dropped unnecessarily. This value should
+# ideally be somewhat higher than "experimental_subscription_buffer_size" to
+# accommodate non-subscription-related RPC responses.
+experimental_websocket_write_buffer_size = 200
+
+# If a WebSocket client cannot read fast enough, at present we may
+# silently drop events instead of generating an error or disconnecting the
+# client.
+#
+# Enabling this experimental parameter will cause the WebSocket connection to
+# be closed instead if it cannot read fast enough, allowing for greater
+# predictability in subscription behaviour.
+experimental_close_on_slow_client = false
+
+# How long to wait for a tx to be committed during /broadcast_tx_commit.
+# WARNING: Using a value larger than 10s will result in increasing the
+# global HTTP write timeout, which applies to all connections and endpoints.
+# See https://github.com/tendermint/tendermint/issues/3435
+timeout_broadcast_tx_commit = "10s"
+
+# Maximum size of request body, in bytes
+max_body_bytes = 1000000
+
+# Maximum size of request header, in bytes
+max_header_bytes = 1048576
+
+# The path to a file containing the certificate that is used to create the HTTPS server.
+# Might be either an absolute path or a path relative to Tendermint's config directory.
+# If the certificate is signed by a certificate authority,
+# the certFile should be the concatenation of the server's certificate, any intermediates,
+# and the CA's certificate.
+# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server.
+# Otherwise, HTTP server is run.
+tls_cert_file = ""
+
+# The path to a file containing the matching private key that is used to create the HTTPS server.
+# Might be either an absolute path or a path relative to Tendermint's config directory.
+# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server.
+# Otherwise, HTTP server is run.
+tls_key_file = ""
+
+# pprof listen address (https://golang.org/pkg/net/http/pprof)
+pprof_laddr = ""
+
+#######################################################
+### P2P Configuration Options ###
+#######################################################
+[p2p]
+
+# Address to listen for incoming connections
+laddr = "tcp://0.0.0.0:26656"
+
+# Address to advertise to peers for them to dial
+# If empty, will use the same port as the laddr,
+# and will introspect on the listener or use UPnP
+# to figure out the address. ip and port are required
+# example: 159.89.10.97:26656
+external_address = ""
+
+# Comma separated list of seed nodes to connect to
+seeds = ""
+
+# Comma separated list of nodes to keep persistent connections to
+persistent_peers = ""
+
+# UPNP port forwarding
+upnp = false
+
+# Path to address book
+addr_book_file = "config/addrbook.json"
+
+# Set true for strict address routability rules
+# Set false for private or local networks
+addr_book_strict = true
+
+# Maximum number of inbound peers
+max_num_inbound_peers = 40
+
+# Maximum number of outbound peers to connect to, excluding persistent peers
+max_num_outbound_peers = 10
+
+# List of node IDs, to which a connection will be (re)established ignoring any existing limits
+unconditional_peer_ids = ""
+
+# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used)
+persistent_peers_max_dial_period = "0s"
+
+# Time to wait before flushing messages out on the connection
+flush_throttle_timeout = "100ms"
+
+# Maximum size of a message packet payload, in bytes
+max_packet_msg_payload_size = 1024
+
+# Rate at which packets can be sent, in bytes/second
+send_rate = 5120000
+
+# Rate at which packets can be received, in bytes/second
+recv_rate = 5120000
+
+# Set true to enable the peer-exchange reactor
+pex = true
+
+# Seed mode, in which node constantly crawls the network and looks for
+# peers. If another node asks it for addresses, it responds and disconnects.
+#
+# Does not work if the peer-exchange reactor is disabled.
+seed_mode = false
+
+# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
+private_peer_ids = ""
+
+# Toggle to disable guard against peers connecting from the same ip.
+allow_duplicate_ip = false
+
+# Peer connection configuration.
+handshake_timeout = "20s"
+dial_timeout = "3s"
+
+#######################################################
+### Mempool Configuration Option ###
+#######################################################
+[mempool]
+
+recheck = true
+broadcast = true
+wal_dir = ""
+
+# Maximum number of transactions in the mempool
+size = 5000
+
+# Limit the total size of all txs in the mempool.
+# This only accounts for raw transactions (e.g. 
given 1MB transactions and
+# max_txs_bytes=5MB, mempool will only accept 5 transactions).
+max_txs_bytes = 1073741824
+
+# Size of the cache (used to filter transactions we saw earlier) in transactions
+cache_size = 10000
+
+# Do not remove invalid transactions from the cache (default: false)
+# Set to true if it's not possible for any invalid transaction to become valid
+# again in the future.
+keep-invalid-txs-in-cache = false
+
+# Maximum size of a single transaction.
+# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes}.
+max_tx_bytes = 1048576
+
+# Maximum size of a batch of transactions to send to a peer
+# Including space needed by encoding (one varint per transaction).
+# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796
+max_batch_bytes = 0
+
+#######################################################
+### State Sync Configuration Options ###
+#######################################################
+[statesync]
+# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine
+# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in
+# the network to take and serve state machine snapshots. State sync is not attempted if the node
+# has any local state (LastBlockHeight > 0). The node will have a truncated block history,
+# starting from the height of the snapshot.
+enable = false
+
+# RPC servers (comma-separated) for light client verification of the synced state machine and
+# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding
+# header hash obtained from a trusted source, and a period during which validators can be trusted.
+#
+# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2
+# weeks) during which they can be financially punished (slashed) for misbehavior.
+rpc_servers = ""
+trust_height = 0
+trust_hash = ""
+trust_period = "168h0m0s"
+
+# Time to spend discovering snapshots before initiating a restore.
+discovery_time = "15s"
+
+# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp).
+# Will create a new, randomly named directory within, and remove it when done.
+temp_dir = ""
+
+# The timeout duration before re-requesting a chunk, possibly from a different
+# peer (default: 1 minute).
+chunk_request_timeout = "10s"
+
+# The number of concurrent chunk fetchers to run (default: 1).
+chunk_fetchers = "4"
+
+#######################################################
+### Fast Sync Configuration Options ###
+#######################################################
+[fastsync]
+
+# Fast Sync version to use:
+# 1) "v0" (default) - the legacy fast sync implementation
+# 2) "v1" - refactor of v0 version for better testability
+# 3) "v2" - complete redesign of v0, optimized for testability & readability
+version = "v0"
+
+#######################################################
+### Consensus Configuration Options ###
+#######################################################
+[consensus]
+
+wal_file = "data/cs.wal/wal"
+
+# How long we wait for a proposal block before prevoting nil
+timeout_propose = "3s"
+# How much timeout_propose increases with each round
+timeout_propose_delta = "500ms"
+# How long we wait after receiving +2/3 prevotes for “anything” (i.e. 
not a single block or nil) +timeout_prevote = "1s" +# How much the timeout_prevote increases with each round +timeout_prevote_delta = "500ms" +# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil) +timeout_precommit = "1s" +# How much the timeout_precommit increases with each round +timeout_precommit_delta = "500ms" +# How long we wait after committing a block, before starting on the new +# height (this gives us a chance to receive some more precommits, even +# though we already have +2/3). +timeout_commit = "1s" + +# How many blocks to look back to check existence of the node's consensus votes before joining consensus +# When non-zero, the node will panic upon restart +# if the same consensus key was used to sign {double_sign_check_height} last blocks. +# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. +double_sign_check_height = 0 + +# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) +skip_timeout_commit = false + +# EmptyBlocks mode and possible interval between empty blocks +create_empty_blocks = true +create_empty_blocks_interval = "0s" + +create_proof_block_range = 1 + +# Reactor sleep duration parameters +peer_gossip_sleep_duration = "100ms" +peer_query_maj23_sleep_duration = "2s" + +####################################################### +### Transaction Indexer Configuration Options ### +####################################################### +[tx_index] + +# What indexer to use for transactions +# +# The application will set which txs to index. In some cases a node operator will be able +# to decide which txs to index based on configuration set in the application. +# +# Options: +# 1) "null" +# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). +# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed. +indexer = "kv" + +####################################################### +### Instrumentation Configuration Options ### +####################################################### +[instrumentation] + +# When true, Prometheus metrics are served under /metrics on +# PrometheusListenAddr. +# Check out the documentation for the list of available metrics. +prometheus = false + +# Address to listen for Prometheus collector(s) connections +prometheus_listen_addr = ":26660" + +# Maximum number of simultaneous connections. +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +max_open_connections = 3 + +# Instrumentation namespace +namespace = "tendermint" diff --git a/scripts/confix/testdata/v35-config.toml b/scripts/confix/testdata/v35-config.toml new file mode 100644 index 0000000000..ec11e27716 --- /dev/null +++ b/scripts/confix/testdata/v35-config.toml @@ -0,0 +1,529 @@ +# This is a TOML config file. +# For more information, see https://github.com/toml-lang/toml + +# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or +# relative to the home directory (e.g. "data"). The home directory is +# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable +# or --home cmd flag. 
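+#
+# For example (hypothetical values): with TMHOME=/home/user/.tendermint, the
+# relative path "data" and the absolute path "/home/user/.tendermint/data"
+# refer to the same directory, so these two settings are equivalent:
+#
+# db-dir = "data"
+# db-dir = "/home/user/.tendermint/data"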
+ +####################################################################### +### Main Base Config Options ### +####################################################################### + +# TCP or UNIX socket address of the ABCI application, +# or the name of an ABCI application compiled in with the Tendermint binary +proxy-app = "tcp://127.0.0.1:26658" + +# A custom human readable name for this node +moniker = "localhost" + +# Mode of Node: full | validator | seed +# * validator node +# - all reactors +# - with priv_validator_key.json, priv_validator_state.json +# * full node +# - all reactors +# - No priv_validator_key.json, priv_validator_state.json +# * seed node +# - only P2P, PEX Reactor +# - No priv_validator_key.json, priv_validator_state.json +mode = "validator" + +# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb +# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) +# - pure go +# - stable +# * cleveldb (uses levigo wrapper) +# - fast +# - requires gcc +# - use cleveldb build tag (go build -tags cleveldb) +# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) +# - EXPERIMENTAL +# - may be faster is some use-cases (random reads - indexer) +# - use boltdb build tag (go build -tags boltdb) +# * rocksdb (uses github.com/tecbot/gorocksdb) +# - EXPERIMENTAL +# - requires gcc +# - use rocksdb build tag (go build -tags rocksdb) +# * badgerdb (uses github.com/dgraph-io/badger) +# - EXPERIMENTAL +# - use badgerdb build tag (go build -tags badgerdb) +db-backend = "goleveldb" + +# Database directory +db-dir = "data" + +# Output level for logging, including package level options +log-level = "info" + +# Output format: 'plain' (colored text) or 'json' +log-format = "plain" + +##### additional base config options ##### + +# Path to the JSON file containing the initial validator set and other meta data +genesis-file = "config/genesis.json" + +# Path to the JSON file containing the private key to use for node authentication in the p2p protocol +node-key-file = "config/node_key.json" + +# Mechanism to connect to the ABCI application: socket | grpc +abci = "socket" + +# If true, query the ABCI app on connecting to a new peer +# so the app can decide if we should keep the connection or not +filter-peers = false + + +####################################################### +### Priv Validator Configuration ### +####################################################### +[priv-validator] + +# Path to the JSON file containing the private key to use as a validator in the consensus protocol +key-file = "config/priv_validator_key.json" + +# Path to the JSON file containing the last sign state of a validator +state-file = "data/priv_validator_state.json" + +# TCP or UNIX socket address for Tendermint to listen on for +# connections from an external PrivValidator process +# when the listenAddr is prefixed with grpc instead of tcp it will use the gRPC Client +laddr = "" + +# Path to the client certificate generated while creating needed files for secure connection. 
+# If a remote validator address is provided but no certificate, the connection will be insecure
+client-certificate-file = ""
+
+# Client key generated while creating certificates for secure connection
+client-key-file = ""
+
+# Path to the Root Certificate Authority used to sign both client and server certificates
+root-ca-file = ""
+
+
+#######################################################################
+### Advanced Configuration Options ###
+#######################################################################
+
+#######################################################
+### RPC Server Configuration Options ###
+#######################################################
+[rpc]
+
+# TCP or UNIX socket address for the RPC server to listen on
+laddr = "tcp://127.0.0.1:26657"
+
+# A list of origins a cross-domain request can be executed from
+# Default value '[]' disables cors support
+# Use '["*"]' to allow any origin
+cors-allowed-origins = []
+
+# A list of methods the client is allowed to use with cross-domain requests
+cors-allowed-methods = ["HEAD", "GET", "POST", ]
+
+# A list of non-simple headers the client is allowed to use with cross-domain requests
+cors-allowed-headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ]
+
+# TCP or UNIX socket address for the gRPC server to listen on
+# NOTE: This server only supports /broadcast_tx_commit
+# Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36.
+grpc-laddr = ""
+
+# Maximum number of simultaneous connections.
+# Does not include RPC (HTTP&WebSocket) connections. See max-open-connections
+# If you want to accept a larger number than the default, make sure
+# you increase your OS limits.
+# 0 - unlimited.
+# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
+# 1024 - 40 - 10 - 50 = 924 = ~900
+# Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36.
+grpc-max-open-connections = 900
+
+# Activate unsafe RPC commands like /dial-seeds and /unsafe-flush-mempool
+unsafe = false
+
+# Maximum number of simultaneous connections (including WebSocket).
+# Does not include gRPC connections. See grpc-max-open-connections
+# If you want to accept a larger number than the default, make sure
+# you increase your OS limits.
+# 0 - unlimited.
+# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
+# 1024 - 40 - 10 - 50 = 924 = ~900
+max-open-connections = 900
+
+# Maximum number of unique clientIDs that can /subscribe
+# If you're using /broadcast_tx_commit, set to the estimated maximum number
+# of broadcast_tx_commit calls per block.
+max-subscription-clients = 100
+
+# Maximum number of unique queries a given client can /subscribe to
+# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to
+# the estimated maximum number of broadcast_tx_commit calls per block.
+max-subscriptions-per-client = 5
+
+# Experimental parameter to specify the maximum number of events a node will
+# buffer, per subscription, before returning an error and closing the
+# subscription. Must be set to at least 100, but higher values will accommodate
+# higher event throughput rates (and will use more memory).
+experimental-subscription-buffer-size = 200
+
+# Experimental parameter to specify the maximum number of RPC responses that
+# can be buffered per WebSocket client.
If clients cannot read from the +# WebSocket endpoint fast enough, they will be disconnected, so increasing this +# parameter may reduce the chances of them being disconnected (but will cause +# the node to use more memory). +# +# Must be at least the same as "experimental-subscription-buffer-size", +# otherwise connections could be dropped unnecessarily. This value should +# ideally be somewhat higher than "experimental-subscription-buffer-size" to +# accommodate non-subscription-related RPC responses. +experimental-websocket-write-buffer-size = 200 + +# If a WebSocket client cannot read fast enough, at present we may +# silently drop events instead of generating an error or disconnecting the +# client. +# +# Enabling this experimental parameter will cause the WebSocket connection to +# be closed instead if it cannot read fast enough, allowing for greater +# predictability in subscription behavior. +experimental-close-on-slow-client = false + +# How long to wait for a tx to be committed during /broadcast_tx_commit. +# WARNING: Using a value larger than 10s will result in increasing the +# global HTTP write timeout, which applies to all connections and endpoints. +# See https://github.com/tendermint/tendermint/issues/3435 +timeout-broadcast-tx-commit = "10s" + +# Maximum size of request body, in bytes +max-body-bytes = 1000000 + +# Maximum size of request header, in bytes +max-header-bytes = 1048576 + +# The path to a file containing certificate that is used to create the HTTPS server. +# Might be either absolute path or path related to Tendermint's config directory. +# If the certificate is signed by a certificate authority, +# the certFile should be the concatenation of the server's certificate, any intermediates, +# and the CA's certificate. +# NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server. +# Otherwise, HTTP server is run. +tls-cert-file = "" + +# The path to a file containing matching private key that is used to create the HTTPS server. +# Might be either absolute path or path related to Tendermint's config directory. +# NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server. +# Otherwise, HTTP server is run. +tls-key-file = "" + +# pprof listen address (https://golang.org/pkg/net/http/pprof) +pprof-laddr = "" + +####################################################### +### P2P Configuration Options ### +####################################################### +[p2p] + +# Enable the legacy p2p layer. +use-legacy = false + +# Select the p2p internal queue +queue-type = "priority" + +# Address to listen for incoming connections +laddr = "tcp://0.0.0.0:26656" + +# Address to advertise to peers for them to dial +# If empty, will use the same port as the laddr, +# and will introspect on the listener or use UPnP +# to figure out the address. ip and port are required +# example: 159.89.10.97:26656 +external-address = "" + +# Comma separated list of seed nodes to connect to +# We only use these if we can’t connect to peers in the addrbook +# NOTE: not used by the new PEX reactor. Please use BootstrapPeers instead. +# TODO: Remove once p2p refactor is complete +# ref: https:#github.com/tendermint/tendermint/issues/5670 +seeds = "" + +# Comma separated list of peers to be added to the peer store +# on startup. 
Either BootstrapPeers or PersistentPeers are +# needed for peer discovery +bootstrap-peers = "" + +# Comma separated list of nodes to keep persistent connections to +persistent-peers = "" + +# UPNP port forwarding +upnp = false + +# Path to address book +# TODO: Remove once p2p refactor is complete in favor of peer store. +addr-book-file = "config/addrbook.json" + +# Set true for strict address routability rules +# Set false for private or local networks +addr-book-strict = true + +# Maximum number of inbound peers +# +# TODO: Remove once p2p refactor is complete in favor of MaxConnections. +# ref: https://github.com/tendermint/tendermint/issues/5670 +max-num-inbound-peers = 40 + +# Maximum number of outbound peers to connect to, excluding persistent peers +# +# TODO: Remove once p2p refactor is complete in favor of MaxConnections. +# ref: https://github.com/tendermint/tendermint/issues/5670 +max-num-outbound-peers = 10 + +# Maximum number of connections (inbound and outbound). +max-connections = 64 + +# Rate limits the number of incoming connection attempts per IP address. +max-incoming-connection-attempts = 100 + +# List of node IDs, to which a connection will be (re)established ignoring any existing limits +# TODO: Remove once p2p refactor is complete. +# ref: https://github.com/tendermint/tendermint/issues/5670 +unconditional-peer-ids = "" + +# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) +# TODO: Remove once p2p refactor is complete +# ref: https:#github.com/tendermint/tendermint/issues/5670 +persistent-peers-max-dial-period = "0s" + +# Time to wait before flushing messages out on the connection +# TODO: Remove once p2p refactor is complete +# ref: https:#github.com/tendermint/tendermint/issues/5670 +flush-throttle-timeout = "100ms" + +# Maximum size of a message packet payload, in bytes +# TODO: Remove once p2p refactor is complete +# ref: https:#github.com/tendermint/tendermint/issues/5670 +max-packet-msg-payload-size = 1400 + +# Rate at which packets can be sent, in bytes/second +# TODO: Remove once p2p refactor is complete +# ref: https:#github.com/tendermint/tendermint/issues/5670 +send-rate = 5120000 + +# Rate at which packets can be received, in bytes/second +# TODO: Remove once p2p refactor is complete +# ref: https:#github.com/tendermint/tendermint/issues/5670 +recv-rate = 5120000 + +# Set true to enable the peer-exchange reactor +pex = true + +# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) +# Warning: IPs will be exposed at /net_info, for more information https://github.com/tendermint/tendermint/issues/3055 +private-peer-ids = "" + +# Toggle to disable guard against peers connecting from the same ip. +allow-duplicate-ip = false + +# Peer connection configuration. +handshake-timeout = "20s" +dial-timeout = "3s" + +####################################################### +### Mempool Configuration Option ### +####################################################### +[mempool] + +# Mempool version to use: +# 1) "v0" - The legacy non-prioritized mempool reactor. +# 2) "v1" (default) - The prioritized mempool reactor. +version = "v1" + +recheck = true +broadcast = true + +# Maximum number of transactions in the mempool +size = 5000 + +# Limit the total size of all txs in the mempool. +# This only accounts for raw transactions (e.g. given 1MB transactions and +# max-txs-bytes=5MB, mempool will only accept 5 transactions). 
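+# (The default below is 1073741824 bytes, i.e. 1 GiB = 1024 * 1024 * 1024.)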
+max-txs-bytes = 1073741824 + +# Size of the cache (used to filter transactions we saw earlier) in transactions +cache-size = 10000 + +# Do not remove invalid transactions from the cache (default: false) +# Set to true if it's not possible for any invalid transaction to become valid +# again in the future. +keep-invalid-txs-in-cache = false + +# Maximum size of a single transaction. +# NOTE: the max size of a tx transmitted over the network is {max-tx-bytes}. +max-tx-bytes = 1048576 + +# Maximum size of a batch of transactions to send to a peer +# Including space needed by encoding (one varint per transaction). +# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 +max-batch-bytes = 0 + +# ttl-duration, if non-zero, defines the maximum amount of time a transaction +# can exist for in the mempool. +# +# Note, if ttl-num-blocks is also defined, a transaction will be removed if it +# has existed in the mempool at least ttl-num-blocks number of blocks or if it's +# insertion time into the mempool is beyond ttl-duration. +ttl-duration = "0s" + +# ttl-num-blocks, if non-zero, defines the maximum number of blocks a transaction +# can exist for in the mempool. +# +# Note, if ttl-duration is also defined, a transaction will be removed if it +# has existed in the mempool at least ttl-num-blocks number of blocks or if +# it's insertion time into the mempool is beyond ttl-duration. +ttl-num-blocks = 0 + +####################################################### +### State Sync Configuration Options ### +####################################################### +[statesync] +# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine +# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in +# the network to take and serve state machine snapshots. State sync is not attempted if the node +# has any local state (LastBlockHeight > 0). The node will have a truncated block history, +# starting from the height of the snapshot. +enable = false + +# State sync uses light client verification to verify state. This can be done either through the +# P2P layer or RPC layer. Set this to true to use the P2P layer. If false (default), RPC layer +# will be used. +use-p2p = false + +# If using RPC, at least two addresses need to be provided. They should be compatible with net.Dial, +# for example: "host.example.com:2125" +rpc-servers = "" + +# The hash and height of a trusted block. Must be within the trust-period. +trust-height = 0 +trust-hash = "" + +# The trust period should be set so that Tendermint can detect and gossip misbehavior before +# it is considered expired. For chains based on the Cosmos SDK, one day less than the unbonding +# period should suffice. +trust-period = "168h0m0s" + +# Time to spend discovering snapshots before initiating a restore. +discovery-time = "15s" + +# Temporary directory for state sync snapshot chunks, defaults to os.TempDir(). +# The synchronizer will create a new, randomly named directory within this directory +# and remove it when the sync is complete. +temp-dir = "" + +# The timeout duration before re-requesting a chunk, possibly from a different +# peer (default: 15 seconds). +chunk-request-timeout = "15s" + +# The number of concurrent chunk and block fetchers to run (default: 4). 
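+# Raising this may speed up a restore on well-connected nodes at the cost of
+# more memory and more load on serving peers (a general tuning note, not a
+# measured guarantee).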
+fetchers = "4" + +####################################################### +### Block Sync Configuration Connections ### +####################################################### +[blocksync] + +# If this node is many blocks behind the tip of the chain, BlockSync +# allows them to catchup quickly by downloading blocks in parallel +# and verifying their commits +enable = true + +# Block Sync version to use: +# 1) "v0" (default) - the standard Block Sync implementation +# 2) "v2" - DEPRECATED, please use v0 +version = "v0" + +####################################################### +### Consensus Configuration Options ### +####################################################### +[consensus] + +wal-file = "data/cs.wal/wal" + +# How long we wait for a proposal block before prevoting nil +timeout-propose = "3s" +# How much timeout-propose increases with each round +timeout-propose-delta = "500ms" +# How long we wait after receiving +2/3 prevotes for “anything” (ie. not a single block or nil) +timeout-prevote = "1s" +# How much the timeout-prevote increases with each round +timeout-prevote-delta = "500ms" +# How long we wait after receiving +2/3 precommits for “anything” (ie. not a single block or nil) +timeout-precommit = "1s" +# How much the timeout-precommit increases with each round +timeout-precommit-delta = "500ms" +# How long we wait after committing a block, before starting on the new +# height (this gives us a chance to receive some more precommits, even +# though we already have +2/3). +timeout-commit = "1s" + +# How many blocks to look back to check existence of the node's consensus votes before joining consensus +# When non-zero, the node will panic upon restart +# if the same consensus key was used to sign {double-sign-check-height} last blocks. +# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. +double-sign-check-height = 0 + +# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) +skip-timeout-commit = false + +# EmptyBlocks mode and possible interval between empty blocks +create-empty-blocks = true +create-empty-blocks-interval = "0s" + +create-proof-block-range = 1 + +# Reactor sleep duration parameters +peer-gossip-sleep-duration = "100ms" +peer-query-maj23-sleep-duration = "2s" + +####################################################### +### Transaction Indexer Configuration Options ### +####################################################### +[tx-index] + +# The backend database list to back the indexer. +# If list contains "null" or "", meaning no indexer service will be used. +# +# The application will set which txs to index. In some cases a node operator will be able +# to decide which txs to index based on configuration set in the application. +# +# Options: +# 1) "null" +# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). +# 3) "psql" - the indexer services backed by PostgreSQL. +# When "kv" or "psql" is chosen "tx.height" and "tx.hash" will always be indexed. +indexer = ["kv"] + +# The PostgreSQL connection configuration, the connection format: +# postgresql://:@:/? +psql-conn = "" + +####################################################### +### Instrumentation Configuration Options ### +####################################################### +[instrumentation] + +# When true, Prometheus metrics are served under /metrics on +# PrometheusListenAddr. +# Check out the documentation for the list of available metrics. 
+prometheus = false + +# Address to listen for Prometheus collector(s) connections +prometheus-listen-addr = ":26660" + +# Maximum number of simultaneous connections. +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +max-open-connections = 3 + +# Instrumentation namespace +namespace = "tendermint" diff --git a/scripts/confix/testdata/v36-config.toml b/scripts/confix/testdata/v36-config.toml new file mode 100644 index 0000000000..ae617640f6 --- /dev/null +++ b/scripts/confix/testdata/v36-config.toml @@ -0,0 +1,481 @@ +# This is a TOML config file. +# For more information, see https://github.com/toml-lang/toml + +# NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or +# relative to the home directory (e.g. "data"). The home directory is +# "$HOME/.tendermint" by default, but could be changed via $TMHOME env variable +# or --home cmd flag. + +####################################################################### +### Main Base Config Options ### +####################################################################### + +# TCP or UNIX socket address of the ABCI application, +# or the name of an ABCI application compiled in with the Tendermint binary +proxy-app = "tcp://127.0.0.1:26658" + +# A custom human readable name for this node +moniker = "localhost" + +# Mode of Node: full | validator | seed +# * validator node +# - all reactors +# - with priv_validator_key.json, priv_validator_state.json +# * full node +# - all reactors +# - No priv_validator_key.json, priv_validator_state.json +# * seed node +# - only P2P, PEX Reactor +# - No priv_validator_key.json, priv_validator_state.json +mode = "validator" + +# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb +# * goleveldb (github.com/syndtr/goleveldb - most popular implementation) +# - pure go +# - stable +# * cleveldb (uses levigo wrapper) +# - fast +# - requires gcc +# - use cleveldb build tag (go build -tags cleveldb) +# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt) +# - EXPERIMENTAL +# - may be faster is some use-cases (random reads - indexer) +# - use boltdb build tag (go build -tags boltdb) +# * rocksdb (uses github.com/tecbot/gorocksdb) +# - EXPERIMENTAL +# - requires gcc +# - use rocksdb build tag (go build -tags rocksdb) +# * badgerdb (uses github.com/dgraph-io/badger) +# - EXPERIMENTAL +# - use badgerdb build tag (go build -tags badgerdb) +db-backend = "goleveldb" + +# Database directory +db-dir = "data" + +# Output level for logging, including package level options +log-level = "info" + +# Output format: 'plain' (colored text) or 'json' +log-format = "plain" + +##### additional base config options ##### + +# Path to the JSON file containing the initial validator set and other meta data +genesis-file = "config/genesis.json" + +# Path to the JSON file containing the private key to use for node authentication in the p2p protocol +node-key-file = "config/node_key.json" + +# Mechanism to connect to the ABCI application: socket | grpc +abci = "socket" + +# If true, query the ABCI app on connecting to a new peer +# so the app can decide if we should keep the connection or not +filter-peers = false + + +####################################################### +### Priv Validator Configuration ### +####################################################### +[priv-validator] + +# Path to the JSON file containing the private key to use as a validator in the consensus protocol +key-file = "config/priv_validator_key.json" + 
+# Path to the JSON file containing the last sign state of a validator
+state-file = "data/priv_validator_state.json"
+
+# TCP or UNIX socket address for Tendermint to listen on for
+# connections from an external PrivValidator process
+# when the listenAddr is prefixed with grpc instead of tcp it will use the gRPC Client
+laddr = ""
+
+# Path to the client certificate generated while creating needed files for secure connection.
+# If a remote validator address is provided but no certificate, the connection will be insecure
+client-certificate-file = ""
+
+# Client key generated while creating certificates for secure connection
+client-key-file = ""
+
+# Path to the Root Certificate Authority used to sign both client and server certificates
+root-ca-file = ""
+
+
+#######################################################################
+### Advanced Configuration Options ###
+#######################################################################
+
+#######################################################
+### RPC Server Configuration Options ###
+#######################################################
+[rpc]
+
+# TCP or UNIX socket address for the RPC server to listen on
+laddr = "tcp://127.0.0.1:26657"
+
+# A list of origins a cross-domain request can be executed from
+# Default value '[]' disables cors support
+# Use '["*"]' to allow any origin
+cors-allowed-origins = []
+
+# A list of methods the client is allowed to use with cross-domain requests
+cors-allowed-methods = ["HEAD", "GET", "POST", ]
+
+# A list of non-simple headers the client is allowed to use with cross-domain requests
+cors-allowed-headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ]
+
+# Activate unsafe RPC commands like /dial-seeds and /unsafe-flush-mempool
+unsafe = false
+
+# Maximum number of simultaneous connections (including WebSocket).
+# If you want to accept a larger number than the default, make sure
+# you increase your OS limits.
+# 0 - unlimited.
+# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
+# 1024 - 40 - 10 - 50 = 924 = ~900
+max-open-connections = 900
+
+# Maximum number of unique clientIDs that can /subscribe
+# If you're using /broadcast_tx_commit, set to the estimated maximum number
+# of broadcast_tx_commit calls per block.
+max-subscription-clients = 100
+
+# Maximum number of unique queries a given client can /subscribe to
+# If you're using a Local RPC client and /broadcast_tx_commit, set this
+# to the estimated maximum number of broadcast_tx_commit calls per block.
+max-subscriptions-per-client = 5
+
+# If true, disable the websocket interface to the RPC service. This has
+# the effect of disabling the /subscribe, /unsubscribe, and /unsubscribe_all
+# methods for event subscription.
+#
+# EXPERIMENTAL: This setting will be removed in Tendermint v0.37.
+experimental-disable-websocket = false
+
+# The time window size for the event log. All events up to this long before
+# the latest (up to EventLogMaxItems) will be available for subscribers to
+# fetch via the /events method. If 0 (the default) the event log and the
+# /events RPC method are disabled.
+event-log-window-size = "0s"
+
+# The maximum number of events that may be retained by the event log. If
+# this value is 0, no upper limit is set. Otherwise, items in excess of
+# this number will be discarded from the event log.
+#
+# Warning: This setting is a safety valve. Setting it too low may cause
+# subscribers to miss events.
Try to choose a value higher than the +# maximum worst-case expected event load within the chosen window size in +# ordinary operation. +# +# For example, if the window size is 10 minutes and the node typically +# averages 1000 events per ten minutes, but with occasional known spikes of +# up to 2000, choose a value > 2000. +event-log-max-items = 0 + +# How long to wait for a tx to be committed during /broadcast_tx_commit. +# WARNING: Using a value larger than 10s will result in increasing the +# global HTTP write timeout, which applies to all connections and endpoints. +# See https://github.com/tendermint/tendermint/issues/3435 +timeout-broadcast-tx-commit = "10s" + +# Maximum size of request body, in bytes +max-body-bytes = 1000000 + +# Maximum size of request header, in bytes +max-header-bytes = 1048576 + +# The path to a file containing certificate that is used to create the HTTPS server. +# Might be either absolute path or path related to Tendermint's config directory. +# If the certificate is signed by a certificate authority, +# the certFile should be the concatenation of the server's certificate, any intermediates, +# and the CA's certificate. +# NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server. +# Otherwise, HTTP server is run. +tls-cert-file = "" + +# The path to a file containing matching private key that is used to create the HTTPS server. +# Might be either absolute path or path related to Tendermint's config directory. +# NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server. +# Otherwise, HTTP server is run. +tls-key-file = "" + +# pprof listen address (https://golang.org/pkg/net/http/pprof) +pprof-laddr = "" + +####################################################### +### P2P Configuration Options ### +####################################################### +[p2p] + +# Select the p2p internal queue +queue-type = "priority" + +# Address to listen for incoming connections +laddr = "tcp://0.0.0.0:26656" + +# Address to advertise to peers for them to dial +# If empty, will use the same port as the laddr, +# and will introspect on the listener or use UPnP +# to figure out the address. ip and port are required +# example: 159.89.10.97:26656 +external-address = "" + +# Comma separated list of seed nodes to connect to +# We only use these if we can’t connect to peers in the addrbook +# NOTE: not used by the new PEX reactor. Please use BootstrapPeers instead. +# TODO: Remove once p2p refactor is complete +# ref: https:#github.com/tendermint/tendermint/issues/5670 +seeds = "" + +# Comma separated list of peers to be added to the peer store +# on startup. Either BootstrapPeers or PersistentPeers are +# needed for peer discovery +bootstrap-peers = "" + +# Comma separated list of nodes to keep persistent connections to +persistent-peers = "" + +# UPNP port forwarding +upnp = false + +# Maximum number of connections (inbound and outbound). +max-connections = 64 + +# Rate limits the number of incoming connection attempts per IP address. +max-incoming-connection-attempts = 100 + +# Set true to enable the peer-exchange reactor +pex = true + +# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) +# Warning: IPs will be exposed at /net_info, for more information https://github.com/tendermint/tendermint/issues/3055 +private-peer-ids = "" + +# Toggle to disable guard against peers connecting from the same ip. +allow-duplicate-ip = false + +# Peer connection configuration. 
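+# handshake-timeout bounds the initial handshake with a newly connected peer,
+# and dial-timeout bounds the outbound connection attempt itself (a short
+# descriptive gloss of the two settings below).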
+handshake-timeout = "20s" +dial-timeout = "3s" + +# Time to wait before flushing messages out on the connection +# TODO: Remove once MConnConnection is removed. +flush-throttle-timeout = "100ms" + +# Maximum size of a message packet payload, in bytes +# TODO: Remove once MConnConnection is removed. +max-packet-msg-payload-size = 1400 + +# Rate at which packets can be sent, in bytes/second +# TODO: Remove once MConnConnection is removed. +send-rate = 5120000 + +# Rate at which packets can be received, in bytes/second +# TODO: Remove once MConnConnection is removed. +recv-rate = 5120000 + + +####################################################### +### Mempool Configuration Option ### +####################################################### +[mempool] + +recheck = true +broadcast = true + +# Maximum number of transactions in the mempool +size = 5000 + +# Limit the total size of all txs in the mempool. +# This only accounts for raw transactions (e.g. given 1MB transactions and +# max-txs-bytes=5MB, mempool will only accept 5 transactions). +max-txs-bytes = 1073741824 + +# Size of the cache (used to filter transactions we saw earlier) in transactions +cache-size = 10000 + +# Do not remove invalid transactions from the cache (default: false) +# Set to true if it's not possible for any invalid transaction to become valid +# again in the future. +keep-invalid-txs-in-cache = false + +# Maximum size of a single transaction. +# NOTE: the max size of a tx transmitted over the network is {max-tx-bytes}. +max-tx-bytes = 1048576 + +# Maximum size of a batch of transactions to send to a peer +# Including space needed by encoding (one varint per transaction). +# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796 +max-batch-bytes = 0 + +# ttl-duration, if non-zero, defines the maximum amount of time a transaction +# can exist for in the mempool. +# +# Note, if ttl-num-blocks is also defined, a transaction will be removed if it +# has existed in the mempool at least ttl-num-blocks number of blocks or if it's +# insertion time into the mempool is beyond ttl-duration. +ttl-duration = "0s" + +# ttl-num-blocks, if non-zero, defines the maximum number of blocks a transaction +# can exist for in the mempool. +# +# Note, if ttl-duration is also defined, a transaction will be removed if it +# has existed in the mempool at least ttl-num-blocks number of blocks or if +# it's insertion time into the mempool is beyond ttl-duration. +ttl-num-blocks = 0 + +####################################################### +### State Sync Configuration Options ### +####################################################### +[statesync] +# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine +# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in +# the network to take and serve state machine snapshots. State sync is not attempted if the node +# has any local state (LastBlockHeight > 0). The node will have a truncated block history, +# starting from the height of the snapshot. +enable = false + +# State sync uses light client verification to verify state. This can be done either through the +# P2P layer or RPC layer. Set this to true to use the P2P layer. If false (default), RPC layer +# will be used. +use-p2p = false + +# If using RPC, at least two addresses need to be provided. They should be compatible with net.Dial, +# for example: "host.example.com:2125" +rpc-servers = "" + +# The hash and height of a trusted block. 
Must be within the trust-period. +trust-height = 0 +trust-hash = "" + +# The trust period should be set so that Tendermint can detect and gossip misbehavior before +# it is considered expired. For chains based on the Cosmos SDK, one day less than the unbonding +# period should suffice. +trust-period = "168h0m0s" + +# Time to spend discovering snapshots before initiating a restore. +discovery-time = "15s" + +# Temporary directory for state sync snapshot chunks, defaults to os.TempDir(). +# The synchronizer will create a new, randomly named directory within this directory +# and remove it when the sync is complete. +temp-dir = "" + +# The timeout duration before re-requesting a chunk, possibly from a different +# peer (default: 15 seconds). +chunk-request-timeout = "15s" + +# The number of concurrent chunk and block fetchers to run (default: 4). +fetchers = "4" + +####################################################### +### Consensus Configuration Options ### +####################################################### +[consensus] + +wal-file = "data/cs.wal/wal" + +# How many blocks to look back to check existence of the node's consensus votes before joining consensus +# When non-zero, the node will panic upon restart +# if the same consensus key was used to sign {double-sign-check-height} last blocks. +# So, validators should stop the state machine, wait for some blocks, and then restart the state machine to avoid panic. +double-sign-check-height = 0 + +# EmptyBlocks mode and possible interval between empty blocks +create-empty-blocks = true +create-empty-blocks-interval = "0s" + +create-proof-block-range = 1 + +# Reactor sleep duration parameters +peer-gossip-sleep-duration = "100ms" +peer-query-maj23-sleep-duration = "2s" + +### Unsafe Timeout Overrides ### + +# These fields provide temporary overrides for the Timeout consensus parameters. +# Use of these parameters is strongly discouraged. Using these parameters may have serious +# liveness implications for the validator and for the chain. +# +# These fields will be removed from the configuration file in the v0.37 release of Tendermint. +# For additional information, see ADR-74: +# https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-074-timeout-params.md + +# This field provides an unsafe override of the Propose timeout consensus parameter. +# This field configures how long the consensus engine will wait for a proposal block before prevoting nil. +# If this field is set to a value greater than 0, it will take effect. +# unsafe-propose-timeout-override = 0s + +# This field provides an unsafe override of the ProposeDelta timeout consensus parameter. +# This field configures how much the propose timeout increases with each round. +# If this field is set to a value greater than 0, it will take effect. +# unsafe-propose-timeout-delta-override = 0s + +# This field provides an unsafe override of the Vote timeout consensus parameter. +# This field configures how long the consensus engine will wait after +# receiving +2/3 votes in a round. +# If this field is set to a value greater than 0, it will take effect. +# unsafe-vote-timeout-override = 0s + +# This field provides an unsafe override of the VoteDelta timeout consensus parameter. +# This field configures how much the vote timeout increases with each round. +# If this field is set to a value greater than 0, it will take effect. +# unsafe-vote-timeout-delta-override = 0s + +# This field provides an unsafe override of the Commit timeout consensus parameter. 
+# This field configures how long the consensus engine will wait after receiving +# +2/3 precommits before beginning the next height. +# If this field is set to a value greater than 0, it will take effect. +# unsafe-commit-timeout-override = 0s + +# This field provides an unsafe override of the BypassCommitTimeout consensus parameter. +# This field configures if the consensus engine will wait for the full Commit timeout +# before proceeding to the next height. +# If this field is set to true, the consensus engine will proceed to the next height +# as soon as the node has gathered votes from all of the validators on the network. +# unsafe-bypass-commit-timeout-override = + +####################################################### +### Transaction Indexer Configuration Options ### +####################################################### +[tx-index] + +# The backend database list to back the indexer. +# If list contains "null" or "", meaning no indexer service will be used. +# +# The application will set which txs to index. In some cases a node operator will be able +# to decide which txs to index based on configuration set in the application. +# +# Options: +# 1) "null" (default) - no indexer services. +# 2) "kv" - a simple indexer backed by key-value storage (see DBBackend) +# 3) "psql" - the indexer services backed by PostgreSQL. +# When "kv" or "psql" is chosen "tx.height" and "tx.hash" will always be indexed. +indexer = ["null"] + +# The PostgreSQL connection configuration, the connection format: +# postgresql://:@:/? +psql-conn = "" + +####################################################### +### Instrumentation Configuration Options ### +####################################################### +[instrumentation] + +# When true, Prometheus metrics are served under /metrics on +# PrometheusListenAddr. +# Check out the documentation for the list of available metrics. +prometheus = false + +# Address to listen for Prometheus collector(s) connections +prometheus-listen-addr = ":26660" + +# Maximum number of simultaneous connections. +# If you want to accept a larger number than the default, make sure +# you increase your OS limits. +# 0 - unlimited. +max-open-connections = 3 + +# Instrumentation namespace +namespace = "tendermint" diff --git a/scripts/estream/estream.go b/scripts/estream/estream.go new file mode 100644 index 0000000000..c37bc5ba75 --- /dev/null +++ b/scripts/estream/estream.go @@ -0,0 +1,81 @@ +// Program estream is a manual testing tool for polling the event stream +// of a running Tendermint consensus node. +package main + +import ( + "context" + "encoding/json" + "flag" + "fmt" + "log" + "os" + "os/signal" + "path/filepath" + + "github.com/tendermint/tendermint/rpc/client/eventstream" + rpcclient "github.com/tendermint/tendermint/rpc/client/http" + "github.com/tendermint/tendermint/rpc/coretypes" +) + +var ( + query = flag.String("query", "", "Filter query") + batchSize = flag.Int("batch", 0, "Batch size") + resumeFrom = flag.String("resume", "", "Resume cursor") + numItems = flag.Int("count", 0, "Number of items to read (0 to stream)") + waitTime = flag.Duration("poll", 0, "Long poll interval") + rpcAddr = flag.String("addr", "http://localhost:26657", "RPC service address") +) + +func init() { + flag.Usage = func() { + fmt.Fprintf(os.Stderr, `Usage: %[1]s [options] + +Connect to the Tendermint node whose RPC service is at -addr, and poll for events +matching the specified -query. If no query is given, all events are fetched. 
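+Note that the /events method is only available when the node's RPC event log
+is enabled (a non-zero event-log-window-size in the node configuration).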
+The resulting event data are written to stdout as JSON. + +Use -resume to pick up polling from a previously-reported event cursor. +Use -count to stop polling after a certain number of events has been reported. +Use -batch to override the default request batch size. +Use -poll to override the default long-polling interval. + +Options: +`, filepath.Base(os.Args[0])) + flag.PrintDefaults() + } +} + +func main() { + flag.Parse() + + cli, err := rpcclient.New(*rpcAddr) + if err != nil { + log.Fatalf("RPC client: %v", err) + } + stream := eventstream.New(cli, *query, &eventstream.StreamOptions{ + BatchSize: *batchSize, + ResumeFrom: *resumeFrom, + WaitTime: *waitTime, + }) + + // Shut down cleanly on SIGINT. Don't attempt clean shutdown for other + // fatal signals. + ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt) + defer cancel() + + var nr int + if err := stream.Run(ctx, func(itm *coretypes.EventItem) error { + nr++ + bits, err := json.Marshal(itm) + if err != nil { + return err + } + fmt.Println(string(bits)) + if *numItems > 0 && nr >= *numItems { + return eventstream.ErrStopRunning + } + return nil + }); err != nil { + log.Fatalf("Stream failed: %v", err) + } +} diff --git a/scripts/json2wal/main.go b/scripts/json2wal/main.go index 6b60ac2fc7..e8d3fcf932 100644 --- a/scripts/json2wal/main.go +++ b/scripts/json2wal/main.go @@ -9,13 +9,13 @@ package main import ( "bufio" + "encoding/json" "fmt" "io" "os" "strings" "github.com/tendermint/tendermint/internal/consensus" - tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/types" ) @@ -27,13 +27,13 @@ func main() { f, err := os.Open(os.Args[1]) if err != nil { - panic(fmt.Errorf("failed to open WAL file: %v", err)) + panic(fmt.Errorf("failed to open WAL file: %w", err)) } defer f.Close() walFile, err := os.OpenFile(os.Args[2], os.O_EXCL|os.O_WRONLY|os.O_CREATE, 0666) if err != nil { - panic(fmt.Errorf("failed to open WAL file: %v", err)) + panic(fmt.Errorf("failed to open WAL file: %w", err)) } defer walFile.Close() @@ -48,7 +48,7 @@ func main() { if err == io.EOF { break } else if err != nil { - panic(fmt.Errorf("failed to read file: %v", err)) + panic(fmt.Errorf("failed to read file: %w", err)) } // ignore the ENDHEIGHT in json.File if strings.HasPrefix(string(msgJSON), "ENDHEIGHT") { @@ -56,14 +56,14 @@ func main() { } var msg consensus.TimedWALMessage - err = tmjson.Unmarshal(msgJSON, &msg) + err = json.Unmarshal(msgJSON, &msg) if err != nil { - panic(fmt.Errorf("failed to unmarshal json: %v", err)) + panic(fmt.Errorf("failed to unmarshal json: %w", err)) } err = dec.Encode(&msg) if err != nil { - panic(fmt.Errorf("failed to encode msg: %v", err)) + panic(fmt.Errorf("failed to encode msg: %w", err)) } } } diff --git a/scripts/keymigrate/migrate.go b/scripts/keymigrate/migrate.go index 2061223ade..ca2c528e2f 100644 --- a/scripts/keymigrate/migrate.go +++ b/scripts/keymigrate/migrate.go @@ -15,8 +15,8 @@ import ( "math/rand" "runtime" "strconv" - "sync" + "github.com/creachadair/taskgroup" "github.com/google/orderedcode" dbm "github.com/tendermint/tm-db" ) @@ -27,7 +27,7 @@ type ( ) func getAllLegacyKeys(db dbm.DB) ([]keyID, error) { - out := []keyID{} + var out []keyID iter, err := db.Iterator(nil, nil) if err != nil { @@ -39,15 +39,12 @@ func getAllLegacyKeys(db dbm.DB) ([]keyID, error) { // make sure it's a key with a legacy format, and skip // all other keys, to make it safe to resume the migration. 
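+ // For example, a block-meta key like "H:12345" is legacy, while a key
+ // already rewritten in the new orderedcode format is not.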
- if !keyIsLegacy(k) { + if !checkKeyType(k).isLegacy() { continue } - // there's inconsistency around tm-db's handling of - // key copies. - nk := make([]byte, len(k)) - copy(nk, k) - out = append(out, nk) + // Make an explicit copy, since not all tm-db backends do. + out = append(out, []byte(string(k))) } if err = iter.Error(); err != nil { @@ -61,66 +58,123 @@ func getAllLegacyKeys(db dbm.DB) ([]keyID, error) { return out, nil } -func makeKeyChan(keys []keyID) <-chan keyID { - out := make(chan keyID, len(keys)) - defer close(out) - - for _, key := range keys { - out <- key - } +// keyType is an enumeration for the structural type of a key. +type keyType int + +func (t keyType) isLegacy() bool { return t != nonLegacyKey } + +const ( + nonLegacyKey keyType = iota // non-legacy key (presumed already converted) + consensusParamsKey + abciResponsesKey + validatorsKey + stateStoreKey // state storage record + blockMetaKey // H: + blockPartKey // P: + commitKey // C: + seenCommitKey // SC: + blockHashKey // BH: + lightSizeKey // size + lightBlockKey // lb/ + evidenceCommittedKey // \x00 + evidencePendingKey // \x01 + txHeightKey // tx.height/... (special case) + abciEventKey // name/value/height/index + txHashKey // 32-byte transaction hash (unprefixed) +) - return out +var prefixes = []struct { + prefix []byte + ktype keyType +}{ + {[]byte("consensusParamsKey:"), consensusParamsKey}, + {[]byte("abciResponsesKey:"), abciResponsesKey}, + {[]byte("validatorsKey:"), validatorsKey}, + {[]byte("stateKey"), stateStoreKey}, + {[]byte("H:"), blockMetaKey}, + {[]byte("P:"), blockPartKey}, + {[]byte("C:"), commitKey}, + {[]byte("SC:"), seenCommitKey}, + {[]byte("BH:"), blockHashKey}, + {[]byte("size"), lightSizeKey}, + {[]byte("lb/"), lightBlockKey}, + {[]byte("\x00"), evidenceCommittedKey}, + {[]byte("\x01"), evidencePendingKey}, } -func keyIsLegacy(key keyID) bool { - for _, prefix := range []keyID{ - // core "store" - keyID("consensusParamsKey:"), - keyID("abciResponsesKey:"), - keyID("validatorsKey:"), - keyID("stateKey"), - keyID("H:"), - keyID("P:"), - keyID("C:"), - keyID("SC:"), - keyID("BH:"), - // light - keyID("size"), - keyID("lb/"), - // evidence - keyID([]byte{0x00}), - keyID([]byte{0x01}), - // tx index - keyID("tx.height/"), - keyID("tx.hash/"), - } { - if bytes.HasPrefix(key, prefix) { - return true +// checkKeyType classifies a candidate key based on its structure. +func checkKeyType(key keyID) keyType { + for _, p := range prefixes { + if bytes.HasPrefix(key, p.prefix) { + return p.ktype } } - // this means it's a tx index... - if bytes.Count(key, []byte("/")) >= 3 { - return true + // A legacy event key has the form: + // + // / / / + // + // Transaction hashes are stored as a raw binary hash with no prefix. + // + // Because a hash can contain any byte, it is possible (though unlikely) + // that a hash could have the correct form for an event key, in which case + // we would translate it incorrectly. To reduce the likelihood of an + // incorrect interpretation, we parse candidate event keys and check for + // some structural properties before making a decision. + // + // Note, though, that nothing prevents event names or values from containing + // additional "/" separators, so the parse has to be forgiving. + parts := bytes.Split(key, []byte("/")) + if len(parts) >= 4 { + // Special case for tx.height. 
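+ // For example, a hypothetical legacy key "tx.height/1/1/5" splits into
+ // exactly four "/"-separated parts whose first is the literal "tx.height".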
+ if len(parts) == 4 && bytes.Equal(parts[0], []byte("tx.height")) { + return txHeightKey + } + + // The name cannot be empty, but we don't know where the name ends and + // the value begins, so insist that there be something. + var n int + for _, part := range parts[:len(parts)-2] { + n += len(part) + } + // Check whether the last two fields could be .../height/index. + if n > 0 && isDecimal(parts[len(parts)-1]) && isDecimal(parts[len(parts)-2]) { + return abciEventKey + } } - return keyIsHash(key) + // If we get here, it's not an event key. Treat it as a hash if it is the + // right length. Note that it IS possible this could collide with the + // translation of some other key (though not a hash, since encoded hashes + // will be longer). The chance of that is small, but there is nothing we can + // do to detect it. + if len(key) == 32 { + return txHashKey + } + return nonLegacyKey } -func keyIsHash(key keyID) bool { - return len(key) == 32 && !bytes.Contains(key, []byte("/")) +// isDecimal reports whether buf is a non-empty sequence of Unicode decimal +// digits. +func isDecimal(buf []byte) bool { + for _, c := range buf { + if c < '0' || c > '9' { + return false + } + } + return len(buf) != 0 } -func migarateKey(key keyID) (keyID, error) { - switch { - case bytes.HasPrefix(key, keyID("H:")): +func migrateKey(key keyID) (keyID, error) { + switch checkKeyType(key) { + case blockMetaKey: val, err := strconv.Atoi(string(key[2:])) if err != nil { return nil, err } return orderedcode.Append(nil, int64(0), int64(val)) - case bytes.HasPrefix(key, keyID("P:")): + case blockPartKey: parts := bytes.Split(key[2:], []byte(":")) if len(parts) != 2 { return nil, fmt.Errorf("block parts key has %d rather than 2 components", @@ -137,55 +191,59 @@ func migarateKey(key keyID) (keyID, error) { } return orderedcode.Append(nil, int64(1), int64(valOne), int64(valTwo)) - case bytes.HasPrefix(key, keyID("C:")): + case commitKey: val, err := strconv.Atoi(string(key[2:])) if err != nil { return nil, err } return orderedcode.Append(nil, int64(2), int64(val)) - case bytes.HasPrefix(key, keyID("SC:")): + case seenCommitKey: val, err := strconv.Atoi(string(key[3:])) if err != nil { return nil, err } return orderedcode.Append(nil, int64(3), int64(val)) - case bytes.HasPrefix(key, keyID("BH:")): - val, err := strconv.Atoi(string(key[3:])) + case blockHashKey: + hash := string(key[3:]) + if len(hash)%2 == 1 { + hash = "0" + hash + } + val, err := hex.DecodeString(hash) if err != nil { return nil, err } - return orderedcode.Append(nil, int64(4), int64(val)) - case bytes.HasPrefix(key, keyID("validatorsKey:")): + return orderedcode.Append(nil, int64(4), string(val)) + case validatorsKey: val, err := strconv.Atoi(string(key[14:])) if err != nil { return nil, err } return orderedcode.Append(nil, int64(5), int64(val)) - case bytes.HasPrefix(key, keyID("consensusParamsKey:")): + case consensusParamsKey: val, err := strconv.Atoi(string(key[19:])) if err != nil { return nil, err } return orderedcode.Append(nil, int64(6), int64(val)) - case bytes.HasPrefix(key, keyID("abciResponsesKey:")): + case abciResponsesKey: val, err := strconv.Atoi(string(key[17:])) if err != nil { return nil, err } return orderedcode.Append(nil, int64(7), int64(val)) - case bytes.HasPrefix(key, keyID("stateKey")): + case stateStoreKey: return orderedcode.Append(nil, int64(8)) - case bytes.HasPrefix(key, []byte{0x00}): // committed evidence + case evidenceCommittedKey: return convertEvidence(key, 9) - case bytes.HasPrefix(key, []byte{0x01}): // pending 
evidence + case evidencePendingKey: return convertEvidence(key, 10) - case bytes.HasPrefix(key, keyID("lb/")): + case lightBlockKey: if len(key) < 24 { return nil, fmt.Errorf("light block evidence %q in invalid format", string(key)) } @@ -196,9 +254,9 @@ func migarateKey(key keyID) (keyID, error) { } return orderedcode.Append(nil, int64(11), int64(val)) - case bytes.HasPrefix(key, keyID("size")): + case lightSizeKey: return orderedcode.Append(nil, int64(12)) - case bytes.HasPrefix(key, keyID("tx.height")): + case txHeightKey: parts := bytes.Split(key, []byte("/")) if len(parts) != 4 { return nil, fmt.Errorf("key has %d parts rather than 4", len(parts)) @@ -221,7 +279,7 @@ func migarateKey(key keyID) (keyID, error) { } return orderedcode.Append(nil, elems...) - case bytes.Count(key, []byte("/")) >= 3: // tx indexer + case abciEventKey: parts := bytes.Split(key, []byte("/")) elems := make([]interface{}, 0, 4) @@ -257,7 +315,7 @@ func migarateKey(key keyID) (keyID, error) { elems = append(elems, string(appKey), int64(val), int64(val2)) } return orderedcode.Append(nil, elems...) - case keyIsHash(key): + case txHashKey: return orderedcode.Append(nil, "tx.hash", string(key)) default: return nil, fmt.Errorf("key %q is in the wrong format", string(key)) @@ -349,53 +407,23 @@ func Migrate(ctx context.Context, db dbm.DB) error { return err } - numWorkers := runtime.NumCPU() - wg := &sync.WaitGroup{} - - errs := make(chan error, numWorkers) - - keyCh := makeKeyChan(keys) - - // run migrations. - for i := 0; i < numWorkers; i++ { - wg.Add(1) - go func() { - defer wg.Done() - for key := range keyCh { - err := replaceKey(db, key, migarateKey) - if err != nil { - errs <- err - } + var errs []string + g, start := taskgroup.New(func(err error) error { + errs = append(errs, err.Error()) + return err + }).Limit(runtime.NumCPU()) - if ctx.Err() != nil { - return - } + for _, key := range keys { + key := key + start(func() error { + if err := ctx.Err(); err != nil { + return err } - }() + return replaceKey(db, key, migrateKey) + }) } - - // collect and process the errors. - errStrs := []string{} - signal := make(chan struct{}) - go func() { - defer close(signal) - for err := range errs { - if err == nil { - continue - } - errStrs = append(errStrs, err.Error()) - } - }() - - // Wait for everything to be done. 
- wg.Wait() - close(errs) - <-signal - - // check the error results - if len(errs) != 0 { - return fmt.Errorf("encountered errors during migration: %v", errStrs) + if g.Wait() != nil { + return fmt.Errorf("encountered errors during migration: %q", errs) } - return nil } diff --git a/scripts/keymigrate/migrate_test.go b/scripts/keymigrate/migrate_test.go index 21e9592fbb..b2727a5df3 100644 --- a/scripts/keymigrate/migrate_test.go +++ b/scripts/keymigrate/migrate_test.go @@ -27,7 +27,7 @@ func getLegacyPrefixKeys(val int) map[string][]byte { "BlockPartTwo": []byte(fmt.Sprintf("P:%d:%d", val+2, val+val)), "BlockCommit": []byte(fmt.Sprintf("C:%d", val)), "SeenCommit": []byte(fmt.Sprintf("SC:%d", val)), - "BlockHeight": []byte(fmt.Sprintf("BH:%d", val)), + "BlockHeight": []byte(fmt.Sprintf("BH:%x", val)), "Validators": []byte(fmt.Sprintf("validatorsKey:%d", val)), "ConsensusParams": []byte(fmt.Sprintf("consensusParamsKey:%d", val)), "ABCIResponse": []byte(fmt.Sprintf("abciResponsesKey:%d", val)), @@ -107,19 +107,19 @@ func TestMigration(t *testing.T) { t.Run("Legacy", func(t *testing.T) { for kind, le := range legacyPrefixes { - require.True(t, keyIsLegacy(le), kind) + require.True(t, checkKeyType(le).isLegacy(), kind) } }) t.Run("New", func(t *testing.T) { for kind, ne := range newPrefixes { - require.False(t, keyIsLegacy(ne), kind) + require.False(t, checkKeyType(ne).isLegacy(), kind) } }) t.Run("Conversion", func(t *testing.T) { for kind, le := range legacyPrefixes { - nk, err := migarateKey(le) + nk, err := migrateKey(le) require.NoError(t, err, kind) - require.False(t, keyIsLegacy(nk), kind) + require.False(t, checkKeyType(nk).isLegacy(), kind) } }) t.Run("Hashes", func(t *testing.T) { @@ -129,8 +129,12 @@ func TestMigration(t *testing.T) { } }) t.Run("ContrivedLegacyKeyDetection", func(t *testing.T) { - require.True(t, keyIsLegacy([]byte("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"))) - require.False(t, keyIsLegacy([]byte("xxxxxxxxxxxxxxx/xxxxxxxxxxxxxxxx"))) + // length 32: should appear to be a hash + require.Equal(t, txHashKey, checkKeyType([]byte("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"))) + + // length ≠ 32: should not appear to be a hash + require.Equal(t, nonLegacyKey, checkKeyType([]byte("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx--"))) + require.Equal(t, nonLegacyKey, checkKeyType([]byte("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"))) }) }) }) @@ -159,7 +163,7 @@ func TestMigration(t *testing.T) { "UserKey3": []byte("foo/bar/baz/1.2/4"), } for kind, key := range table { - out, err := migarateKey(key) + out, err := migrateKey(key) require.Error(t, err, kind) require.Nil(t, out, kind) } @@ -177,7 +181,7 @@ func TestMigration(t *testing.T) { return nil, errors.New("hi") })) }) - t.Run("KeyDisapears", func(t *testing.T) { + t.Run("KeyDisappears", func(t *testing.T) { db := dbm.NewMemDB() key := keyID("hi") require.NoError(t, db.Set(key, []byte("world"))) @@ -204,7 +208,7 @@ func TestMigration(t *testing.T) { require.Equal(t, size, len(keys)) legacyKeys := 0 for _, k := range keys { - if keyIsLegacy(k) { + if checkKeyType(k).isLegacy() { legacyKeys++ } } @@ -212,19 +216,8 @@ func TestMigration(t *testing.T) { }) t.Run("KeyIdempotency", func(t *testing.T) { for _, key := range getNewPrefixKeys(t, 84) { - require.False(t, keyIsLegacy(key)) - } - }) - t.Run("ChannelConversion", func(t *testing.T) { - ch := makeKeyChan([]keyID{ - makeKey(t, "abc", int64(2), int64(42)), - makeKey(t, int64(42)), - }) - count := 0 - for range ch { - count++ + require.False(t, checkKeyType(key).isLegacy()) } - require.Equal(t, 2, count) }) 
t.Run("Migrate", func(t *testing.T) { _, db := getLegacyDatabase(t) diff --git a/scripts/linkpatch/linkpatch.go b/scripts/linkpatch/linkpatch.go new file mode 100644 index 0000000000..42054d4785 --- /dev/null +++ b/scripts/linkpatch/linkpatch.go @@ -0,0 +1,205 @@ +// Program linkpatch rewrites absolute URLs pointing to targets in GitHub in +// Markdown link tags to target a different branch. +// +// This is used to update documentation links for backport branches. +// See https://github.com/tendermint/tendermint/issues/7675 for context. +package main + +import ( + "bytes" + "flag" + "fmt" + "io/fs" + "log" + "os" + "path/filepath" + "regexp" + "strings" + + "github.com/creachadair/atomicfile" +) + +var ( + repoName = flag.String("repo", "tendermint/tendermint", "Repository name to match") + sourceBranch = flag.String("source", "master", "Source branch name (required)") + targetBranch = flag.String("target", "", "Target branch name (required)") + doRecur = flag.Bool("recur", false, "Recur into subdirectories") + + skipPath stringList + skipMatch regexpFlag + + // Match markdown links pointing to absolute URLs. + // This only works for "inline" links, not referenced links. + // The submetch selects the URL. + linkRE = regexp.MustCompile(`(?m)\[.*?\]\((https?://.*?)\)`) +) + +func init() { + flag.Var(&skipPath, "skip-path", "Skip these paths (comma-separated)") + flag.Var(&skipMatch, "skip-match", "Skip URLs matching this regexp (RE2)") + + flag.Usage = func() { + fmt.Fprintf(os.Stderr, `Usage: %[1]s [options] ... + +Rewrite absolute Markdown links targeting the specified GitHub repository +and source branch name to point to the target branch instead. Matching +files are updated in-place. + +Each path names either a directory to list, or a single file path to +rewrite. By default, only the top level of a directory is scanned; use -recur +to recur into subdirectories. + +Options: +`, filepath.Base(os.Args[0])) + flag.PrintDefaults() + } +} + +func main() { + flag.Parse() + switch { + case *repoName == "": + log.Fatal("You must specify a non-empty -repo name (org/repo)") + case *targetBranch == "": + log.Fatal("You must specify a non-empty -target branch") + case *sourceBranch == "": + log.Fatal("You must specify a non-empty -source branch") + case *sourceBranch == *targetBranch: + log.Fatalf("Source and target branch are the same (%q)", *sourceBranch) + case flag.NArg() == 0: + log.Fatal("You must specify at least one file/directory to rewrite") + } + + r, err := regexp.Compile(fmt.Sprintf(`^https?://github.com/%s/(?:blob|tree)/%s`, + *repoName, *sourceBranch)) + if err != nil { + log.Fatalf("Compiling regexp: %v", err) + } + for _, path := range flag.Args() { + if err := processPath(r, path); err != nil { + log.Fatalf("Processing %q failed: %v", path, err) + } + } +} + +func processPath(r *regexp.Regexp, path string) error { + fi, err := os.Lstat(path) + if err != nil { + return err + } + if fi.Mode().IsDir() { + return processDir(r, path) + } else if fi.Mode().IsRegular() { + return processFile(r, path) + } + return nil // nothing to do with links, device files, sockets, etc. 
+} + +func processDir(r *regexp.Regexp, root string) error { + return filepath.Walk(root, func(path string, fi fs.FileInfo, err error) error { + if err != nil { + return err + } + if fi.IsDir() { + if skipPath.Contains(path) { + log.Printf("Skipping %q (per -skip-path)", path) + return filepath.SkipDir // explicitly skipped + } else if !*doRecur && path != root { + return filepath.SkipDir // skipped because we aren't recurring + } + return nil // nothing else to do for directories + } else if skipPath.Contains(path) { + log.Printf("Skipping %q (per -skip-path)", path) + return nil // explicitly skipped + } else if filepath.Ext(path) != ".md" { + return nil // nothing to do for non-Markdown files + } + + return processFile(r, path) + }) +} + +func processFile(r *regexp.Regexp, path string) error { + log.Printf("Processing file %q", path) + input, err := os.ReadFile(path) + if err != nil { + return err + } + + pos := 0 + var output bytes.Buffer + for _, m := range linkRE.FindAllSubmatchIndex(input, -1) { + href := string(input[m[2]:m[3]]) + u := r.FindStringIndex(href) + if u == nil || skipMatch.MatchString(href) { + if u != nil { + log.Printf("Skipped URL %q (by -skip-match)", href) + } + output.Write(input[pos:m[1]]) // copy the existing data as-is + pos = m[1] + continue + } + + // Copy everything before the URL as-is, then write the replacement. + output.Write(input[pos:m[2]]) // everything up to the URL + fmt.Fprintf(&output, `https://github.com/%s/blob/%s%s`, *repoName, *targetBranch, href[u[1]:]) + + // Write out the tail of the match, everything after the URL. + output.Write(input[m[3]:m[1]]) + pos = m[1] + } + output.Write(input[pos:]) // the rest of the file + + _, err = atomicfile.WriteAll(path, &output, 0644) + return err +} + +// stringList implements the flag.Value interface for a comma-separated list of strings. +type stringList []string + +func (lst *stringList) Set(s string) error { + if s == "" { + *lst = nil + } else { + *lst = strings.Split(s, ",") + } + return nil +} + +// Contains reports whether lst contains s. +func (lst stringList) Contains(s string) bool { + for _, elt := range lst { + if s == elt { + return true + } + } + return false +} + +func (lst stringList) String() string { return strings.Join([]string(lst), ",") } + +// regexpFlag implements the flag.Value interface for a regular expression. 
+type regexpFlag struct{ *regexp.Regexp } + +func (r regexpFlag) MatchString(s string) bool { + if r.Regexp == nil { + return false + } + return r.Regexp.MatchString(s) +} + +func (r *regexpFlag) Set(s string) error { + c, err := regexp.Compile(s) + if err != nil { + return err + } + r.Regexp = c + return nil +} + +func (r regexpFlag) String() string { + if r.Regexp == nil { + return "" + } + return r.Regexp.String() +} diff --git a/scripts/protocgen.sh b/scripts/protocgen.sh deleted file mode 100755 index 51b1cc6d33..0000000000 --- a/scripts/protocgen.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env bash - -set -eo pipefail - -buf generate --path proto/tendermint - -mv ./proto/tendermint/abci/types.pb.go ./abci/types - -mv ./proto/tendermint/rpc/grpc/types.pb.go ./rpc/grpc diff --git a/scripts/scmigrate/migrate.go b/scripts/scmigrate/migrate.go new file mode 100644 index 0000000000..c8e1d94900 --- /dev/null +++ b/scripts/scmigrate/migrate.go @@ -0,0 +1,197 @@ +// Package scmigrate implements a migration for SeenCommit data +// between 0.34 and 0.35 +// +// The Migrate implementation is idempotent and finds all seen commit +// records and deletes all *except* the record corresponding to the +// highest height. +package scmigrate + +import ( + "bytes" + "context" + "errors" + "fmt" + "sort" + + "github.com/gogo/protobuf/proto" + "github.com/google/orderedcode" + dbm "github.com/tendermint/tm-db" + + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + "github.com/tendermint/tendermint/types" +) + +type toMigrate struct { + key []byte + commit *types.Commit +} + +const prefixSeenCommit = int64(3) + +func makeKeyFromPrefix(ids ...int64) []byte { + vals := make([]interface{}, len(ids)) + for idx := range ids { + vals[idx] = ids[idx] + } + + key, err := orderedcode.Append(nil, vals...) + if err != nil { + panic(err) + } + return key +} + +func makeToMigrate(val []byte) (*types.Commit, error) { + if len(val) == 0 { + return nil, errors.New("empty value") + } + + var pbc = new(tmproto.Commit) + + if err := proto.Unmarshal(val, pbc); err != nil { + return nil, fmt.Errorf("error reading block seen commit: %w", err) + } + + commit, err := types.CommitFromProto(pbc) + if commit == nil { + // theoretically we should error for all errors, but + // there's no reason to keep junk data in the + // database, and it makes testing easier. 
+		if err != nil {
+			return nil, fmt.Errorf("error from proto commit: %w", err)
+		}
+		return nil, fmt.Errorf("missing commit")
+	}
+
+	return commit, nil
+}
+
+func sortMigrations(scData []toMigrate) {
+	// put this in its own function just to make it testable
+	sort.SliceStable(scData, func(i, j int) bool {
+		return scData[i].commit.Height > scData[j].commit.Height
+	})
+}
+
+func getAllSeenCommits(ctx context.Context, db dbm.DB) ([]toMigrate, error) {
+	scKeyPrefix := makeKeyFromPrefix(prefixSeenCommit)
+	iter, err := db.Iterator(
+		scKeyPrefix,
+		makeKeyFromPrefix(prefixSeenCommit+1),
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	scData := []toMigrate{}
+	for ; iter.Valid(); iter.Next() {
+		if err := ctx.Err(); err != nil {
+			return nil, err
+		}
+
+		k := iter.Key()
+		nk := make([]byte, len(k))
+		copy(nk, k)
+
+		if !bytes.HasPrefix(nk, scKeyPrefix) {
+			break
+		}
+		commit, err := makeToMigrate(iter.Value())
+		if err != nil {
+			return nil, err
+		}
+
+		scData = append(scData, toMigrate{
+			key:    nk,
+			commit: commit,
+		})
+	}
+	if err := iter.Error(); err != nil {
+		return nil, err
+	}
+	if err := iter.Close(); err != nil {
+		return nil, err
+	}
+	return scData, nil
+}
+
+func renameRecord(db dbm.DB, keep toMigrate) error {
+	wantKey := makeKeyFromPrefix(prefixSeenCommit)
+	if bytes.Equal(keep.key, wantKey) {
+		return nil // we already did this conversion
+	}
+
+	// This record's key has already been converted to the "new" format; we just
+	// now need to trim off the tail.
+	val, err := db.Get(keep.key)
+	if err != nil {
+		return err
+	}
+
+	batch := db.NewBatch()
+	if err := batch.Delete(keep.key); err != nil {
+		return err
+	}
+	if err := batch.Set(wantKey, val); err != nil {
+		return err
+	}
+	werr := batch.Write()
+	cerr := batch.Close()
+	if werr != nil {
+		return werr
+	}
+	return cerr
+}
+
+func deleteRecords(db dbm.DB, scData []toMigrate) error {
+	// delete all the remaining stale values in a single batch
+	batch := db.NewBatch()
+
+	for _, mg := range scData {
+		if err := batch.Delete(mg.key); err != nil {
+			return err
+		}
+	}
+
+	if err := batch.WriteSync(); err != nil {
+		return err
+	}
+
+	if err := batch.Close(); err != nil {
+		return err
+	}
+	return nil
+}
+
+func Migrate(ctx context.Context, db dbm.DB) error {
+	scData, err := getAllSeenCommits(ctx, db)
+	if err != nil {
+		return fmt.Errorf("sourcing tasks to migrate: %w", err)
+	} else if len(scData) == 0 {
+		return nil // nothing to do
+	}
+
+	// Sort commits in decreasing order of height.
+	sortMigrations(scData)
+
+	// Keep and rename the newest seen commit, delete the rest.
+	// In TM < v0.35 we kept a last-seen commit for each height; in v0.35 we
+	// retain only the latest.
+	keep, remove := scData[0], scData[1:]
+
+	if err := renameRecord(db, keep); err != nil {
+		return fmt.Errorf("renaming seen commit record: %w", err)
+	}
+
+	if len(remove) == 0 {
+		return nil
+	}
+
+	// Remove any older seen commits. Prior to v0.35, we kept these records for
+	// all heights, but v0.35 keeps only the latest.
+	if err := deleteRecords(db, remove); err != nil {
+		return fmt.Errorf("writing data: %w", err)
+	}
+
+	return nil
+}
diff --git a/scripts/scmigrate/migrate_test.go b/scripts/scmigrate/migrate_test.go
new file mode 100644
index 0000000000..900a15f857
--- /dev/null
+++ b/scripts/scmigrate/migrate_test.go
@@ -0,0 +1,174 @@
+package scmigrate
+
+import (
+	"bytes"
+	"context"
+	"math/rand"
+	"testing"
+
+	"github.com/gogo/protobuf/proto"
+	dbm "github.com/tendermint/tm-db"
+
+	"github.com/tendermint/tendermint/types"
+)
+
+func appendRandomMigrations(in []toMigrate, num int) []toMigrate {
+	if in == nil {
+		in = []toMigrate{}
+	}
+
+	for i := 0; i < num; i++ {
+		height := rand.Int63()
+		if height <= 0 {
+			continue
+		}
+		in = append(in, toMigrate{commit: &types.Commit{Height: height}})
+	}
+	return in
+}
+
+func assertWellOrderedMigrations(t *testing.T, testData []toMigrate) {
+	t.Run("ValuesDescend", func(t *testing.T) {
+		for idx := range testData {
+			height := testData[idx].commit.Height
+			if idx == 0 {
+				continue
+			}
+			prev := testData[idx-1].commit.Height
+			if prev < height {
+				t.Fatal("heights are not in decreasing order")
+			}
+		}
+	})
+	t.Run("EarliestIsZero", func(t *testing.T) {
+		earliestHeight := testData[len(testData)-1].commit.Height
+		if earliestHeight != 0 {
+			t.Fatalf("the earliest height is not 0: %d", earliestHeight)
+		}
+	})
+}
+
+func getLatestHeight(data []toMigrate) int64 {
+	var out int64
+
+	for _, d := range data {
+		if d.commit.Height >= out {
+			out = d.commit.Height
+		}
+	}
+
+	return out
+}
+
+func insertTestData(t *testing.T, db dbm.DB, data []toMigrate) {
+	t.Helper()
+
+	batch := db.NewBatch()
+
+	for idx, val := range data {
+		payload, err := proto.Marshal(val.commit.ToProto())
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		if err := batch.Set(makeKeyFromPrefix(prefixSeenCommit, int64(idx)), payload); err != nil {
+			t.Fatal(err)
+		}
+	}
+	if err := batch.WriteSync(); err != nil {
+		t.Fatal(err)
+	}
+	if err := batch.Close(); err != nil {
+		t.Fatal(err)
+	}
+}
+
+func TestMigrations(t *testing.T) {
+	t.Run("Sort", func(t *testing.T) {
+		t.Run("HandCraftedData", func(t *testing.T) {
+			testData := []toMigrate{
+				{commit: &types.Commit{Height: 100}},
+				{commit: &types.Commit{Height: 0}},
+				{commit: &types.Commit{Height: 8}},
+				{commit: &types.Commit{Height: 1}},
+			}
+
+			sortMigrations(testData)
+			assertWellOrderedMigrations(t, testData)
+		})
+		t.Run("RandomGeneratedData", func(t *testing.T) {
+			testData := []toMigrate{{commit: &types.Commit{Height: 0}}}
+
+			testData = appendRandomMigrations(testData, 10000)
+
+			sortMigrations(testData)
+			assertWellOrderedMigrations(t, testData)
+		})
+	})
+	t.Run("InvalidMigrations", func(t *testing.T) {
+		if _, err := makeToMigrate(nil); err == nil {
+			t.Fatal("should error for nil migrations")
+		}
+		if _, err := makeToMigrate([]byte{}); err == nil {
+			t.Fatal("should error for empty migrations")
+		}
+		if _, err := makeToMigrate([]byte("invalid")); err == nil {
+			t.Fatal("should error for invalid migrations")
+		}
+	})
+
+	t.Run("GetSeenCommits", func(t *testing.T) {
+		ctx, cancel := context.WithCancel(context.Background())
+		defer cancel()
+
+		db := dbm.NewMemDB()
+		data := appendRandomMigrations([]toMigrate{}, 100)
+		insertTestData(t, db, data)
+		commits, err := getAllSeenCommits(ctx, db)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if len(commits) != len(data) {
+			t.Log("inputs", len(data))
+			t.Log("commits", len(commits))
+			t.Fatal("migrations not found in database")
+		}
+	})
+	t.Run("Integration", func(t *testing.T) {
+		ctx, cancel := context.WithCancel(context.Background())
+		defer cancel()
+
+		db := dbm.NewMemDB()
+		data := appendRandomMigrations([]toMigrate{}, 1000)
+		insertTestData(t, db, data)
+
+		latestHeight := getLatestHeight(data)
+		for _, test := range []string{"Migration", "Idempotency"} {
+			// run the test twice to make sure that it's
+			// safe to rerun
+			t.Run(test, func(t *testing.T) {
+				if err := Migrate(ctx, db); err != nil {
+					t.Fatalf("Migration failed: %v", err)
+				}
+
+				post, err := getAllSeenCommits(ctx, db)
+				if err != nil {
+					t.Fatalf("Fetching seen commits: %v", err)
+				}
+
+				if len(post) != 1 {
+					t.Fatalf("Wrong number of commits: got %d, wanted 1", len(post))
+				}
+
+				wantKey := makeKeyFromPrefix(prefixSeenCommit)
+				if !bytes.Equal(post[0].key, wantKey) {
+					t.Errorf("Seen commit key: got %x, want %x", post[0].key, wantKey)
+				}
+				if got := post[0].commit.Height; got != latestHeight {
+					t.Fatalf("Wrong commit height after migration: got %d, wanted %d", got, latestHeight)
+				}
+			})
+		}
+	})
+
+}
diff --git a/scripts/wal2json/main.go b/scripts/wal2json/main.go
index 5a5a0abac3..7ee7561068 100644
--- a/scripts/wal2json/main.go
+++ b/scripts/wal2json/main.go
@@ -8,12 +8,12 @@ package main
 
 import (
+	"encoding/json"
 	"fmt"
 	"io"
 	"os"
 
 	"github.com/tendermint/tendermint/internal/consensus"
-	tmjson "github.com/tendermint/tendermint/libs/json"
 )
 
 func main() {
@@ -24,7 +24,7 @@ func main() {
 
 	f, err := os.Open(os.Args[1])
 	if err != nil {
-		panic(fmt.Errorf("failed to open WAL file: %v", err))
+		panic(fmt.Errorf("failed to open WAL file: %w", err))
 	}
 	defer f.Close()
 
@@ -34,12 +34,12 @@ func main() {
 		if err == io.EOF {
 			break
 		} else if err != nil {
-			panic(fmt.Errorf("failed to decode msg: %v", err))
+			panic(fmt.Errorf("failed to decode msg: %w", err))
 		}
 
-		json, err := tmjson.Marshal(msg)
+		json, err := json.Marshal(msg)
 		if err != nil {
-			panic(fmt.Errorf("failed to marshal msg: %v", err))
+			panic(fmt.Errorf("failed to marshal msg: %w", err))
 		}
 
 		_, err = os.Stdout.Write(json)
diff --git a/spec/README.md b/spec/README.md
new file mode 100644
index 0000000000..c60951f28d
--- /dev/null
+++ b/spec/README.md
@@ -0,0 +1,81 @@
+---
+order: 1
+title: Overview
+parent:
+  title: Spec
+  order: 7
+---
+
+# Tendermint Specifications
+
+This directory hosts the canonical Markdown specifications of the Tendermint Protocol.
+
+It shall be used to describe protocol semantics, namely the BFT consensus engine, leader election, block propagation and light client verification. The specification includes encoding descriptions used in interprocess communication to comply with the protocol. It defines the interface between the application and Tendermint. The English specifications are often accompanied by a TLA+ specification.
+
+## Table of Contents
+
+- [Overview](#overview)
+- [Application Blockchain Interface](./abci++/README.md)
+- [Encoding and Digests](./core/encoding.md)
+- [Core Data Structures](./core/data_structures.md)
+- [State](./core/state.md)
+- [Consensus Algorithm](./consensus/consensus.md)
+- [Creating a proposal](./consensus/creating-proposal.md)
+- [Time](./consensus/proposer-based-timestamp/README.md)
+- [Light Client](./consensus/light-client/README.md)
+- [The Base P2P Layer](./p2p/node.md)
+- [Peer Exchange (PEX)](./p2p/messages/pex.md)
+- [Remote Procedure Calls](./rpc/README.md)
+- [Write-Ahead Log](./consensus/wal.md)
+- [Ivy Proofs](./ivy-proofs/README.md)
+
+## Contributing
+
+Contributions are welcome.
+
+Proposals at an early stage can first be drafted as GitHub issues. To progress, a proposal will often need to be written out and approved as a [Request For Comment (RFC)](../docs/rfc/README.md).
+
+The standard language for coding blocks is Golang.
+
+If you find discrepancies between the spec and the code that
+do not have an associated issue or pull request on GitHub,
+please submit them to our [bug bounty](https://tendermint.com/security)!
+
+## Overview
+
+Tendermint provides Byzantine Fault Tolerant State Machine Replication using
+hash-linked batches of transactions. Such transaction batches are called "blocks".
+Hence, Tendermint defines a "blockchain".
+
+Each block in Tendermint has a unique index - its Height.
+Heights in the blockchain are monotonic.
+Each block is committed by a known set of weighted Validators.
+Membership and weighting within this validator set may change over time.
+Tendermint guarantees the safety and liveness of the blockchain
+so long as less than 1/3 of the total weight of the Validator set
+is malicious or faulty.
+
+A commit in Tendermint is a set of signed messages from more than 2/3 of
+the total weight of the current Validator set. Validators take turns proposing
+blocks and voting on them. Once enough votes are received, the block is considered
+committed. These votes are included in the _next_ block as proof that the previous block
+was committed - they cannot be included in the current block, as that block has already been
+created.
+
+Once a block is committed, it can be executed against an application.
+The application returns results for each of the transactions in the block.
+The application can also return changes to be made to the validator set,
+as well as a cryptographic digest of its latest state.
+
+Tendermint is designed to enable efficient verification and authentication
+of the latest state of the blockchain. To achieve this, it embeds
+cryptographic commitments to certain information in the block "header".
+This information includes the contents of the block (eg. the transactions),
+the validator set committing the block, as well as the various results returned by the application.
+Note, however, that block execution only occurs _after_ a block is committed.
+Thus, application results can only be included in the _next_ block.
+
+Also note that information like the transaction results and the validator set are never
+directly included in the block - only their cryptographic digests (Merkle roots) are.
+Hence, verification of a block requires a separate data structure to store this information.
+We call this the `State`. Block verification also requires access to the previous block.
diff --git a/spec/abci++/README.md b/spec/abci++/README.md
new file mode 100644
index 0000000000..38feba9d7e
--- /dev/null
+++ b/spec/abci++/README.md
@@ -0,0 +1,43 @@
+---
+order: 1
+parent:
+  title: ABCI++
+  order: 3
+---
+
+# ABCI++
+
+## Introduction
+
+ABCI++ is a major evolution of ABCI (**A**pplication **B**lock**c**hain **I**nterface).
+Like its predecessor, ABCI++ is the interface between Tendermint (a state-machine
+replication engine) and the actual state machine being replicated (i.e., the Application).
+The API consists of a set of _methods_, each with a corresponding `Request` and `Response`
+message type.
+
+The methods are always initiated by Tendermint. The Application implements its logic
+for handling all ABCI++ methods.
+Thus, Tendermint always sends the `Request*` messages and receives the `Response*` messages
+in return.
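+
+To make the request/response pairing concrete, here is a minimal, hypothetical Go
+sketch of the calling convention; the placeholder types below stand in for the
+protobuf-generated ones and are not part of the actual API:
+
+```go
+package abciexample
+
+// Placeholder messages standing in for the protobuf-generated types.
+type (
+	RequestPrepareProposal  struct{}
+	ResponsePrepareProposal struct{}
+	RequestProcessProposal  struct{}
+	ResponseProcessProposal struct{}
+)
+
+// Application illustrates the pattern: Tendermint initiates every call,
+// passing a Request* message and expecting the matching Response* in return.
+type Application interface {
+	PrepareProposal(RequestPrepareProposal) ResponsePrepareProposal
+	ProcessProposal(RequestProcessProposal) ResponseProcessProposal
+}
+```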
+
+All ABCI++ messages and methods are defined in [protocol buffers](../../proto/tendermint/abci/types.proto).
+This allows Tendermint to run with applications written in many programming languages.
+
+This specification is split as follows:
+
+- [Overview and basic concepts](./abci++_basic_concepts_002_draft.md) - overview of the interface and concepts needed to understand other parts of this specification.
+- [Methods](./abci++_methods_002_draft.md) - complete details on all ABCI++ methods
+  and message types.
+- [Requirements for the Application](./abci++_app_requirements_002_draft.md) - formal requirements
+  on the Application's logic to ensure liveness of Tendermint. These requirements define what
+  Tendermint expects from the Application.
+- [Tendermint's expected behavior](./abci++_tmint_expected_behavior_002_draft.md) - specification of
+  how the different ABCI++ methods may be called by Tendermint. This explains what the Application
+  is to expect from Tendermint.
+
+>**TODO** Re-read these and remove redundant info
+
+- [Applications](../abci/apps.md) - how to manage ABCI application state and other
+  details about building ABCI applications
+- [Client and Server](../abci/client-server.md) - for those looking to implement their
+  own ABCI application servers
diff --git a/spec/abci++/abci++_app_requirements_002_draft.md b/spec/abci++/abci++_app_requirements_002_draft.md
new file mode 100644
index 0000000000..620b1cd5e0
--- /dev/null
+++ b/spec/abci++/abci++_app_requirements_002_draft.md
@@ -0,0 +1,165 @@
+---
+order: 3
+title: Application Requirements
+---
+
+# Application Requirements
+
+This section specifies what Tendermint expects from the Application. It is structured as a set
+of formal requirements that can be used for testing and verification of the Application's logic.
+
+Let $p$ and $q$ be two different correct proposers in rounds $r_p$ and $r_q$ respectively, in height $h$.
+Let $s_{p,h-1}$ be $p$'s Application's state committed for height $h-1$.
+Let $v_p$ (resp. $v_q$) be the block that $p$'s (resp. $q$'s) Tendermint passes on to the Application
+via `RequestPrepareProposal` as proposer of round $r_p$ (resp $r_q$), height $h$, also known as the
+raw proposal.
+Let $v'_p$ (resp. $v'_q$) be the possibly modified block $p$'s (resp. $q$'s) Application returns via
+`ResponsePrepareProposal` to Tendermint, also known as the prepared proposal.
+
+Process $p$'s prepared proposal can differ in two different rounds where $p$ is the proposer.
+
+* Requirement 1 [`PrepareProposal`, header-changes] When the blockchain is in same-block execution mode,
+  $p$'s Application provides values for the following parameters in `ResponsePrepareProposal`:
+  _AppHash_, _TxResults_, _ConsensusParams_, _ValidatorUpdates_. Provided values for
+  _ConsensusParams_ and _ValidatorUpdates_ MAY be empty to denote that the Application
+  wishes to keep the current values.
+
+Parameters _AppHash_, _TxResults_, _ConsensusParams_, and _ValidatorUpdates_ are used by Tendermint to
+compute various hashes in the block header that will finally be part of the proposal.
+
+* Requirement 2 [`PrepareProposal`, no-header-changes] When the blockchain is in next-block execution
+  mode, $p$'s Application does not provide values for the following parameters in `ResponsePrepareProposal`:
+  _AppHash_, _TxResults_, _ConsensusParams_, _ValidatorUpdates_.
+
+In practical terms, Requirements 1 and 2 imply that Tendermint will (a) panic if the Application is in
+same-block execution mode and _does_ _not_ provide values for
+_AppHash_, _TxResults_, _ConsensusParams_, and _ValidatorUpdates_, or
+(b) log an error if the Application is in next-block execution mode and _does_ provide values for
+_AppHash_, _TxResults_, _ConsensusParams_, or _ValidatorUpdates_ (the values provided will be ignored).
+
+* Requirement 3 [`PrepareProposal`, timeliness] If $p$'s Application fully executes prepared blocks in
+  `PrepareProposal` and the network is in a synchronous period while processes $p$ and $q$ are in $r_p$, then
+  the value of *TimeoutPropose* at $q$ must be such that $q$'s propose timer does not time out
+  (which would result in $q$ prevoting *nil* in $r_p$).
+
+Full execution of blocks at `PrepareProposal` time stands on Tendermint's critical path. Thus,
+Requirement 3 ensures the Application will set a value for _TimeoutPropose_ such that the time it takes
+to fully execute blocks in `PrepareProposal` does not interfere with Tendermint's propose timer.
+
+* Requirement 4 [`PrepareProposal`, tx-size] When $p$'s Application calls `ResponsePrepareProposal`, the
+  total size in bytes of the transactions returned does not exceed `RequestPrepareProposal.max_tx_bytes`.
+
+Busy blockchains might seek to maximize the amount of transactions included in each block. Under those conditions,
+Tendermint might choose to pass the Application, via `RequestPrepareProposal.txs`, a list of transactions
+whose total size exceeds the `RequestPrepareProposal.max_tx_bytes` limit. The idea is that, if the Application drops some of
+those transactions, it can still return a transaction list whose byte size is as close to
+`RequestPrepareProposal.max_tx_bytes` as possible. Thus, Requirement 4 ensures that the size in bytes of the
+transaction list returned by the application will never cause the resulting block to go beyond its byte limit.
+
+* Requirement 5 [`PrepareProposal`, `ProcessProposal`, coherence]: For any two correct processes $p$ and $q$,
+  if $q$'s Tendermint calls `RequestProcessProposal` on $v'_p$,
+  $q$'s Application returns Accept in `ResponseProcessProposal`.
+
+Requirement 5 makes sure that blocks proposed by correct processes _always_ pass the correct receiving process's
+`ProcessProposal` check.
+On the other hand, if there is a deterministic bug in `PrepareProposal` or `ProcessProposal` (or in both),
+strictly speaking, this makes all processes that hit the bug Byzantine. This is a problem in practice,
+as very often validators are running the Application from the same codebase, so potentially _all_ would
+likely hit the bug at the same time. This would result in most (or all) processes prevoting `nil`, with the
+serious consequences on Tendermint's liveness that this entails. Due to its criticality, Requirement 5 is a
+target for extensive testing and automated verification.
+
+* Requirement 6 [`ProcessProposal`, determinism-1]: `ProcessProposal` is a (deterministic) function of the current
+  state and the block that is about to be applied. In other words, for any correct process $p$, and any arbitrary block $v'$,
+  if $p$'s Tendermint calls `RequestProcessProposal` on $v'$ at height $h$,
+  then $p$'s Application's acceptance or rejection **exclusively** depends on $v'$ and $s_{p,h-1}$.
+
+* Requirement 7 [`ProcessProposal`, determinism-2]: For any two correct processes $p$ and $q$, and any arbitrary block $v'$,
+  if $p$'s (resp. $q$'s) Tendermint calls `RequestProcessProposal` on $v'$ at height $h$,
+  then $p$'s Application accepts $v'$ if and only if $q$'s Application accepts $v'$.
+  Note that this requirement follows from Requirement 6 and the Agreement property of consensus.
+
+Requirements 6 and 7 ensure that all correct processes will react in the same way to a proposed block, even
+if the proposer is Byzantine. However, `ProcessProposal` may contain a bug that renders the
+acceptance or rejection of the block non-deterministic, and therefore prevents processes hitting
+the bug from fulfilling Requirements 6 or 7 (effectively making those processes Byzantine).
+In such a scenario, Tendermint's liveness cannot be guaranteed.
+Again, this is a problem in practice if most validators are running the same software, as they are likely
+to hit the bug at the same point. There is currently no clear solution to help with this situation, so
+the Application designers/implementors must proceed very carefully with the logic/implementation
+of `ProcessProposal`. As a general rule `ProcessProposal` _should_ always accept the block.
+
+According to the Tendermint algorithm, a correct process can broadcast at most one precommit message in round $r$, height $h$.
+Since, as stated in the [Description](#description) section, `ResponseExtendVote` is only called when Tendermint
+is about to broadcast a non-`nil` precommit message, a correct process can only produce one vote extension in round $r$, height $h$.
+Let $e^r_p$ be the vote extension that the Application of a correct process $p$ returns via `ResponseExtendVote` in round $r$, height $h$.
+Let $w^r_p$ be the proposed block that $p$'s Tendermint passes to the Application via `RequestExtendVote` in round $r$, height $h$.
+
+* Requirement 8 [`ExtendVote`, `VerifyVoteExtension`, coherence]: For any two correct processes $p$ and $q$, if $q$ receives $e^r_p$
+  from $p$ in height $h$, $q$'s Application returns Accept in `ResponseVerifyVoteExtension`.
+
+Requirement 8 constrains the creation and handling of vote extensions in a similar way as Requirement 5
+constrains the creation and handling of proposed blocks.
+Requirement 8 ensures that extensions created by correct processes _always_ pass the `VerifyVoteExtension`
+checks performed by correct processes receiving those extensions.
+However, if there is a (deterministic) bug in `ExtendVote` or `VerifyVoteExtension` (or in both),
+we will face the same liveness issues as described for Requirement 5, as Precommit messages with invalid vote
+extensions will be discarded.
+
+* Requirement 9 [`VerifyVoteExtension`, determinism-1]: `VerifyVoteExtension` is a (deterministic) function of
+  the current state, the vote extension received, and the prepared proposal that the extension refers to.
+  In other words, for any correct process $p$, and any arbitrary vote extension $e$, and any arbitrary
+  block $w$, if $p$'s Tendermint calls `RequestVerifyVoteExtension` on $e$ and $w$ at height $h$,
+  then $p$'s Application's acceptance or rejection **exclusively** depends on $e$, $w$ and $s_{p,h-1}$.
+
+* Requirement 10 [`VerifyVoteExtension`, determinism-2]: For any two correct processes $p$ and $q$,
+  and any arbitrary vote extension $e$, and any arbitrary block $w$,
+  if $p$'s (resp. $q$'s) Tendermint calls `RequestVerifyVoteExtension` on $e$ and $w$ at height $h$,
+  then $p$'s Application accepts $e$ if and only if $q$'s Application accepts $e$.
+  Note that this requirement follows from Requirement 9 and the Agreement property of consensus.
+
+Requirements 9 and 10 ensure that the validation of vote extensions will be deterministic at all
+correct processes.
+Requirements 9 and 10 protect against arbitrary vote extension data from Byzantine processes,
+similarly to the way Requirements 6 and 7 protect against arbitrary proposed blocks.
+Requirements 9 and 10 can be violated by a bug inducing non-determinism in
+`VerifyVoteExtension`. In this case liveness can be compromised.
+Extra care should be put into the implementation of `ExtendVote` and `VerifyVoteExtension` and,
+as a general rule, `VerifyVoteExtension` _should_ always accept the vote extension.
+
+* Requirement 11 [_all_, no-side-effects]: $p$'s calls to `RequestPrepareProposal`,
+  `RequestProcessProposal`, `RequestExtendVote`, and `RequestVerifyVoteExtension` at height $h$ do
+  not modify $s_{p,h-1}$.
+
+* Requirement 12 [`ExtendVote`, `FinalizeBlock`, non-dependency]: for any correct process $p$,
+and any vote extension $e$ that $p$ received at height $h$, the computation of
+$s_{p,h}$ does not depend on $e$.
+
+The call to correct process $p$'s `RequestFinalizeBlock` at height $h$, with block $v_{p,h}$
+passed as parameter, creates state $s_{p,h}$.
+Additionally,
+
+* in next-block execution mode, $p$'s `FinalizeBlock` creates a set of transaction results $T_{p,h}$,
+* in same-block execution mode, $p$'s `PrepareProposal` creates a set of transaction results $T_{p,h}$
+  if $p$ was the proposer of $v_{p,h}$, otherwise `FinalizeBlock` creates $T_{p,h}$.
+
+* Requirement 13 [`FinalizeBlock`, determinism-1]: For any correct process $p$,
+  $s_{p,h}$ exclusively depends on $s_{p,h-1}$ and $v_{p,h}$.
+
+* Requirement 14 [`FinalizeBlock`, determinism-2]: For any correct process $p$,
+  the contents of $T_{p,h}$ exclusively depend on $s_{p,h-1}$ and $v_{p,h}$.
+
+Note that Requirements 13 and 14, combined with the Agreement property of consensus, ensure
+the Application state evolves consistently at all correct processes.
+
+Finally, notice that neither `PrepareProposal` nor `ExtendVote` have determinism-related
+requirements associated.
+Indeed, `PrepareProposal` is not required to be deterministic:
+
+* $v'_p$ may depend on $v_p$ and $s_{p,h-1}$, but may also depend on other values or operations.
+* $v_p = v_q \nRightarrow v'_p = v'_q$.
+
+Likewise, `ExtendVote` can also be non-deterministic:
+
+* $e^r_p$ may depend on $w^r_p$ and $s_{p,h-1}$, but may also depend on other values or operations.
+* $w^r_p = w^r_q \nRightarrow e^r_p = e^r_q$
diff --git a/spec/abci++/abci++_basic_concepts_002_draft.md b/spec/abci++/abci++_basic_concepts_002_draft.md
new file mode 100644
index 0000000000..a1ad038a51
--- /dev/null
+++ b/spec/abci++/abci++_basic_concepts_002_draft.md
@@ -0,0 +1,404 @@
+---
+order: 1
+title: Overview and basic concepts
+---
+
+## Outline
+- [ABCI++ vs. ABCI](#abci-vs-abci)
+- [Methods overview](#methods-overview)
+  - [Consensus methods](#consensus-methods)
+  - [Mempool methods](#mempool-methods)
+  - [Info methods](#info-methods)
+  - [State-sync methods](#state-sync-methods)
+- [Next-block execution vs. same-block execution](#next-block-execution-vs-same-block-execution)
+  - [Tendermint timeouts](#tendermint-timeouts-in-same-block-execution)
+- [Determinism](#determinism)
+- [Errors](#errors)
+- [Events](#events)
+- [Evidence](#evidence)
+
+# Overview and basic concepts
+
+## ABCI++ vs. ABCI
+[↑ Back to Outline](#outline)
+
+With ABCI, the application can only act at one phase in consensus, immediately after a block has been finalized. This restriction precludes numerous features for the application, including many scalability improvements that are now better understood than when ABCI was first written. For example, many of the scalability proposals can be boiled down to "Make the miner / block proposers / validators do work, so the network does not have to". This includes optimizations such as tx-level signature aggregation, state transition proofs, etc. Furthermore, many new security properties cannot be achieved in the current paradigm, as the application cannot require validators to do more than just finalize txs. This includes features such as threshold cryptography, and guaranteed IBC connection attempts.
+
+ABCI++ overcomes these limitations by allowing the application to intervene at three key places of the block execution. The new interface allows block proposers to perform application-dependent work in a block through the `PrepareProposal` method; validators to perform application-dependent work in a proposed block through the `ProcessProposal` method; and applications to require their validators to do more than just validate blocks, e.g., validator-guaranteed IBC connection attempts, through the `ExtendVote` and `VerifyVoteExtension` methods. Furthermore, ABCI++ renames {`BeginBlock`, [`DeliverTx`], `EndBlock`} to `FinalizeBlock`, as a simplified way to deliver a decided block to the Application.
+
+## Methods overview
+[↑ Back to Outline](#outline)
+
+Methods can be classified into four categories: consensus, mempool, info, and state-sync.
+
+### Consensus/block execution methods
+
+The first time a new blockchain is started, Tendermint calls
+`InitChain`. From then on, method `FinalizeBlock` is executed at the end of each
+block, resulting in an updated Application state.
+During consensus execution of a block height, before method `FinalizeBlock` is
+called, methods `PrepareProposal`, `ProcessProposal`, `ExtendVote`, and
+`VerifyVoteExtension` may be called several times.
+See [Tendermint's expected behavior](abci++_tmint_expected_behavior_002_draft.md)
+for details on the possible call sequences of these methods.
+
+* [**InitChain:**](./abci++_methods_002_draft.md#initchain) This method initializes the blockchain. Tendermint calls it once upon genesis.
+
+* [**PrepareProposal:**](./abci++_methods_002_draft.md#prepareproposal) It allows the block proposer to perform application-dependent work in a block before using it as its proposal. This enables, for instance, batch optimizations to a block, which has been empirically demonstrated to be a key component for scaling. Method `PrepareProposal` is called every time Tendermint is about to send
+a proposal message, but no previous proposal has been locked at the Tendermint level.
+Tendermint gathers outstanding transactions from the mempool, generates a block header, and uses
+them to create a block to propose. Then, it calls `RequestPrepareProposal`
+with the newly created proposal, called _raw proposal_. The Application can
+make changes to the raw proposal, such as modifying transactions, and returns
+the (potentially) modified proposal, called _prepared proposal_, in the
+`Response*` call. The logic modifying the raw proposal can be non-deterministic.
+
+* [**ProcessProposal:**](./abci++_methods_002_draft.md#processproposal) It allows a validator to perform application-dependent work in a proposed block. This enables features such as allowing validators to reject a block according to whether the state machine deems it valid, and changing the block execution pipeline. Tendermint calls it when it receives a proposal and it is not locked on a block. The Application cannot
+modify the proposal at this point but can reject it if it realizes it is invalid.
+If that is the case, Tendermint will prevote `nil` on the proposal, which has
+strong liveness implications for Tendermint. As a general rule, the Application
+SHOULD accept a prepared proposal passed via `ProcessProposal`, even if a part of
+the proposal is invalid (e.g., an invalid transaction); the Application can
+ignore the invalid part of the prepared proposal at block execution time.
+
+* [**ExtendVote:**](./abci++_methods_002_draft.md#extendvote) It allows applications to force their validators to do more than just validate within consensus. `ExtendVote` allows applications to include non-deterministic data, opaque to Tendermint, in precommit messages (the final round of voting).
+The data, called _vote extension_, will also be made available to the
+application in the next height, along with the vote it is extending, in the rounds
+where the local process is the proposer.
+If the Application does not have vote extension information to provide, it returns a 0-length byte array as its vote extension.
+Tendermint calls `ExtendVote` when it is about to send a non-`nil` precommit message.
+
+* [**VerifyVoteExtension:**](./abci++_methods_002_draft.md#verifyvoteextension) It allows validators to validate the vote extension data attached to a precommit message. If the validation fails, the precommit message will be deemed invalid and ignored
+by Tendermint. This has a negative impact on Tendermint's liveness, i.e., if vote extensions repeatedly cannot be verified by correct validators, Tendermint may not be able to finalize a block even if sufficiently many (+2/3) of the validators send precommit votes for that block. Thus, `VerifyVoteExtension` should be used with special care.
+As a general rule, an Application that detects an invalid vote extension SHOULD
+accept it in `ResponseVerifyVoteExtension` and ignore it in its own logic. Tendermint calls it when
+a process receives a precommit message with a (possibly empty) vote extension.
+
+* [**FinalizeBlock:**](./abci++_methods_002_draft.md#finalizeblock) It delivers a decided block to the Application. The Application must execute the transactions in the block in order and update its state accordingly. Cryptographic commitments to the block and transaction results, via the corresponding
+parameters in `ResponseFinalizeBlock`, are included in the header of the next block. Tendermint calls it when a new block is decided.
+
+### Mempool methods
+
+* [**CheckTx:**](./abci++_methods_002_draft.md#checktx) This method allows the Application to validate transactions against its current state, e.g., checking signatures and account balances. If a transaction passes the validation, then Tendermint adds it to its local mempool, discarding it otherwise. Tendermint calls it when it receives a new transaction either coming from an external user or another node. Furthermore, Tendermint can be configured to re-call `CheckTx` on any decided transaction (after `FinalizeBlock`).
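+
+For illustration, here is a minimal, hypothetical sketch of a `CheckTx` handler in Go; the `App` type and the `decodeTx`/`sigOK` helpers are assumptions of this sketch, not part of ABCI++:
+
+```go
+package app
+
+import abci "github.com/tendermint/tendermint/abci/types"
+
+// App is a hypothetical application type for this sketch.
+type App struct{}
+
+// decodeTx and sigOK stand in for application-specific decoding and
+// signature verification; real applications supply their own.
+func decodeTx(raw []byte) ([]byte, error) { return raw, nil }
+func sigOK(tx []byte) bool                { return len(tx) > 0 }
+
+// CheckTx validates a transaction against current state without mutating it.
+// A zero Code admits the transaction to the mempool; any other Code rejects it.
+func (app *App) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
+	tx, err := decodeTx(req.Tx)
+	if err != nil {
+		return abci.ResponseCheckTx{Code: 1, Log: "undecodable transaction"}
+	}
+	if !sigOK(tx) {
+		return abci.ResponseCheckTx{Code: 2, Log: "invalid signature"}
+	}
+	return abci.ResponseCheckTx{Code: 0}
+}
+```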
+
+### Info methods
+
+* [**Info:**](./abci++_methods_002_draft.md#info) Used to sync Tendermint with the Application during a handshake that happens on startup.
+
+* [**Query:**](./abci++_methods_002_draft.md#query) Clients can use this method to query the Application for information about the application state.
+
+### State-sync methods
+
+State sync allows new nodes to rapidly bootstrap by discovering, fetching, and applying
+state machine snapshots instead of replaying historical blocks. For more details, see the
+[state sync section](../p2p/messages/state-sync.md).
+
+New nodes will discover and request snapshots from other nodes in the P2P network.
+A Tendermint node that receives a request for snapshots from a peer will call
+`ListSnapshots` on its Application. The Application returns the list of locally available snapshots.
+Note that the list does not contain the actual snapshot but metadata about it: height at which the snapshot was taken, application-specific verification data and more (see [snapshot data type](./abci++_methods_002_draft.md#snapshot) for more details). After receiving a list of available snapshots from a peer, the new node can offer any of the snapshots in the list to its local Application via the `OfferSnapshot` method. The Application can check at this point the validity of the snapshot metadata.
+
+Snapshots may be quite large and are thus broken into smaller "chunks" that can be
+assembled into the whole snapshot. Once the Application accepts a snapshot and
+begins restoring it, Tendermint will fetch snapshot "chunks" from existing nodes.
+The node providing "chunks" will fetch them from its local Application using
+the `LoadSnapshotChunk` method.
+
+As the new node receives "chunks" it will apply them sequentially to the local
+application with `ApplySnapshotChunk`. When all chunks have been applied, the
+Application's `AppHash` is retrieved via an `Info` query.
+To ensure that the sync proceeded correctly, Tendermint compares the local Application's `AppHash` to the `AppHash` stored on the blockchain (verified via
+[light client verification](../light-client/verification/README.md)).
+
+In summary:
+
+* [**ListSnapshots:**](./abci++_methods_002_draft.md#listsnapshots) Used by nodes to discover available snapshots on peers.
+
+* [**LoadSnapshotChunk:**](./abci++_methods_002_draft.md#loadsnapshotchunk) Used by Tendermint to retrieve snapshot chunks from the application to send to peers.
+
+* [**OfferSnapshot:**](./abci++_methods_002_draft.md#offersnapshot) When a node receives a snapshot from a peer, Tendermint uses this method to offer the snapshot to the Application.
+
+* [**ApplySnapshotChunk:**](./abci++_methods_002_draft.md#applysnapshotchunk) Used by Tendermint to hand snapshot chunks to the Application.
+
+### Other methods
+
+Additionally, there is a [**Flush**](./abci++_methods_002_draft.md#flush) method that is called on every connection,
+and an [**Echo**](./abci++_methods_002_draft.md#echo) method that is just for debugging.
+
+More details on managing state across connections can be found in the section on
+[ABCI Applications](../abci/apps.md).
+
+## Next-block execution vs. same-block execution
+[↑ Back to Outline](#outline)
+
+In the original ABCI protocol, the only moment when the Application had access to a
+block was after it was decided. This led to a block execution model, called _next-block
+execution_, where some fields hashed in a block header refer to the execution of the
+previous block, namely:
+
+* the Merkle root of the Application's state
+* the transaction results
+* the consensus parameter updates
+* the validator updates
+
+With ABCI++, an Application may decide to keep using the next-block execution model, by doing all its processing in `FinalizeBlock`;
+however, the new methods introduced, `PrepareProposal` and `ProcessProposal`, allow
+for a new execution model, called _same-block execution_. An Application implementing
+this execution model, upon receiving a raw proposal via `RequestPrepareProposal`
+and potentially modifying its transaction list,
+fully executes the resulting prepared proposal as though it were the decided block.
+The results of the block execution are used as follows:
+
+* The block execution may generate a set of events. The Application should store these events and return them back to Tendermint during the `FinalizeBlock` call if the block is finally decided.
+* The Merkle root resulting from executing the prepared proposal is provided in
+  `ResponsePrepareProposal` and thus refers to the **current block**. Tendermint
+  will use it in the prepared proposal's header.
+* Likewise, the transaction results from executing the prepared proposal are
+  provided in `ResponsePrepareProposal` and refer to the transactions in the
+  **current block**. Tendermint will use them to calculate the results hash
+  in the prepared proposal's header.
+* The consensus parameter updates and validator updates are also provided in
+  `ResponsePrepareProposal` and reflect the result of the prepared proposal's
+  execution. They come into force in height H+1 (as opposed to the H+2 rule
+  in the next-block execution model).
+
+If the Application decides to keep the next-block execution model, it will not
+provide any data in `ResponsePrepareProposal`, other than an optionally modified
+transaction list.
+
+In the long term, the execution model will be set in a new boolean parameter
+*same_block* in `ConsensusParams`.
+It **must not** be changed once the blockchain has started unless the Application
+developers _really_ know what they are doing.
+However, modifying the `ConsensusParams` structure cannot be done lightly if we are to
+preserve blockchain compatibility. Therefore we need an interim solution until
+soft upgrades are specified and implemented in Tendermint. This somewhat _unsafe_
+solution consists of Tendermint assuming same-block execution if the Application
+fills the above-mentioned fields in `ResponsePrepareProposal`.
+
+### Tendermint timeouts in same-block execution
+
+The new same-block execution mode requires the Application to fully execute the
+prepared block at `PrepareProposal` time. This execution is synchronous, so
+Tendermint cannot make progress until the Application returns from `PrepareProposal`.
+This stands on Tendermint's critical path: if the Application takes a long time
+executing the block, the default value of _TimeoutPropose_ might not be sufficient
+to accommodate the long block execution time and non-proposer processes might time
+out and prevote `nil`, thus starting a further round unnecessarily.
+
+The Application is best suited to provide a value for _TimeoutPropose_ so
+that the block execution time upon `PrepareProposal` fits well in the propose
+timeout interval.
+
+Currently, the Application can override the value of _TimeoutPropose_ via the
+`config.toml` file.
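+For example, a node operator might set something like the following (the key
+naming follows recent Tendermint releases and may differ across versions; the
+value shown is purely illustrative):
+
+```toml
+[consensus]
+# Allow extra time for full block execution in PrepareProposal
+# (same-block execution) before non-proposers prevote nil.
+timeout-propose = "6s"
+```
+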
+In the future, `ConsensusParams` will have an extra field
+with the current _TimeoutPropose_ value so that the Application can adapt it at every height.
+
+## Determinism
+[↑ Back to Outline](#outline)
+
+ABCI++ applications must implement deterministic finite-state machines to be
+securely replicated by the Tendermint consensus engine. This means block execution
+over the Consensus Connection must be strictly deterministic: given the same
+ordered set of transactions, all nodes will compute identical responses, for all
+successive `FinalizeBlock` calls. This is critical because the
+responses are included in the header of the next block, either via a Merkle root
+or directly, so all nodes must agree on exactly what they are.
+
+For this reason, it is recommended that application state not be exposed to any
+external user or process except via the ABCI connections to a consensus engine
+like Tendermint Core. The Application must only change its state based on input
+from block execution (`FinalizeBlock` calls), and not through
+any other kind of request. This is the only way to ensure all nodes see the same
+transactions and compute the same results.
+
+Some Applications may choose to execute the blocks that are about to be proposed
+(via `PrepareProposal`), or those that the Application is asked to validate
+(via `ProcessProposal`). However, the state changes caused by processing those
+proposed blocks must never replace the previous state until `FinalizeBlock` confirms
+the decided block.
+
+Additionally, vote extensions or the validation thereof (via `ExtendVote` or
+`VerifyVoteExtension`) must _never_ have side effects on the current state.
+They can only be used when their data is provided in a `RequestPrepareProposal` call.
+
+If there is some non-determinism in the state machine, consensus will eventually
+fail as nodes disagree over the correct values for the block header. The
+non-determinism must be fixed and the nodes restarted.
+
+Sources of non-determinism in applications may include:
+
+* Hardware failures
+  * Cosmic rays, overheating, etc.
+* Node-dependent state
+  * Random numbers
+  * Time
+* Underspecification
+  * Library version changes
+  * Race conditions
+  * Floating point numbers
+  * JSON or protobuf serialization
+  * Iterating through hash-tables/maps/dictionaries
+* External Sources
+  * Filesystem
+  * Network calls (eg. some external REST API service)
+
+See [#56](https://github.com/tendermint/abci/issues/56) for original discussion.
+
+Note that some methods (`Query`, `CheckTx`, `FinalizeBlock`) return
+explicitly non-deterministic data in the form of `Info` and `Log` fields. The `Log` is
+intended for the literal output from the Application's logger, while the
+`Info` is any additional info that should be returned. These are the only fields
+that are not included in block header computations, so we don't need agreement
+on them. All other fields in the `Response*` must be strictly deterministic.
+
+## Errors
+[↑ Back to Outline](#outline)
+
+The `Query` and `CheckTx` methods include a `Code` field in their `Response*`.
+The `Code` field is also included in type `TxResult`, used by
+method `FinalizeBlock`'s `Response*`.
+Field `Code` is meant to contain an application-specific response code.
+A response code of `0` indicates no error. Any other response code
+indicates to Tendermint that an error occurred.
+
+These methods also return a `Codespace` string to Tendermint. This field is
+used to disambiguate `Code` values returned by different domains of the
+Application. The `Codespace` is a namespace for the `Code`.
+
+Methods `Echo`, `Info`, and `InitChain` do not return errors.
+An error in any of these methods represents a critical issue that Tendermint
+has no reasonable way to handle. If there is an error in one
+of these methods, the Application must crash to ensure that the error is safely
+handled by an operator.
+
+Method `FinalizeBlock` is a special case. It contains a number of
+`Code` and `Codespace` fields as part of type `TxResult`. Each of
+these codes reports errors related to the transaction it is attached to.
+However, `FinalizeBlock` does not return errors at the top level, so the
+same considerations on critical issues made for `Echo`, `Info`, and
+`InitChain` also apply here.
+
+The handling of non-zero response codes by Tendermint is described below.
+
+### `CheckTx`
+
+When Tendermint receives a `ResponseCheckTx` with a non-zero `Code`, the associated
+transaction will not be added to Tendermint's mempool or, if it is already
+included, it will be removed.
+
+### `TxResult` (as part of `FinalizeBlock`)
+
+The `TxResult` type delivers transactions from Tendermint to the Application.
+When Tendermint receives a `ResponseFinalizeBlock` containing a `TxResult`
+with a non-zero `Code`, the response code is logged.
+The transaction was already included in a block, so the `Code` does not influence
+Tendermint consensus.
+
+### `Query`
+
+When Tendermint receives a `ResponseQuery` with a non-zero `Code`, this code is
+returned directly to the client that initiated the query.
+
+## Events
+[↑ Back to Outline](#outline)
+
+Method `CheckTx` includes an `Events` field in its `Response*`.
+Method `FinalizeBlock` includes an `Events` field at the top level in its
+`Response*`, and one `events` field per transaction included in the block.
+Applications may respond to these ABCI++ methods with a set of events.
+Events allow applications to associate metadata about ABCI++ method execution with the
+transactions and blocks this metadata relates to.
+Events returned via these ABCI++ methods do not impact Tendermint consensus in any way
+and instead exist to power subscriptions and queries of Tendermint state.
+
+An `Event` contains a `type` and a list of `EventAttributes`, which are key-value
+string pairs denoting metadata about what happened during the method's (or transaction's)
+execution. `Event` values can be used to index transactions and blocks according to what
+happened during their execution.
+
+Each event has a `type` which is meant to categorize the event for a particular
+`Response*` or `Tx`. A `Response*` or `Tx` may contain multiple events with duplicate
+`type` values, where each distinct entry is meant to categorize attributes for a
+particular event. Every key and value in an event's attributes, as well as the event
+type itself, must be UTF-8 encoded strings.
+
+```protobuf
+message Event {
+  string type = 1;
+  repeated EventAttribute attributes = 2;
+}
+```
+
+The attributes of an `Event` consist of a `key`, a `value`, and an `index` flag. The
+index flag notifies the Tendermint indexer to index the attribute. The value of
+the `index` flag is non-deterministic and may vary across different nodes in the network.
+
+```protobuf
+message EventAttribute {
+  bytes key = 1;
+  bytes value = 2;
+  bool index = 3;  // nondeterministic
+}
+```
+
+Example:
+
+```go
+ abci.ResponseCheckTx{
+  // ...
+```go
+abci.ResponseCheckTx{
+    // ...
+    Events: []abci.Event{
+        {
+            Type: "validator.provisions",
+            Attributes: []abci.EventAttribute{
+                {Key: []byte("address"), Value: []byte("..."), Index: true},
+                {Key: []byte("amount"), Value: []byte("..."), Index: true},
+                {Key: []byte("balance"), Value: []byte("..."), Index: true},
+            },
+        },
+        {
+            Type: "validator.provisions",
+            Attributes: []abci.EventAttribute{
+                {Key: []byte("address"), Value: []byte("..."), Index: true},
+                {Key: []byte("amount"), Value: []byte("..."), Index: false},
+                {Key: []byte("balance"), Value: []byte("..."), Index: false},
+            },
+        },
+        {
+            Type: "validator.slashed",
+            Attributes: []abci.EventAttribute{
+                {Key: []byte("address"), Value: []byte("..."), Index: false},
+                {Key: []byte("amount"), Value: []byte("..."), Index: true},
+                {Key: []byte("reason"), Value: []byte("..."), Index: true},
+            },
+        },
+        // ...
+    },
+}
+```
+
+## Evidence
+[↑ Back to Outline](#outline)
+
+Tendermint's security model relies on the use of "evidence". Evidence is proof of
+malicious behavior by a network participant. It is the responsibility of Tendermint
+to detect such malicious behavior. When malicious behavior is detected, Tendermint
+will gossip evidence of the behavior to other nodes and commit the evidence to
+the chain once it is verified by all validators. This evidence will then be
+passed on to the Application through ABCI++. It is the responsibility of the
+Application to handle the evidence and exercise punishment.
+
+EvidenceType has the following protobuf format:
+
+```protobuf
+enum EvidenceType {
+  UNKNOWN = 0;
+  DUPLICATE_VOTE = 1;
+  LIGHT_CLIENT_ATTACK = 2;
+}
+```
+
+There are two forms of evidence: Duplicate Vote and Light Client Attack. More
+information can be found in either [data structures](../core/data_structures.md)
+or [accountability](../light-client/accountability/).
+
diff --git a/spec/abci++/abci++_methods_002_draft.md b/spec/abci++/abci++_methods_002_draft.md
new file mode 100644
index 0000000000..d1782bbdc1
--- /dev/null
+++ b/spec/abci++/abci++_methods_002_draft.md
@@ -0,0 +1,909 @@
+---
+order: 2
+title: Methods
+---
+
+# Methods
+
+## Methods existing in ABCI
+
+### Echo
+
+* **Request**:
+    * `Message (string)`: A string to echo back
+* **Response**:
+    * `Message (string)`: The input string
+* **Usage**:
+    * Echo a string to test an ABCI client/server implementation
+
+### Flush
+
+* **Usage**:
+    * Signals that messages queued on the client should be flushed to
+      the server. It is called periodically by the client
+      implementation to ensure asynchronous requests are actually
+      sent, and is called immediately to make a synchronous request,
+      which returns when the Flush response comes back.
+ +### Info + +* **Request**: + + | Name | Type | Description | Field Number | + |---------------|--------|------------------------------------------|--------------| + | version | string | The Tendermint software semantic version | 1 | + | block_version | uint64 | The Tendermint Block Protocol version | 2 | + | p2p_version | uint64 | The Tendermint P2P Protocol version | 3 | + | abci_version | string | The Tendermint ABCI semantic version | 4 | + +* **Response**: + + | Name | Type | Description | Field Number | + |---------------------|--------|--------------------------------------------------|--------------| + | data | string | Some arbitrary information | 1 | + | version | string | The application software semantic version | 2 | + | app_version | uint64 | The application protocol version | 3 | + | last_block_height | int64 | Latest block for which the app has called Commit | 4 | + | last_block_app_hash | bytes | Latest result of Commit | 5 | + +* **Usage**: + * Return information about the application state. + * Used to sync Tendermint with the application during a handshake + that happens on startup. + * The returned `app_version` will be included in the Header of every block. + * Tendermint expects `last_block_app_hash` and `last_block_height` to + be updated during `Commit`, ensuring that `Commit` is never + called twice for the same block height. + +> Note: Semantic version is a reference to [semantic versioning](https://semver.org/). Semantic versions in info will be displayed as X.X.x. + +### InitChain + +* **Request**: + + | Name | Type | Description | Field Number | + |------------------|--------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------|--------------| + | time | [google.protobuf.Timestamp](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Timestamp) | Genesis time | 1 | + | chain_id | string | ID of the blockchain. | 2 | + | consensus_params | [ConsensusParams](#consensusparams) | Initial consensus-critical parameters. | 3 | + | validators | repeated [ValidatorUpdate](#validatorupdate) | Initial genesis validators, sorted by voting power. | 4 | + | app_state_bytes | bytes | Serialized initial application state. JSON bytes. | 5 | + | initial_height | int64 | Height of the initial block (typically `1`). | 6 | + +* **Response**: + + | Name | Type | Description | Field Number | + |------------------|----------------------------------------------|-------------------------------------------------|--------------| + | consensus_params | [ConsensusParams](#consensusparams) | Initial consensus-critical parameters (optional) | 1 | + | validators | repeated [ValidatorUpdate](#validatorupdate) | Initial validator set (optional). | 2 | + | app_hash | bytes | Initial application hash. | 3 | + +* **Usage**: + * Called once upon genesis. + * If ResponseInitChain.Validators is empty, the initial validator set will be the RequestInitChain.Validators + * If ResponseInitChain.Validators is not empty, it will be the initial + validator set (regardless of what is in RequestInitChain.Validators). + * This allows the app to decide if it wants to accept the initial validator + set proposed by tendermint (ie. in the genesis file), or if it wants to use + a different one (perhaps computed based on some application specific + information in the genesis file). 
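+
+As an illustration, here is a non-normative sketch of an `InitChain` handler
+that accepts the genesis validator set as-is. `loadGenesisState` and the state
+type are hypothetical, and the snippet assumes
+`import abci "github.com/tendermint/tendermint/abci/types"`:
+
+```go
+func (app *Application) InitChain(req abci.RequestInitChain) abci.ResponseInitChain {
+    // Initialize the application state from the serialized (JSON) genesis state.
+    app.state = loadGenesisState(req.AppStateBytes) // hypothetical helper
+
+    // An empty validator list accepts req.Validators unchanged; a non-empty
+    // list would replace the genesis validator set entirely.
+    return abci.ResponseInitChain{
+        Validators: nil,
+        AppHash:    app.state.Hash(), // initial application hash (hypothetical)
+    }
+}
+```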
+
+### Query
+
+* **Request**:
+
+    | Name   | Type   | Description                                                                                                                                                                                                                                                                       | Field Number |
+    |--------|--------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------|
+    | data   | bytes  | Raw query bytes. Can be used with or in lieu of Path.                                                                                                                                                                                                                               | 1            |
+    | path   | string | Path field of the request URI. Can be used with or in lieu of `data`. Apps MUST interpret `/store` as a query by key on the underlying store. The key SHOULD be specified in the `data` field. Apps SHOULD allow queries over specific types like `/accounts/...` or `/votes/...`   | 2            |
+    | height | int64  | The block height for which you want the query (default=0 returns data for the latest committed block). Note that this is the height of the block containing the application's Merkle root hash, which represents the state as it was after committing the block at Height-1        | 3            |
+    | prove  | bool   | Return Merkle proof with response if possible                                                                                                                                                                                                                                       | 4            |
+
+* **Response**:
+
+    | Name      | Type                  | Description                                                                                                                                                                                                          | Field Number |
+    |-----------|-----------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------|
+    | code      | uint32                | Response code.                                                                                                                                                                                                         | 1            |
+    | log       | string                | The output of the application's logger. **May be non-deterministic.**                                                                                                                                                  | 3            |
+    | info      | string                | Additional information. **May be non-deterministic.**                                                                                                                                                                  | 4            |
+    | index     | int64                 | The index of the key in the tree.                                                                                                                                                                                      | 5            |
+    | key       | bytes                 | The key of the matching data.                                                                                                                                                                                          | 6            |
+    | value     | bytes                 | The value of the matching data.                                                                                                                                                                                        | 7            |
+    | proof_ops | [ProofOps](#proofops) | Serialized proof for the value data, if requested, to be verified against the `app_hash` for the given Height.                                                                                                         | 8            |
+    | height    | int64                 | The block height from which data was derived. Note that this is the height of the block containing the application's Merkle root hash, which represents the state as it was after committing the block at Height-1    | 9            |
+    | codespace | string                | Namespace for the `code`.                                                                                                                                                                                              | 10           |
+
+* **Usage**:
+    * Query for data from the application at current or past height.
+    * Optionally return Merkle proof.
+    * Merkle proof includes self-describing `type` field to support many types
+      of Merkle trees and encoding formats.
+
+### CheckTx
+
+* **Request**:
+
+    | Name | Type        | Description                                                                                                                                                                                                                              | Field Number |
+    |------|-------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------|
+    | tx   | bytes       | The request transaction bytes                                                                                                                                                                                                            | 1            |
+    | type | CheckTxType | One of `CheckTx_New` or `CheckTx_Recheck`. `CheckTx_New` is the default and means that a full check of the transaction is required. `CheckTx_Recheck` types are used when the mempool is initiating a normal recheck of a transaction.   | 2            |
+
+* **Response**:
+
+    | Name       | Type                                                        | Description                                                            | Field Number |
+    |------------|-------------------------------------------------------------|------------------------------------------------------------------------|--------------|
+    | code       | uint32                                                      | Response code.                                                          | 1            |
+    | data       | bytes                                                       | Result bytes, if any.                                                   | 2            |
+    | log        | string                                                      | The output of the application's logger. **May be non-deterministic.**   | 3            |
+    | info       | string                                                      | Additional information. **May be non-deterministic.**                   | 4            |
+    | gas_wanted | int64                                                       | Amount of gas requested for transaction.                                | 5            |
+    | gas_used   | int64                                                       | Amount of gas consumed by transaction.                                  | 6            |
+    | events     | repeated [Event](abci++_basic_concepts_002_draft.md#events) | Type & Key-Value events for indexing transactions (eg. by account).     | 7            |
+    | codespace  | string                                                      | Namespace for the `code`.                                               | 8            |
+    | sender     | string                                                      | The transaction's sender (e.g. the signer)                              | 9            |
+    | priority   | int64                                                       | The transaction's priority (for mempool ordering)                       | 10           |
+
+* **Usage**:
+
+    * Technically optional - not involved in processing blocks.
+    * Guardian of the mempool: every node runs `CheckTx` before letting a
+      transaction into its local mempool.
+    * The transaction may come from an external user or another node.
+    * `CheckTx` validates the transaction against the current state of the application,
+      for example, checking signatures and account balances. It does not apply any
+      of the state changes described in the transaction, such as running the
+      transaction's code in a virtual machine.
+    * Transactions where `ResponseCheckTx.Code != 0` will be rejected - they will not be broadcast to
+      other nodes or included in a proposal block.
+    * Tendermint attributes no other value to the response code.
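+
+For illustration, here is a sketch of a `CheckTx` handler that performs
+validation and fills in the mempool-related fields. The transaction decoding
+and validation helpers are hypothetical, and the snippet assumes
+`import abci "github.com/tendermint/tendermint/abci/types"`:
+
+```go
+func (app *Application) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
+    tx, err := decodeTx(req.Tx) // hypothetical application-specific decoding
+    if err != nil {
+        return abci.ResponseCheckTx{Code: 1, Codespace: "app", Log: err.Error()}
+    }
+    // Check the transaction against the *current* state (signatures, balances,
+    // nonces, ...) without applying any of its state changes.
+    if err := app.validate(tx); err != nil { // hypothetical validation
+        return abci.ResponseCheckTx{Code: 2, Codespace: "app", Log: err.Error()}
+    }
+    return abci.ResponseCheckTx{
+        Code:      0,           // 0 means valid: the tx may enter the mempool
+        GasWanted: tx.GasLimit, // gas requested by the transaction
+        Sender:    tx.Sender,   // lets the mempool order transactions per sender
+        Priority:  tx.Fee,      // higher-priority transactions are reaped first
+    }
+}
+```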
+
+### ListSnapshots
+
+* **Request**:
+
+    | Name | Type | Description | Field Number |
+    |------|------|-------------|--------------|
+
+    Empty request asking the application for a list of snapshots.
+
+* **Response**:
+
+    | Name      | Type                           | Description                    | Field Number |
+    |-----------|--------------------------------|--------------------------------|--------------|
+    | snapshots | repeated [Snapshot](#snapshot) | List of local state snapshots. | 1            |
+
+* **Usage**:
+    * Used during state sync to discover available snapshots on peers.
+    * See `Snapshot` data type for details.
+
+### LoadSnapshotChunk
+
+* **Request**:
+
+    | Name   | Type   | Description                                                            | Field Number |
+    |--------|--------|------------------------------------------------------------------------|--------------|
+    | height | uint64 | The height of the snapshot the chunk belongs to.                        | 1            |
+    | format | uint32 | The application-specific format of the snapshot the chunk belongs to.   | 2            |
+    | chunk  | uint32 | The chunk index, starting from `0` for the initial chunk.               | 3            |
+
+* **Response**:
+
+    | Name  | Type  | Description                                                                                                                                              | Field Number |
+    |-------|-------|------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------|
+    | chunk | bytes | The binary chunk contents, in an arbitrary format. Chunk messages cannot be larger than 16 MB _including metadata_, so 10 MB is a good starting point.       | 1            |
+
+* **Usage**:
+    * Used during state sync to retrieve snapshot chunks from peers.
+
+### OfferSnapshot
+
+* **Request**:
+
+    | Name     | Type                  | Description                           | Field Number |
+    |----------|-----------------------|---------------------------------------|--------------|
+    | snapshot | [Snapshot](#snapshot) | The snapshot offered for restoration. | 1            |
+    | app_hash | bytes                 | The light client-verified app hash for this height, from the blockchain.
| 2 | + +* **Response**: + + | Name | Type | Description | Field Number | + |--------|-------------------|-----------------------------------|--------------| + | result | [Result](#result) | The result of the snapshot offer. | 1 | + +#### Result + +```protobuf + enum Result { + UNKNOWN = 0; // Unknown result, abort all snapshot restoration + ACCEPT = 1; // Snapshot is accepted, start applying chunks. + ABORT = 2; // Abort snapshot restoration, and don't try any other snapshots. + REJECT = 3; // Reject this specific snapshot, try others. + REJECT_FORMAT = 4; // Reject all snapshots with this `format`, try others. + REJECT_SENDER = 5; // Reject all snapshots from all senders of this snapshot, try others. + } +``` + +* **Usage**: + * `OfferSnapshot` is called when bootstrapping a node using state sync. The application may + accept or reject snapshots as appropriate. Upon accepting, Tendermint will retrieve and + apply snapshot chunks via `ApplySnapshotChunk`. The application may also choose to reject a + snapshot in the chunk response, in which case it should be prepared to accept further + `OfferSnapshot` calls. + * Only `AppHash` can be trusted, as it has been verified by the light client. Any other data + can be spoofed by adversaries, so applications should employ additional verification schemes + to avoid denial-of-service attacks. The verified `AppHash` is automatically checked against + the restored application at the end of snapshot restoration. + * For more information, see the `Snapshot` data type or the [state sync section](../p2p/messages/state-sync.md). + +### ApplySnapshotChunk + +* **Request**: + + | Name | Type | Description | Field Number | + |--------|--------|-----------------------------------------------------------------------------|--------------| + | index | uint32 | The chunk index, starting from `0`. Tendermint applies chunks sequentially. | 1 | + | chunk | bytes | The binary chunk contents, as returned by `LoadSnapshotChunk`. | 2 | + | sender | string | The P2P ID of the node who sent this chunk. | 3 | + +* **Response**: + + | Name | Type | Description | Field Number | + |----------------|---------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------| + | result | Result (see below) | The result of applying this chunk. | 1 | + | refetch_chunks | repeated uint32 | Refetch and reapply the given chunks, regardless of `result`. Only the listed chunks will be refetched, and reapplied in sequential order. | 2 | + | reject_senders | repeated string | Reject the given P2P senders, regardless of `Result`. Any chunks already applied will not be refetched unless explicitly requested, but queued chunks from these senders will be discarded, and new chunks or other snapshots rejected. | 3 | + +```proto + enum Result { + UNKNOWN = 0; // Unknown result, abort all snapshot restoration + ACCEPT = 1; // The chunk was accepted. + ABORT = 2; // Abort snapshot restoration, and don't try any other snapshots. + RETRY = 3; // Reapply this chunk, combine with `RefetchChunks` and `RejectSenders` as appropriate. + RETRY_SNAPSHOT = 4; // Restart this snapshot from `OfferSnapshot`, reusing chunks unless instructed otherwise. + REJECT_SNAPSHOT = 5; // Reject this snapshot, try a different one. 
+ }
+```
+
+* **Usage**:
+    * The application can choose to refetch chunks and/or ban P2P peers as appropriate. Tendermint
+      will not do this unless instructed by the application.
+    * The application may want to verify each chunk, e.g. by attaching chunk hashes in
+      `Snapshot.Metadata` and/or incrementally verifying contents against `AppHash`.
+    * When all chunks have been accepted, Tendermint will make an ABCI `Info` call to verify that
+      `LastBlockAppHash` and `LastBlockHeight` match the expected values, and record the
+      `AppVersion` in the node state. It then switches to fast sync or consensus and joins the
+      network.
+    * If Tendermint is unable to retrieve the next chunk after some time (e.g. because no suitable
+      peers are available), it will reject the snapshot and try a different one via `OfferSnapshot`.
+      The application should be prepared to reset and accept it or abort as appropriate.
+
+## New methods introduced in ABCI++
+
+### PrepareProposal
+
+#### Parameters and Types
+
+* **Request**:
+
+    | Name                 | Type                                                                                                                                   | Description                                                                                      | Field Number |
+    |----------------------|----------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------|--------------|
+    | max_tx_bytes         | int64                                                                                                                                    | Currently configured maximum size in bytes taken by the modified transactions.                    | 1            |
+    | txs                  | repeated bytes                                                                                                                           | Preliminary list of transactions that have been picked as part of the block to propose.           | 2            |
+    | local_last_commit    | [ExtendedCommitInfo](#extendedcommitinfo)                                                                                                | Info about the last commit, obtained locally from Tendermint's data structures.                   | 3            |
+    | byzantine_validators | repeated [Misbehavior](#misbehavior)                                                                                                     | List of information about validators that acted incorrectly.                                      | 4            |
+    | height               | int64                                                                                                                                    | The height of the block that will be proposed.                                                    | 5            |
+    | time                 | [google.protobuf.Timestamp](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Timestamp)    | Timestamp of the block that will be proposed.                                                     | 6            |
+    | next_validators_hash | bytes                                                                                                                                    | Merkle root of the next validator set.                                                            | 7            |
+    | proposer_address     | bytes                                                                                                                                    | [Address](../core/data_structures.md#address) of the validator that is creating the proposal.     | 8            |
+
+* **Response**:
+
+    | Name                    | Type                                          | Description                                                                                   | Field Number |
+    |-------------------------|-----------------------------------------------|-----------------------------------------------------------------------------------------------|--------------|
+    | tx_records              | repeated [TxRecord](#txrecord)                | Possibly modified list of transactions that have been picked as part of the proposed block.    | 2            |
+    | app_hash                | bytes                                         | The Merkle root hash of the application state.                                                 | 3            |
+    | tx_results              | repeated [ExecTxResult](#txresult)            | List of structures containing the data resulting from executing the transactions.              | 4            |
+    | validator_updates       | repeated [ValidatorUpdate](#validatorupdate)  | Changes to validator set (set voting power to 0 to remove).                                    | 5            |
+    | consensus_param_updates | [ConsensusParams](#consensusparams)           | Changes to consensus-critical gas, size, and other parameters.                                 | 6            |
+
+* **Usage**:
+    * The first six parameters of `RequestPrepareProposal` are the same as `RequestProcessProposal`
+      and `RequestFinalizeBlock`.
+    * The height and time values match the values from the header of the proposed block.
+    * `RequestPrepareProposal` contains a preliminary set of transactions `txs` that Tendermint considers to be a good block proposal, called _raw proposal_. The Application can modify this set via `ResponsePrepareProposal.tx_records` (see [TxRecord](#txrecord)).
+    * The Application _can_ reorder, remove or add transactions to the raw proposal. Let `tx` be a transaction in `txs`:
+        * If the Application considers that `tx` should not be proposed in this block, e.g., there are other transactions with higher priority, then it should not include it in `tx_records`. In this case, Tendermint won't remove `tx` from the mempool. The Application should be extra careful, as abusing this feature may cause transactions to stay forever in the mempool.
+        * If the Application considers that a `tx` should not be included in the proposal and should be removed from the mempool, then the Application should include it in `tx_records` and _mark_ it as `REMOVED`. In this case, Tendermint will remove `tx` from the mempool.
+        * If the Application wants to add a new transaction, then the Application should include it in `tx_records` and _mark_ it as `ADDED`. In this case, Tendermint will add it to the mempool.
+    * The Application should be aware that removing and adding transactions may compromise _traceability_.
+      > Consider the following example: the Application transforms a client-submitted transaction `t1` into a second transaction `t2`, i.e., the Application asks Tendermint to remove `t1` and add `t2` to the mempool. If a client wants to eventually check what happened to `t1`, it will discover that `t1` is not in the mempool or in a committed block, getting the wrong idea that `t1` did not make it into a block. Note that `t2` _will be_ in a committed block, but unless the Application tracks this information, no component will be aware of it. Thus, if the Application wants traceability, it is its responsibility to support it. For instance, the Application could attach to a transformed transaction a list with the hashes of the transactions it derives from.
+    * Tendermint MAY include a list of transactions in `RequestPrepareProposal.txs` whose total size in bytes exceeds `RequestPrepareProposal.max_tx_bytes`.
+      Therefore, if the size of `RequestPrepareProposal.txs` is greater than `RequestPrepareProposal.max_tx_bytes`, the Application MUST make sure that the
+      `RequestPrepareProposal.max_tx_bytes` limit is respected by those transaction records returned in `ResponsePrepareProposal.tx_records` that are marked as `UNMODIFIED` or `ADDED`.
+    * In same-block execution mode, the Application must provide values for `ResponsePrepareProposal.app_hash`,
+      `ResponsePrepareProposal.tx_results`, `ResponsePrepareProposal.validator_updates`, and
+      `ResponsePrepareProposal.consensus_param_updates`, as a result of fully executing the block.
+    * The values for `ResponsePrepareProposal.validator_updates`, or
+      `ResponsePrepareProposal.consensus_param_updates` may be empty. In this case, Tendermint will keep
+      the current values.
+    * `ResponsePrepareProposal.validator_updates`, triggered by block `H`, affect validation
+      for blocks `H+1` and `H+2`. Heights following a validator update are affected in the following way:
+        * `H`: `NextValidatorsHash` includes the new `validator_updates` value.
+        * `H+1`: The validator set change takes effect and `ValidatorsHash` is updated.
+        * `H+2`: `local_last_commit` now includes the altered validator set.
+    * `ResponsePrepareProposal.consensus_param_updates` returned for block `H` apply to the consensus
+      params for block `H+1`, even if the change is agreed in block `H`.
+      For more information on the consensus parameters,
+      see the [application spec entry on consensus parameters](../abci/apps.md#consensus-parameters).
+    * It is the responsibility of the Application to set the right value for _TimeoutPropose_ so that
+      the (synchronous) execution of the block does not cause other processes to prevote `nil` because
+      their propose timeout goes off.
+    * In next-block execution mode, Tendermint will ignore parameters `ResponsePrepareProposal.tx_results`,
+      `ResponsePrepareProposal.validator_updates`, and `ResponsePrepareProposal.consensus_param_updates`.
+    * As a result of executing the prepared proposal, the Application may produce header events or transaction events.
+      The Application must keep those events until a block is decided and then pass them on to Tendermint via
+      `ResponseFinalizeBlock`.
+    * Likewise, in next-block execution mode, the Application must keep all responses to executing transactions
+      until it can return them via `ResponseFinalizeBlock`.
+    * As a sanity check, Tendermint will check the returned parameters for validity if the Application modified them.
+      In particular, `ResponsePrepareProposal.tx_records` will be deemed invalid if
+        * There is a duplicate transaction in the list.
+        * A new or modified transaction is marked as `UNMODIFIED` or `REMOVED`.
+        * An unmodified transaction is marked as `ADDED`.
+        * A transaction is marked as `UNKNOWN`.
+    * If Tendermint fails to validate the `ResponsePrepareProposal`, Tendermint will assume the application is faulty and crash.
+    * The implementation of `PrepareProposal` can be non-deterministic.
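+
+The following non-normative sketch illustrates these rules: it keeps valid
+transactions as `UNMODIFIED`, evicts invalid ones as `REMOVED`, and enforces the
+`max_tx_bytes` limit. The `isInvalid` helper is hypothetical, and the Go field
+and constant names are assumed to follow the protobuf definitions in this draft:
+
+```go
+func (app *Application) PrepareProposal(req abci.RequestPrepareProposal) abci.ResponsePrepareProposal {
+    var records []*abci.TxRecord
+    remaining := req.MaxTxBytes
+    for _, tx := range req.Txs {
+        if app.isInvalid(tx) { // hypothetical check against the current state
+            // Evict from the proposal *and* from the mempool.
+            records = append(records, &abci.TxRecord{Action: abci.TxRecord_REMOVED, Tx: tx})
+            continue
+        }
+        if int64(len(tx)) > remaining {
+            // Leave it out of this proposal (but in the mempool) for a later block.
+            continue
+        }
+        remaining -= int64(len(tx))
+        records = append(records, &abci.TxRecord{Action: abci.TxRecord_UNMODIFIED, Tx: tx})
+    }
+    return abci.ResponsePrepareProposal{TxRecords: records}
+}
+```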
+
+#### When does Tendermint call it?
+
+When a validator _p_ enters Tendermint consensus round _r_, height _h_, in which _p_ is the proposer,
+and _p_'s _validValue_ is `nil`:
+
+1. _p_'s Tendermint collects outstanding transactions from the mempool
+    * The transactions will be collected in order of priority
+    * Let $C$ be the list of currently collected transactions
+    * The collection stops when any of the following conditions is met
+        * the mempool is empty
+        * the total size of transactions $\in C$ is greater than or equal to `consensusParams.block.max_bytes`
+        * the sum of the `GasWanted` fields of transactions $\in C$ is greater than or equal to
+          `consensusParams.block.max_gas`
+    * _p_'s Tendermint creates a block header.
+2. _p_'s Tendermint calls `RequestPrepareProposal` with the newly generated block.
+   The call is synchronous: Tendermint's execution will block until the Application returns from the call.
+3. The Application checks the block (hashes, transactions, commit info, misbehavior). Besides,
+    * in same-block execution mode, the Application can (and should) provide `ResponsePrepareProposal.app_hash`,
+      `ResponsePrepareProposal.validator_updates`, or
+      `ResponsePrepareProposal.consensus_param_updates`.
+    * in next-block execution mode, _p_'s Tendermint will ignore the values for `ResponsePrepareProposal.app_hash`,
+      `ResponsePrepareProposal.validator_updates`, and `ResponsePrepareProposal.consensus_param_updates`.
+    * in both modes, the Application can manipulate transactions
+        * leave transactions untouched - `TxAction = UNMODIFIED`
+        * add new transactions directly to the proposal - `TxAction = ADDED`
+        * remove transactions (invalid) from the proposal and from the mempool - `TxAction = REMOVED`
+        * remove transactions from the proposal but not from the mempool (effectively _delaying_ them) - the
+          Application removes the transaction from the list
+        * modify transactions (e.g. aggregate them) - `TxAction = ADDED` followed by `TxAction = REMOVED`. As explained above, this compromises client traceability, unless it is implemented at the Application level.
+        * reorder transactions - the Application reorders transactions in the list
+4. If the Application modified the block, it includes the modified transaction list in
+   `ResponsePrepareProposal.tx_records` (see the rules in section _Usage_).
+   The Application returns from the call.
+5. _p_'s Tendermint uses the (possibly) modified block as _p_'s proposal in round _r_, height _h_.
+
+Note that, if _p_ has a non-`nil` _validValue_, Tendermint will use it as proposal and will not call `RequestPrepareProposal`.
+
+### ProcessProposal
+
+#### Parameters and Types
+
+* **Request**:
+
+    | Name                 | Type                                                                                                                                   | Description                                                                                      | Field Number |
+    |----------------------|----------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------|--------------|
+    | txs                  | repeated bytes                                                                                                                           | List of transactions that have been picked as part of the proposed block.                         | 1            |
+    | proposed_last_commit | [CommitInfo](#commitinfo)                                                                                                                | Info about the last commit, obtained from the information in the proposed block.                  | 2            |
+    | byzantine_validators | repeated [Misbehavior](#misbehavior)                                                                                                     | List of information about validators that acted incorrectly.                                      | 3            |
+    | hash                 | bytes                                                                                                                                    | The block header's hash of the proposed block.                                                    | 4            |
+    | height               | int64                                                                                                                                    | The height of the proposed block.                                                                 | 5            |
+    | time                 | [google.protobuf.Timestamp](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Timestamp)    | Timestamp included in the proposed block.                                                         | 6            |
+    | next_validators_hash | bytes                                                                                                                                    | Merkle root of the next validator set.                                                            | 7            |
+    | proposer_address     | bytes                                                                                                                                    | [Address](../core/data_structures.md#address) of the validator that created the proposal.         | 8            |
+
+* **Response**:
+
+    | Name                    | Type                                          | Description                                                                          | Field Number |
+    |-------------------------|-----------------------------------------------|----------------------------------------------------------------------------------------|--------------|
+    | status                  | [ProposalStatus](#proposalstatus)             | `enum` that signals if the application finds the proposal valid.                        | 1            |
+    | app_hash                | bytes                                         | The Merkle root hash of the application state.                                          | 2            |
+    | tx_results              | repeated [ExecTxResult](#txresult)            | List of structures containing the data resulting from executing the transactions.       | 3            |
+    | validator_updates       | repeated [ValidatorUpdate](#validatorupdate)  | Changes to validator set (set voting power to 0 to remove).                             | 4            |
+    | consensus_param_updates | [ConsensusParams](#consensusparams)           | Changes to consensus-critical gas, size, and other parameters.                          | 5            |
+
+* **Usage**:
+    * Contains fields from the proposed block.
+    * The Application may fully execute the block as though it was handling `RequestFinalizeBlock`.
+      However, any resulting state changes must be kept as _candidate state_,
+      and the Application should be ready to backtrack/discard it in case the decided block is different.
+    * The height and timestamp values match the values from the header of the proposed block.
+    * If `ResponseProcessProposal.status` is `REJECT`, Tendermint assumes the proposal received
+      is not valid.
+    * In same-block execution mode, the Application is required to fully execute the block and provide values
+      for parameters `ResponseProcessProposal.app_hash`, `ResponseProcessProposal.tx_results`,
+      `ResponseProcessProposal.validator_updates`, and `ResponseProcessProposal.consensus_param_updates`,
+      so that Tendermint can then verify the hashes in the block's header are correct.
+      If the hashes mismatch, Tendermint will reject the block even if `ResponseProcessProposal.status`
+      was set to `ACCEPT`.
+    * In next-block execution mode, the Application should *not* provide values for parameters
+      `ResponseProcessProposal.app_hash`, `ResponseProcessProposal.tx_results`,
+      `ResponseProcessProposal.validator_updates`, and `ResponseProcessProposal.consensus_param_updates`.
+    * The implementation of `ProcessProposal` MUST be deterministic. Moreover, the value of
+      `ResponseProcessProposal.status` MUST **exclusively** depend on the parameters passed in
+      the call to `RequestProcessProposal`, and the last committed Application state
+      (see [Requirements](abci++_app_requirements_002_draft.md) section).
+    * Moreover, application implementers SHOULD always set `ResponseProcessProposal.status` to `ACCEPT`,
+      unless they _really_ know what the potential liveness implications of returning `REJECT` are.
+
+#### When does Tendermint call it?
+
+When a validator _p_ enters Tendermint consensus round _r_, height _h_, in which _q_ is the proposer (possibly _p_ = _q_):
+
+1. _p_ sets up timer `ProposeTimeout`.
+2. If _p_ is the proposer, _p_ executes steps 1-5 in [PrepareProposal](#prepareproposal).
+3. Upon reception of the Proposal message (which contains the header) for round _r_, height _h_ from _q_, _p_'s Tendermint verifies the block header.
+4. Upon reception of the Proposal message, along with all the block parts, for round _r_, height _h_ from _q_, _p_'s Tendermint follows its algorithm
+   to check whether it should prevote for the block just received, or `nil`.
+5. If Tendermint should prevote for the block just received
+    1. Tendermint calls `RequestProcessProposal` with the block. The call is synchronous.
+    2. The Application checks/processes the proposed block, which it must treat as read-only, and returns
+       _accept_ or _reject_ via `ResponseProcessProposal.status`.
+        * The Application, depending on its needs, may return from `ProcessProposal`
+            * either after it has completely processed the block (the simpler case),
+            * or immediately (after doing some basic checks), and process the block asynchronously. In this case the Application will
+              not be able to reject the block, or force prevote/precommit `nil` afterwards.
+    3. If the returned value is
+        * _accept_, Tendermint prevotes on this proposal for round _r_, height _h_.
+        * _reject_, Tendermint prevotes `nil`.
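+
+A non-normative sketch of the candidate-state pattern described above, in
+next-block execution mode. The state-copy and execution helpers are
+hypothetical, and the enum constant names are assumed to follow the protobuf
+definitions in this draft; per the guidance above, `REJECT` is reserved for
+clearly invalid blocks:
+
+```go
+func (app *Application) ProcessProposal(req abci.RequestProcessProposal) abci.ResponseProcessProposal {
+    // Execute the proposed block against a throwaway copy of the state. The
+    // result is only *candidate state*: it must not replace the committed
+    // state until FinalizeBlock confirms that this block was decided.
+    candidate := app.committedState.Copy() // hypothetical copy-on-write state
+    for _, tx := range req.Txs {
+        if err := candidate.Execute(tx); err != nil { // hypothetical execution
+            return abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_REJECT}
+        }
+    }
+    app.candidates[string(req.Hash)] = candidate // keyed by block hash
+    return abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_ACCEPT}
+}
+```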
+
+### ExtendVote
+
+#### Parameters and Types
+
+* **Request**:
+
+    | Name   | Type  | Description                                                                     | Field Number |
+    |--------|-------|---------------------------------------------------------------------------------|--------------|
+    | hash   | bytes | The header hash of the proposed block that the vote extension is to refer to.    | 1            |
+    | height | int64 | Height of the proposed block (for sanity check).                                 | 2            |
+
+* **Response**:
+
+    | Name           | Type  | Description                                 | Field Number |
+    |----------------|-------|-----------------------------------------------|--------------|
+    | vote_extension | bytes | Optional information signed by Tendermint.    | 1            |
+
+* **Usage**:
+    * `ResponseExtendVote.vote_extension` is optional information that, if present, will be signed by Tendermint and
+      attached to the Precommit message.
+    * `RequestExtendVote.hash` corresponds to the hash of a proposed block that was made available to the application
+      in a previous call to `ProcessProposal` or `PrepareProposal` for the current height.
+    * `ResponseExtendVote.vote_extension` will only be attached to a non-`nil` Precommit message. If Tendermint is to
+      precommit `nil`, it will not call `RequestExtendVote`.
+    * The Application logic that creates the extension can be non-deterministic.
+
+#### When does Tendermint call it?
+
+When a validator _p_ is in Tendermint consensus state _prevote_ of round _r_, height _h_, in which _q_ is the proposer; and _p_ has received
+
+* the Proposal message _v_ for round _r_, height _h_, along with all the block parts, from _q_,
+* `Prevote` messages from _2f + 1_ validators' voting power for round _r_, height _h_, prevoting for the same block _id(v)_,
+
+then _p_'s Tendermint locks _v_ and sends a Precommit message in the following way
+
+1. _p_'s Tendermint sets _lockedValue_ and _validValue_ to _v_, and sets _lockedRound_ and _validRound_ to _r_
+2. _p_'s Tendermint calls `RequestExtendVote` with _id(v)_ (`RequestExtendVote.hash`). The call is synchronous.
+3. The Application optionally returns an array of bytes, `ResponseExtendVote.vote_extension`, which is not interpreted by Tendermint.
+4. _p_'s Tendermint includes `ResponseExtendVote.vote_extension` in a field of type [CanonicalVoteExtension](#canonicalvoteextension),
+   it then populates the other fields in [CanonicalVoteExtension](#canonicalvoteextension), and signs the populated
+   data structure.
+5. _p_'s Tendermint constructs and signs the [CanonicalVote](../core/data_structures.md#canonicalvote) structure.
+6. _p_'s Tendermint constructs the Precommit message (i.e. [Vote](../core/data_structures.md#vote) structure)
+   using [CanonicalVoteExtension](#canonicalvoteextension) and [CanonicalVote](../core/data_structures.md#canonicalvote).
+7. _p_'s Tendermint broadcasts the Precommit message.
+
+In the cases when _p_'s Tendermint is to broadcast `precommit nil` messages (either _2f+1_ `prevote nil` messages received,
+or _timeoutPrevote_ triggered), _p_'s Tendermint does **not** call `RequestExtendVote` and will not include
+a [CanonicalVoteExtension](#canonicalvoteextension) field in the `precommit nil` message.
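+
+For illustration, a minimal sketch of an `ExtendVote` handler. The oracle used
+as a data source is hypothetical; the point is that the extension is free-form
+data that Tendermint signs but never interprets, and that producing it may be
+non-deterministic:
+
+```go
+func (app *Application) ExtendVote(req abci.RequestExtendVote) abci.ResponseExtendVote {
+    // E.g., attach a locally observed value (price feed, timestamp, ...) to the
+    // precommit for the block identified by req.Hash at height req.Height.
+    ext := app.oracle.LatestObservation(req.Height) // hypothetical data source
+    return abci.ResponseExtendVote{VoteExtension: ext}
+}
+```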
+
+### VerifyVoteExtension
+
+#### Parameters and Types
+
+* **Request**:
+
+    | Name              | Type  | Description                                                                                   | Field Number |
+    |-------------------|-------|-------------------------------------------------------------------------------------------------|--------------|
+    | hash              | bytes | The header hash of the proposed block that the vote extension refers to.                         | 1            |
+    | validator_address | bytes | [Address](../core/data_structures.md#address) of the validator that signed the extension.        | 2            |
+    | height            | int64 | Height of the block (for sanity check).                                                          | 3            |
+    | vote_extension    | bytes | Application-specific information signed by Tendermint.
Can have 0 length | 4 | + +* **Response**: + + | Name | Type | Description | Field Number | + |--------|-------------------------------|----------------------------------------------------------------|--------------| + | status | [VerifyStatus](#verifystatus) | `enum` signaling if the application accepts the vote extension | 1 | + +* **Usage**: + * `RequestVerifyVoteExtension.vote_extension` can be an empty byte array. The Application's interpretation of it should be + that the Application running at the process that sent the vote chose not to extend it. + Tendermint will always call `RequestVerifyVoteExtension`, even for 0 length vote extensions. + * If `ResponseVerifyVoteExtension.status` is `REJECT`, Tendermint will reject the whole received vote. + See the [Requirements](abci++_app_requirements_002_draft.md) section to understand the potential + liveness implications of this. + * The implementation of `VerifyVoteExtension` MUST be deterministic. Moreover, the value of + `ResponseVerifyVoteExtension.status` MUST **exclusively** depend on the parameters passed in + the call to `RequestVerifyVoteExtension`, and the last committed Application state + (see [Requirements](abci++_app_requirements_002_draft.md) section). + * Moreover, application implementers SHOULD always set `ResponseVerifyVoteExtension.status` to `ACCEPT`, + unless they _really_ know what the potential liveness implications of returning `REJECT` are. + +#### When does Tendermint call it? + +When a validator _p_ is in Tendermint consensus round _r_, height _h_, state _prevote_ (**TODO** discuss: I think I must remove the state +from this condition, but not sure), and _p_ receives a Precommit message for round _r_, height _h_ from _q_: + +1. If the Precommit message does not contain a vote extension with a valid signature, Tendermint discards the message as invalid. + * a 0-length vote extension is valid as long as its accompanying signature is also valid. +2. Else, _p_'s Tendermint calls `RequestVerifyVoteExtension`. +3. The Application returns _accept_ or _reject_ via `ResponseVerifyVoteExtension.status`. +4. If the Application returns + * _accept_, _p_'s Tendermint will keep the received vote, together with its corresponding + vote extension in its internal data structures. It will be used to populate the [ExtendedCommitInfo](#extendedcommitinfo) + structure in calls to `RequestPrepareProposal`, in rounds of height _h + 1_ where _p_ is the proposer. + * _reject_, _p_'s Tendermint will deem the Precommit message invalid and discard it. + +### FinalizeBlock + +#### Parameters and Types + +* **Request**: + + | Name | Type | Description | Field Number | + |----------------------|---------------------------------------------|------------------------------------------------------------------------------------------|--------------| + | txs | repeated bytes | List of transactions committed as part of the block. | 1 | + | decided_last_commit | [CommitInfo](#commitinfo) | Info about the last commit, obtained from the block that was just decided. | 2 | + | byzantine_validators | repeated [Misbehavior](#misbehavior) | List of information about validators that acted incorrectly. | 3 | + | hash | bytes | The block header's hash. Present for convenience (can be derived from the block header). | 4 | + | height | int64 | The height of the finalized block. 
| 5 | + | time | [google.protobuf.Timestamp](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Timestamp) | Timestamp included in the finalized block. | 6 | + | next_validators_hash | bytes | Merkle root of the next validator set. | 7 | + | proposer_address | bytes | [Address](../core/data_structures.md#address) of the validator that created the proposal.| 8 | + +* **Response**: + + | Name | Type | Description | Field Number | + |-------------------------|-------------------------------------------------------------|----------------------------------------------------------------------------------|--------------| + | events | repeated [Event](abci++_basic_concepts_002_draft.md#events) | Type & Key-Value events for indexing | 1 | + | tx_results | repeated [ExecTxResult](#txresult) | List of structures containing the data resulting from executing the transactions | 2 | + | validator_updates | repeated [ValidatorUpdate](#validatorupdate) | Changes to validator set (set voting power to 0 to remove). | 3 | + | consensus_param_updates | [ConsensusParams](#consensusparams) | Changes to consensus-critical gas, size, and other parameters. | 4 | + | app_hash | bytes | The Merkle root hash of the application state. | 5 | + | retain_height | int64 | Blocks below this height may be removed. Defaults to `0` (retain all). | 6 | + +* **Usage**: + * Contains the fields of the newly decided block. + * This method is equivalent to the call sequence `BeginBlock`, [`DeliverTx`], + `EndBlock`, `Commit` in the previous version of ABCI. + * The height and timestamp values match the values from the header of the proposed block. + * The Application can use `RequestFinalizeBlock.decided_last_commit` and `RequestFinalizeBlock.byzantine_validators` + to determine rewards and punishments for the validators. + * The application must execute the transactions in full, in the order they appear in `RequestFinalizeBlock.txs`, + before returning control to Tendermint. Alternatively, it can commit the candidate state corresponding to the same block + previously executed via `PrepareProposal` or `ProcessProposal`. + * `ResponseFinalizeBlock.tx_results[i].Code == 0` only if the _i_-th transaction is fully valid. + * In next-block execution mode, the Application must provide values for `ResponseFinalizeBlock.app_hash`, + `ResponseFinalizeBlock.tx_results`, `ResponseFinalizeBlock.validator_updates`, and + `ResponseFinalizeBlock.consensus_param_updates` as a result of executing the block. + * The values for `ResponseFinalizeBlock.validator_updates`, or + `ResponseFinalizeBlock.consensus_param_updates` may be empty. In this case, Tendermint will keep + the current values. + * `ResponseFinalizeBlock.validator_updates`, triggered by block `H`, affect validation + for blocks `H+1`, `H+2`, and `H+3`. Heights following a validator update are affected in the following way: + - Height `H+1`: `NextValidatorsHash` includes the new `validator_updates` value. + - Height `H+2`: The validator set change takes effect and `ValidatorsHash` is updated. + - Height `H+3`: `decided_last_commit` now includes the altered validator set. + * `ResponseFinalizeBlock.consensus_param_updates` returned for block `H` apply to the consensus + params for block `H+1`. For more information on the consensus parameters, + see the [application spec entry on consensus parameters](../abci/apps.md#consensus-parameters). 
+    * In same-block execution mode, Tendermint will log an error and ignore values for
+      `ResponseFinalizeBlock.app_hash`, `ResponseFinalizeBlock.tx_results`, `ResponseFinalizeBlock.validator_updates`,
+      and `ResponseFinalizeBlock.consensus_param_updates`, as those must have been provided by `PrepareProposal`.
+    * The Application is expected to persist its state at the end of this call, before returning
+      `ResponseFinalizeBlock`.
+    * `ResponseFinalizeBlock.app_hash` contains an (optional) Merkle root hash of the application state.
+    * `ResponseFinalizeBlock.app_hash` is included
+        * [in next-block execution mode] as the `Header.AppHash` in the next block.
+        * [in same-block execution mode] as the `Header.AppHash` in the current block. In this case,
+          `PrepareProposal` is required to fully execute the block and set the App hash before
+          returning the proposed block to Tendermint.
+    * `ResponseFinalizeBlock.app_hash` may also be empty or hard-coded, but MUST be
+      **deterministic** - it must not be a function of anything that did not come from the parameters
+      of `RequestFinalizeBlock` and the previous committed state.
+    * Later calls to `Query` can return proofs about the application state anchored
+      in this Merkle root hash.
+    * Use `ResponseFinalizeBlock.retain_height` with caution! If all nodes in the network remove historical
+      blocks then this data is permanently lost, and no new nodes will be able to join the network and
+      bootstrap. Historical blocks may also be required for other purposes, e.g. auditing, replay of
+      non-persisted heights, light client verification, and so on.
+    * Just like `ProcessProposal`, the implementation of `FinalizeBlock` MUST be deterministic, since it is
+      making the Application's state evolve in the context of state machine replication.
+    * Currently, Tendermint will fill up all fields in `RequestFinalizeBlock`, even if they were
+      already passed on to the Application via `RequestPrepareProposal` or `RequestProcessProposal`.
+      If the Application is in same-block execution mode, it applies the right candidate state here
+      (rather than executing the whole block). In this case the Application disregards all parameters in
+      `RequestFinalizeBlock` except `RequestFinalizeBlock.hash`.
+
+#### When does Tendermint call it?
+
+When a validator _p_ is in Tendermint consensus height _h_, and _p_ receives
+
+* the Proposal message with block _v_ for a round _r_, along with all its block parts, from _q_,
+  which is the proposer of round _r_, height _h_,
+* `Precommit` messages from _2f + 1_ validators' voting power for round _r_, height _h_,
+  precommitting the same block _id(v)_,
+
+then _p_'s Tendermint decides block _v_ and finalizes consensus for height _h_ in the following way
+
+1. _p_'s Tendermint persists _v_ as decision for height _h_.
+2. _p_'s Tendermint locks the mempool -- no calls to `CheckTx` on new transactions.
+3. _p_'s Tendermint calls `RequestFinalizeBlock` with _id(v)_. The call is synchronous.
+4. _p_'s Application processes block _v_, received in a previous call to `RequestProcessProposal`.
+5. _p_'s Application commits and persists the state resulting from processing the block.
+6. _p_'s Application calculates and returns the _AppHash_, along with an array of arrays of bytes representing the output of each of the transactions.
+7. _p_'s Tendermint hashes the array of transaction outputs and stores it in _ResultHash_.
+8. _p_'s Tendermint persists _AppHash_ and _ResultHash_.
+9. _p_'s Tendermint unlocks the mempool -- newly received transactions can now be checked.
+10. _p_'s Tendermint starts consensus for a new height _h+1_, round 0.
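+
+A non-normative sketch of a `FinalizeBlock` handler in next-block execution
+mode, reusing the candidate state kept by `ProcessProposal` when available.
+All state-management helpers are hypothetical, and the Go field names are
+assumed to follow the protobuf definitions in this draft:
+
+```go
+func (app *Application) FinalizeBlock(req abci.RequestFinalizeBlock) abci.ResponseFinalizeBlock {
+    // Promote the candidate state computed in ProcessProposal, or execute the
+    // block now if this block was never processed as a proposal.
+    state, ok := app.candidates[string(req.Hash)]
+    if !ok {
+        state = app.executeBlock(req.Txs) // hypothetical full execution
+    }
+    app.committedState = state
+    app.persist(state) // hypothetical: persist before returning
+
+    return abci.ResponseFinalizeBlock{
+        TxResults: state.TxResults(), // one ExecTxResult per transaction, in order
+        AppHash:   state.Hash(),      // deterministic Merkle root of the new state
+    }
+}
+```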
+
+## Data Types existing in ABCI
+
+Most of the data structures used in ABCI are shared [common data structures](../core/data_structures.md). In certain cases, ABCI uses different data structures which are documented here:
+
+### Validator
+
+* **Fields**:
+
+    | Name    | Type  | Description                                                 | Field Number |
+    |---------|-------|---------------------------------------------------------------|--------------|
+    | address | bytes | [Address](../core/data_structures.md#address) of validator    | 1            |
+    | power   | int64 | Voting power of the validator                                 | 3            |
+
+* **Usage**:
+    * Validator identified by address
+    * Used as part of `VoteInfo` within `CommitInfo` and `ExtendedCommitInfo`
+    * Does not include PubKey to avoid sending potentially large quantum pubkeys
+      over the ABCI
+
+### ValidatorUpdate
+
+* **Fields**:
+
+    | Name    | Type                                              | Description                   | Field Number |
+    |---------|---------------------------------------------------|-------------------------------|--------------|
+    | pub_key | [Public Key](../core/data_structures.md#pub_key)  | Public key of the validator   | 1            |
+    | power   | int64                                             | Voting power of the validator | 2            |
+
+* **Usage**:
+    * Validator identified by PubKey
+    * Used to tell Tendermint to update the validator set
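+
+For example, a sketch of a validator-set change returned from `FinalizeBlock`
+in next-block execution mode. The public key variables are placeholders;
+setting `Power` to 0 removes a validator:
+
+```go
+updates := []abci.ValidatorUpdate{
+    {PubKey: joiningValidatorPubKey, Power: 10}, // add (or re-weight) a validator
+    {PubKey: leavingValidatorPubKey, Power: 0},  // remove a validator
+}
+// Returned to Tendermint as part of the FinalizeBlock response.
+resp := abci.ResponseFinalizeBlock{ValidatorUpdates: updates}
+```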
+
+### Misbehavior
+
+* **Fields**:
+
+    | Name               | Type                                                                                                                                   | Description                                                                     | Field Number |
+    |--------------------|------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------|--------------|
+    | type               | [MisbehaviorType](#misbehaviortype)                                                                                                        | Type of the misbehavior. An enum of possible misbehaviors.                       | 1            |
+    | validator          | [Validator](#validator)                                                                                                                    | The offending validator                                                          | 2            |
+    | height             | int64                                                                                                                                      | Height when the offense occurred                                                 | 3            |
+    | time               | [google.protobuf.Timestamp](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Timestamp)      | Time of the block that was committed at the height that the offense occurred     | 4            |
+    | total_voting_power | int64                                                                                                                                      | Total voting power of the validator set at height `Height`                       | 5            |
+
+#### MisbehaviorType
+
+* **Fields**
+
+    MisbehaviorType is an enum with the listed fields:
+
+    | Name                | Field Number |
+    |---------------------|--------------|
+    | UNKNOWN             | 0            |
+    | DUPLICATE_VOTE      | 1            |
+    | LIGHT_CLIENT_ATTACK | 2            |
+
+### ConsensusParams
+
+* **Fields**:
+
+    | Name      | Type                                                           | Description                                                                     | Field Number |
+    |-----------|----------------------------------------------------------------|-----------------------------------------------------------------------------------|--------------|
+    | block     | [BlockParams](../core/data_structures.md#blockparams)          | Parameters limiting the size of a block and time between consecutive blocks.       | 1            |
+    | evidence  | [EvidenceParams](../core/data_structures.md#evidenceparams)    | Parameters limiting the validity of evidence of byzantine behaviour.               | 2            |
+    | validator | [ValidatorParams](../core/data_structures.md#validatorparams)  | Parameters limiting the types of public keys validators can use.                   | 3            |
+    | version   | [VersionsParams](../core/data_structures.md#versionparams)     | The ABCI application version.                                                       | 4            |
+
+### ProofOps
+
+* **Fields**:
+
+    | Name | Type                         | Description                                                                                                                                                                                                                   | Field Number |
+    |------|------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------|
+    | ops  | repeated [ProofOp](#proofop) | List of chained Merkle proofs, of possibly different types. The Merkle root of one op is the value being proven in the next op. The Merkle root of the final op should equal the ultimate root hash being verified against.   | 1            |
+
+### ProofOp
+
+* **Fields**:
+
+    | Name | Type   | Description                                    | Field Number |
+    |------|--------|------------------------------------------------|--------------|
+    | type | string | Type of Merkle proof and how it's encoded.     | 1            |
+    | key  | bytes  | Key in the Merkle tree that this proof is for. | 2            |
+    | data | bytes  | Encoded Merkle proof for the key.              | 3            |
+
+### Snapshot
+
+* **Fields**:
+
+    | Name     | Type   | Description                                                                                                                                                                         | Field Number |
+    |----------|--------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------|
+    | height   | uint64 | The height at which the snapshot was taken (after commit).                                                                                                                          | 1            |
+    | format   | uint32 | An application-specific snapshot format, allowing applications to version their snapshot data format and make backwards-incompatible changes. Tendermint does not interpret this.   | 2            |
+    | chunks   | uint32 | The number of chunks in the snapshot. Must be at least 1 (even if empty).                                                                                                           | 3            |
+    | hash     | bytes  | An arbitrary snapshot hash. Must be equal only for identical snapshots across nodes. Tendermint does not interpret the hash, it only compares them.                                 | 4            |
+    | metadata | bytes  | Arbitrary application metadata, for example chunk hashes or other verification data.                                                                                                | 5            |
+
+* **Usage**:
+    * Used for state sync snapshots, see the [state sync section](../p2p/messages/state-sync.md) for details.
+    * A snapshot is considered identical across nodes only if _all_ fields are equal (including
+      `Metadata`). Chunks may be retrieved from all nodes that have the same snapshot.
+    * When sent across the network, a snapshot message can be at most 4 MB.
+
+## Data types introduced or modified in ABCI++
+
+### VoteInfo
+
+* **Fields**:
+
+    | Name              | Type                    | Description                                                    | Field Number |
+    |-------------------|-------------------------|------------------------------------------------------------------|--------------|
+    | validator         | [Validator](#validator) | The validator that sent the vote.                                 | 1            |
+    | signed_last_block | bool                    | Indicates whether or not the validator signed the last block.     | 2            |
+
+* **Usage**:
+    * Indicates whether a validator signed the last block, allowing for rewards based on validator availability.
+    * This information is typically extracted from a proposed or decided block.
+
+### ExtendedVoteInfo
+
+* **Fields**:
+
+    | Name              | Type                    | Description                                                       | Field Number |
+    |-------------------|-------------------------|-----------------------------------------------------------------------|--------------|
+    | validator         | [Validator](#validator) | The validator that sent the vote.                                      | 1            |
+    | signed_last_block | bool                    | Indicates whether or not the validator signed the last block.          | 2            |
+    | vote_extension    | bytes                   | Non-deterministic extension provided by the sending validator's Application.
| 3 | + +* **Usage**: + * Indicates whether a validator signed the last block, allowing for rewards based on validator availability. + * This information is extracted from Tendermint's data structures in the local process. + * `vote_extension` contains the sending validator's vote extension, which is signed by Tendermint. It can be empty + +### CommitInfo + +* **Fields**: + + | Name | Type | Description | Field Number | + |-------|--------------------------------|----------------------------------------------------------------------------------------------|--------------| + | round | int32 | Commit round. Reflects the round at which the block proposer decided in the previous height. | 1 | + | votes | repeated [VoteInfo](#voteinfo) | List of validators' addresses in the last validator set with their voting information. | 2 | + +### ExtendedCommitInfo + +* **Fields**: + + | Name | Type | Description | Field Number | + |-------|------------------------------------------------|-------------------------------------------------------------------------------------------------------------------|--------------| + | round | int32 | Commit round. Reflects the round at which the block proposer decided in the previous height. | 1 | + | votes | repeated [ExtendedVoteInfo](#extendedvoteinfo) | List of validators' addresses in the last validator set with their voting information, including vote extensions. | 2 | + +### ExecTxResult + +* **Fields**: + + | Name | Type | Description | Field Number | + |------------|-------------------------------------------------------------|-----------------------------------------------------------------------|--------------| + | code | uint32 | Response code. | 1 | + | data | bytes | Result bytes, if any. | 2 | + | log | string | The output of the application's logger. **May be non-deterministic.** | 3 | + | info | string | Additional information. **May be non-deterministic.** | 4 | + | gas_wanted | int64 | Amount of gas requested for transaction. | 5 | + | gas_used | int64 | Amount of gas consumed by transaction. | 6 | + | events | repeated [Event](abci++_basic_concepts_002_draft.md#events) | Type & Key-Value events for indexing transactions (e.g. by account). | 7 | + | codespace | string | Namespace for the `code`. | 8 | + +### TxAction + +```proto +enum TxAction { + UNKNOWN = 0; // Unknown action + UNMODIFIED = 1; // The Application did not modify this transaction. + ADDED = 2; // The Application added this transaction. + REMOVED = 3; // The Application wants this transaction removed from the proposal and the mempool. +} +``` + +* **Usage**: + * If `Action` is `UNKNOWN`, a problem happened in the Application. Tendermint will assume the application is faulty and crash. + * If `Action` is `UNMODIFIED`, Tendermint includes the transaction in the proposal. Nothing to do on the mempool. + * If `Action` is `ADDED`, Tendermint includes the transaction in the proposal. The transaction is _not_ added to the mempool. + * If `Action` is `REMOVED`, Tendermint excludes the transaction from the proposal. The transaction is also removed from the mempool if it exists, + similar to `CheckTx` returning _false_. + +### TxRecord + +* **Fields**: + + | Name | Type | Description | Field Number | + |------------|-----------------------|------------------------------------------------------------------|--------------| + | action | [TxAction](#txaction) | What should Tendermint do with this transaction? 
| 1 | + | tx | bytes | Transaction contents | 2 | + +### ProposalStatus + +```proto +enum ProposalStatus { + UNKNOWN = 0; // Unknown status. Returning this from the application is always an error. + ACCEPT = 1; // Status that signals that the application finds the proposal valid. + REJECT = 2; // Status that signals that the application finds the proposal invalid. +} +``` + +* **Usage**: + * Used within the [ProcessProposal](#processproposal) response. + * If `Status` is `UNKNOWN`, a problem happened in the Application. Tendermint will assume the application is faulty and crash. + * If `Status` is `ACCEPT`, Tendermint accepts the proposal and will issue a Prevote message for it. + * If `Status` is `REJECT`, Tendermint rejects the proposal and will issue a Prevote for `nil` instead. + +### VerifyStatus + +```proto +enum VerifyStatus { + UNKNOWN = 0; // Unknown status. Returning this from the application is always an error. + ACCEPT = 1; // Status that signals that the application finds the vote extension valid. + REJECT = 2; // Status that signals that the application finds the vote extension invalid. +} +``` + +* **Usage**: + * Used within the [VerifyVoteExtension](#verifyvoteextension) response. + * If `Status` is `UNKNOWN`, a problem happened in the Application. Tendermint will assume the application is faulty and crash. + * If `Status` is `ACCEPT`, Tendermint will accept the vote as valid. + * If `Status` is `REJECT`, Tendermint will reject the vote as invalid. + + +### CanonicalVoteExtension + +>**TODO**: This protobuf message definition is not part of the ABCI++ interface, but rather belongs to the +> Precommit message which is broadcast via P2P. So it is to be moved to the relevant section of the spec. + +* **Fields**: + + | Name | Type | Description | Field Number | + |-----------|--------|--------------------------------------------------------------------------------------------|--------------| + | extension | bytes | Vote extension provided by the Application. | 1 | + | height | int64 | Height in which the extension was provided. | 2 | + | round | int32 | Round in which the extension was provided. | 3 | + | chain_id | string | ID of the blockchain running consensus. | 4 | + | address | bytes | [Address](../core/data_structures.md#address) of the validator that provided the extension | 5 | + +* **Usage**: + * Tendermint is to sign the whole data structure and attach it to a Precommit message + * Upon reception, Tendermint validates the sender's signature and sanity-checks the values of `height`, `round`, and `chain_id`. + Then it sends `extension` to the Application via `RequestVerifyVoteExtension` for verification. diff --git a/spec/abci++/abci++_tmint_expected_behavior_002_draft.md b/spec/abci++/abci++_tmint_expected_behavior_002_draft.md new file mode 100644 index 0000000000..7786894505 --- /dev/null +++ b/spec/abci++/abci++_tmint_expected_behavior_002_draft.md @@ -0,0 +1,218 @@ +--- +order: 4 +title: Tendermint's expected behavior +--- + +# Tendermint's expected behavior + +## Valid method call sequences + +This section describes what the Application can expect from Tendermint. + +The Tendermint consensus algorithm is designed to protect safety under any network conditions, as long as +less than 1/3 of validators' voting power is byzantine. Most of the time, though, the network will behave +synchronously and there will be no byzantine process. 
+In these frequent, benign conditions:
+
+* Tendermint will decide in round 0;
+* `PrepareProposal` will be called exactly once at the proposer process of round 0, height _h_;
+* `ProcessProposal` will be called exactly once at all processes except the proposer of round 0, and
+  will return _accept_ in its `Response*`;
+* `ExtendVote` will be called exactly once at all processes;
+* `VerifyVoteExtension` will be called _n-1_ times at each validator process, where _n_ is the number of validators; and
+* `FinalizeBlock` will finally be called at all processes at the end of height _h_, conveying the same prepared
+  block that all calls to `PrepareProposal` and `ProcessProposal` had previously reported for height _h_.
+
+However, the Application logic must be ready to cope with any possible run of Tendermint for a given
+height, including bad periods (byzantine proposers, network being asynchronous).
+In these cases, the sequence of calls to ABCI++ methods may not be so straightforward, but
+the Application should still be able to handle them, e.g., without crashing.
+The purpose of this section is to define what these sequences look like, in a precise way.
+
+As mentioned in the [Basic Concepts](abci++_basic_concepts_002_draft.md) section, Tendermint
+acts as a client of ABCI++ and the Application acts as a server. Thus, it is up to Tendermint to
+determine when and in which order the different ABCI++ methods will be called. A well-written
+Application design should consider _any_ of these possible sequences.
+
+The following grammar, written in case-sensitive Augmented Backus–Naur form (ABNF, specified
+in [IETF rfc7405](https://datatracker.ietf.org/doc/html/rfc7405)), specifies all possible
+sequences of calls to ABCI++, taken by a correct process, across all heights from the genesis block,
+including recovery runs, from the point of view of the Application.
+
+```abnf
+start = clean-start / recovery
+
+clean-start = init-chain [state-sync] consensus-exec
+state-sync = *state-sync-attempt success-sync info
+state-sync-attempt = offer-snapshot *apply-chunk
+success-sync = offer-snapshot 1*apply-chunk
+
+recovery = info *consensus-replay consensus-exec
+consensus-replay = decide
+
+consensus-exec = (inf)consensus-height
+consensus-height = *consensus-round decide
+consensus-round = proposer / non-proposer
+
+proposer = prepare-proposal extend-proposer
+extend-proposer = *got-vote [extend-vote] *got-vote
+
+non-proposer = *got-vote [extend-non-proposer] *got-vote
+extend-non-proposer = process-proposal *got-vote [extend-vote]
+
+init-chain = %s"<InitChain>"
+offer-snapshot = %s"<OfferSnapshot>"
+apply-chunk = %s"<ApplySnapshotChunk>"
+info = %s"<Info>"
+prepare-proposal = %s"<PrepareProposal>"
+process-proposal = %s"<ProcessProposal>"
+extend-vote = %s"<ExtendVote>"
+got-vote = %s"<VerifyVoteExtension>"
+decide = %s"<FinalizeBlock>"
+```
+
+>**TODO** Still hesitating... introduce _n_ as total number of validators, so that we can bound the occurrences of
+>`got-vote` in a round.
+
+We have kept some of the ABCI++ methods out of the grammar, in order to keep it as clear and concise as possible.
+A common reason for keeping all these methods out is that they can all be called at any point in a sequence defined
+by the grammar above. Other reasons depend on the method in question:
+
+* `Echo` and `Flush` are only used for debugging purposes. Further, their handling by the Application should be trivial.
+* `CheckTx` is detached from the main method call sequence that drives block execution.
+* `Query` provides read-only access to the current Application state, so handling it should also be independent from
+  block execution.
+* Similarly, `ListSnapshots` and `LoadSnapshotChunk` provide read-only access to the Application's previously created
+  snapshots (if any), and help populate the parameters of `OfferSnapshot` and `ApplySnapshotChunk` at a process performing
+  state-sync while bootstrapping. Unlike `ListSnapshots` and `LoadSnapshotChunk`, both `OfferSnapshot`
+  and `ApplySnapshotChunk` _are_ included in the grammar.
+
+Finally, method `Info` is a special case. Its purpose is three-fold; it can be used
+
+1. as part of handling an RPC call from an external client,
+2. as a handshake between Tendermint and the Application upon recovery to check whether any blocks need
+   to be replayed, and
+3. at the end of _state-sync_ to verify that the correct state has been reached.
+
+We have left `Info`'s first purpose out of the grammar for the same reasons as all the others: it can happen
+at any time, and has nothing to do with the block execution sequence. The second and third purposes, on the other
+hand, are present in the grammar.
+
+Let us now examine the grammar line by line, providing further details.
+
+* When a process starts, it may do so for the first time or after a crash (it is recovering).
+
+>```abnf
+>start = clean-start / recovery
+>```
+
+* If the process is starting from scratch, Tendermint first calls `InitChain`, then it may optionally
+  start a _state-sync_ mechanism to catch up with other processes. Finally, it enters normal
+  consensus execution.
+
+>```abnf
+>clean-start = init-chain [state-sync] consensus-exec
+>```
+
+* In _state-sync_ mode, Tendermint makes one or more attempts at synchronizing the Application's state.
+  At the beginning of each attempt, it offers the Application a snapshot found at another process.
+  If the Application accepts the snapshot, a sequence of calls to the `ApplySnapshotChunk` method follows
+  to provide the Application with all the chunks needed to reconstruct the state locally.
+  A successful attempt must provide at least one chunk via `ApplySnapshotChunk`.
+  At the end of a successful attempt, Tendermint calls `Info` to make sure the reconstructed state's
+  _AppHash_ matches the one in the block header at the corresponding height.
+
+>```abnf
+>state-sync = *state-sync-attempt success-sync info
+>state-sync-attempt = offer-snapshot *apply-chunk
+>success-sync = offer-snapshot 1*apply-chunk
+>```
+
+* In recovery mode, Tendermint first calls `Info` to learn from which height it needs to replay decisions
+  to the Application. To replay a decision, Tendermint simply calls `FinalizeBlock` with the decided
+  block at that height. After this, Tendermint enters normal consensus execution.
+
+>```abnf
+>recovery = info *consensus-replay consensus-exec
+>consensus-replay = decide
+>```
+
+* The non-terminal `consensus-exec` is a key point in this grammar. It is an infinite sequence of
+  consensus heights. The grammar is thus an
+  [omega-grammar](https://dl.acm.org/doi/10.5555/2361476.2361481), since it produces infinite
+  sequences of terminals (i.e., the API calls).
+
+>```abnf
+>consensus-exec = (inf)consensus-height
+>```
+
+* A consensus height consists of zero or more rounds before deciding via a call to `FinalizeBlock`.
+  In each round, the sequence of method calls depends on whether the local process is the proposer or not.
+
+>```abnf
+>consensus-height = *consensus-round decide
+>consensus-round = proposer / non-proposer
+>```
+
+* If the local process is the proposer of the current round, Tendermint starts by calling `PrepareProposal`.
+  Methods related to vote extensions (`ExtendVote`, `VerifyVoteExtension`) cannot be called
+  in the present round before `PrepareProposal`. Once `PrepareProposal` has been called, calls to
+  `ExtendVote` and `VerifyVoteExtension` can come in any order, although the former will be called
+  at most once in this round.
+
+>```abnf
+>proposer = prepare-proposal extend-proposer
+>extend-proposer = *got-vote [extend-vote] *got-vote
+>```
+
+* If the local process is not the proposer of the current round, Tendermint will call `ProcessProposal`
+  at most once. `ExtendVote` can be called at most once, and only after `ProcessProposal`.
+  A number of calls to `VerifyVoteExtension` can occur in any order with respect to `ProcessProposal`
+  and `ExtendVote` throughout the round.
+
+>```abnf
+>non-proposer = *got-vote [extend-non-proposer] *got-vote
+>extend-non-proposer = process-proposal *got-vote [extend-vote]
+>```
+
+* Finally, the grammar describes all its terminal symbols, which denote the different ABCI++ method calls that
+  may appear in a sequence.
+
+>```abnf
+>init-chain = %s"<InitChain>"
+>offer-snapshot = %s"<OfferSnapshot>"
+>apply-chunk = %s"<ApplySnapshotChunk>"
+>info = %s"<Info>"
+>prepare-proposal = %s"<PrepareProposal>"
+>process-proposal = %s"<ProcessProposal>"
+>extend-vote = %s"<ExtendVote>"
+>got-vote = %s"<VerifyVoteExtension>"
+>decide = %s"<FinalizeBlock>"
+>```
+
+## Adapting existing Applications that use ABCI
+
+In some cases, an existing Application using the legacy ABCI may need to be adapted to work with ABCI++
+with as few changes as possible. In this case, of course, ABCI++ will not provide any advantage with respect
+to the existing implementation, but it will keep the same guarantees already provided by ABCI.
+Here is how ABCI++ methods should be implemented.
+
+First of all, all the methods that did not change from ABCI to ABCI++, namely `Echo`, `Flush`, `Info`, `InitChain`,
+`Query`, `CheckTx`, `ListSnapshots`, `LoadSnapshotChunk`, `OfferSnapshot`, and `ApplySnapshotChunk`, do not need
+to undergo any changes in their implementation.
+
+As for the new methods:
+
+* `PrepareProposal` must create a list of [TxRecord](./abci++_methods_002_draft.md#txrecord), each containing a
+  transaction passed in `RequestPrepareProposal.txs`, in the same order. The field `action` must be set to `UNMODIFIED`
+  for all [TxRecord](./abci++_methods_002_draft.md#txrecord) elements in the list.
+  The Application must check whether the size of all transactions exceeds the byte limit
+  (`RequestPrepareProposal.max_tx_bytes`). If so, the Application must remove transactions at the end of the list
+  until the total byte size is at or below the limit.
+* `ProcessProposal` must set `ResponseProcessProposal.accept` to _true_ and return.
+* `ExtendVote` is to set `ResponseExtendVote.extension` to an empty byte array and return.
+* `VerifyVoteExtension` must set `ResponseVerifyVoteExtension.accept` to _true_ if the extension is an empty byte array
+  and _false_ otherwise, then return.
+* `FinalizeBlock` is to coalesce the implementation of methods `BeginBlock`, `DeliverTx`, `EndBlock`, and `Commit`.
+  Legacy applications looking to reuse old code that implemented `DeliverTx` should wrap the legacy
+  `DeliverTx` logic in a loop that executes one transaction iteration per
+  transaction in `RequestFinalizeBlock.tx`.
diff --git a/spec/abci++/v0.md b/spec/abci++/v0.md
new file mode 100644
index 0000000000..163b3f7cbe
--- /dev/null
+++ b/spec/abci++/v0.md
@@ -0,0 +1,156 @@
+# Tendermint v0 Markdown pseudocode
+
+This translates the LaTeX pseudocode for Tendermint consensus from the Tendermint paper into markdown.
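+
+Throughout this file, ⟨PROPOSAL, ...⟩, ⟨PREVOTE, ...⟩ and ⟨PRECOMMIT, ...⟩ denote the consensus
+messages exchanged between processes. As a rough orientation only, the following is one hypothetical
+Go rendering of these tuples; the type and field names are illustrative and not part of the spec.
+
+```go
+package consensus
+
+type MsgType int
+
+const (
+	Proposal MsgType = iota
+	Prevote
+	Precommit
+)
+
+// Message is a hypothetical struct form of the ⟨...⟩ tuples below.
+type Message struct {
+	Type   MsgType
+	Height int64
+	Round  int32
+	// Value holds the full proposed value for proposals, or id(v) for votes;
+	// nil encodes a vote for nil.
+	Value []byte
+	// ValidRound is only meaningful for proposals; -1 means "no valid round".
+	ValidRound int32
+}
+```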
+
+### Initialization
+
+```go
+h_p ← 0
+round_p ← 0
+step_p is one of {propose, prevote, precommit}
+decision_p ← Vector()
+lockedRound_p ← -1
+lockedValue_p ← nil
+validValue_p ← nil
+validRound_p ← -1
+```
+
+### StartRound(round)
+
+```go
+function startRound(round) {
+  round_p ← round
+  step_p ← propose
+  if proposer(h_p, round_p) = p {
+    if validValue_p != nil {
+      proposal ← validValue_p
+    } else {
+      proposal ← getValue()
+    }
+    broadcast ⟨PROPOSAL, h_p, round_p, proposal, validRound_p⟩
+  } else {
+    schedule OnTimeoutPropose(h_p, round_p) to be executed after timeoutPropose(round_p)
+  }
+}
+```
+
+### ReceiveProposal
+
+In the case where the local node is not locked on any round, the following is run:
+
+```go
+upon ⟨PROPOSAL, h_p, round_p, v, −1⟩ from proposer(h_p, round_p) while step_p = propose do {
+  if valid(v) ∧ (lockedRound_p = −1 ∨ lockedValue_p = v) {
+    broadcast ⟨PREVOTE, h_p, round_p, id(v)⟩
+  } else {
+    broadcast ⟨PREVOTE, h_p, round_p, nil⟩
+  }
+  step_p ← prevote
+}
+```
+
+In the case where the node is locked on a round, the following is run:
+
+```go
+upon ⟨PROPOSAL, h_p, round_p, v, vr⟩ from proposer(h_p, round_p)
+  AND 2f + 1 ⟨PREVOTE, h_p, vr, id(v)⟩
+  while step_p = propose ∧ (vr ≥ 0 ∧ vr < round_p) do {
+  if valid(v) ∧ (lockedRound_p ≤ vr ∨ lockedValue_p = v) {
+    broadcast ⟨PREVOTE, h_p, round_p, id(v)⟩
+  } else {
+    broadcast ⟨PREVOTE, h_p, round_p, nil⟩
+  }
+  step_p ← prevote
+}
+```
+
+### Prevote timeout
+
+Upon receiving 2f + 1 prevotes, set up a timeout.
+
+```go
+upon 2f + 1 ⟨PREVOTE, h_p, vr, *⟩ with step_p = prevote for the first time, do {
+  schedule OnTimeoutPrevote(h_p, round_p) to be executed after timeoutPrevote(round_p)
+}
+```
+
+with OnTimeoutPrevote defined as:
+
+```go
+function OnTimeoutPrevote(height, round) {
+  if (height = h_p && round = round_p && step_p = prevote) {
+    broadcast ⟨PRECOMMIT, h_p, round_p, nil⟩
+    step_p ← precommit
+  }
+}
+```
+
+### Receiving enough prevotes to precommit
+
+The following code is run upon receiving 2f + 1 prevotes for the same block:
+
+```go
+upon ⟨PROPOSAL, h_p, round_p, v, *⟩
+  from proposer(h_p, round_p)
+  AND 2f + 1 ⟨PREVOTE, h_p, round_p, id(v)⟩
+  while valid(v) ∧ step_p >= prevote for the first time do {
+  if (step_p = prevote) {
+    lockedValue_p ← v
+    lockedRound_p ← round_p
+    broadcast ⟨PRECOMMIT, h_p, round_p, id(v)⟩
+    step_p ← precommit
+  }
+  validValue_p ← v
+  validRound_p ← round_p
+}
+```
+
+And upon receiving 2f + 1 prevotes for nil:
+
+```go
+upon 2f + 1 ⟨PREVOTE, h_p, round_p, nil⟩
+  while step_p = prevote do {
+  broadcast ⟨PRECOMMIT, h_p, round_p, nil⟩
+  step_p ← precommit
+}
+```
+
+### Precommit timeout
+
+Upon receiving 2f + 1 precommits, set up a timeout.
+ +```go +upon 2f + 1 ⟨PRECOMMIT, h_p, vr, *⟩ for the first time, do { + schedule OnTimeoutPrecommit(h_p, round_p) to be executed after timeoutPrecommit(round_p) +} +``` + +with OnTimeoutPrecommit defined as: + +```go +function OnTimeoutPrecommit(height, round) { + if (height = h_p && round = round_p) { + StartRound(round_p + 1) + } +} +``` + +### Upon Receiving 2f + 1 precommits + +The following code is ran upon receiving 2f + 1 precommits for the same block + +```go +upon ⟨PROPOSAL, h_p, r, v, *⟩ + from proposer(h_p, r) + AND 2f + 1 ⟨ PRECOMMIT, h_p, r, id(v)⟩ + while decision_p[h_p] = nil do { + if (valid(v)) { + decision_p[h_p] ← v + h_p ← h_p + 1 + reset lockedRound_p, lockedValue_p,validRound_p and validValue_p to initial values + StartRound(0) + } +} +``` + +If we don't see 2f + 1 precommits for the same block, we wait until we get 2f + 1 precommits, and the timeout occurs. \ No newline at end of file diff --git a/spec/abci++/v1.md b/spec/abci++/v1.md new file mode 100644 index 0000000000..96dc8e674a --- /dev/null +++ b/spec/abci++/v1.md @@ -0,0 +1,162 @@ +# Tendermint v1 Markdown pseudocode + +This adds hooks for the existing ABCI to the prior pseudocode + +### Initialization + +```go +h_p ← 0 +round_p ← 0 +step_p is one of {propose, prevote, precommit} +decision_p ← Vector() +lockedValue_p ← nil +validValue_p ← nil +validRound_p ← -1 +``` + +### StartRound(round) + +```go +function startRound(round) { + round_p ← round + step_p ← propose + if proposer(h_p, round_p) = p { + if validValue_p != nil { + proposal ← validValue_p + } else { + txdata ← mempool.GetBlock() + // getBlockProposal fills in header + proposal ← getBlockProposal(txdata) + } + broadcast ⟨PROPOSAL, h_p, round_p, proposal, validRound_p⟩ + } else { + schedule OnTimeoutPropose(h_p,round_p) to be executed after timeoutPropose(round_p) + } +} +``` + +### ReceiveProposal + +In the case where the local node is not locked on any round, the following is ran: + +```go +upon ⟨PROPOSAL, h_p, round_p, v, −1) from proposer(h_p, round_p) while step_p = propose do { + if valid(v) ∧ (lockedRound_p = −1 ∨ lockedValue_p = v) { + broadcast ⟨PREVOTE, h_p, round_p, id(v)⟩ + } else { + broadcast ⟨PREVOTE, h_p, round_p, nil⟩ + } + step_p ← prevote +} +``` + +In the case where the node is locked on a round, the following is ran: + +```go +upon ⟨PROPOSAL, h_p, round_p, v, vr⟩ + from proposer(h_p, round_p) + AND 2f + 1 ⟨PREVOTE, h_p, vr, id(v)⟩ + while step_p = propose ∧ (vr ≥ 0 ∧ vr < round_p) do { + if valid(v) ∧ (lockedRound_p ≤ vr ∨ lockedValue_p = v) { + broadcast ⟨PREVOTE, h_p, round_p, id(v)⟩ + } else { + broadcast ⟨PREVOTE, h_p, round_p, nil⟩ + } + step_p ← prevote +} +``` + +### Prevote timeout + +Upon receiving 2f + 1 prevotes, setup a timeout. 
+ +```go +upon 2f + 1 ⟨PREVOTE, h_p, vr, -1⟩ + with step_p = prevote for the first time, do { + schedule OnTimeoutPrevote(h_p, round_p) to be executed after timeoutPrevote(round_p) +} +``` + +with OnTimeoutPrevote defined as: + +```go +function OnTimeoutPrevote(height, round) { + if (height = h_p && round = round_p && step_p = prevote) { + broadcast ⟨PRECOMMIT, h_p, round_p, nil⟩ + step_p ← precommit + } +} +``` + +### Receiving enough prevotes to precommit + +The following code is ran upon receiving 2f + 1 prevotes for the same block + +```go +upon ⟨PROPOSAL, h_p, round_p, v, *⟩ + from proposer(h_p, round_p) + AND 2f + 1 ⟨PREVOTE, h_p, vr, id(v)⟩ + while valid(v) ∧ step_p >= prevote for the first time do { + if (step_p = prevote) { + lockedValue_p ← v + lockedRound_p ← round_p + broadcast ⟨PRECOMMIT, h_p, round_p, id(v)⟩ + step_p ← precommit + } + validValue_p ← v + validRound_p ← round_p +} +``` + +And upon receiving 2f + 1 prevotes for nil: + +```go +upon 2f + 1 ⟨PREVOTE, h_p, round_p, nil⟩ + while step_p = prevote do { + broadcast ⟨PRECOMMIT, h_p, round_p, nil⟩ + step_p ← precommit +} +``` + +### Precommit timeout + +Upon receiving 2f + 1 precommits, setup a timeout. + +```go +upon 2f + 1 ⟨PRECOMMIT, h_p, vr, *⟩ for the first time, do { + schedule OnTimeoutPrecommit(h_p, round_p) to be executed after timeoutPrecommit(round_p) +} +``` + +with OnTimeoutPrecommit defined as: + +```go +function OnTimeoutPrecommit(height, round) { + if (height = h_p && round = round_p) { + StartRound(round_p + 1) + } +} +``` + +### Upon Receiving 2f + 1 precommits + +The following code is ran upon receiving 2f + 1 precommits for the same block + +```go +upon ⟨PROPOSAL, h_p, r, v, *⟩ + from proposer(h_p, r) + AND 2f + 1 ⟨ PRECOMMIT, h_p, r, id(v)⟩ + while decision_p[h_p] = nil do { + if (valid(v)) { + decision_p[h_p] ← v + h_p ← h_p + 1 + reset lockedRound_p, lockedValue_p,validRound_p and validValue_p to initial values + ABCI.BeginBlock(v.header) + ABCI.DeliverTxs(v.data) + ABCI.EndBlock() + StartRound(0) + } +} +``` + +If we don't see 2f + 1 precommits for the same block, we wait until we get 2f + 1 precommits, and the timeout occurs. \ No newline at end of file diff --git a/spec/abci++/v2.md b/spec/abci++/v2.md new file mode 100644 index 0000000000..1abd8ec670 --- /dev/null +++ b/spec/abci++/v2.md @@ -0,0 +1,180 @@ +# Tendermint v2 Markdown pseudocode + +This adds a single-threaded implementation of ABCI++, +with no optimization for splitting out verifying the header and verifying the proposal. 
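+
+The pseudocode below drives the Application through a handful of ABCI++ hooks. As a rough
+orientation, a hypothetical and heavily simplified Go interface covering the calls used in this
+file might look as follows; the real methods exchange `Request*`/`Response*` protobuf messages,
+and all names here are illustrative.
+
+```go
+package abcipp
+
+// Placeholder types; the real definitions are protobuf messages.
+type (
+	Block    []byte
+	Evidence []byte
+)
+
+// ProcessResult carries the fields of the ProcessProposal response that the
+// pseudocode consumes.
+type ProcessResult struct {
+	Accept       bool
+	EvidenceList []Evidence
+}
+
+// ABCI is a simplified view of the hooks the v2 pseudocode invokes.
+type ABCI interface {
+	PrepareProposal(unprepared Block) (prepared Block)                      // proposer only
+	ProcessProposal(height int64, proposal Block) ProcessResult             // all processes
+	ExtendVote(height int64, round int32, blockID []byte) (extension []byte)
+	VerifyVoteExtension(extension []byte) (accept bool)
+	FinalizeBlock(blockID []byte)
+}
+```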
+ +### Initialization + +```go +h_p ← 0 +round_p ← 0 +step_p is one of {propose, prevote, precommit} +decision_p ← Vector() +lockedValue_p ← nil +validValue_p ← nil +validRound_p ← -1 +``` + +### StartRound(round) + +```go +function startRound(round) { + round_p ← round + step_p ← propose + if proposer(h_p, round_p) = p { + if validValue_p != nil { + proposal ← validValue_p + } else { + txdata ← mempool.GetBlock() + // getUnpreparedBlockProposal takes tx data, and fills in the unprepared header data + unpreparedProposal ← getUnpreparedBlockProposal(txdata) + // ABCI++: the proposer may reorder/update transactions in `unpreparedProposal` + proposal ← ABCI.PrepareProposal(unpreparedProposal) + } + broadcast ⟨PROPOSAL, h_p, round_p, proposal, validRound_p⟩ + } else { + schedule OnTimeoutPropose(h_p,round_p) to be executed after timeoutPropose(round_p) + } +} +``` + +### ReceiveProposal + +In the case where the local node is not locked on any round, the following is ran: + +```go +upon ⟨PROPOSAL, h_p, round_p, v, −1) from proposer(h_p, round_p) while step_p = propose do { + if valid(v) ∧ ABCI.ProcessProposal(h_p, v).accept ∧ (lockedRound_p = −1 ∨ lockedValue_p = v) { + broadcast ⟨PREVOTE, h_p, round_p, id(v)⟩ + } else { + broadcast ⟨PREVOTE, h_p, round_p, nil⟩ + // Include any slashing evidence that may be sent in the process proposal response + for evidence in ABCI.ProcessProposal(h_p, v).evidence_list { + broadcast ⟨EVIDENCE, evidence⟩ + } + } + step_p ← prevote +} +``` + +In the case where the node is locked on a round, the following is ran: + +```go +upon ⟨PROPOSAL, h_p, round_p, v, vr⟩ + from proposer(h_p, round_p) + AND 2f + 1 ⟨PREVOTE, h_p, vr, id(v)⟩ + while step_p = propose ∧ (vr ≥ 0 ∧ vr < round_p) do { + if valid(v) ∧ ABCI.ProcessProposal(h_p, v).accept ∧ (lockedRound_p ≤ vr ∨ lockedValue_p = v) { + broadcast ⟨PREVOTE, h_p, round_p, id(v)⟩ + } else { + broadcast ⟨PREVOTE, h_p, round_p, nil⟩ + // Include any slashing evidence that may be sent in the process proposal response + for evidence in ABCI.ProcessProposal(h_p, v).evidence_list { + broadcast ⟨EVIDENCE, evidence⟩ + } + } + step_p ← prevote +} +``` + +### Prevote timeout + +Upon receiving 2f + 1 prevotes, setup a timeout. 
+ +```go +upon 2f + 1 ⟨PREVOTE, h_p, vr, -1⟩ + with step_p = prevote for the first time, do { + schedule OnTimeoutPrevote(h_p, round_p) to be executed after timeoutPrevote(round_p) +} +``` + +with OnTimeoutPrevote defined as: + +```go +function OnTimeoutPrevote(height, round) { + if (height = h_p && round = round_p && step_p = prevote) { + precommit_extension ← ABCI.ExtendVote(h_p, round_p, nil) + broadcast ⟨PRECOMMIT, h_p, round_p, nil, precommit_extension⟩ + step_p ← precommit + } +} +``` + +### Receiving enough prevotes to precommit + +The following code is ran upon receiving 2f + 1 prevotes for the same block + +```go +upon ⟨PROPOSAL, h_p, round_p, v, *⟩ + from proposer(h_p, round_p) + AND 2f + 1 ⟨PREVOTE, h_p, vr, id(v)⟩ + while valid(v) ∧ step_p >= prevote for the first time do { + if (step_p = prevote) { + lockedValue_p ← v + lockedRound_p ← round_p + precommit_extension ← ABCI.ExtendVote(h_p, round_p, id(v)) + broadcast ⟨PRECOMMIT, h_p, round_p, id(v), precommit_extension⟩ + step_p ← precommit + } + validValue_p ← v + validRound_p ← round_p +} +``` + +And upon receiving 2f + 1 prevotes for nil: + +```go +upon 2f + 1 ⟨PREVOTE, h_p, round_p, nil⟩ + while step_p = prevote do { + precommit_extension ← ABCI.ExtendVote(h_p, round_p, nil) + broadcast ⟨PRECOMMIT, h_p, round_p, nil, precommit_extension⟩ + step_p ← precommit +} +``` + +### Upon receiving a precommit + +Upon receiving a precommit `precommit`, we ensure that `ABCI.VerifyVoteExtension(precommit.precommit_extension) = true` +before accepting the precommit. This is akin to how we check the signature on precommits normally, hence its not wrapped +in the syntax of methods from the paper. + +### Precommit timeout + +Upon receiving 2f + 1 precommits, setup a timeout. + +```go +upon 2f + 1 ⟨PRECOMMIT, h_p, vr, *⟩ for the first time, do { + schedule OnTimeoutPrecommit(h_p, round_p) to be executed after timeoutPrecommit(round_p) +} +``` + +with OnTimeoutPrecommit defined as: + +```go +function OnTimeoutPrecommit(height, round) { + if (height = h_p && round = round_p) { + StartRound(round_p + 1) + } +} +``` + +### Upon Receiving 2f + 1 precommits + +The following code is ran upon receiving 2f + 1 precommits for the same block + +```go +upon ⟨PROPOSAL, h_p, r, v, *⟩ + from proposer(h_p, r) + AND 2f + 1 ⟨ PRECOMMIT, h_p, r, id(v)⟩ + while decision_p[h_p] = nil do { + if (valid(v)) { + decision_p[h_p] ← v + h_p ← h_p + 1 + reset lockedRound_p, lockedValue_p,validRound_p and validValue_p to initial values + ABCI.FinalizeBlock(id(v)) + StartRound(0) + } +} +``` + +If we don't see 2f + 1 precommits for the same block, we wait until we get 2f + 1 precommits, and the timeout occurs. diff --git a/spec/abci++/v3.md b/spec/abci++/v3.md new file mode 100644 index 0000000000..ed4c720b4e --- /dev/null +++ b/spec/abci++/v3.md @@ -0,0 +1,201 @@ +# Tendermint v3 Markdown pseudocode + +This is a single-threaded implementation of ABCI++, +with an optimization for the ProcessProposal phase. +Namely, processing of the header and the block data is separated into two different functions. 
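+
+To make the split concrete: a process can cheaply check a proposal's header as soon as it arrives,
+and defer the potentially expensive full-block `ProcessProposal` until the block data is available.
+A hypothetical Go sketch of that control flow follows; all type and function names are illustrative
+stand-ins for the spec's constructs.
+
+```go
+package v3sketch
+
+// Illustrative stand-ins for the block structures in the pseudocode.
+type Header struct{ Hash []byte }
+
+type Block struct {
+	Header Header
+	Txs    [][]byte
+}
+
+// decidePrevote returns the block ID to prevote for; nil means prevote nil.
+// verifyHeader and processProposal stand in for ABCI.VerifyHeader and
+// ABCI.ProcessProposal; waitForBlock blocks until the block body arrives.
+func decidePrevote(
+	header Header,
+	verifyHeader func(Header) bool,
+	waitForBlock func(Header) Block,
+	processProposal func(Block) bool,
+) []byte {
+	// Phase 1: cheap, header-only check, done before the block body is here.
+	if !verifyHeader(header) {
+		return nil
+	}
+	// Phase 2: full, potentially expensive validation of the received block.
+	block := waitForBlock(header)
+	if !processProposal(block) {
+		return nil
+	}
+	return block.Header.Hash // prevote for id(v)
+}
+```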
+ +### Initialization + +```go +h_p ← 0 +round_p ← 0 +step_p is one of {propose, prevote, precommit} +decision_p ← Vector() +lockedValue_p ← nil +validValue_p ← nil +validRound_p ← -1 +``` + +### StartRound(round) + +```go +function startRound(round) { + round_p ← round + step_p ← propose + if proposer(h_p, round_p) = p { + if validValue_p != nil { + proposal ← validValue_p + } else { + txdata ← mempool.GetBlock() + // getUnpreparedBlockProposal fills in header + unpreparedProposal ← getUnpreparedBlockProposal(txdata) + proposal ← ABCI.PrepareProposal(unpreparedProposal) + } + broadcast ⟨PROPOSAL, h_p, round_p, proposal, validRound_p⟩ + } else { + schedule OnTimeoutPropose(h_p,round_p) to be executed after timeoutPropose(round_p) + } +} +``` + +### ReceiveProposal + +In the case where the local node is not locked on any round, the following is ran: + +```go +upon ⟨PROPOSAL, h_p, round_p, v_header, −1) from proposer(h_p, round_p) while step_p = propose do { + prevote_nil ← false + // valid is Tendermints validation, ABCI.VerifyHeader is the applications + if valid(v_header) ∧ ABCI.VerifyHeader(h_p, v_header) ∧ (lockedRound_p = −1 ∨ lockedValue_p = id(v_header)) { + wait to receive proposal v corresponding to v_header + // We split up the app's header verification from the remainder of its processing of the proposal + if ABCI.ProcessProposal(h_p, v).accept { + broadcast ⟨PREVOTE, h_p, round_p, id(v)⟩ + } else { + prevote_nil ← true + // Include any slashing evidence that may be sent in the process proposal response + for evidence in ABCI.ProcessProposal(h_p, v).evidence_list { + broadcast ⟨EVIDENCE, evidence⟩ + } + } + } else { + prevote_nil ← true + } + if prevote_nil { + broadcast ⟨PREVOTE, h_p, round_p, nil⟩ + } + step_p ← prevote +} +``` + +In the case where the node is locked on a round, the following is ran: + +```go +upon ⟨PROPOSAL, h_p, round_p, v_header, vr⟩ + from proposer(h_p, round_p) + AND 2f + 1 ⟨PREVOTE, h_p, vr, id(v_header)⟩ + while step_p = propose ∧ (vr ≥ 0 ∧ vr < round_p) do { + prevote_nil ← false + if valid(v) ∧ ABCI.VerifyHeader(h_p, v.header) ∧ (lockedRound_p ≤ vr ∨ lockedValue_p = v) { + wait to receive proposal v corresponding to v_header + // We split up the app's header verification from the remainder of its processing of the proposal + if ABCI.ProcessProposal(h_p, v).accept { + broadcast ⟨PREVOTE, h_p, round_p, id(v)⟩ + } else { + prevote_nil ← true + // Include any slashing evidence that may be sent in the process proposal response + for evidence in ABCI.ProcessProposal(h_p, v).evidence_list { + broadcast ⟨EVIDENCE, evidence⟩ + } + } + } else { + prevote_nil ← true + } + if prevote_nil { + broadcast ⟨PREVOTE, h_p, round_p, nil⟩ + } + step_p ← prevote +} +``` + +### Prevote timeout + +Upon receiving 2f + 1 prevotes, setup a timeout. 
+ +```go +upon 2f + 1 ⟨PREVOTE, h_p, vr, -1⟩ + with step_p = prevote for the first time, do { + schedule OnTimeoutPrevote(h_p, round_p) to be executed after timeoutPrevote(round_p) +} +``` + +with OnTimeoutPrevote defined as: + +```go +function OnTimeoutPrevote(height, round) { + if (height = h_p && round = round_p && step_p = prevote) { + precommit_extension ← ABCI.ExtendVote(h_p, round_p, nil) + broadcast ⟨PRECOMMIT, h_p, round_p, nil, precommit_extension⟩ + step_p ← precommit + } +} +``` + +### Receiving enough prevotes to precommit + +The following code is ran upon receiving 2f + 1 prevotes for the same block + +```go +upon ⟨PROPOSAL, h_p, round_p, v, *⟩ + from proposer(h_p, round_p) + AND 2f + 1 ⟨PREVOTE, h_p, vr, id(v)⟩ + while valid(v) ∧ step_p >= prevote for the first time do { + if (step_p = prevote) { + lockedValue_p ← v + lockedRound_p ← round_p + precommit_extension ← ABCI.ExtendVote(h_p, round_p, id(v)) + broadcast ⟨PRECOMMIT, h_p, round_p, id(v), precommit_extension⟩ + step_p ← precommit + } + validValue_p ← v + validRound_p ← round_p +} +``` + +And upon receiving 2f + 1 prevotes for nil: + +```go +upon 2f + 1 ⟨PREVOTE, h_p, round_p, nil⟩ + while step_p = prevote do { + precommit_extension ← ABCI.ExtendVote(h_p, round_p, nil) + broadcast ⟨PRECOMMIT, h_p, round_p, nil, precommit_extension⟩ + step_p ← precommit +} +``` + +### Upon receiving a precommit + +Upon receiving a precommit `precommit`, we ensure that `ABCI.VerifyVoteExtension(precommit.precommit_extension) = true` +before accepting the precommit. This is akin to how we check the signature on precommits normally, hence its not wrapped +in the syntax of methods from the paper. + +### Precommit timeout + +Upon receiving 2f + 1 precommits, setup a timeout. + +```go +upon 2f + 1 ⟨PRECOMMIT, h_p, vr, *⟩ for the first time, do { + schedule OnTimeoutPrecommit(h_p, round_p) to be executed after timeoutPrecommit(round_p) +} +``` + +with OnTimeoutPrecommit defined as: + +```go +function OnTimeoutPrecommit(height, round) { + if (height = h_p && round = round_p) { + StartRound(round_p + 1) + } +} +``` + +### Upon Receiving 2f + 1 precommits + +The following code is ran upon receiving 2f + 1 precommits for the same block + +```go +upon ⟨PROPOSAL, h_p, r, v, *⟩ + from proposer(h_p, r) + AND 2f + 1 ⟨ PRECOMMIT, h_p, r, id(v)⟩ + while decision_p[h_p] = nil do { + if (valid(v)) { + decision_p[h_p] ← v + h_p ← h_p + 1 + reset lockedRound_p, lockedValue_p,validRound_p and validValue_p to initial values + ABCI.FinalizeBlock(id(v)) + StartRound(0) + } +} +``` + +If we don't see 2f + 1 precommits for the same block, we wait until we get 2f + 1 precommits, and the timeout occurs. \ No newline at end of file diff --git a/spec/abci++/v4.md b/spec/abci++/v4.md new file mode 100644 index 0000000000..d211fd87fc --- /dev/null +++ b/spec/abci++/v4.md @@ -0,0 +1,199 @@ +# Tendermint v4 Markdown pseudocode + +This is a multi-threaded implementation of ABCI++, +where ProcessProposal starts when the proposal is received, but ends before precommitting. 
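+
+The distinguishing feature of this variant is the `Fork`/`Join` pair around `ABCI.ProcessProposal`.
+One natural, though merely illustrative, way to express that pattern in Go is a goroutine plus a
+buffered channel, as sketched below; all names are hypothetical.
+
+```go
+package v4sketch
+
+// ProcessResult mirrors the ProcessProposal response fields consumed after
+// the Join in the pseudocode.
+type ProcessResult struct {
+	Accept       bool
+	EvidenceList [][]byte
+}
+
+// fork starts processProposal in the background; receiving from the returned
+// channel is the "Join" of the pseudocode.
+func fork(processProposal func() ProcessResult) <-chan ProcessResult {
+	done := make(chan ProcessResult, 1) // buffered: the send completes even if never joined
+	go func() { done <- processProposal() }()
+	return done
+}
+
+// onProposal forks validation on receipt of a proposal, prevotes without
+// waiting, and joins the result only when it is actually needed.
+func onProposal(processProposal func() ProcessResult, prevote func()) ProcessResult {
+	handle := fork(processProposal)
+	prevote()       // broadcast ⟨PREVOTE, h_p, round_p, id(v)⟩ immediately
+	return <-handle // Join: block until the Application has finished
+}
+```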
+ +### Initialization + +```go +h_p ← 0 +round_p ← 0 +step_p is one of {propose, prevote, precommit} +decision_p ← Vector() +lockedValue_p ← nil +validValue_p ← nil +validRound_p ← -1 +``` + +### StartRound(round) + +```go +function startRound(round) { + round_p ← round + step_p ← propose + if proposer(h_p, round_p) = p { + if validValue_p != nil { + proposal ← validValue_p + } else { + txdata ← mempool.GetBlock() + // getUnpreparedBlockProposal fills in header + unpreparedProposal ← getUnpreparedBlockProposal(txdata) + proposal ← ABCI.PrepareProposal(unpreparedProposal) + } + broadcast ⟨PROPOSAL, h_p, round_p, proposal, validRound_p⟩ + } else { + schedule OnTimeoutPropose(h_p,round_p) to be executed after timeoutPropose(round_p) + } +} +``` + +### ReceiveProposal + +In the case where the local node is not locked on any round, the following is ran: + +```go +upon ⟨PROPOSAL, h_p, round_p, v, −1) from proposer(h_p, round_p) while step_p = propose do { + if valid(v) ∧ ABCI.VerifyHeader(h_p, v.header) ∧ (lockedRound_p = −1 ∨ lockedValue_p = v) { + // We fork process proposal into a parallel process + Fork ABCI.ProcessProposal(h_p, v) + broadcast ⟨PREVOTE, h_p, round_p, id(v)⟩ + } else { + broadcast ⟨PREVOTE, h_p, round_p, nil⟩ + } + step_p ← prevote +} +``` + +In the case where the node is locked on a round, the following is ran: + +```go +upon ⟨PROPOSAL, h_p, round_p, v, vr⟩ + from proposer(h_p, round_p) + AND 2f + 1 ⟨PREVOTE, h_p, vr, id(v)⟩ + while step_p = propose ∧ (vr ≥ 0 ∧ vr < round_p) do { + if valid(v) ∧ ABCI.VerifyHeader(h_p, v.header) ∧ (lockedRound_p ≤ vr ∨ lockedValue_p = v) { + // We fork process proposal into a parallel process + Fork ABCI.ProcessProposal(h_p, v) + broadcast ⟨PREVOTE, h_p, round_p, id(v)⟩ + } else { + broadcast ⟨PREVOTE, h_p, round_p, nil⟩ + } + step_p ← prevote +} +``` + +### Prevote timeout + +Upon receiving 2f + 1 prevotes, setup a timeout. + +```go +upon 2f + 1 ⟨PREVOTE, h_p, vr, -1⟩ + with step_p = prevote for the first time, do { + schedule OnTimeoutPrevote(h_p, round_p) to be executed after timeoutPrevote(round_p) +} +``` + +with OnTimeoutPrevote defined as: + +```go +def OnTimeoutPrevote(height, round) { + if (height = h_p && round = round_p && step_p = prevote) { + // Join the ProcessProposal, and output any evidence in case it has some. + processProposalOutput ← Join ABCI.ProcessProposal(h_p, v) + for evidence in processProposalOutput.evidence_list { + broadcast ⟨EVIDENCE, evidence⟩ + } + + precommit_extension ← ABCI.ExtendVote(h_p, round_p, nil) + broadcast ⟨PRECOMMIT, h_p, round_p, nil, precommit_extension⟩ + step_p ← precommit + } +} +``` + +### Receiving enough prevotes to precommit + +The following code is ran upon receiving 2f + 1 prevotes for the same block + +```go +upon ⟨PROPOSAL, h_p, round_p, v, *⟩ + from proposer(h_p, round_p) + AND 2f + 1 ⟨PREVOTE, h_p, vr, id(v)⟩ +while valid(v) ∧ step_p >= prevote for the first time do { + if (step_p = prevote) { + lockedValue_p ← v + lockedRound_p ← round_p + processProposalOutput ← Join ABCI.ProcessProposal(h_p, v) + // If the proposal is valid precommit as before. + // If it was invalid, precommit nil. + // Note that ABCI.ProcessProposal(h_p, v).accept is deterministic for all honest nodes. 
+    precommit_value ← nil
+    if processProposalOutput.accept {
+      precommit_value ← id(v)
+    }
+    precommit_extension ← ABCI.ExtendVote(h_p, round_p, precommit_value)
+    broadcast ⟨PRECOMMIT, h_p, round_p, precommit_value, precommit_extension⟩
+    for evidence in processProposalOutput.evidence_list {
+      broadcast ⟨EVIDENCE, evidence⟩
+    }
+
+    step_p ← precommit
+  }
+  validValue_p ← v
+  validRound_p ← round_p
+}
+```
+
+And upon receiving 2f + 1 prevotes for nil:
+
+```go
+upon 2f + 1 ⟨PREVOTE, h_p, round_p, nil⟩
+  while step_p = prevote do {
+  // Join ABCI.ProcessProposal, and broadcast any evidence if it exists.
+  processProposalOutput ← Join ABCI.ProcessProposal(h_p, v)
+  for evidence in processProposalOutput.evidence_list {
+    broadcast ⟨EVIDENCE, evidence⟩
+  }
+
+  precommit_extension ← ABCI.ExtendVote(h_p, round_p, nil)
+  broadcast ⟨PRECOMMIT, h_p, round_p, nil, precommit_extension⟩
+  step_p ← precommit
+}
+```
+
+### Upon receiving a precommit
+
+Upon receiving a precommit `precommit`, we ensure that `ABCI.VerifyVoteExtension(precommit.precommit_extension) = true`
+before accepting the precommit. This is akin to how we normally check the signature on precommits, hence it's not wrapped
+in the syntax of methods from the paper.
+
+### Precommit timeout
+
+Upon receiving 2f + 1 precommits, set up a timeout.
+
+```go
+upon 2f + 1 ⟨PRECOMMIT, h_p, vr, *⟩ for the first time, do {
+  schedule OnTimeoutPrecommit(h_p, round_p) to be executed after timeoutPrecommit(round_p)
+}
+```
+
+with OnTimeoutPrecommit defined as:
+
+```go
+def OnTimeoutPrecommit(height, round) {
+  if (height = h_p && round = round_p) {
+    StartRound(round_p + 1)
+  }
+}
+```
+
+### Upon Receiving 2f + 1 precommits
+
+The following code is run upon receiving 2f + 1 precommits for the same block:
+
+```go
+upon ⟨PROPOSAL, h_p, r, v, *⟩
+  from proposer(h_p, r)
+  AND 2f + 1 ⟨PRECOMMIT, h_p, r, id(v)⟩
+  while decision_p[h_p] = nil do {
+  if (valid(v)) {
+    decision_p[h_p] ← v
+    h_p ← h_p + 1
+    reset lockedRound_p, lockedValue_p, validRound_p and validValue_p to initial values
+    ABCI.FinalizeBlock(id(v))
+    StartRound(0)
+  }
+}
+```
+
+If we don't see 2f + 1 precommits for the same block, we wait until either we do, or the precommit timeout expires and the next round starts.
\ No newline at end of file
diff --git a/spec/abci/README.md b/spec/abci/README.md
new file mode 100644
index 0000000000..c356165af4
--- /dev/null
+++ b/spec/abci/README.md
@@ -0,0 +1,27 @@
+---
+order: 1
+parent:
+  title: ABCI
+  order: 2
+---
+
+# ABCI
+
+ABCI stands for "**A**pplication **B**lock**c**hain **I**nterface".
+ABCI is the interface between Tendermint (a state-machine replication engine)
+and your application (the actual state machine). It consists of a set of
+_methods_, each with a corresponding `Request` and `Response` message type.
+To perform state-machine replication, Tendermint calls the ABCI methods on the
+ABCI application by sending the `Request*` messages and receiving the `Response*` messages in return.
+
+All ABCI messages and methods are defined in [protocol buffers](../../proto/tendermint/abci/types.proto).
+This allows Tendermint to run with applications written in many programming languages.
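+
+To make the request/response pattern concrete, here is a minimal, hypothetical Go sketch of the
+shape of one such method pair (`Echo`, described in the method specification); the real types are
+generated from the protobuf definitions linked above.
+
+```go
+package abcisketch
+
+// Hypothetical, minimal rendering of the Request*/Response* pattern.
+type RequestEcho struct{ Message string }
+
+type ResponseEcho struct{ Message string }
+
+// echo is what an application-side handler for the Echo method boils down to:
+// it returns the input string unchanged.
+func echo(req RequestEcho) ResponseEcho {
+	return ResponseEcho{Message: req.Message}
+}
+```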
+
+This specification is split as follows:
+
+- [Methods and Types](./abci.md) - complete details on all ABCI methods and
+  message types
+- [Applications](./apps.md) - how to manage ABCI application state and other
+  details about building ABCI applications
+- [Client and Server](./client-server.md) - for those looking to implement their
+  own ABCI application servers
diff --git a/spec/abci/abci.md b/spec/abci/abci.md
new file mode 100644
index 0000000000..5d9d59b711
--- /dev/null
+++ b/spec/abci/abci.md
@@ -0,0 +1,757 @@
+---
+order: 1
+title: Methods and Types
+---
+
+# Methods and Types
+
+## Connections
+
+ABCI applications can run either within the _same_ process as the Tendermint
+state-machine replication engine, or as a _separate_ process from the state-machine
+replication engine. When run within the same process, Tendermint will call the ABCI
+application methods directly as Go method calls.
+
+When Tendermint and the ABCI application are run as separate processes, Tendermint
+opens and maintains a connection over either a native socket protocol or
+gRPC.
+
+More details on managing state across connections can be found in the
+section on [ABCI Applications](apps.md).
+
+## Errors
+
+The `Query`, `CheckTx` and `DeliverTx` methods include a `Code` field in their `Response*`.
+This field is meant to contain an application-specific response code.
+A response code of `0` indicates no error. Any other response code
+indicates to Tendermint that an error occurred.
+
+These methods also return a `Codespace` string to Tendermint. This field is
+used to disambiguate `Code` values returned by different domains of the
+application. The `Codespace` is a namespace for the `Code`.
+
+The handling of non-zero response codes by Tendermint is described
+below.
+
+Applications should always terminate if they encounter an issue in a
+method where continuing would corrupt their own state, or from which
+Tendermint should not continue.
+
+In the Go implementation these methods take a context and may return
+an error. The context exists so that applications can terminate
+gracefully during shutdown, and the error return value makes it
+possible for applications to signal transient errors to Tendermint.
+
+### CheckTx
+
+The `CheckTx` ABCI method controls what transactions are considered for inclusion in a block.
+When Tendermint receives a `ResponseCheckTx` with a non-zero `Code`, the associated
+transaction will not be added to Tendermint's mempool, or will be removed if
+it is already included.
+
+### DeliverTx
+
+The `DeliverTx` ABCI method delivers transactions from Tendermint to the application.
+When Tendermint receives a `ResponseDeliverTx` with a non-zero `Code`, the response code is logged.
+The transaction was already included in a block, so the `Code` does not influence
+Tendermint consensus.
+
+### Query
+
+The `Query` ABCI method queries the application for information about application state.
+When Tendermint receives a `ResponseQuery` with a non-zero `Code`, this code is
+returned directly to the client that initiated the query.
+
+## Events
+
+The `CheckTx`, `BeginBlock`, `DeliverTx`, `EndBlock` methods include an `Events`
+field in their `Response*`. Applications may respond to these ABCI methods with a set of events.
+Events allow applications to associate metadata about ABCI method execution with the
+transactions and blocks this metadata relates to.
+Events returned via these ABCI methods do not impact Tendermint consensus in any way +and instead exist to power subscriptions and queries of Tendermint state. + +An `Event` contains a `type` and a list of `EventAttributes`, which are key-value +string pairs denoting metadata about what happened during the method's execution. +`Event` values can be used to index transactions and blocks according to what happened +during their execution. Note that the set of events returned for a block from +`BeginBlock` and `EndBlock` are merged. In case both methods return the same +key, only the value defined in `EndBlock` is used. + +Each event has a `type` which is meant to categorize the event for a particular +`Response*` or `Tx`. A `Response*` or `Tx` may contain multiple events with duplicate +`type` values, where each distinct entry is meant to categorize attributes for a +particular event. Every key and value in an event's attributes must be UTF-8 +encoded strings along with the event type itself. + +```protobuf +message Event { + string type = 1; + repeated EventAttribute attributes = 2; +} +``` + +The attributes of an `Event` consist of a `key`, a `value`, and an `index` flag. The +index flag notifies the Tendermint indexer to index the attribute. The value of +the `index` flag is non-deterministic and may vary across different nodes in the network. + +```protobuf +message EventAttribute { + bytes key = 1; + bytes value = 2; + bool index = 3; // nondeterministic +} +``` + +Example: + +```go + abci.ResponseDeliverTx{ + // ... + Events: []abci.Event{ + { + Type: "validator.provisions", + Attributes: []abci.EventAttribute{ + abci.EventAttribute{Key: []byte("address"), Value: []byte("..."), Index: true}, + abci.EventAttribute{Key: []byte("amount"), Value: []byte("..."), Index: true}, + abci.EventAttribute{Key: []byte("balance"), Value: []byte("..."), Index: true}, + }, + }, + { + Type: "validator.provisions", + Attributes: []abci.EventAttribute{ + abci.EventAttribute{Key: []byte("address"), Value: []byte("..."), Index: true}, + abci.EventAttribute{Key: []byte("amount"), Value: []byte("..."), Index: false}, + abci.EventAttribute{Key: []byte("balance"), Value: []byte("..."), Index: false}, + }, + }, + { + Type: "validator.slashed", + Attributes: []abci.EventAttribute{ + abci.EventAttribute{Key: []byte("address"), Value: []byte("..."), Index: false}, + abci.EventAttribute{Key: []byte("amount"), Value: []byte("..."), Index: true}, + abci.EventAttribute{Key: []byte("reason"), Value: []byte("..."), Index: true}, + }, + }, + // ... + }, +} +``` + +## EvidenceType + +Tendermint's security model relies on the use of "evidence". Evidence is proof of +malicious behaviour by a network participant. It is the responsibility of Tendermint +to detect such malicious behaviour. When malicious behavior is detected, Tendermint +will gossip evidence of the behavior to other nodes and commit the evidence to +the chain once it is verified by all validators. This evidence will then be +passed it on to the application through the ABCI. It is the responsibility of the +application to handle the evidence and exercise punishment. + +EvidenceType has the following protobuf format: + +```proto +enum EvidenceType { + UNKNOWN = 0; + DUPLICATE_VOTE = 1; + LIGHT_CLIENT_ATTACK = 2; +} +``` + +There are two forms of evidence: Duplicate Vote and Light Client Attack. 
More +information can be found in either [data structures](../core/data_structures.md) +or [accountability](../light-client/accountability/README.md) + +## Determinism + +ABCI applications must implement deterministic finite-state machines to be +securely replicated by the Tendermint consensus engine. This means block execution +over the Consensus Connection must be strictly deterministic: given the same +ordered set of requests, all nodes will compute identical responses, for all +BeginBlock, DeliverTx, EndBlock, and Commit. This is critical, because the +responses are included in the header of the next block, either via a Merkle root +or directly, so all nodes must agree on exactly what they are. + +For this reason, it is recommended that applications not be exposed to any +external user or process except via the ABCI connections to a consensus engine +like Tendermint Core. The application must only change its state based on input +from block execution (BeginBlock, DeliverTx, EndBlock, Commit), and not through +any other kind of request. This is the only way to ensure all nodes see the same +transactions and compute the same results. + +If there is some non-determinism in the state machine, consensus will eventually +fail as nodes disagree over the correct values for the block header. The +non-determinism must be fixed and the nodes restarted. + +Sources of non-determinism in applications may include: + +* Hardware failures + * Cosmic rays, overheating, etc. +* Node-dependent state + * Random numbers + * Time +* Underspecification + * Library version changes + * Race conditions + * Floating point numbers + * JSON serialization + * Iterating through hash-tables/maps/dictionaries +* External Sources + * Filesystem + * Network calls (eg. some external REST API service) + +See [#56](https://github.com/tendermint/abci/issues/56) for original discussion. + +Note that some methods (`Query, CheckTx, DeliverTx`) return +explicitly non-deterministic data in the form of `Info` and `Log` fields. The `Log` is +intended for the literal output from the application's logger, while the +`Info` is any additional info that should be returned. These are the only fields +that are not included in block header computations, so we don't need agreement +on them. All other fields in the `Response*` must be strictly deterministic. + +## Block Execution + +The first time a new blockchain is started, Tendermint calls +`InitChain`. From then on, the following sequence of methods is executed for each +block: + +`BeginBlock, [DeliverTx], EndBlock, Commit` + +where one `DeliverTx` is called for each transaction in the block. +The result is an updated application state. +Cryptographic commitments to the results of DeliverTx, EndBlock, and +Commit are included in the header of the next block. + +## State Sync + +State sync allows new nodes to rapidly bootstrap by discovering, fetching, and applying +state machine snapshots instead of replaying historical blocks. For more details, see the +[state sync section](../p2p/messages/state-sync.md). + +New nodes will discover and request snapshots from other nodes in the P2P network. +A Tendermint node that receives a request for snapshots from a peer will call +`ListSnapshots` on its application to retrieve any local state snapshots. After receiving + snapshots from peers, the new node will offer each snapshot received from a peer +to its local application via the `OfferSnapshot` method. 
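+
+As an illustration of this discovery/offer flow, the following is a hedged sketch of the
+application side, with heavily simplified types; the real `OfferSnapshot` response is an enum with
+several reject/abort variants, for which a boolean stands in here.
+
+```go
+package statesync
+
+// Snapshot is a simplified stand-in for the ABCI Snapshot message.
+type Snapshot struct {
+	Height uint64
+	Format uint32
+	Chunks uint32
+	Hash   []byte
+}
+
+// listSnapshots is the application side of ListSnapshots: it simply reports
+// whatever snapshots the application has kept locally.
+func listSnapshots(local []Snapshot) []Snapshot { return local }
+
+// offerSnapshot sketches the accept/reject decision behind OfferSnapshot:
+// accept only snapshot formats this application knows how to restore.
+func offerSnapshot(offer Snapshot, supported map[uint32]bool) bool {
+	return supported[offer.Format]
+}
+```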
+ +Snapshots may be quite large and are thus broken into smaller "chunks" that can be +assembled into the whole snapshot. Once the application accepts a snapshot and +begins restoring it, Tendermint will fetch snapshot "chunks" from existing nodes. +The node providing "chunks" will fetch them from its local application using +the `LoadSnapshotChunk` method. + +As the new node receives "chunks" it will apply them sequentially to the local +application with `ApplySnapshotChunk`. When all chunks have been applied, the application +`AppHash` is retrieved via an `Info` query. The `AppHash` is then compared to +the blockchain's `AppHash` which is verified via [light client verification](../light-client/verification/README.md). + +## Messages + +### Echo + +* **Request**: + * `Message (string)`: A string to echo back +* **Response**: + * `Message (string)`: The input string +* **Usage**: + * Echo a string to test an abci client/server implementation + +### Flush + +* **Usage**: + * Signals that messages queued on the client should be flushed to + the server. It is called periodically by the client + implementation to ensure asynchronous requests are actually + sent, and is called immediately to make a synchronous request, + which returns when the Flush response comes back. + +### Info + +* **Request**: + + | Name | Type | Description | Field Number | + |---------------|--------|------------------------------------------|--------------| + | version | string | The Tendermint software semantic version | 1 | + | block_version | uint64 | The Tendermint Block Protocol version | 2 | + | p2p_version | uint64 | The Tendermint P2P Protocol version | 3 | + | abci_version | string | The Tendermint ABCI semantic version | 4 | + +* **Response**: + + | Name | Type | Description | Field Number | + |---------------------|--------|--------------------------------------------------|--------------| + | data | string | Some arbitrary information | 1 | + | version | string | The application software semantic version | 2 | + | app_version | uint64 | The application protocol version | 3 | + | last_block_height | int64 | Latest block for which the app has called Commit | 4 | + | last_block_app_hash | bytes | Latest result of Commit | 5 | + +* **Usage**: + * Return information about the application state. + * Used to sync Tendermint with the application during a handshake + that happens on startup. + * The returned `app_version` will be included in the Header of every block. + * Tendermint expects `last_block_app_hash` and `last_block_height` to + be updated during `Commit`, ensuring that `Commit` is never + called twice for the same block height. + +> Note: Semantic version is a reference to [semantic versioning](https://semver.org/). Semantic versions in info will be displayed as X.X.x. + +### InitChain + +* **Request**: + + | Name | Type | Description | Field Number | + |------------------|--------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------|--------------| + | time | [google.protobuf.Timestamp](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Timestamp) | Genesis time | 1 | + | chain_id | string | ID of the blockchain. | 2 | + | consensus_params | [ConsensusParams](#consensusparams) | Initial consensus-critical parameters. 
| 3 | + | validators | repeated [ValidatorUpdate](#validatorupdate) | Initial genesis validators, sorted by voting power. | 4 | + | app_state_bytes | bytes | Serialized initial application state. JSON bytes. | 5 | + | initial_height | int64 | Height of the initial block (typically `1`). | 6 | + +* **Response**: + + | Name | Type | Description | Field Number | + |------------------|----------------------------------------------|-------------------------------------------------|--------------| + | consensus_params | [ConsensusParams](#consensusparams) | Initial consensus-critical parameters (optional | 1 | + | validators | repeated [ValidatorUpdate](#validatorupdate) | Initial validator set (optional). | 2 | + | app_hash | bytes | Initial application hash. | 3 | + +* **Usage**: + * Called once upon genesis. + * If ResponseInitChain.Validators is empty, the initial validator set will be the RequestInitChain.Validators + * If ResponseInitChain.Validators is not empty, it will be the initial + validator set (regardless of what is in RequestInitChain.Validators). + * This allows the app to decide if it wants to accept the initial validator + set proposed by tendermint (ie. in the genesis file), or if it wants to use + a different one (perhaps computed based on some application specific + information in the genesis file). + +### Query + +* **Request**: + + | Name | Type | Description | Field Number | + |--------|--------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------| + | data | bytes | Raw query bytes. Can be used with or in lieu of Path. | 1 | + | path | string | Path field of the request URI. Can be used with or in lieu of `data`. Apps MUST interpret `/store` as a query by key on the underlying store. The key SHOULD be specified in the `data` field. Apps SHOULD allow queries over specific types like `/accounts/...` or `/votes/...` | 2 | + | height | int64 | The block height for which you want the query (default=0 returns data for the latest committed block). Note that this is the height of the block containing the application's Merkle root hash, which represents the state as it was after committing the block at Height-1 | 3 | + | prove | bool | Return Merkle proof with response if possible | 4 | + +* **Response**: + + | Name | Type | Description | Field Number | + |-----------|-----------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------| + | code | uint32 | Response code. | 1 | + | log | string | The output of the application's logger. **May be non-deterministic.** | 3 | + | info | string | Additional information. **May be non-deterministic.** | 4 | + | index | int64 | The index of the key in the tree. | 5 | + | key | bytes | The key of the matching data. | 6 | + | value | bytes | The value of the matching data. | 7 | + | proof_ops | [ProofOps](#proofops) | Serialized proof for the value data, if requested, to be verified against the `app_hash` for the given Height. | 8 | + | height | int64 | The block height from which data was derived. 
Note that this is the height of the block containing the application's Merkle root hash, which represents the state as it was after committing the block at Height-1 | 9 | + | codespace | string | Namespace for the `code`. | 10 | + +* **Usage**: + * Query for data from the application at current or past height. + * Optionally return Merkle proof. + * Merkle proof includes self-describing `type` field to support many types + of Merkle trees and encoding formats. + +### BeginBlock + +* **Request**: + + | Name | Type | Description | Field Number | + |----------------------|---------------------------------------------|-------------------------------------------------------------------------------------------------------------------|--------------| + | hash | bytes | The block's hash. This can be derived from the block header. | 1 | + | header | [Header](../core/data_structures.md#header) | The block header. | 2 | + | last_commit_info | [LastCommitInfo](#lastcommitinfo) | Info about the last commit, including the round, and the list of validators and which ones signed the last block. | 3 | + | byzantine_validators | repeated [Evidence](#evidence) | List of evidence of validators that acted maliciously. | 4 | + +* **Response**: + + | Name | Type | Description | Field Number | + |--------|---------------------------|-------------------------------------|--------------| + | events | repeated [Event](#events) | type & Key-Value events for indexing | 1 | + +* **Usage**: + * Signals the beginning of a new block. + * Called prior to any `DeliverTx` method calls. + * The header contains the height, timestamp, and more - it exactly matches the + Tendermint block header. We may seek to generalize this in the future. + * The `LastCommitInfo` and `ByzantineValidators` can be used to determine + rewards and punishments for the validators. + +### CheckTx + +* **Request**: + + | Name | Type | Description | Field Number | + |------|-------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------| + | tx | bytes | The request transaction bytes | 1 | + | type | CheckTxType | One of `CheckTx_New` or `CheckTx_Recheck`. `CheckTx_New` is the default and means that a full check of the tranasaction is required. `CheckTx_Recheck` types are used when the mempool is initiating a normal recheck of a transaction. | 2 | + +* **Response**: + + | Name | Type | Description | Field Number | + |------------|---------------------------|-----------------------------------------------------------------------|--------------| + | code | uint32 | Response code. | 1 | + | data | bytes | Result bytes, if any. | 2 | + | log | string | The output of the application's logger. **May be non-deterministic.** | 3 | + | info | string | Additional information. **May be non-deterministic.** | 4 | + | gas_wanted | int64 | Amount of gas requested for transaction. | 5 | + | gas_used | int64 | Amount of gas consumed by transaction. | 6 | + | events | repeated [Event](#events) | Type & Key-Value events for indexing transactions (eg. by account). | 7 | + | codespace | string | Namespace for the `code`. | 8 | + | sender | string | The transaction's sender (e.g. the signer) | 9 | + | priority | int64 | The transaction's priority (for mempool ordering) | 10 | + +* **Usage**: + + * Technically optional - not involved in processing blocks. 
+ * Guardian of the mempool: every node runs `CheckTx` before letting a transaction into its local mempool. + * The transaction may come from an external user or another node. + * `CheckTx` validates the transaction against the current state of the application, for example, checking signatures and account balances, but does not apply any of the state changes described in the transaction. A lightweight check is sufficient here; the application need not execute the transaction in full (eg. it can skip running code in a virtual machine). + * Transactions where `ResponseCheckTx.Code != 0` will be rejected - they will not be broadcast to other nodes or included in a proposal block. + * Tendermint attributes no other value to the response code. + +### DeliverTx + +* **Request**: + + | Name | Type | Description | Field Number | + |------|-------|--------------------------------|--------------| + | tx | bytes | The request transaction bytes. | 1 | + +* **Response**: + + | Name | Type | Description | Field Number | + |------------|---------------------------|-----------------------------------------------------------------------|--------------| + | code | uint32 | Response code. | 1 | + | data | bytes | Result bytes, if any. | 2 | + | log | string | The output of the application's logger. **May be non-deterministic.** | 3 | + | info | string | Additional information. **May be non-deterministic.** | 4 | + | gas_wanted | int64 | Amount of gas requested for transaction. | 5 | + | gas_used | int64 | Amount of gas consumed by transaction. | 6 | + | events | repeated [Event](#events) | Type & Key-Value events for indexing transactions (eg. by account). | 7 | + | codespace | string | Namespace for the `code`. | 8 | + +* **Usage**: + * [**Required**] The core method of the application. + * When `DeliverTx` is called, the application must execute the transaction in full before returning control to Tendermint. + * `ResponseDeliverTx.Code == 0` only if the transaction is fully valid. + +### EndBlock + +* **Request**: + + | Name | Type | Description | Field Number | + |--------|-------|------------------------------------|--------------| + | height | int64 | Height of the block just executed. | 1 | + +* **Response**: + + | Name | Type | Description | Field Number | + |-------------------------|----------------------------------------------|-----------------------------------------------------------------|--------------| + | validator_updates | repeated [ValidatorUpdate](#validatorupdate) | Changes to validator set (set voting power to 0 to remove). | 1 | + | consensus_param_updates | [ConsensusParams](#consensusparams) | Changes to consensus-critical time, size, and other parameters. | 2 | + | events | repeated [Event](#events) | Type & Key-Value events for indexing | 3 | + +* **Usage**: + * Signals the end of a block. + * Called after all the transactions for the current block have been delivered, prior to the block's `Commit` message. + * Optional `validator_updates` triggered by block `H`. These updates affect validation for blocks `H+1`, `H+2`, and `H+3`. + * Heights following a validator update are affected in the following way: + * `H+1`: `NextValidatorsHash` includes the new `validator_updates` value. + * `H+2`: The validator set change takes effect and `ValidatorsHash` is updated. + * `H+3`: `LastCommitInfo` is changed to include the altered validator set. + * `consensus_param_updates` returned for block `H` apply to the consensus params for block `H+1`. For more information on the consensus parameters, see the [application spec entry on consensus parameters](./apps.md#consensus-parameters).
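+ +The validator-update flow above can be made concrete with a short Go sketch of an `EndBlock` handler. This is a minimal illustration, not a required implementation: it assumes the Go ABCI types from `github.com/tendermint/tendermint/abci/types`, and the `pendingUpdates` field is a hypothetical place where the application collected changes while executing the block's transactions. + +```go +package main + +import ( + abci "github.com/tendermint/tendermint/abci/types" +) + +// App embeds BaseApplication so unimplemented ABCI methods fall back to no-ops. +type App struct { + abci.BaseApplication + pendingUpdates []abci.ValidatorUpdate // hypothetical: collected during DeliverTx +} + +// EndBlock returns the validator changes triggered by block H. +// Per the rules above, they take effect in the validator set at block H+2. +func (app *App) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock { + updates := app.pendingUpdates // an update with Power set to 0 removes that validator + app.pendingUpdates = nil + return abci.ResponseEndBlock{ValidatorUpdates: updates} +} +```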
+ +### Commit + +* **Request**: + + | Name | Type | Description | Field Number | + |--------|-------|------------------------------------|--------------| + + Commit signals the application to persist application state. It takes no parameters. +* **Response**: + + | Name | Type | Description | Field Number | + |---------------|-------|-------------------------------------------------------------------------|--------------| + | data | bytes | The Merkle root hash of the application state. | 2 | + | retain_height | int64 | Blocks below this height may be removed. Defaults to `0` (retain all). | 3 | + +* **Usage**: + * Signal the application to persist the application state. + * Return an (optional) Merkle root hash of the application state + * `ResponseCommit.Data` is included as the `Header.AppHash` in the next block + * it may be empty + * Later calls to `Query` can return proofs about the application state anchored in this Merkle root hash + * Note developers can return whatever they want here (could be nothing, or a constant string, etc.), so long as it is deterministic - it must not be a function of anything that did not come from the BeginBlock/DeliverTx/EndBlock methods. + * Use `RetainHeight` with caution! If all nodes in the network remove historical blocks then this data is permanently lost, and no new nodes will be able to join the network and bootstrap. Historical blocks may also be required for other purposes, e.g. auditing, replay of non-persisted heights, light client verification, and so on. + +### ListSnapshots + +* **Request**: + + | Name | Type | Description | Field Number | + |--------|-------|------------------------------------|--------------| + + Empty request asking the application for a list of snapshots. + +* **Response**: + + | Name | Type | Description | Field Number | + |-----------|--------------------------------|--------------------------------|--------------| + | snapshots | repeated [Snapshot](#snapshot) | List of local state snapshots. | 1 | + +* **Usage**: + * Used during state sync to discover available snapshots on peers. + * See `Snapshot` data type for details. + +### LoadSnapshotChunk + +* **Request**: + + | Name | Type | Description | Field Number | + |--------|--------|-----------------------------------------------------------------------|--------------| + | height | uint64 | The height of the snapshot the chunk belongs to. | 1 | + | format | uint32 | The application-specific format of the snapshot the chunk belongs to. | 2 | + | chunk | uint32 | The chunk index, starting from `0` for the initial chunk. | 3 | + +* **Response**: + + | Name | Type | Description | Field Number | + |-------|-------|---------------------------------------------------------------------------------------------------------------------------------------------------------|--------------| + | chunk | bytes | The binary chunk contents, in an arbitrary format. Chunk messages cannot be larger than 16 MB _including metadata_, so 10 MB is a good starting point. | 1 | + +* **Usage**: + * Used during state sync to retrieve snapshot chunks from peers. + +### OfferSnapshot + +* **Request**: + + | Name | Type | Description | Field Number | + |----------|-----------------------|---------------------------------------------------------------------------|--------------| + | snapshot | [Snapshot](#snapshot) | The snapshot offered for restoration. | 1 | + | app_hash | bytes | The light client-verified app hash for this height, from the blockchain.
| 2 | + +* **Response**: + + | Name | Type | Description | Field Number | + |--------|-------------------|-----------------------------------|--------------| + | result | [Result](#result) | The result of the snapshot offer. | 1 | + +#### Result + +```proto + enum Result { + UNKNOWN = 0; // Unknown result, abort all snapshot restoration + ACCEPT = 1; // Snapshot is accepted, start applying chunks. + ABORT = 2; // Abort snapshot restoration, and don't try any other snapshots. + REJECT = 3; // Reject this specific snapshot, try others. + REJECT_FORMAT = 4; // Reject all snapshots with this `format`, try others. + REJECT_SENDER = 5; // Reject all snapshots from all senders of this snapshot, try others. + } +``` + +* **Usage**: + * `OfferSnapshot` is called when bootstrapping a node using state sync. The application may accept or reject snapshots as appropriate. Upon accepting, Tendermint will retrieve and apply snapshot chunks via `ApplySnapshotChunk`. The application may also choose to reject a snapshot in the chunk response, in which case it should be prepared to accept further `OfferSnapshot` calls. + * Only `AppHash` can be trusted, as it has been verified by the light client. Any other data can be spoofed by adversaries, so applications should employ additional verification schemes to avoid denial-of-service attacks. The verified `AppHash` is automatically checked against the restored application at the end of snapshot restoration. + * For more information, see the `Snapshot` data type or the [state sync section](../p2p/messages/state-sync.md). + +### ApplySnapshotChunk + +* **Request**: + + | Name | Type | Description | Field Number | + |--------|--------|------------------------------------------------------------------------------|--------------| + | index | uint32 | The chunk index, starting from `0`. Tendermint applies chunks sequentially. | 1 | + | chunk | bytes | The binary chunk contents, as returned by `LoadSnapshotChunk`. | 2 | + | sender | string | The P2P ID of the node that sent this chunk. | 3 | + +* **Response**: + + | Name | Type | Description | Field Number | + |----------------|---------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------| + | result | Result (see below) | The result of applying this chunk. | 1 | + | refetch_chunks | repeated uint32 | Refetch and reapply the given chunks, regardless of `result`. Only the listed chunks will be refetched, and reapplied in sequential order. | 2 | + | reject_senders | repeated string | Reject the given P2P senders, regardless of `result`. Any chunks already applied will not be refetched unless explicitly requested, but queued chunks from these senders will be discarded, and new chunks or other snapshots rejected. | 3 | + +```proto + enum Result { + UNKNOWN = 0; // Unknown result, abort all snapshot restoration + ACCEPT = 1; // The chunk was accepted. + ABORT = 2; // Abort snapshot restoration, and don't try any other snapshots. + RETRY = 3; // Reapply this chunk, combine with `RefetchChunks` and `RejectSenders` as appropriate. + RETRY_SNAPSHOT = 4; // Restart this snapshot from `OfferSnapshot`, reusing chunks unless instructed otherwise. + REJECT_SNAPSHOT = 5; // Reject this snapshot, try a different one.
+ } +``` + +* **Usage**: + * The application can choose to refetch chunks and/or ban P2P peers as appropriate. Tendermint will not do this unless instructed by the application. + * The application may want to verify each chunk, e.g. by attaching chunk hashes in `Snapshot.Metadata` and/or incrementally verifying contents against `AppHash`. + * When all chunks have been accepted, Tendermint will make an ABCI `Info` call to verify that `LastBlockAppHash` and `LastBlockHeight` match the expected values, and record the `AppVersion` in the node state. It then switches to fast sync or consensus and joins the network. + * If Tendermint is unable to retrieve the next chunk after some time (e.g. because no suitable peers are available), it will reject the snapshot and try a different one via `OfferSnapshot`. The application should be prepared to reset and accept it or abort as appropriate. + +## Data Types + +Most of the data structures used in ABCI are shared [common data structures](../core/data_structures.md). In certain cases, ABCI uses different data structures which are documented here: + +### Validator + +* **Fields**: + + | Name | Type | Description | Field Number | + |---------|-------|---------------------------------------------------------------------|--------------| + | address | bytes | [Address](../core/data_structures.md#address) of validator | 1 | + | power | int64 | Voting power of the validator | 3 | + +* **Usage**: + * Validator identified by address + * Used in RequestBeginBlock as part of VoteInfo + * Does not include PubKey to avoid sending potentially large quantum pubkeys over the ABCI + +### ValidatorUpdate + +* **Fields**: + + | Name | Type | Description | Field Number | + |---------|--------------------------------------------------|-------------------------------|--------------| + | pub_key | [Public Key](../core/data_structures.md#pub_key) | Public key of the validator | 1 | + | power | int64 | Voting power of the validator | 2 | + +* **Usage**: + * Validator identified by PubKey + * Used to tell Tendermint to update the validator set + +### VoteInfo + +* **Fields**: + + | Name | Type | Description | Field Number | + |-------------------|-------------------------|--------------------------------------------------------------|--------------| + | validator | [Validator](#validator) | A validator | 1 | + | signed_last_block | bool | Indicates whether or not the validator signed the last block | 2 | + +* **Usage**: + * Indicates whether a validator signed the last block, allowing for rewards based on validator availability + +### Evidence + +* **Fields**: + + | Name | Type | Description | Field Number | + |--------------------|--------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------|--------------| + | type | [EvidenceType](#evidencetype) | Type of the evidence. An enum of the possible types of evidence.
| 1 | + | validator | [Validator](#validator) | The offending validator | 2 | + | height | int64 | Height when the offense occurred | 3 | + | time | [google.protobuf.Timestamp](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Timestamp) | Time of the block that was committed at the height that the offense occurred | 4 | + | total_voting_power | int64 | Total voting power of the validator set at height `Height` | 5 | + +#### EvidenceType + +* **Fields** + + EvidenceType is an enum with the listed fields: + + | Name | Field Number | + |---------------------|--------------| + | UNKNOWN | 0 | + | DUPLICATE_VOTE | 1 | + | LIGHT_CLIENT_ATTACK | 2 | + +### LastCommitInfo + +* **Fields**: + + | Name | Type | Description | Field Number | + |-------|--------------------------------|------------------------------------------------------------------------------------------------------------------------|--------------| + | round | int32 | Commit round. Reflects the total number of rounds it took to come to consensus for the current block. | 1 | + | votes | repeated [VoteInfo](#voteinfo) | List of validator addresses in the last validator set with their voting power and whether or not they signed a vote. | 2 | + +### ConsensusParams + +* **Fields**: + + | Name | Type | Description | Field Number | + |-----------|---------------------------------------------------------------|-----------------------------------------------------------------------------------------------------|--------------| + | block | [BlockParams](../core/data_structures.md#blockparams) | Parameters limiting the size of a block and time between consecutive blocks. | 1 | + | evidence | [EvidenceParams](../core/data_structures.md#evidenceparams) | Parameters limiting the validity of evidence of Byzantine behaviour. | 2 | + | validator | [ValidatorParams](../core/data_structures.md#validatorparams) | Parameters limiting the types of public keys validators can use. | 3 | + | version | [VersionParams](../core/data_structures.md#versionparams) | The ABCI application version. | 4 | + | synchrony | [SynchronyParams](../core/data_structures.md#synchronyparams) | Parameters that determine the bounds under which a proposed block's timestamp is considered valid. | 5 | + | timeout | [TimeoutParams](../core/data_structures.md#timeoutparams) | Parameters that configure the timeouts for the steps of the Tendermint consensus algorithm. | 6 | + +### ProofOps + +* **Fields**: + + | Name | Type | Description | Field Number | + |------|------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------| + | ops | repeated [ProofOp](#proofop) | List of chained Merkle proofs, of possibly different types. The Merkle root of one op is the value being proven in the next op. The Merkle root of the final op should equal the ultimate root hash being verified against. | 1 | + +### ProofOp + +* **Fields**: + + | Name | Type | Description | Field Number | + |------|--------|------------------------------------------------|--------------| + | type | string | Type of Merkle proof and how it's encoded. | 1 | + | key | bytes | Key in the Merkle tree that this proof is for. | 2 | + | data | bytes | Encoded Merkle proof for the key.
| 3 | + +### Snapshot + +* **Fields**: + + | Name | Type | Description | Field Number | + |----------|--------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------| + | height | uint64 | The height at which the snapshot was taken (after commit). | 1 | + | format | uint32 | An application-specific snapshot format, allowing applications to version their snapshot data format and make backwards-incompatible changes. Tendermint does not interpret this. | 2 | + | chunks | uint32 | The number of chunks in the snapshot. Must be at least 1 (even if empty). | 3 | + | hash | bytes | An arbitrary snapshot hash. Must be equal only for identical snapshots across nodes. Tendermint does not interpret the hash, it only compares them. | 4 | + | metadata | bytes | Arbitrary application metadata, for example chunk hashes or other verification data. | 5 | + +* **Usage**: + * Used for state sync snapshots, see the [state sync section](../p2p/messages/state-sync.md) for details. + * A snapshot is considered identical across nodes only if _all_ fields are equal (including `Metadata`). Chunks may be retrieved from all nodes that have the same snapshot. + * When sent across the network, a snapshot message can be at most 4 MB. diff --git a/spec/abci/apps.md b/spec/abci/apps.md new file mode 100644 index 0000000000..d6ec198323 --- /dev/null +++ b/spec/abci/apps.md @@ -0,0 +1,685 @@ +--- +order: 2 +title: Applications +--- + +# Applications + +Please ensure you've first read the spec for [ABCI Methods and Types](abci.md). + +Here we cover the following components of ABCI applications: + +- [Connection State](#connection-state) - the interplay between ABCI connections and application state and the differences between `CheckTx` and `DeliverTx`. +- [Transaction Results](#transaction-results) - rules around transaction results and validity +- [Validator Set Updates](#validator-updates) - how validator sets are changed during `InitChain` and `EndBlock` +- [Query](#query) - standards for using the `Query` method and proofs about the application state +- [Crash Recovery](#crash-recovery) - handshake protocol to synchronize Tendermint and the application on startup. +- [State Sync](#state-sync) - rapid bootstrapping of new nodes by restoring state machine snapshots + +## Connection State + +Since Tendermint maintains four concurrent ABCI connections, it is typical for an application to maintain a distinct state for each, and for the states to be synchronized during `Commit`. + +### Concurrency + +In principle, each of the four ABCI connections operates concurrently with the others. This means applications need to ensure access to state is thread safe. In practice, both the [default in-process ABCI client](https://github.com/tendermint/tendermint/blob/v0.34.4/abci/client/local_client.go#L18) and the [default Go ABCI server](https://github.com/tendermint/tendermint/blob/v0.34.4/abci/server/socket_server.go#L32) use global locks across all connections, so they are not concurrent at all. This means if your app is written in Go, and compiled in-process with Tendermint using the default `NewLocalClient`, or run out-of-process using the default `SocketServer`, ABCI messages from all connections will be linearizable (received one at a time).
+ +The existence of this global mutex means Go application developers can get thread safety for application state by routing *all* reads and writes through the ABCI system. Thus it may be *unsafe* to expose application state directly to an RPC interface, and unless explicit measures are taken, all queries should be routed through the ABCI Query method. + +### BeginBlock + +The BeginBlock request can be used to run some code at the beginning of every block. It also allows Tendermint to send the current block hash and header to the application, before it sends any of the transactions. + +The app should remember the latest height and header (ie. from which it has run a successful Commit) so that it can tell Tendermint where to pick up from when it restarts. See information on the Handshake, below. + +### Commit + +Application state should only be persisted to disk during `Commit`. + +Before `Commit` is called, Tendermint locks and flushes the mempool so that no new messages will be received on the mempool connection. This provides an opportunity to safely update all four connection states to the latest committed state at once. + +When `Commit` completes, it unlocks the mempool. + +WARNING: if the ABCI app logic processing the `Commit` message sends a `/broadcast_tx_sync` or `/broadcast_tx_commit` and waits for the response before proceeding, it will deadlock. Executing those `broadcast_tx` calls involves acquiring a lock that is held during the `Commit` call, so it's not possible. If you make the call to the `broadcast_tx` endpoints concurrently, that's no problem, it just can't be part of the sequential logic of the `Commit` function. + +### Consensus Connection + +The Consensus Connection should maintain a `DeliverTxState` - the working state for block execution. It should be updated by the calls to `BeginBlock`, `DeliverTx`, and `EndBlock` during block execution and committed to disk as the "latest committed state" during `Commit`. + +Updates made to the `DeliverTxState` by each method call must be readable by each subsequent method - ie. the updates are linearizable. + +### Mempool Connection + +The Mempool Connection should maintain a `CheckTxState` to sequentially process pending transactions in the mempool that have not yet been committed. It should be initialized to the latest committed state at the end of every `Commit`. + +Before calling `Commit`, Tendermint will lock and flush the mempool connection, ensuring that all outstanding CheckTx requests are responded to and no new ones can begin. The `CheckTxState` may be updated concurrently with the `DeliverTxState`, as messages may be sent concurrently on the Consensus and Mempool connections. + +After `Commit`, while still holding the mempool lock, CheckTx is run again on all transactions that remain in the node's local mempool after filtering those included in the block. An additional `Type` parameter is made available to the CheckTx function that indicates whether an incoming transaction is new (`CheckTxType_New`), or a recheck (`CheckTxType_Recheck`). + +Finally, after re-checking transactions in the mempool, Tendermint will unlock the mempool connection. New transactions are once again able to be processed through CheckTx. + +Note that CheckTx is just a weak filter to keep invalid transactions out of the blockchain. CheckTx doesn't have to check everything that affects transaction validity; the expensive things can be skipped.
It's weak because a Byzantine node doesn't care about CheckTx; it can propose a block full of invalid transactions if it wants. + +#### Replay Protection + +To prevent old transactions from being replayed, CheckTx must implement replay protection. + +It is possible for old transactions to be sent to the application. So it is important that CheckTx implement some logic to handle them. + +### Query Connection + +The Info Connection should maintain a `QueryState` for answering queries from the user, and for initialization when Tendermint first starts up (both described further below). It should always contain the latest committed state associated with the latest committed block. + +`QueryState` should be set to the latest `DeliverTxState` at the end of every `Commit`, after the full block has been processed and the state committed to disk. Otherwise it should never be modified. + +Tendermint Core currently uses the Query connection to filter peers upon connecting, according to IP address or node ID. For instance, returning a non-OK ABCI response to either of the following queries will cause Tendermint to not connect to the corresponding peer: + +- `p2p/filter/addr/<IP>`, where `<IP>` is an IP address. +- `p2p/filter/id/<ID>`, where `<ID>` is the hex-encoded node ID (the hash of the node's p2p pubkey). + +Note: these query formats are subject to change! + +### Snapshot Connection + +The Snapshot Connection is optional, and is only used to serve state sync snapshots for other nodes and/or restore state sync snapshots to a local node being bootstrapped. + +For more information, see [the state sync section of this document](#state-sync). + +## Transaction Results + +The `Info` and `Log` fields are non-deterministic values for debugging/convenience purposes that are otherwise ignored. + +The `Data` field must be strictly deterministic, but can be arbitrary data. + +### Gas + +Ethereum introduced the notion of `gas` as an abstract representation of the cost of resources used by nodes when processing transactions. Every operation in the Ethereum Virtual Machine uses some amount of gas, and gas can be accepted at a market-variable price. Users propose a maximum amount of gas for their transaction; if the tx uses less, they get the difference credited back. Tendermint adopts a similar abstraction, though uses it only optionally and weakly, allowing applications to define their own sense of the cost of execution. + +In Tendermint, the [ConsensusParams.Block.MaxGas](../../proto/tendermint/types/params.proto) limits the amount of `gas` that can be used in a block. The default value is `-1`, meaning no limit, or that the concept of gas is meaningless. + +Responses contain a `GasWanted` and `GasUsed` field. The former is the maximum amount of gas the sender of a tx is willing to use, and the latter is how much it actually used. Applications should enforce that `GasUsed <= GasWanted` - ie. tx execution should halt before it can use more resources than it requested. + +When `MaxGas > -1`, Tendermint enforces the following rules: + +- `GasWanted <= MaxGas` for all txs in the mempool +- `(sum of GasWanted in a block) <= MaxGas` when proposing a block + +If `MaxGas == -1`, no rules about gas are enforced. + +Note that Tendermint does not currently enforce anything about Gas in the consensus, only the mempool. This means it does not guarantee that committed blocks satisfy these rules! It is the application's responsibility to return non-zero response codes when gas limits are exceeded.
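+ +To illustrate this division of labour, here is a hedged Go sketch of application-side gas enforcement in `DeliverTx`. The `decodeTx` and `execute` helpers are hypothetical stand-ins for application logic; only the rule they implement - halting execution at `GasWanted` and returning a non-zero code on overrun - comes from this spec. + +```go +// DeliverTx sketch: Tendermint only polices GasWanted in the mempool, so the +// application itself must refuse execution that exceeds the requested gas. +func (app *App) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx { + tx, err := decodeTx(req.Tx) // hypothetical decoder for the app's tx format + if err != nil { + return abci.ResponseDeliverTx{Code: 1, Log: "undecodable tx"} + } + gasUsed, err := app.execute(tx) // hypothetical: halts once tx.GasWanted is consumed + if err != nil || gasUsed > tx.GasWanted { + return abci.ResponseDeliverTx{Code: 2, GasWanted: tx.GasWanted, GasUsed: gasUsed, Log: "out of gas"} + } + return abci.ResponseDeliverTx{Code: 0, GasWanted: tx.GasWanted, GasUsed: gasUsed} +} +```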
+ +The `GasUsed` field is ignored completely by Tendermint. That said, applications should enforce: + +- `GasUsed <= GasWanted` for any given transaction +- `(sum of GasUsed in a block) <= MaxGas` for every block + +In the future, we intend to add a `Priority` field to the responses that can be used to explicitly prioritize txs in the mempool for inclusion in a block proposal. See [#1861](https://github.com/tendermint/tendermint/issues/1861). + +### CheckTx + +If `Code != 0`, the transaction will be rejected from the mempool and hence not broadcast to other peers and not included in a proposal block. + +`Data` contains the result of the CheckTx transaction execution, if any. It is semantically meaningless to Tendermint. + +`Events` include any events for the execution, though since the transaction has not been committed yet, they are effectively ignored by Tendermint. + +### DeliverTx + +DeliverTx is the workhorse of the blockchain. Tendermint sends the DeliverTx requests asynchronously but in order, and relies on the underlying socket protocol (ie. TCP) to ensure they are received by the app in order. They have already been ordered in the global consensus by the Tendermint protocol. + +If DeliverTx returns `Code != 0`, the transaction will be considered invalid, though it is still included in the block. + +DeliverTx also returns a [Code, Data, and Log](../../proto/tendermint/abci/types.proto#L189-L191). + +`Data` contains the result of the DeliverTx transaction execution, if any. It is semantically meaningless to Tendermint. + +Both the `Code` and `Data` are included in a structure that is hashed into the `LastResultsHash` of the next block header. + +`Events` include any events for the execution, which Tendermint will use to index the transaction. This allows transactions to be queried according to what events took place during their execution. + +## Updating the Validator Set + +The application may set the validator set during InitChain, and may update it during EndBlock. + +Note that the maximum total power of the validator set is bounded by `MaxTotalVotingPower = MaxInt64 / 8`. Applications are responsible for ensuring they do not make changes to the validator set that cause it to exceed this limit. + +Additionally, applications must ensure that a single set of updates does not contain any duplicates - a given public key can only appear once within a given update. If an update includes duplicates, the block execution will fail irrecoverably. + +### InitChain + +The `InitChain` method can return a list of validators. If the list is empty, Tendermint will use the validators loaded in the genesis file. If the list returned by `InitChain` is not empty, Tendermint will use its contents as the validator set. This way the application can set the initial validator set for the blockchain.
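+ +A minimal Go sketch of this choice follows, assuming the same ABCI types as earlier examples; `useGenesisValidators` and `computeInitialValidators` are hypothetical application details: + +```go +// InitChain sketch: an empty Validators list defers to the genesis file, +// while a non-empty list replaces the genesis validator set entirely. +func (app *App) InitChain(req abci.RequestInitChain) abci.ResponseInitChain { + if app.useGenesisValidators { // hypothetical application flag + return abci.ResponseInitChain{} // empty => RequestInitChain.Validators is used + } + // Derive a custom initial set, e.g. from application state in the genesis file. + return abci.ResponseInitChain{ + Validators: app.computeInitialValidators(req.AppStateBytes), // hypothetical helper + } +} +```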
+ +### EndBlock + +Updates to the Tendermint validator set can be made by returning `ValidatorUpdate` objects in the `ResponseEndBlock`: + +```protobuf +message ValidatorUpdate { + tendermint.crypto.keys.PublicKey pub_key = 1; + int64 power = 2; +} + +message PublicKey { + oneof sum { + bytes ed25519 = 1; + } +} +``` + +The `pub_key` currently supports only one type: + +- `type = "ed25519"` + +The `power` is the new voting power for the validator, with the following rules: + +- power must be non-negative +- if power is 0, the validator must already exist, and will be removed from the validator set +- if power is non-0: + - if the validator does not already exist, it will be added to the validator set with the given power + - if the validator does already exist, its power will be adjusted to the given power +- the total power of the new validator set must not exceed MaxTotalVotingPower + +Note the updates returned in block `H` will only take effect at block `H+2`. + +## Consensus Parameters + +ConsensusParams enforce certain limits in the blockchain, like the maximum size of blocks, amount of gas used in a block, and the maximum acceptable age of evidence. They can be set in InitChain and updated in EndBlock. + +### BlockParams.MaxBytes + +The maximum size of a complete Protobuf encoded block. This is enforced by Tendermint consensus. + +This implies a maximum transaction size equal to `MaxBytes` less the expected size of the header, the validator set, and any included evidence in the block. + +Must have `0 < MaxBytes < 100 MB`. + +### BlockParams.MaxGas + +The maximum sum of `GasWanted` that will be allowed in a proposed block. This is *not* enforced by Tendermint consensus. It is left to the app to enforce (ie. if txs are included past the limit, they should return non-zero codes). It is used by Tendermint to limit the txs included in a proposed block. + +Must have `MaxGas >= -1`. If `MaxGas == -1`, no limit is enforced. + +### EvidenceParams.MaxAgeDuration + +This is the maximum age of evidence in time units. This is enforced by Tendermint consensus. + +If a block includes evidence older than this (AND the evidence was created more than `MaxAgeNumBlocks` ago), the block will be rejected (validators won't vote for it). + +Must have `MaxAgeDuration > 0`. + +### EvidenceParams.MaxAgeNumBlocks + +This is the maximum age of evidence in blocks. This is enforced by Tendermint consensus. + +If a block includes evidence older than this (AND the evidence was created more than `MaxAgeDuration` ago), the block will be rejected (validators won't vote for it). + +Must have `MaxAgeNumBlocks > 0`. + +### EvidenceParams.MaxNum + +This is the maximum number of evidence items that can be committed to a single block. + +The product of this and the `MaxEvidenceBytes` must not exceed the size of a block minus its overhead (~ `MaxBytes`). + +Must have `MaxNum > 0`. + +### SynchronyParams.Precision + +`SynchronyParams.Precision` is a parameter of the Proposer-Based Timestamps algorithm that configures the acceptable upper-bound of clock drift among all of the nodes on a Tendermint network. Any two nodes on a Tendermint network are expected to have clocks that differ by at most `Precision`. + +### SynchronyParams.MessageDelay + +`SynchronyParams.MessageDelay` is a parameter of the Proposer-Based Timestamps algorithm that configures the acceptable upper-bound for transmitting a `Proposal` message from the proposer to all of the validators on the network.
+ +### Updates + +The application may set the ConsensusParams during InitChain, and update them during EndBlock. If the ConsensusParams is empty, it will be ignored. Each field that is not empty will be applied in full. For instance, if updating Block.MaxBytes, applications must also set the other Block fields (like Block.MaxGas), even if they are unchanged, as any field left unset will otherwise be updated to 0. + +#### InitChain + +ResponseInitChain includes a ConsensusParams. If ConsensusParams is nil, Tendermint will use the params loaded in the genesis file. If ConsensusParams is not nil, Tendermint will use it. This way the application can determine the initial consensus params for the blockchain. + +#### EndBlock + +ResponseEndBlock includes a ConsensusParams. If ConsensusParams is nil, Tendermint will do nothing. If ConsensusParams is not nil, Tendermint will use it. This way the application can update the consensus params over time. + +Note the updates returned in block `H` will take effect right away for block `H+1`. + +## Query + +Query is a generic method with lots of flexibility to enable diverse sets of queries on application state. Tendermint makes use of Query to filter new peers based on ID and IP, and exposes Query to the user over RPC. + +Note that calls to Query are not replicated across nodes, but rather query the local node's state - hence they may return stale reads. For reads that require consensus, use a transaction. + +The most important use of Query is to return Merkle proofs of the application state at some height that can be used for efficient application-specific light-clients. + +Note that Tendermint technically has no requirements on the Query message for normal operation - that is, the ABCI app developer need not implement Query functionality if they do not wish to. + +### Query Proofs + +The Tendermint block header includes a number of hashes, each providing an anchor for some type of proof about the blockchain. The `ValidatorsHash` enables quick verification of the validator set, the `DataHash` gives quick verification of the transactions included in the block, etc. + +The `AppHash` is unique in that it is application specific, and allows for application-specific Merkle proofs about the state of the application. While some applications keep all relevant state in the transactions themselves (like Bitcoin and its UTXOs), others maintain a separate state that is computed deterministically *from* transactions, but is not contained directly in the transactions themselves (like Ethereum contracts and accounts). For such applications, the `AppHash` provides a much more efficient way to verify light-client proofs. + +ABCI applications can take advantage of more efficient light-client proofs for their state as follows: + +- return the Merkle root of the deterministic application state in `ResponseCommit.Data`. This Merkle root will be included as the `AppHash` in the next block. +- return efficient Merkle proofs about that application state in `ResponseQuery.Proof` that can be verified using the `AppHash` of the corresponding block. + +For instance, this allows an application's light-client to verify proofs of absence in the application state, something which is much less efficient to do using the block hash. + +Some applications (eg. Ethereum, Cosmos-SDK) have multiple "levels" of Merkle trees, where the leaves of one tree are the root hashes of others.
To support this, and the general variability in Merkle proofs, the `ResponseQuery.Proof` has some minimal structure: + +```protobuf +message ProofOps { + repeated ProofOp ops = 1; +} + +message ProofOp { + string type = 1; + bytes key = 2; + bytes data = 3; +} +``` + +Each `ProofOp` contains a proof for a single key in a single Merkle tree, of the specified `type`. This allows ABCI to support many different kinds of Merkle trees, encoding formats, and proofs (eg. of presence and absence) just by varying the `type`. The `data` contains the actual encoded proof, encoded according to the `type`. When verifying the full proof, the root hash for one ProofOp is the value being verified for the next ProofOp in the list. The root hash of the final ProofOp in the list should match the `AppHash` being verified against. + +### Peer Filtering + +When Tendermint connects to a peer, it sends two queries to the ABCI application using the following paths, with no additional data: + +- `/p2p/filter/addr/<IP:PORT>`, where `<IP:PORT>` denotes the IP address and the port of the connection +- `p2p/filter/id/<ID>`, where `<ID>` is the peer node ID (ie. the pubkey.Address() for the peer's PubKey) + +If either of these queries return a non-zero ABCI code, Tendermint will refuse to connect to the peer. + +### Paths + +Queries are directed at paths, and may optionally include additional data. + +The expectation is for there to be some number of high level paths differentiating concerns, like `/p2p`, `/store`, and `/app`. Currently, Tendermint only uses `/p2p`, for filtering peers. For more advanced use, see the implementation of [Query in the Cosmos-SDK](https://github.com/cosmos/cosmos-sdk/blob/v0.23.1/baseapp/baseapp.go#L333). + +## Crash Recovery + +On startup, Tendermint calls the `Info` method on the Info Connection to get the latest committed state of the app. The app MUST return information consistent with the last block it successfully completed Commit for. + +If the app successfully committed block H, then `last_block_height = H` and `last_block_app_hash = <app hash of block H>`. If the app failed during the Commit of block H, then `last_block_height = H-1` and `last_block_app_hash = <app hash of block H-1>`. + +We now distinguish three heights, and describe how Tendermint syncs itself with the app. + +```md +storeBlockHeight = height of the last block Tendermint saw a commit for +stateBlockHeight = height of the last block for which Tendermint completed all + block processing and saved all ABCI results to disk +appBlockHeight = height of the last block for which ABCI app successfully + completed Commit + +``` + +Note we always have `storeBlockHeight >= stateBlockHeight` and `storeBlockHeight >= appBlockHeight`. Note also Tendermint never calls Commit on an ABCI app twice for the same height. + +The procedure is as follows. + +First, some simple start conditions: + +If `appBlockHeight == 0`, then call InitChain. + +If `storeBlockHeight == 0`, we're done. + +Now, some sanity checks: + +If `storeBlockHeight < appBlockHeight`, error +If `storeBlockHeight < stateBlockHeight`, panic +If `storeBlockHeight > stateBlockHeight+1`, panic + +Now, the meat: + +If `storeBlockHeight == stateBlockHeight && appBlockHeight < storeBlockHeight`, replay all blocks in full from `appBlockHeight` to `storeBlockHeight`. This happens if we completed processing the block, but the app forgot its height. + +If `storeBlockHeight == stateBlockHeight && appBlockHeight == storeBlockHeight`, we're done. This happens if we crashed at an opportune spot.
+ +If `storeBlockHeight == stateBlockHeight+1` +This happens if we started processing the block but didn't finish. + +If `appBlockHeight < stateBlockHeight` + replay all blocks in full from `appBlockHeight` to `storeBlockHeight-1`, + and replay the block at `storeBlockHeight` using the WAL. +This happens if the app forgot the last block it committed. + +If `appBlockHeight == stateBlockHeight`, + replay the last block (storeBlockHeight) in full. +This happens if we crashed before the app finished Commit. + +If `appBlockHeight == storeBlockHeight` + update the state using the saved ABCI responses but don't run the block against the real app. +This happens if we crashed after the app finished Commit but before Tendermint saved the state. + +## State Sync + +A new node joining the network can simply join consensus at the genesis height and replay all historical blocks until it is caught up. However, for large chains this can take a significant amount of time, often on the order of days or weeks. + +State sync is an alternative mechanism for bootstrapping a new node, where it fetches a snapshot of the state machine at a given height and restores it. Depending on the application, this can be several orders of magnitude faster than replaying blocks. + +Note that state sync does not currently backfill historical blocks, so the node will have a truncated block history - users are advised to consider the broader network implications of this in terms of block availability and auditability. This functionality may be added in the future. + +For details on the specific ABCI calls and types, see the [methods and types section](abci.md). + +### Taking Snapshots + +Applications that want to support state syncing must take state snapshots at regular intervals. How this is accomplished is entirely up to the application. A snapshot consists of some metadata and a set of binary chunks in an arbitrary format: + +- `Height (uint64)`: The height at which the snapshot is taken. It must be taken after the given height has been committed, and must not contain data from any later heights. + +- `Format (uint32)`: An arbitrary snapshot format identifier. This can be used to version snapshot formats, e.g. to switch from Protobuf to MessagePack for serialization. The application can use this when restoring to choose whether to accept or reject a snapshot. + +- `Chunks (uint32)`: The number of chunks in the snapshot. Each chunk contains arbitrary binary data, and should be less than 16 MB; 10 MB is a good starting point. + +- `Hash ([]byte)`: An arbitrary hash of the snapshot. This is used to check whether a snapshot is the same across nodes when downloading chunks. + +- `Metadata ([]byte)`: Arbitrary snapshot metadata, e.g. chunk hashes for verification or any other necessary info. + +For a snapshot to be considered the same across nodes, all of these fields must be identical. When sent across the network, snapshot metadata messages are limited to 4 MB. + +When a new node is running state sync and discovering snapshots, Tendermint will query an existing application via the ABCI `ListSnapshots` method to discover available snapshots, and load binary snapshot chunks via `LoadSnapshotChunk`. The application is free to choose how to implement this and which formats to use, but must provide the following guarantees: + +- **Consistent:** A snapshot must be taken at a single isolated height, unaffected by concurrent writes.
This can be accomplished by using a data store that supports ACID transactions with snapshot isolation. + +- **Asynchronous:** Taking a snapshot can be time-consuming, so it must not halt chain progress, for example by running in a separate thread. + +- **Deterministic:** A snapshot taken at the same height in the same format must be identical (at the byte level) across nodes, including all metadata. This ensures good availability of chunks, and that they fit together across nodes. + +A very basic approach might be to use a datastore with MVCC transactions (such as RocksDB), start a transaction immediately after block commit, and spawn a new thread which is passed the transaction handle. This thread can then export all data items, serialize them using e.g. Protobuf, hash the byte stream, split it into chunks, and store the chunks in the file system along with some metadata - all while the blockchain is applying new blocks in parallel. + +A more advanced approach might include incremental verification of individual chunks against the chain app hash, parallel or batched exports, compression, and so on. + +Old snapshots should be removed after some time - generally only the last two snapshots are needed (to prevent the last one from being removed while a node is restoring it). + +### Bootstrapping a Node + +An empty node can be state synced by setting the configuration option `statesync.enabled = true`. The node also needs the chain genesis file for basic chain info, and configuration for light client verification of the restored snapshot: a set of Tendermint RPC servers, and a trusted header hash and corresponding height from a trusted source, via the `statesync` configuration section. + +Once started, the node will connect to the P2P network and begin discovering snapshots. These will be offered to the local application via the `OfferSnapshot` ABCI method. Once a snapshot is accepted, Tendermint will fetch and apply the snapshot chunks. After all chunks have been successfully applied, Tendermint verifies the app's `AppHash` against the chain using the light client, then switches the node to normal consensus operation. + +#### Snapshot Discovery + +When the empty node joins the P2P network, it asks all peers to report snapshots via the `ListSnapshots` ABCI call (limited to 10 per node). After some time, the node picks the most suitable snapshot (generally prioritized by height, format, and number of peers), and offers it to the application via `OfferSnapshot`. The application can choose a number of responses, including accepting or rejecting it, rejecting the offered format, rejecting the peer who sent it, and so on. Tendermint will keep discovering and offering snapshots until one is accepted or the application aborts. + +#### Snapshot Restoration + +Once a snapshot has been accepted via `OfferSnapshot`, Tendermint begins downloading chunks from any peers that have the same snapshot (i.e. that have identical metadata fields). Chunks are spooled in a temporary directory, and then given to the application in sequential order via `ApplySnapshotChunk` until all chunks have been accepted. + +The method for restoring snapshot chunks is entirely up to the application. + +During restoration, the application can respond to `ApplySnapshotChunk` with instructions for how to continue.
This will typically be to accept the chunk and await the next one, but it can also ask for chunks to be refetched (either the current one or any number of previous ones), P2P peers to be banned, snapshots to be rejected or retried, and a number of other responses - see the ABCI reference for details. + +If Tendermint fails to fetch a chunk after some time, it will reject the snapshot and try a different one via `OfferSnapshot` - the application can choose whether it wants to support restarting restoration, or simply abort with an error. + +#### Snapshot Verification + +Once all chunks have been accepted, Tendermint issues an `Info` ABCI call to retrieve the `LastBlockAppHash`. This is compared with the trusted app hash from the chain, retrieved and verified using the light client. Tendermint also checks that `LastBlockHeight` corresponds to the height of the snapshot. + +This verification ensures that an application is valid before joining the network. However, the snapshot restoration may take a long time to complete, so applications may want to employ additional verification during the restore to detect failures early. This might e.g. include incremental verification of each chunk against the app hash (using bundled Merkle proofs), checksums to protect against data corruption by the disk or network, and so on. However, it is important to note that the only trusted information available is the app hash, and all other snapshot metadata can be spoofed by adversaries. + +Apps may also want to consider state sync denial-of-service vectors, where adversaries provide invalid or harmful snapshots to prevent nodes from joining the network. The application can counteract this by asking Tendermint to ban peers. As a last resort, node operators can use P2P configuration options to whitelist a set of trusted peers that can provide valid snapshots. + +#### Transition to Consensus + +Once the snapshot has been fully restored, Tendermint gathers additional information necessary for bootstrapping the node (e.g. chain ID, consensus parameters, validator sets, and block headers) from the genesis file and light client RPC servers. It also fetches and records the `AppVersion` from the ABCI application. + +Once the state machine has been restored and Tendermint has gathered this additional information, it transitions to block sync (if enabled) to fetch any remaining blocks up to the chain head, and then transitions to regular consensus operation. At this point the node operates like any other node, apart from having a truncated block history at the height of the restored snapshot. diff --git a/spec/abci/client-server.md b/spec/abci/client-server.md new file mode 100644 index 0000000000..912270e660 --- /dev/null +++ b/spec/abci/client-server.md @@ -0,0 +1,113 @@ +--- +order: 3 +title: Client and Server +--- + +# Client and Server + +This section is for those looking to implement their own ABCI Server, perhaps in a new programming language. + +You are expected to have read [ABCI Methods and Types](./abci.md) and [ABCI Applications](./apps.md). + +## Message Protocol + +The message protocol consists of pairs of requests and responses defined in the [protobuf file](../../proto/tendermint/abci/types.proto). + +Some messages have no fields, while others may include byte-arrays, strings, integers, or custom protobuf types. + +For more details on protobuf, see the [documentation](https://developers.google.com/protocol-buffers/docs/overview).
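+ +As a concrete (and hedged) illustration of these request/response pairs, the Go ABCI package wraps each method-specific message in a top-level `Request` oneof before it is written to the wire. The helper names below (`ToRequestEcho`, `WriteMessage`) are taken from the Go implementation and should be treated as illustrative rather than normative: + +```go +package example + +import ( + "net" + + types "github.com/tendermint/tendermint/abci/types" +) + +// sendEcho wraps a RequestEcho in the top-level Request oneof and writes it, +// length-prefixed, to the connection. The server must answer with a Response +// wrapping ResponseEcho, in the same order requests were sent. +func sendEcho(conn net.Conn, msg string) error { + req := types.ToRequestEcho(msg) + return types.WriteMessage(req, conn) +} +```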
+ +For each request, a server should respond with the corresponding response, where the order of requests is preserved in the order of responses. + +## Server Implementations + +To use ABCI in your programming language of choice, there must be an ABCI server in that language. Tendermint supports three implementations of the ABCI, written in Go: + +- In-process ([Golang](https://github.com/tendermint/tendermint/tree/master/abci), [Rust](https://github.com/tendermint/rust-abci)) +- ABCI-socket +- GRPC + +The latter two can be tested using the `abci-cli` by setting the `--abci` flag appropriately (ie. to `socket` or `grpc`). + +See examples, in various stages of maintenance, in [Go](https://github.com/tendermint/tendermint/tree/master/abci/server), [JavaScript](https://github.com/tendermint/js-abci), [C++](https://github.com/mdyring/cpp-tmsp), and [Java](https://github.com/jTendermint/jabci). + +### In Process + +The simplest implementation uses function calls within Golang. This means ABCI applications written in Golang can be compiled with Tendermint Core and run as a single binary. + +### GRPC + +If GRPC is available in your language, this is the easiest approach, though it will have significant performance overhead. + +To get started with GRPC, copy in the [protobuf file](../../proto/tendermint/abci/types.proto) and compile it using the GRPC plugin for your language. For instance, for golang, the command is `protoc --go_out=plugins=grpc:. types.proto`. See the [grpc documentation for more details](http://www.grpc.io/docs/). `protoc` will autogenerate all the necessary code for ABCI client and server in your language, including whatever interface your application must satisfy to be used by the ABCI server for handling requests. + +Note the length-prefixing used in the socket implementation (TSP) does not apply for GRPC. + +### TSP + +Tendermint Socket Protocol is an asynchronous, raw socket server which provides ordered message passing over unix or tcp. Messages are serialized using Protobuf3 and length-prefixed with a [signed Varint](https://developers.google.com/protocol-buffers/docs/encoding?csw=1#signed-integers). + +If GRPC is not available in your language, or you require higher performance, or otherwise enjoy programming, you may implement your own ABCI server using the Tendermint Socket Protocol. The first step is still to auto-generate the relevant data types and codec in your language using `protoc`. In addition to being proto3 encoded, messages coming over the socket are length-prefixed to facilitate use as a streaming protocol. proto3 doesn't have an official length-prefix standard, so we use our own. The first byte in the prefix represents the length of the Big Endian encoded length. The remaining bytes in the prefix are the Big Endian encoded length. + +For example, if the proto3 encoded ABCI message is 0xDEADBEEF (4 bytes), the length-prefixed message is 0x0104DEADBEEF. If the proto3 encoded ABCI message is 65535 bytes long, the length-prefixed message would be like 0x02FFFF.... + +The benefit of using this `varint` encoding over the old version (where integers were encoded as `<len of len><big endian len>`) is that it is the standard way to encode integers in Protobuf. It is also generally shorter. + +As noted above, this prefixing does not apply for GRPC. + +An ABCI server must also be able to support multiple connections, as Tendermint uses four connections. + +### Async vs Sync + +The main ABCI server (ie. non-GRPC) provides ordered asynchronous messages.
+This is useful for DeliverTx and CheckTx, since it allows Tendermint to forward transactions to the app before it's finished processing previous ones. + +Thus, DeliverTx and CheckTx messages are sent asynchronously, while all other messages are sent synchronously. + +## Client + +There are currently two use-cases for an ABCI client. One is a testing tool, as in the `abci-cli`, which allows ABCI requests to be sent via command line. The other is a consensus engine, such as Tendermint Core, which makes requests to the application every time a new transaction is received or a block is committed. + +It is unlikely that you will need to implement a client. For details of our client, see [here](https://github.com/tendermint/tendermint/tree/master/abci/client). diff --git a/spec/consensus/bft-time.md b/spec/consensus/bft-time.md new file mode 100644 index 0000000000..cec3b91ab9 --- /dev/null +++ b/spec/consensus/bft-time.md @@ -0,0 +1,55 @@ +--- +order: 2 +--- +# BFT Time + +Tendermint provides a deterministic, Byzantine fault-tolerant, source of time. Time in Tendermint is defined with the Time field of the block header. + +It satisfies the following properties: + +- Time Monotonicity: Time is monotonically increasing, i.e., given a header H1 for height h1 and a header H2 for height `h2 = h1 + 1`, `H1.Time < H2.Time`. +- Time Validity: Given a set of Commit votes that forms the `block.LastCommit` field, a range of valid values for the Time field of the block header is defined only by Precommit messages (from the LastCommit field) sent by correct processes, i.e., a faulty process cannot arbitrarily increase the Time value. + +In the context of Tendermint, time is of type int64 and denotes UNIX time in milliseconds, i.e., corresponds to the number of milliseconds since January 1, 1970. Before defining rules that need to be enforced by the Tendermint consensus protocol, so that the properties above hold, we introduce the following definition: + +- the median of a Commit is equal to the median of the `Vote.Time` fields of the `Vote` messages, where the value of `Vote.Time` is counted a number of times proportional to the process's voting power. As voting power in Tendermint is not uniform (one process, one vote), a vote message effectively counts as a number of identical votes equal to the voting power of the process that cast it. + +Let's consider the following example: + +- we have four processes p1, p2, p3 and p4, with the following voting power distribution (p1, 23), (p2, 27), (p3, 10) and (p4, 10). The total voting power is 70 (`N = 3f+1`, where `N` is the total voting power, and `f` is the maximum voting power of the faulty processes), so we assume that the faulty processes have at most 23 of voting power. Furthermore, we have the following vote messages in some LastCommit field (we ignore all fields except the Time field): + - (p1, 100), (p2, 98), (p3, 1000), (p4, 500). We assume that p3 and p4 are faulty processes. Let's assume that the `block.LastCommit` message contains votes of processes p2, p3 and p4. The median is then chosen the following way: the value 98 is counted 27 times, the value 1000 is counted 10 times and the value 500 is also counted 10 times. So the median value will be the value 98. No matter what set of messages with at least `2f+1` voting power we choose, the median value will always be between the values sent by correct processes.
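+ +The weighted median computation can be sketched in Go. This is an illustration of the rule above with a simplified `Vote` type, not the implementation Tendermint uses: + +```go +package main + +import "sort" + +// Vote is a simplified stand-in carrying only what the median needs. +type Vote struct { + Time int64 // Vote.Time, Unix milliseconds + Power int64 // voting power of the sender +} + +// weightedMedian counts each vote's Time a number of times equal to the +// sender's voting power and returns the median of that weighted multiset. +func weightedMedian(votes []Vote) int64 { + sort.Slice(votes, func(i, j int) bool { return votes[i].Time < votes[j].Time }) + var total int64 + for _, v := range votes { + total += v.Power + } + idx := total / 2 // 0-based index of the median element + for _, v := range votes { + if idx < v.Power { + return v.Time // the median falls within this vote's weight + } + idx -= v.Power + } + return 0 // unreachable for a non-empty vote set +} +``` + +With the example above - votes (98, 27), (500, 10), (1000, 10) - the total power is 47, the median index is 23, and the function returns 98, matching the text.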
+
+We ensure the Time Monotonicity and Time Validity properties by the following rules:
+
+- let rs denote the `RoundState` (consensus internal state) of some process. Then
+`rs.ProposalBlock.Header.Time == median(rs.LastCommit) &&
+rs.Proposal.Timestamp == rs.ProposalBlock.Header.Time`.
+
+- Furthermore, when creating the `vote` message, the following rules for determining the `vote.Time` field should hold:
+
+    - if `rs.LockedBlock` is defined then
+    `vote.Time = max(rs.LockedBlock.Timestamp + time.Millisecond, time.Now())`, where `time.Now()`
+    denotes the local Unix time in milliseconds,
+
+    - else if `rs.Proposal` is defined then
+    `vote.Time = max(rs.Proposal.Timestamp + time.Millisecond, time.Now())`,
+
+    - otherwise, `vote.Time = time.Now()`. In this case the vote is for `nil`, so it is not taken into account for
+    the timestamp of the next block.
diff --git a/spec/consensus/consensus-paper/IEEEtran.bst b/spec/consensus/consensus-paper/IEEEtran.bst
new file mode 100644
index 0000000000..53fbc030aa
--- /dev/null
+++ b/spec/consensus/consensus-paper/IEEEtran.bst
@@ -0,0 +1,2417 @@
+%%
+%% IEEEtran.bst
+%% BibTeX Bibliography Style file for IEEE Journals and Conferences (unsorted)
+%% Version 1.12 (2007/01/11)
+%%
+%% Copyright (c) 2003-2007 Michael Shell
+%%
+%% Original starting code base and algorithms obtained from the output of
+%% Patrick W. Daly's makebst package as well as from prior versions of
+%% IEEE BibTeX styles:
+%%
+%% 1. Howard Trickey and Oren Patashnik's ieeetr.bst (1985/1988)
+%% 2. Silvano Balemi and Richard H. Roy's IEEEbib.bst (1993)
+%%
+%% Support sites:
+%% http://www.michaelshell.org/tex/ieeetran/
+%% http://www.ctan.org/tex-archive/macros/latex/contrib/IEEEtran/
+%% and/or
+%% http://www.ieee.org/
+%%
+%% For use with BibTeX version 0.99a or later
+%%
+%% This is a numerical citation style.
+%%
+%%*************************************************************************
+%% Legal Notice:
+%% This code is offered as-is without any warranty either expressed or
+%% implied; without even the implied warranty of MERCHANTABILITY or
+%% FITNESS FOR A PARTICULAR PURPOSE!
+%% User assumes all risk.
+%% In no event shall IEEE or any contributor to this code be liable for
+%% any damages or losses, including, but not limited to, incidental,
+%% consequential, or any other damages, resulting from the use or misuse
+%% of any information contained here.
+%%
+%% All comments are the opinions of their respective authors and are not
+%% necessarily endorsed by the IEEE.
+%%
+%% This work is distributed under the LaTeX Project Public License (LPPL)
+%% ( http://www.latex-project.org/ ) version 1.3, and may be freely used,
+%% distributed and modified. A copy of the LPPL, version 1.3, is included
+%% in the base LaTeX documentation of all distributions of LaTeX released
+%% 2003/12/01 or later.
+%% Retain all contribution notices and credits.
+%% ** Modified files should be clearly indicated as such, including  **
+%% ** renaming them and changing author support contact information. **
+%%
+%% File list of work: IEEEabrv.bib, IEEEfull.bib, IEEEexample.bib,
+%%                    IEEEtran.bst, IEEEtranS.bst, IEEEtranSA.bst,
+%%                    IEEEtranN.bst, IEEEtranSN.bst, IEEEtran_bst_HOWTO.pdf
+%%*************************************************************************
+%
+%
+% Changelog:
+%
+% 1.00 (2002/08/13) Initial release
+%
+% 1.10 (2002/09/27)
+% 1. Corrected minor bug for improperly formed warning message when a
+%    book was not given a title. Thanks to Ming Kin Lai for reporting this.
+% 2. 
Added support for CTLname_format_string and CTLname_latex_cmd fields +% in the BST control entry type. +% +% 1.11 (2003/04/02) +% 1. Fixed bug with URLs containing underscores when using url.sty. Thanks +% to Ming Kin Lai for reporting this. +% +% 1.12 (2007/01/11) +% 1. Fixed bug with unwanted comma before "et al." when an entry contained +% more than two author names. Thanks to Pallav Gupta for reporting this. +% 2. Fixed bug with anomalous closing quote in tech reports that have a +% type, but without a number or address. Thanks to Mehrdad Mirreza for +% reporting this. +% 3. Use braces in \providecommand in begin.bib to better support +% latex2html. TeX style length assignments OK with recent versions +% of latex2html - 1.71 (2002/2/1) or later is strongly recommended. +% Use of the language field still causes trouble with latex2html. +% Thanks to Federico Beffa for reporting this. +% 4. Added IEEEtran.bst ID and version comment string to .bbl output. +% 5. Provide a \BIBdecl hook that allows the user to execute commands +% just prior to the first entry. +% 6. Use default urlstyle (is using url.sty) of "same" rather than rm to +% better work with a wider variety of bibliography styles. +% 7. Changed month abbreviations from Sept., July and June to Sep., Jul., +% and Jun., respectively, as IEEE now does. Thanks to Moritz Borgmann +% for reporting this. +% 8. Control entry types should not be considered when calculating longest +% label width. +% 9. Added alias www for electronic/online. +% 10. Added CTLname_url_prefix control entry type. + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%% DEFAULTS FOR THE CONTROLS OF THE BST STYLE %% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +% These are the defaults for the user adjustable controls. The values used +% here can be overridden by the user via IEEEtranBSTCTL entry type. + +% NOTE: The recommended LaTeX command to invoke a control entry type is: +% +%\makeatletter +%\def\bstctlcite{\@ifnextchar[{\@bstctlcite}{\@bstctlcite[@auxout]}} +%\def\@bstctlcite[#1]#2{\@bsphack +% \@for\@citeb:=#2\do{% +% \edef\@citeb{\expandafter\@firstofone\@citeb}% +% \if@filesw\immediate\write\csname #1\endcsname{\string\citation{\@citeb}}\fi}% +% \@esphack} +%\makeatother +% +% It is called at the start of the document, before the first \cite, like: +% \bstctlcite{IEEEexample:BSTcontrol} +% +% IEEEtran.cls V1.6 and later does provide this command. + + + +% #0 turns off the display of the number for articles. +% #1 enables +FUNCTION {default.is.use.number.for.article} { #1 } + + +% #0 turns off the display of the paper and type fields in @inproceedings. +% #1 enables +FUNCTION {default.is.use.paper} { #1 } + + +% #0 turns off the forced use of "et al." +% #1 enables +FUNCTION {default.is.forced.et.al} { #0 } + +% The maximum number of names that can be present beyond which an "et al." +% usage is forced. Be sure that num.names.shown.with.forced.et.al (below) +% is not greater than this value! +% Note: There are many instances of references in IEEE journals which have +% a very large number of authors as well as instances in which "et al." is +% used profusely. +FUNCTION {default.max.num.names.before.forced.et.al} { #10 } + +% The number of names that will be shown with a forced "et al.". +% Must be less than or equal to max.num.names.before.forced.et.al +FUNCTION {default.num.names.shown.with.forced.et.al} { #1 } + + +% #0 turns off the alternate interword spacing for entries with URLs. 
+% #1 enables +FUNCTION {default.is.use.alt.interword.spacing} { #1 } + +% If alternate interword spacing for entries with URLs is enabled, this is +% the interword spacing stretch factor that will be used. For example, the +% default "4" here means that the interword spacing in entries with URLs can +% stretch to four times normal. Does not have to be an integer. Note that +% the value specified here can be overridden by the user in their LaTeX +% code via a command such as: +% "\providecommand\BIBentryALTinterwordstretchfactor{1.5}" in addition to +% that via the IEEEtranBSTCTL entry type. +FUNCTION {default.ALTinterwordstretchfactor} { "4" } + + +% #0 turns off the "dashification" of repeated (i.e., identical to those +% of the previous entry) names. IEEE normally does this. +% #1 enables +FUNCTION {default.is.dash.repeated.names} { #1 } + + +% The default name format control string. +FUNCTION {default.name.format.string}{ "{f.~}{vv~}{ll}{, jj}" } + + +% The default LaTeX font command for the names. +FUNCTION {default.name.latex.cmd}{ "" } + + +% The default URL prefix. +FUNCTION {default.name.url.prefix}{ "[Online]. Available:" } + + +% Other controls that cannot be accessed via IEEEtranBSTCTL entry type. + +% #0 turns off the terminal startup banner/completed message so as to +% operate more quietly. +% #1 enables +FUNCTION {is.print.banners.to.terminal} { #1 } + + + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%% FILE VERSION AND BANNER %% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +FUNCTION{bst.file.version} { "1.12" } +FUNCTION{bst.file.date} { "2007/01/11" } +FUNCTION{bst.file.website} { "http://www.michaelshell.org/tex/ieeetran/bibtex/" } + +FUNCTION {banner.message} +{ is.print.banners.to.terminal + { "-- IEEEtran.bst version" " " * bst.file.version * + " (" * bst.file.date * ") " * "by Michael Shell." * + top$ + "-- " bst.file.website * + top$ + "-- See the " quote$ * "IEEEtran_bst_HOWTO.pdf" * quote$ * " manual for usage information." * + top$ + } + { skip$ } + if$ +} + +FUNCTION {completed.message} +{ is.print.banners.to.terminal + { "" + top$ + "Done." + top$ + } + { skip$ } + if$ +} + + + + +%%%%%%%%%%%%%%%%%%%%%% +%% STRING CONSTANTS %% +%%%%%%%%%%%%%%%%%%%%%% + +FUNCTION {bbl.and}{ "and" } +FUNCTION {bbl.etal}{ "et~al." } +FUNCTION {bbl.editors}{ "eds." } +FUNCTION {bbl.editor}{ "ed." } +FUNCTION {bbl.edition}{ "ed." } +FUNCTION {bbl.volume}{ "vol." } +FUNCTION {bbl.of}{ "of" } +FUNCTION {bbl.number}{ "no." } +FUNCTION {bbl.in}{ "in" } +FUNCTION {bbl.pages}{ "pp." } +FUNCTION {bbl.page}{ "p." } +FUNCTION {bbl.chapter}{ "ch." } +FUNCTION {bbl.paper}{ "paper" } +FUNCTION {bbl.part}{ "pt." } +FUNCTION {bbl.patent}{ "Patent" } +FUNCTION {bbl.patentUS}{ "U.S." } +FUNCTION {bbl.revision}{ "Rev." } +FUNCTION {bbl.series}{ "ser." } +FUNCTION {bbl.standard}{ "Std." } +FUNCTION {bbl.techrep}{ "Tech. Rep." } +FUNCTION {bbl.mthesis}{ "Master's thesis" } +FUNCTION {bbl.phdthesis}{ "Ph.D. dissertation" } +FUNCTION {bbl.st}{ "st" } +FUNCTION {bbl.nd}{ "nd" } +FUNCTION {bbl.rd}{ "rd" } +FUNCTION {bbl.th}{ "th" } + + +% This is the LaTeX spacer that is used when a larger than normal space +% is called for (such as just before the address:publisher). +FUNCTION {large.space} { "\hskip 1em plus 0.5em minus 0.4em\relax " } + +% The LaTeX code for dashes that are used to represent repeated names. +% Note: Some older IEEE journals used something like +% "\rule{0.275in}{0.5pt}\," which is fairly thick and runs right along +% the baseline. However, IEEE now uses a thinner, above baseline, +% six dash long sequence. 
+FUNCTION {repeated.name.dashes} { "------" } + + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%% PREDEFINED STRING MACROS %% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +MACRO {jan} {"Jan."} +MACRO {feb} {"Feb."} +MACRO {mar} {"Mar."} +MACRO {apr} {"Apr."} +MACRO {may} {"May"} +MACRO {jun} {"Jun."} +MACRO {jul} {"Jul."} +MACRO {aug} {"Aug."} +MACRO {sep} {"Sep."} +MACRO {oct} {"Oct."} +MACRO {nov} {"Nov."} +MACRO {dec} {"Dec."} + + + +%%%%%%%%%%%%%%%%%% +%% ENTRY FIELDS %% +%%%%%%%%%%%%%%%%%% + +ENTRY + { address + assignee + author + booktitle + chapter + day + dayfiled + edition + editor + howpublished + institution + intype + journal + key + language + month + monthfiled + nationality + note + number + organization + pages + paper + publisher + school + series + revision + title + type + url + volume + year + yearfiled + CTLuse_article_number + CTLuse_paper + CTLuse_forced_etal + CTLmax_names_forced_etal + CTLnames_show_etal + CTLuse_alt_spacing + CTLalt_stretch_factor + CTLdash_repeated_names + CTLname_format_string + CTLname_latex_cmd + CTLname_url_prefix + } + {} + { label } + + + + +%%%%%%%%%%%%%%%%%%%%%%% +%% INTEGER VARIABLES %% +%%%%%%%%%%%%%%%%%%%%%%% + +INTEGERS { prev.status.punct this.status.punct punct.std + punct.no punct.comma punct.period + prev.status.space this.status.space space.std + space.no space.normal space.large + prev.status.quote this.status.quote quote.std + quote.no quote.close + prev.status.nline this.status.nline nline.std + nline.no nline.newblock + status.cap cap.std + cap.no cap.yes} + +INTEGERS { longest.label.width multiresult nameptr namesleft number.label numnames } + +INTEGERS { is.use.number.for.article + is.use.paper + is.forced.et.al + max.num.names.before.forced.et.al + num.names.shown.with.forced.et.al + is.use.alt.interword.spacing + is.dash.repeated.names} + + +%%%%%%%%%%%%%%%%%%%%%% +%% STRING VARIABLES %% +%%%%%%%%%%%%%%%%%%%%%% + +STRINGS { bibinfo + longest.label + oldname + s + t + ALTinterwordstretchfactor + name.format.string + name.latex.cmd + name.url.prefix} + + + + +%%%%%%%%%%%%%%%%%%%%%%%%% +%% LOW LEVEL FUNCTIONS %% +%%%%%%%%%%%%%%%%%%%%%%%%% + +FUNCTION {initialize.controls} +{ default.is.use.number.for.article 'is.use.number.for.article := + default.is.use.paper 'is.use.paper := + default.is.forced.et.al 'is.forced.et.al := + default.max.num.names.before.forced.et.al 'max.num.names.before.forced.et.al := + default.num.names.shown.with.forced.et.al 'num.names.shown.with.forced.et.al := + default.is.use.alt.interword.spacing 'is.use.alt.interword.spacing := + default.is.dash.repeated.names 'is.dash.repeated.names := + default.ALTinterwordstretchfactor 'ALTinterwordstretchfactor := + default.name.format.string 'name.format.string := + default.name.latex.cmd 'name.latex.cmd := + default.name.url.prefix 'name.url.prefix := +} + + +% This IEEEtran.bst features a very powerful and flexible mechanism for +% controlling the capitalization, punctuation, spacing, quotation, and +% newlines of the formatted entry fields. (Note: IEEEtran.bst does not need +% or use the newline/newblock feature, but it has been implemented for +% possible future use.) The output states of IEEEtran.bst consist of +% multiple independent attributes and, as such, can be thought of as being +% vectors, rather than the simple scalar values ("before.all", +% "mid.sentence", etc.) used in most other .bst files. +% +% The more flexible and complex design used here was motivated in part by +% IEEE's rather unusual bibliography style. 
For example, IEEE ends the +% previous field item with a period and large space prior to the publisher +% address; the @electronic entry types use periods as inter-item punctuation +% rather than the commas used by the other entry types; and URLs are never +% followed by periods even though they are the last item in the entry. +% Although it is possible to accommodate these features with the conventional +% output state system, the seemingly endless exceptions make for convoluted, +% unreliable and difficult to maintain code. +% +% IEEEtran.bst's output state system can be easily understood via a simple +% illustration of two most recently formatted entry fields (on the stack): +% +% CURRENT_ITEM +% "PREVIOUS_ITEM +% +% which, in this example, is to eventually appear in the bibliography as: +% +% "PREVIOUS_ITEM," CURRENT_ITEM +% +% It is the job of the output routine to take the previous item off of the +% stack (while leaving the current item at the top of the stack), apply its +% trailing punctuation (including closing quote marks) and spacing, and then +% to write the result to BibTeX's output buffer: +% +% "PREVIOUS_ITEM," +% +% Punctuation (and spacing) between items is often determined by both of the +% items rather than just the first one. The presence of quotation marks +% further complicates the situation because, in standard English, trailing +% punctuation marks are supposed to be contained within the quotes. +% +% IEEEtran.bst maintains two output state (aka "status") vectors which +% correspond to the previous and current (aka "this") items. Each vector +% consists of several independent attributes which track punctuation, +% spacing, quotation, and newlines. Capitalization status is handled by a +% separate scalar because the format routines, not the output routine, +% handle capitalization and, therefore, there is no need to maintain the +% capitalization attribute for both the "previous" and "this" items. +% +% When a format routine adds a new item, it copies the current output status +% vector to the previous output status vector and (usually) resets the +% current (this) output status vector to a "standard status" vector. Using a +% "standard status" vector in this way allows us to redefine what we mean by +% "standard status" at the start of each entry handler and reuse the same +% format routines under the various inter-item separation schemes. For +% example, the standard status vector for the @book entry type may use +% commas for item separators, while the @electronic type may use periods, +% yet both entry handlers exploit many of the exact same format routines. +% +% Because format routines have write access to the output status vector of +% the previous item, they can override the punctuation choices of the +% previous format routine! Therefore, it becomes trivial to implement rules +% such as "Always use a period and a large space before the publisher." By +% pushing the generation of the closing quote mark to the output routine, we +% avoid all the problems caused by having to close a quote before having all +% the information required to determine what the punctuation should be. +% +% The IEEEtran.bst output state system can easily be expanded if needed. +% For instance, it is easy to add a "space.tie" attribute value if the +% bibliography rules mandate that two items have to be joined with an +% unbreakable space. 
+ +FUNCTION {initialize.status.constants} +{ #0 'punct.no := + #1 'punct.comma := + #2 'punct.period := + #0 'space.no := + #1 'space.normal := + #2 'space.large := + #0 'quote.no := + #1 'quote.close := + #0 'cap.no := + #1 'cap.yes := + #0 'nline.no := + #1 'nline.newblock := +} + +FUNCTION {std.status.using.comma} +{ punct.comma 'punct.std := + space.normal 'space.std := + quote.no 'quote.std := + nline.no 'nline.std := + cap.no 'cap.std := +} + +FUNCTION {std.status.using.period} +{ punct.period 'punct.std := + space.normal 'space.std := + quote.no 'quote.std := + nline.no 'nline.std := + cap.yes 'cap.std := +} + +FUNCTION {initialize.prev.this.status} +{ punct.no 'prev.status.punct := + space.no 'prev.status.space := + quote.no 'prev.status.quote := + nline.no 'prev.status.nline := + punct.no 'this.status.punct := + space.no 'this.status.space := + quote.no 'this.status.quote := + nline.no 'this.status.nline := + cap.yes 'status.cap := +} + +FUNCTION {this.status.std} +{ punct.std 'this.status.punct := + space.std 'this.status.space := + quote.std 'this.status.quote := + nline.std 'this.status.nline := +} + +FUNCTION {cap.status.std}{ cap.std 'status.cap := } + +FUNCTION {this.to.prev.status} +{ this.status.punct 'prev.status.punct := + this.status.space 'prev.status.space := + this.status.quote 'prev.status.quote := + this.status.nline 'prev.status.nline := +} + + +FUNCTION {not} +{ { #0 } + { #1 } + if$ +} + +FUNCTION {and} +{ { skip$ } + { pop$ #0 } + if$ +} + +FUNCTION {or} +{ { pop$ #1 } + { skip$ } + if$ +} + + +% convert the strings "yes" or "no" to #1 or #0 respectively +FUNCTION {yes.no.to.int} +{ "l" change.case$ duplicate$ + "yes" = + { pop$ #1 } + { duplicate$ "no" = + { pop$ #0 } + { "unknown boolean " quote$ * swap$ * quote$ * + " in " * cite$ * warning$ + #0 + } + if$ + } + if$ +} + + +% pushes true if the single char string on the stack is in the +% range of "0" to "9" +FUNCTION {is.num} +{ chr.to.int$ + duplicate$ "0" chr.to.int$ < not + swap$ "9" chr.to.int$ > not and +} + +% multiplies the integer on the stack by a factor of 10 +FUNCTION {bump.int.mag} +{ #0 'multiresult := + { duplicate$ #0 > } + { #1 - + multiresult #10 + + 'multiresult := + } + while$ +pop$ +multiresult +} + +% converts a single character string on the stack to an integer +FUNCTION {char.to.integer} +{ duplicate$ + is.num + { chr.to.int$ "0" chr.to.int$ - } + {"noninteger character " quote$ * swap$ * quote$ * + " in integer field of " * cite$ * warning$ + #0 + } + if$ +} + +% converts a string on the stack to an integer +FUNCTION {string.to.integer} +{ duplicate$ text.length$ 'namesleft := + #1 'nameptr := + #0 'numnames := + { nameptr namesleft > not } + { duplicate$ nameptr #1 substring$ + char.to.integer numnames bump.int.mag + + 'numnames := + nameptr #1 + + 'nameptr := + } + while$ +pop$ +numnames +} + + + + +% The output routines write out the *next* to the top (previous) item on the +% stack, adding punctuation and such as needed. Since IEEEtran.bst maintains +% the output status for the top two items on the stack, these output +% routines have to consider the previous output status (which corresponds to +% the item that is being output). Full independent control of punctuation, +% closing quote marks, spacing, and newblock is provided. +% +% "output.nonnull" does not check for the presence of a previous empty +% item. +% +% "output" does check for the presence of a previous empty item and will +% remove an empty item rather than outputing it. 
+% +% "output.warn" is like "output", but will issue a warning if it detects +% an empty item. + +FUNCTION {output.nonnull} +{ swap$ + prev.status.punct punct.comma = + { "," * } + { skip$ } + if$ + prev.status.punct punct.period = + { add.period$ } + { skip$ } + if$ + prev.status.quote quote.close = + { "''" * } + { skip$ } + if$ + prev.status.space space.normal = + { " " * } + { skip$ } + if$ + prev.status.space space.large = + { large.space * } + { skip$ } + if$ + write$ + prev.status.nline nline.newblock = + { newline$ "\newblock " write$ } + { skip$ } + if$ +} + +FUNCTION {output} +{ duplicate$ empty$ + 'pop$ + 'output.nonnull + if$ +} + +FUNCTION {output.warn} +{ 't := + duplicate$ empty$ + { pop$ "empty " t * " in " * cite$ * warning$ } + 'output.nonnull + if$ +} + +% "fin.entry" is the output routine that handles the last item of the entry +% (which will be on the top of the stack when "fin.entry" is called). + +FUNCTION {fin.entry} +{ this.status.punct punct.no = + { skip$ } + { add.period$ } + if$ + this.status.quote quote.close = + { "''" * } + { skip$ } + if$ +write$ +newline$ +} + + +FUNCTION {is.last.char.not.punct} +{ duplicate$ + "}" * add.period$ + #-1 #1 substring$ "." = +} + +FUNCTION {is.multiple.pages} +{ 't := + #0 'multiresult := + { multiresult not + t empty$ not + and + } + { t #1 #1 substring$ + duplicate$ "-" = + swap$ duplicate$ "," = + swap$ "+" = + or or + { #1 'multiresult := } + { t #2 global.max$ substring$ 't := } + if$ + } + while$ + multiresult +} + +FUNCTION {capitalize}{ "u" change.case$ "t" change.case$ } + +FUNCTION {emphasize} +{ duplicate$ empty$ + { pop$ "" } + { "\emph{" swap$ * "}" * } + if$ +} + +FUNCTION {do.name.latex.cmd} +{ name.latex.cmd + empty$ + { skip$ } + { name.latex.cmd "{" * swap$ * "}" * } + if$ +} + +% IEEEtran.bst uses its own \BIBforeignlanguage command which directly +% invokes the TeX hyphenation patterns without the need of the Babel +% package. Babel does a lot more than switch hyphenation patterns and +% its loading can cause unintended effects in many class files (such as +% IEEEtran.cls). +FUNCTION {select.language} +{ duplicate$ empty$ 'pop$ + { language empty$ 'skip$ + { "\BIBforeignlanguage{" language * "}{" * swap$ * "}" * } + if$ + } + if$ +} + +FUNCTION {tie.or.space.prefix} +{ duplicate$ text.length$ #3 < + { "~" } + { " " } + if$ + swap$ +} + +FUNCTION {get.bbl.editor} +{ editor num.names$ #1 > 'bbl.editors 'bbl.editor if$ } + +FUNCTION {space.word}{ " " swap$ * " " * } + + +% Field Conditioners, Converters, Checkers and External Interfaces + +FUNCTION {empty.field.to.null.string} +{ duplicate$ empty$ + { pop$ "" } + { skip$ } + if$ +} + +FUNCTION {either.or.check} +{ empty$ + { pop$ } + { "can't use both " swap$ * " fields in " * cite$ * warning$ } + if$ +} + +FUNCTION {empty.entry.warn} +{ author empty$ title empty$ howpublished empty$ + month empty$ year empty$ note empty$ url empty$ + and and and and and and + { "all relevant fields are empty in " cite$ * warning$ } + 'skip$ + if$ +} + + +% The bibinfo system provides a way for the electronic parsing/acquisition +% of a bibliography's contents as is done by ReVTeX. For example, a field +% could be entered into the bibliography as: +% \bibinfo{volume}{2} +% Only the "2" would show up in the document, but the LaTeX \bibinfo command +% could do additional things with the information. IEEEtran.bst does provide +% a \bibinfo command via "\providecommand{\bibinfo}[2]{#2}". 
However, it is +% currently not used as the bogus bibinfo functions defined here output the +% entry values directly without the \bibinfo wrapper. The bibinfo functions +% themselves (and the calls to them) are retained for possible future use. +% +% bibinfo.check avoids acting on missing fields while bibinfo.warn will +% issue a warning message if a missing field is detected. Prior to calling +% the bibinfo functions, the user should push the field value and then its +% name string, in that order. + +FUNCTION {bibinfo.check} +{ swap$ duplicate$ missing$ + { pop$ pop$ "" } + { duplicate$ empty$ + { swap$ pop$ } + { swap$ pop$ } + if$ + } + if$ +} + +FUNCTION {bibinfo.warn} +{ swap$ duplicate$ missing$ + { swap$ "missing " swap$ * " in " * cite$ * warning$ pop$ "" } + { duplicate$ empty$ + { swap$ "empty " swap$ * " in " * cite$ * warning$ } + { swap$ pop$ } + if$ + } + if$ +} + + +% IEEE separates large numbers with more than 4 digits into groups of +% three. IEEE uses a small space to separate these number groups. +% Typical applications include patent and page numbers. + +% number of consecutive digits required to trigger the group separation. +FUNCTION {large.number.trigger}{ #5 } + +% For numbers longer than the trigger, this is the blocksize of the groups. +% The blocksize must be less than the trigger threshold, and 2 * blocksize +% must be greater than the trigger threshold (can't do more than one +% separation on the initial trigger). +FUNCTION {large.number.blocksize}{ #3 } + +% What is actually inserted between the number groups. +FUNCTION {large.number.separator}{ "\," } + +% So as to save on integer variables by reusing existing ones, numnames +% holds the current number of consecutive digits read and nameptr holds +% the number that will trigger an inserted space. +FUNCTION {large.number.separate} +{ 't := + "" + #0 'numnames := + large.number.trigger 'nameptr := + { t empty$ not } + { t #-1 #1 substring$ is.num + { numnames #1 + 'numnames := } + { #0 'numnames := + large.number.trigger 'nameptr := + } + if$ + t #-1 #1 substring$ swap$ * + t #-2 global.max$ substring$ 't := + numnames nameptr = + { duplicate$ #1 nameptr large.number.blocksize - substring$ swap$ + nameptr large.number.blocksize - #1 + global.max$ substring$ + large.number.separator swap$ * * + nameptr large.number.blocksize - 'numnames := + large.number.blocksize #1 + 'nameptr := + } + { skip$ } + if$ + } + while$ +} + +% Converts all single dashes "-" to double dashes "--". +FUNCTION {n.dashify} +{ large.number.separate + 't := + "" + { t empty$ not } + { t #1 #1 substring$ "-" = + { t #1 #2 substring$ "--" = not + { "--" * + t #2 global.max$ substring$ 't := + } + { { t #1 #1 substring$ "-" = } + { "-" * + t #2 global.max$ substring$ 't := + } + while$ + } + if$ + } + { t #1 #1 substring$ * + t #2 global.max$ substring$ 't := + } + if$ + } + while$ +} + + +% This function detects entries with names that are identical to that of +% the previous entry and replaces the repeated names with dashes (if the +% "is.dash.repeated.names" user control is nonzero). +FUNCTION {name.or.dash} +{ 's := + oldname empty$ + { s 'oldname := s } + { s oldname = + { is.dash.repeated.names + { repeated.name.dashes } + { s 'oldname := s } + if$ + } + { s 'oldname := s } + if$ + } + if$ +} + +% Converts the number string on the top of the stack to +% "numerical ordinal form" (e.g., "7" to "7th"). There is +% no artificial limit to the upper bound of the numbers as the +% least significant digit always determines the ordinal form. 
+FUNCTION {num.to.ordinal} +{ duplicate$ #-1 #1 substring$ "1" = + { bbl.st * } + { duplicate$ #-1 #1 substring$ "2" = + { bbl.nd * } + { duplicate$ #-1 #1 substring$ "3" = + { bbl.rd * } + { bbl.th * } + if$ + } + if$ + } + if$ +} + +% If the string on the top of the stack begins with a number, +% (e.g., 11th) then replace the string with the leading number +% it contains. Otherwise retain the string as-is. s holds the +% extracted number, t holds the part of the string that remains +% to be scanned. +FUNCTION {extract.num} +{ duplicate$ 't := + "" 's := + { t empty$ not } + { t #1 #1 substring$ + t #2 global.max$ substring$ 't := + duplicate$ is.num + { s swap$ * 's := } + { pop$ "" 't := } + if$ + } + while$ + s empty$ + 'skip$ + { pop$ s } + if$ +} + +% Converts the word number string on the top of the stack to +% Arabic string form. Will be successful up to "tenth". +FUNCTION {word.to.num} +{ duplicate$ "l" change.case$ 's := + s "first" = + { pop$ "1" } + { skip$ } + if$ + s "second" = + { pop$ "2" } + { skip$ } + if$ + s "third" = + { pop$ "3" } + { skip$ } + if$ + s "fourth" = + { pop$ "4" } + { skip$ } + if$ + s "fifth" = + { pop$ "5" } + { skip$ } + if$ + s "sixth" = + { pop$ "6" } + { skip$ } + if$ + s "seventh" = + { pop$ "7" } + { skip$ } + if$ + s "eighth" = + { pop$ "8" } + { skip$ } + if$ + s "ninth" = + { pop$ "9" } + { skip$ } + if$ + s "tenth" = + { pop$ "10" } + { skip$ } + if$ +} + + +% Converts the string on the top of the stack to numerical +% ordinal (e.g., "11th") form. +FUNCTION {convert.edition} +{ duplicate$ empty$ 'skip$ + { duplicate$ #1 #1 substring$ is.num + { extract.num + num.to.ordinal + } + { word.to.num + duplicate$ #1 #1 substring$ is.num + { num.to.ordinal } + { "edition ordinal word " quote$ * edition * quote$ * + " may be too high (or improper) for conversion" * " in " * cite$ * warning$ + } + if$ + } + if$ + } + if$ +} + + + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%% LATEX BIBLIOGRAPHY CODE %% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +FUNCTION {start.entry} +{ newline$ + "\bibitem{" write$ + cite$ write$ + "}" write$ + newline$ + "" + initialize.prev.this.status +} + +% Here we write out all the LaTeX code that we will need. The most involved +% code sequences are those that control the alternate interword spacing and +% foreign language hyphenation patterns. The heavy use of \providecommand +% gives users a way to override the defaults. Special thanks to Javier Bezos, +% Johannes Braams, Robin Fairbairns, Heiko Oberdiek, Donald Arseneau and all +% the other gurus on comp.text.tex for their help and advice on the topic of +% \selectlanguage, Babel and BibTeX. 
+FUNCTION {begin.bib} +{ "% Generated by IEEEtran.bst, version: " bst.file.version * " (" * bst.file.date * ")" * + write$ newline$ + preamble$ empty$ 'skip$ + { preamble$ write$ newline$ } + if$ + "\begin{thebibliography}{" longest.label * "}" * + write$ newline$ + "\providecommand{\url}[1]{#1}" + write$ newline$ + "\csname url@samestyle\endcsname" + write$ newline$ + "\providecommand{\newblock}{\relax}" + write$ newline$ + "\providecommand{\bibinfo}[2]{#2}" + write$ newline$ + "\providecommand{\BIBentrySTDinterwordspacing}{\spaceskip=0pt\relax}" + write$ newline$ + "\providecommand{\BIBentryALTinterwordstretchfactor}{" + ALTinterwordstretchfactor * "}" * + write$ newline$ + "\providecommand{\BIBentryALTinterwordspacing}{\spaceskip=\fontdimen2\font plus " + write$ newline$ + "\BIBentryALTinterwordstretchfactor\fontdimen3\font minus \fontdimen4\font\relax}" + write$ newline$ + "\providecommand{\BIBforeignlanguage}[2]{{%" + write$ newline$ + "\expandafter\ifx\csname l@#1\endcsname\relax" + write$ newline$ + "\typeout{** WARNING: IEEEtran.bst: No hyphenation pattern has been}%" + write$ newline$ + "\typeout{** loaded for the language `#1'. Using the pattern for}%" + write$ newline$ + "\typeout{** the default language instead.}%" + write$ newline$ + "\else" + write$ newline$ + "\language=\csname l@#1\endcsname" + write$ newline$ + "\fi" + write$ newline$ + "#2}}" + write$ newline$ + "\providecommand{\BIBdecl}{\relax}" + write$ newline$ + "\BIBdecl" + write$ newline$ +} + +FUNCTION {end.bib} +{ newline$ "\end{thebibliography}" write$ newline$ } + +FUNCTION {if.url.alt.interword.spacing} +{ is.use.alt.interword.spacing + {url empty$ 'skip$ {"\BIBentryALTinterwordspacing" write$ newline$} if$} + { skip$ } + if$ +} + +FUNCTION {if.url.std.interword.spacing} +{ is.use.alt.interword.spacing + {url empty$ 'skip$ {"\BIBentrySTDinterwordspacing" write$ newline$} if$} + { skip$ } + if$ +} + + + + +%%%%%%%%%%%%%%%%%%%%%%%% +%% LONGEST LABEL PASS %% +%%%%%%%%%%%%%%%%%%%%%%%% + +FUNCTION {initialize.longest.label} +{ "" 'longest.label := + #1 'number.label := + #0 'longest.label.width := +} + +FUNCTION {longest.label.pass} +{ type$ "ieeetranbstctl" = + { skip$ } + { number.label int.to.str$ 'label := + number.label #1 + 'number.label := + label width$ longest.label.width > + { label 'longest.label := + label width$ 'longest.label.width := + } + { skip$ } + if$ + } + if$ +} + + + + +%%%%%%%%%%%%%%%%%%%%% +%% FORMAT HANDLERS %% +%%%%%%%%%%%%%%%%%%%%% + +%% Lower Level Formats (used by higher level formats) + +FUNCTION {format.address.org.or.pub.date} +{ 't := + "" + year empty$ + { "empty year in " cite$ * warning$ } + { skip$ } + if$ + address empty$ t empty$ and + year empty$ and month empty$ and + { skip$ } + { this.to.prev.status + this.status.std + cap.status.std + address "address" bibinfo.check * + t empty$ + { skip$ } + { punct.period 'prev.status.punct := + space.large 'prev.status.space := + address empty$ + { skip$ } + { ": " * } + if$ + t * + } + if$ + year empty$ month empty$ and + { skip$ } + { t empty$ address empty$ and + { skip$ } + { ", " * } + if$ + month empty$ + { year empty$ + { skip$ } + { year "year" bibinfo.check * } + if$ + } + { month "month" bibinfo.check * + year empty$ + { skip$ } + { " " * year "year" bibinfo.check * } + if$ + } + if$ + } + if$ + } + if$ +} + + +FUNCTION {format.names} +{ 'bibinfo := + duplicate$ empty$ 'skip$ { + this.to.prev.status + this.status.std + 's := + "" 't := + #1 'nameptr := + s num.names$ 'numnames := + numnames 'namesleft := + { namesleft #0 > } 
+ { s nameptr + name.format.string + format.name$ + bibinfo bibinfo.check + 't := + nameptr #1 > + { nameptr num.names.shown.with.forced.et.al #1 + = + numnames max.num.names.before.forced.et.al > + is.forced.et.al and and + { "others" 't := + #1 'namesleft := + } + { skip$ } + if$ + namesleft #1 > + { ", " * t do.name.latex.cmd * } + { s nameptr "{ll}" format.name$ duplicate$ "others" = + { 't := } + { pop$ } + if$ + t "others" = + { " " * bbl.etal emphasize * } + { numnames #2 > + { "," * } + { skip$ } + if$ + bbl.and + space.word * t do.name.latex.cmd * + } + if$ + } + if$ + } + { t do.name.latex.cmd } + if$ + nameptr #1 + 'nameptr := + namesleft #1 - 'namesleft := + } + while$ + cap.status.std + } if$ +} + + + + +%% Higher Level Formats + +%% addresses/locations + +FUNCTION {format.address} +{ address duplicate$ empty$ 'skip$ + { this.to.prev.status + this.status.std + cap.status.std + } + if$ +} + + + +%% author/editor names + +FUNCTION {format.authors}{ author "author" format.names } + +FUNCTION {format.editors} +{ editor "editor" format.names duplicate$ empty$ 'skip$ + { ", " * + get.bbl.editor + capitalize + * + } + if$ +} + + + +%% date + +FUNCTION {format.date} +{ + month "month" bibinfo.check duplicate$ empty$ + year "year" bibinfo.check duplicate$ empty$ + { swap$ 'skip$ + { this.to.prev.status + this.status.std + cap.status.std + "there's a month but no year in " cite$ * warning$ } + if$ + * + } + { this.to.prev.status + this.status.std + cap.status.std + swap$ 'skip$ + { + swap$ + " " * swap$ + } + if$ + * + } + if$ +} + +FUNCTION {format.date.electronic} +{ month "month" bibinfo.check duplicate$ empty$ + year "year" bibinfo.check duplicate$ empty$ + { swap$ + { pop$ } + { "there's a month but no year in " cite$ * warning$ + pop$ ")" * "(" swap$ * + this.to.prev.status + punct.no 'this.status.punct := + space.normal 'this.status.space := + quote.no 'this.status.quote := + cap.yes 'status.cap := + } + if$ + } + { swap$ + { swap$ pop$ ")" * "(" swap$ * } + { "(" swap$ * ", " * swap$ * ")" * } + if$ + this.to.prev.status + punct.no 'this.status.punct := + space.normal 'this.status.space := + quote.no 'this.status.quote := + cap.yes 'status.cap := + } + if$ +} + + + +%% edition/title + +% Note: IEEE considers the edition to be closely associated with +% the title of a book. So, in IEEEtran.bst the edition is normally handled +% within the formatting of the title. The format.edition function is +% retained here for possible future use. +FUNCTION {format.edition} +{ edition duplicate$ empty$ 'skip$ + { this.to.prev.status + this.status.std + convert.edition + status.cap + { "t" } + { "l" } + if$ change.case$ + "edition" bibinfo.check + "~" * bbl.edition * + cap.status.std + } + if$ +} + +% This is used to format the booktitle of a conference proceedings. +% Here we use the "intype" field to provide the user a way to +% override the word "in" (e.g., with things like "presented at") +% Use of intype stops the emphasis of the booktitle to indicate that +% we no longer mean the written conference proceedings, but the +% conference itself. +FUNCTION {format.in.booktitle} +{ booktitle "booktitle" bibinfo.check duplicate$ empty$ 'skip$ + { this.to.prev.status + this.status.std + select.language + intype missing$ + { emphasize + bbl.in " " * + } + { intype " " * } + if$ + swap$ * + cap.status.std + } + if$ +} + +% This is used to format the booktitle of collection. +% Here the "intype" field is not supported, but "edition" is. 
+FUNCTION {format.in.booktitle.edition} +{ booktitle "booktitle" bibinfo.check duplicate$ empty$ 'skip$ + { this.to.prev.status + this.status.std + select.language + emphasize + edition empty$ 'skip$ + { ", " * + edition + convert.edition + "l" change.case$ + * "~" * bbl.edition * + } + if$ + bbl.in " " * swap$ * + cap.status.std + } + if$ +} + +FUNCTION {format.article.title} +{ title duplicate$ empty$ 'skip$ + { this.to.prev.status + this.status.std + "t" change.case$ + } + if$ + "title" bibinfo.check + duplicate$ empty$ 'skip$ + { quote.close 'this.status.quote := + is.last.char.not.punct + { punct.std 'this.status.punct := } + { punct.no 'this.status.punct := } + if$ + select.language + "``" swap$ * + cap.status.std + } + if$ +} + +FUNCTION {format.article.title.electronic} +{ title duplicate$ empty$ 'skip$ + { this.to.prev.status + this.status.std + cap.status.std + "t" change.case$ + } + if$ + "title" bibinfo.check + duplicate$ empty$ + { skip$ } + { select.language } + if$ +} + +FUNCTION {format.book.title.edition} +{ title "title" bibinfo.check + duplicate$ empty$ + { "empty title in " cite$ * warning$ } + { this.to.prev.status + this.status.std + select.language + emphasize + edition empty$ 'skip$ + { ", " * + edition + convert.edition + status.cap + { "t" } + { "l" } + if$ + change.case$ + * "~" * bbl.edition * + } + if$ + cap.status.std + } + if$ +} + +FUNCTION {format.book.title} +{ title "title" bibinfo.check + duplicate$ empty$ 'skip$ + { this.to.prev.status + this.status.std + cap.status.std + select.language + emphasize + } + if$ +} + + + +%% journal + +FUNCTION {format.journal} +{ journal duplicate$ empty$ 'skip$ + { this.to.prev.status + this.status.std + cap.status.std + select.language + emphasize + } + if$ +} + + + +%% how published + +FUNCTION {format.howpublished} +{ howpublished duplicate$ empty$ 'skip$ + { this.to.prev.status + this.status.std + cap.status.std + } + if$ +} + + + +%% institutions/organization/publishers/school + +FUNCTION {format.institution} +{ institution duplicate$ empty$ 'skip$ + { this.to.prev.status + this.status.std + cap.status.std + } + if$ +} + +FUNCTION {format.organization} +{ organization duplicate$ empty$ 'skip$ + { this.to.prev.status + this.status.std + cap.status.std + } + if$ +} + +FUNCTION {format.address.publisher.date} +{ publisher "publisher" bibinfo.warn format.address.org.or.pub.date } + +FUNCTION {format.address.publisher.date.nowarn} +{ publisher "publisher" bibinfo.check format.address.org.or.pub.date } + +FUNCTION {format.address.organization.date} +{ organization "organization" bibinfo.check format.address.org.or.pub.date } + +FUNCTION {format.school} +{ school duplicate$ empty$ 'skip$ + { this.to.prev.status + this.status.std + cap.status.std + } + if$ +} + + + +%% volume/number/series/chapter/pages + +FUNCTION {format.volume} +{ volume empty.field.to.null.string + duplicate$ empty$ 'skip$ + { this.to.prev.status + this.status.std + bbl.volume + status.cap + { capitalize } + { skip$ } + if$ + swap$ tie.or.space.prefix + "volume" bibinfo.check + * * + cap.status.std + } + if$ +} + +FUNCTION {format.number} +{ number empty.field.to.null.string + duplicate$ empty$ 'skip$ + { this.to.prev.status + this.status.std + status.cap + { bbl.number capitalize } + { bbl.number } + if$ + swap$ tie.or.space.prefix + "number" bibinfo.check + * * + cap.status.std + } + if$ +} + +FUNCTION {format.number.if.use.for.article} +{ is.use.number.for.article + { format.number } + { "" } + if$ +} + +% IEEE does not seem to tie the series so 
closely with the volume +% and number as is done in other bibliography styles. Instead the +% series is treated somewhat like an extension of the title. +FUNCTION {format.series} +{ series empty$ + { "" } + { this.to.prev.status + this.status.std + bbl.series " " * + series "series" bibinfo.check * + cap.status.std + } + if$ +} + + +FUNCTION {format.chapter} +{ chapter empty$ + { "" } + { this.to.prev.status + this.status.std + type empty$ + { bbl.chapter } + { type "l" change.case$ + "type" bibinfo.check + } + if$ + chapter tie.or.space.prefix + "chapter" bibinfo.check + * * + cap.status.std + } + if$ +} + + +% The intended use of format.paper is for paper numbers of inproceedings. +% The paper type can be overridden via the type field. +% We allow the type to be displayed even if the paper number is absent +% for things like "postdeadline paper" +FUNCTION {format.paper} +{ is.use.paper + { paper empty$ + { type empty$ + { "" } + { this.to.prev.status + this.status.std + type "type" bibinfo.check + cap.status.std + } + if$ + } + { this.to.prev.status + this.status.std + type empty$ + { bbl.paper } + { type "type" bibinfo.check } + if$ + " " * paper + "paper" bibinfo.check + * + cap.status.std + } + if$ + } + { "" } + if$ +} + + +FUNCTION {format.pages} +{ pages duplicate$ empty$ 'skip$ + { this.to.prev.status + this.status.std + duplicate$ is.multiple.pages + { + bbl.pages swap$ + n.dashify + } + { + bbl.page swap$ + } + if$ + tie.or.space.prefix + "pages" bibinfo.check + * * + cap.status.std + } + if$ +} + + + +%% technical report number + +FUNCTION {format.tech.report.number} +{ number "number" bibinfo.check + this.to.prev.status + this.status.std + cap.status.std + type duplicate$ empty$ + { pop$ + bbl.techrep + } + { skip$ } + if$ + "type" bibinfo.check + swap$ duplicate$ empty$ + { pop$ } + { tie.or.space.prefix * * } + if$ +} + + + +%% note + +FUNCTION {format.note} +{ note empty$ + { "" } + { this.to.prev.status + this.status.std + punct.period 'this.status.punct := + note #1 #1 substring$ + duplicate$ "{" = + { skip$ } + { status.cap + { "u" } + { "l" } + if$ + change.case$ + } + if$ + note #2 global.max$ substring$ * "note" bibinfo.check + cap.yes 'status.cap := + } + if$ +} + + + +%% patent + +FUNCTION {format.patent.date} +{ this.to.prev.status + this.status.std + year empty$ + { monthfiled duplicate$ empty$ + { "monthfiled" bibinfo.check pop$ "" } + { "monthfiled" bibinfo.check } + if$ + dayfiled duplicate$ empty$ + { "dayfiled" bibinfo.check pop$ "" * } + { "dayfiled" bibinfo.check + monthfiled empty$ + { "dayfiled without a monthfiled in " cite$ * warning$ + * + } + { " " swap$ * * } + if$ + } + if$ + yearfiled empty$ + { "no year or yearfiled in " cite$ * warning$ } + { yearfiled "yearfiled" bibinfo.check + swap$ + duplicate$ empty$ + { pop$ } + { ", " * swap$ * } + if$ + } + if$ + } + { month duplicate$ empty$ + { "month" bibinfo.check pop$ "" } + { "month" bibinfo.check } + if$ + day duplicate$ empty$ + { "day" bibinfo.check pop$ "" * } + { "day" bibinfo.check + month empty$ + { "day without a month in " cite$ * warning$ + * + } + { " " swap$ * * } + if$ + } + if$ + year "year" bibinfo.check + swap$ + duplicate$ empty$ + { pop$ } + { ", " * swap$ * } + if$ + } + if$ + cap.status.std +} + +FUNCTION {format.patent.nationality.type.number} +{ this.to.prev.status + this.status.std + nationality duplicate$ empty$ + { "nationality" bibinfo.warn pop$ "" } + { "nationality" bibinfo.check + duplicate$ "l" change.case$ "united states" = + { pop$ bbl.patentUS } + { skip$ } + if$ + " 
" * + } + if$ + type empty$ + { bbl.patent "type" bibinfo.check } + { type "type" bibinfo.check } + if$ + * + number duplicate$ empty$ + { "number" bibinfo.warn pop$ } + { "number" bibinfo.check + large.number.separate + swap$ " " * swap$ * + } + if$ + cap.status.std +} + + + +%% standard + +FUNCTION {format.organization.institution.standard.type.number} +{ this.to.prev.status + this.status.std + organization duplicate$ empty$ + { pop$ + institution duplicate$ empty$ + { "institution" bibinfo.warn } + { "institution" bibinfo.warn " " * } + if$ + } + { "organization" bibinfo.warn " " * } + if$ + type empty$ + { bbl.standard "type" bibinfo.check } + { type "type" bibinfo.check } + if$ + * + number duplicate$ empty$ + { "number" bibinfo.check pop$ } + { "number" bibinfo.check + large.number.separate + swap$ " " * swap$ * + } + if$ + cap.status.std +} + +FUNCTION {format.revision} +{ revision empty$ + { "" } + { this.to.prev.status + this.status.std + bbl.revision + revision tie.or.space.prefix + "revision" bibinfo.check + * * + cap.status.std + } + if$ +} + + +%% thesis + +FUNCTION {format.master.thesis.type} +{ this.to.prev.status + this.status.std + type empty$ + { + bbl.mthesis + } + { + type "type" bibinfo.check + } + if$ +cap.status.std +} + +FUNCTION {format.phd.thesis.type} +{ this.to.prev.status + this.status.std + type empty$ + { + bbl.phdthesis + } + { + type "type" bibinfo.check + } + if$ +cap.status.std +} + + + +%% URL + +FUNCTION {format.url} +{ url empty$ + { "" } + { this.to.prev.status + this.status.std + cap.yes 'status.cap := + name.url.prefix " " * + "\url{" * url * "}" * + punct.no 'this.status.punct := + punct.period 'prev.status.punct := + space.normal 'this.status.space := + space.normal 'prev.status.space := + quote.no 'this.status.quote := + } + if$ +} + + + + +%%%%%%%%%%%%%%%%%%%% +%% ENTRY HANDLERS %% +%%%%%%%%%%%%%%%%%%%% + + +% Note: In many journals, IEEE (or the authors) tend not to show the number +% for articles, so the display of the number is controlled here by the +% switch "is.use.number.for.article" +FUNCTION {article} +{ std.status.using.comma + start.entry + if.url.alt.interword.spacing + format.authors "author" output.warn + name.or.dash + format.article.title "title" output.warn + format.journal "journal" bibinfo.check "journal" output.warn + format.volume output + format.number.if.use.for.article output + format.pages output + format.date "year" output.warn + format.note output + format.url output + fin.entry + if.url.std.interword.spacing +} + +FUNCTION {book} +{ std.status.using.comma + start.entry + if.url.alt.interword.spacing + author empty$ + { format.editors "author and editor" output.warn } + { format.authors output.nonnull } + if$ + name.or.dash + format.book.title.edition output + format.series output + author empty$ + { skip$ } + { format.editors output } + if$ + format.address.publisher.date output + format.volume output + format.number output + format.note output + format.url output + fin.entry + if.url.std.interword.spacing +} + +FUNCTION {booklet} +{ std.status.using.comma + start.entry + if.url.alt.interword.spacing + format.authors output + name.or.dash + format.article.title "title" output.warn + format.howpublished "howpublished" bibinfo.check output + format.organization "organization" bibinfo.check output + format.address "address" bibinfo.check output + format.date output + format.note output + format.url output + fin.entry + if.url.std.interword.spacing +} + +FUNCTION {electronic} +{ std.status.using.period + start.entry + 
if.url.alt.interword.spacing + format.authors output + name.or.dash + format.date.electronic output + format.article.title.electronic output + format.howpublished "howpublished" bibinfo.check output + format.organization "organization" bibinfo.check output + format.address "address" bibinfo.check output + format.note output + format.url output + fin.entry + empty.entry.warn + if.url.std.interword.spacing +} + +FUNCTION {inbook} +{ std.status.using.comma + start.entry + if.url.alt.interword.spacing + author empty$ + { format.editors "author and editor" output.warn } + { format.authors output.nonnull } + if$ + name.or.dash + format.book.title.edition output + format.series output + format.address.publisher.date output + format.volume output + format.number output + format.chapter output + format.pages output + format.note output + format.url output + fin.entry + if.url.std.interword.spacing +} + +FUNCTION {incollection} +{ std.status.using.comma + start.entry + if.url.alt.interword.spacing + format.authors "author" output.warn + name.or.dash + format.article.title "title" output.warn + format.in.booktitle.edition "booktitle" output.warn + format.series output + format.editors output + format.address.publisher.date.nowarn output + format.volume output + format.number output + format.chapter output + format.pages output + format.note output + format.url output + fin.entry + if.url.std.interword.spacing +} + +FUNCTION {inproceedings} +{ std.status.using.comma + start.entry + if.url.alt.interword.spacing + format.authors "author" output.warn + name.or.dash + format.article.title "title" output.warn + format.in.booktitle "booktitle" output.warn + format.series output + format.editors output + format.volume output + format.number output + publisher empty$ + { format.address.organization.date output } + { format.organization "organization" bibinfo.check output + format.address.publisher.date output + } + if$ + format.paper output + format.pages output + format.note output + format.url output + fin.entry + if.url.std.interword.spacing +} + +FUNCTION {manual} +{ std.status.using.comma + start.entry + if.url.alt.interword.spacing + format.authors output + name.or.dash + format.book.title.edition "title" output.warn + format.howpublished "howpublished" bibinfo.check output + format.organization "organization" bibinfo.check output + format.address "address" bibinfo.check output + format.date output + format.note output + format.url output + fin.entry + if.url.std.interword.spacing +} + +FUNCTION {mastersthesis} +{ std.status.using.comma + start.entry + if.url.alt.interword.spacing + format.authors "author" output.warn + name.or.dash + format.article.title "title" output.warn + format.master.thesis.type output.nonnull + format.school "school" bibinfo.warn output + format.address "address" bibinfo.check output + format.date "year" output.warn + format.note output + format.url output + fin.entry + if.url.std.interword.spacing +} + +FUNCTION {misc} +{ std.status.using.comma + start.entry + if.url.alt.interword.spacing + format.authors output + name.or.dash + format.article.title output + format.howpublished "howpublished" bibinfo.check output + format.organization "organization" bibinfo.check output + format.address "address" bibinfo.check output + format.pages output + format.date output + format.note output + format.url output + fin.entry + empty.entry.warn + if.url.std.interword.spacing +} + +FUNCTION {patent} +{ std.status.using.comma + start.entry + if.url.alt.interword.spacing + format.authors output 
+ name.or.dash + format.article.title output + format.patent.nationality.type.number output + format.patent.date output + format.note output + format.url output + fin.entry + empty.entry.warn + if.url.std.interword.spacing +} + +FUNCTION {periodical} +{ std.status.using.comma + start.entry + if.url.alt.interword.spacing + format.editors output + name.or.dash + format.book.title "title" output.warn + format.series output + format.volume output + format.number output + format.organization "organization" bibinfo.check output + format.date "year" output.warn + format.note output + format.url output + fin.entry + if.url.std.interword.spacing +} + +FUNCTION {phdthesis} +{ std.status.using.comma + start.entry + if.url.alt.interword.spacing + format.authors "author" output.warn + name.or.dash + format.article.title "title" output.warn + format.phd.thesis.type output.nonnull + format.school "school" bibinfo.warn output + format.address "address" bibinfo.check output + format.date "year" output.warn + format.note output + format.url output + fin.entry + if.url.std.interword.spacing +} + +FUNCTION {proceedings} +{ std.status.using.comma + start.entry + if.url.alt.interword.spacing + format.editors output + name.or.dash + format.book.title "title" output.warn + format.series output + format.volume output + format.number output + publisher empty$ + { format.address.organization.date output } + { format.organization "organization" bibinfo.check output + format.address.publisher.date output + } + if$ + format.note output + format.url output + fin.entry + if.url.std.interword.spacing +} + +FUNCTION {standard} +{ std.status.using.comma + start.entry + if.url.alt.interword.spacing + format.authors output + name.or.dash + format.book.title "title" output.warn + format.howpublished "howpublished" bibinfo.check output + format.organization.institution.standard.type.number output + format.revision output + format.date output + format.note output + format.url output + fin.entry + if.url.std.interword.spacing +} + +FUNCTION {techreport} +{ std.status.using.comma + start.entry + if.url.alt.interword.spacing + format.authors "author" output.warn + name.or.dash + format.article.title "title" output.warn + format.howpublished "howpublished" bibinfo.check output + format.institution "institution" bibinfo.warn output + format.address "address" bibinfo.check output + format.tech.report.number output.nonnull + format.date "year" output.warn + format.note output + format.url output + fin.entry + if.url.std.interword.spacing +} + +FUNCTION {unpublished} +{ std.status.using.comma + start.entry + if.url.alt.interword.spacing + format.authors "author" output.warn + name.or.dash + format.article.title "title" output.warn + format.date output + format.note "note" output.warn + format.url output + fin.entry + if.url.std.interword.spacing +} + + +% The special entry type which provides the user interface to the +% BST controls +FUNCTION {IEEEtranBSTCTL} +{ is.print.banners.to.terminal + { "** IEEEtran BST control entry " quote$ * cite$ * quote$ * " detected." 
* + top$ + } + { skip$ } + if$ + CTLuse_article_number + empty$ + { skip$ } + { CTLuse_article_number + yes.no.to.int + 'is.use.number.for.article := + } + if$ + CTLuse_paper + empty$ + { skip$ } + { CTLuse_paper + yes.no.to.int + 'is.use.paper := + } + if$ + CTLuse_forced_etal + empty$ + { skip$ } + { CTLuse_forced_etal + yes.no.to.int + 'is.forced.et.al := + } + if$ + CTLmax_names_forced_etal + empty$ + { skip$ } + { CTLmax_names_forced_etal + string.to.integer + 'max.num.names.before.forced.et.al := + } + if$ + CTLnames_show_etal + empty$ + { skip$ } + { CTLnames_show_etal + string.to.integer + 'num.names.shown.with.forced.et.al := + } + if$ + CTLuse_alt_spacing + empty$ + { skip$ } + { CTLuse_alt_spacing + yes.no.to.int + 'is.use.alt.interword.spacing := + } + if$ + CTLalt_stretch_factor + empty$ + { skip$ } + { CTLalt_stretch_factor + 'ALTinterwordstretchfactor := + "\renewcommand{\BIBentryALTinterwordstretchfactor}{" + ALTinterwordstretchfactor * "}" * + write$ newline$ + } + if$ + CTLdash_repeated_names + empty$ + { skip$ } + { CTLdash_repeated_names + yes.no.to.int + 'is.dash.repeated.names := + } + if$ + CTLname_format_string + empty$ + { skip$ } + { CTLname_format_string + 'name.format.string := + } + if$ + CTLname_latex_cmd + empty$ + { skip$ } + { CTLname_latex_cmd + 'name.latex.cmd := + } + if$ + CTLname_url_prefix + missing$ + { skip$ } + { CTLname_url_prefix + 'name.url.prefix := + } + if$ + + + num.names.shown.with.forced.et.al max.num.names.before.forced.et.al > + { "CTLnames_show_etal cannot be greater than CTLmax_names_forced_etal in " cite$ * warning$ + max.num.names.before.forced.et.al 'num.names.shown.with.forced.et.al := + } + { skip$ } + if$ +} + + +%%%%%%%%%%%%%%%%%%% +%% ENTRY ALIASES %% +%%%%%%%%%%%%%%%%%%% +FUNCTION {conference}{inproceedings} +FUNCTION {online}{electronic} +FUNCTION {internet}{electronic} +FUNCTION {webpage}{electronic} +FUNCTION {www}{electronic} +FUNCTION {default.type}{misc} + + + +%%%%%%%%%%%%%%%%%% +%% MAIN PROGRAM %% +%%%%%%%%%%%%%%%%%% + +READ + +EXECUTE {initialize.controls} +EXECUTE {initialize.status.constants} +EXECUTE {banner.message} + +EXECUTE {initialize.longest.label} +ITERATE {longest.label.pass} + +EXECUTE {begin.bib} +ITERATE {call.type$} +EXECUTE {end.bib} + +EXECUTE{completed.message} + + +%% That's all folks, mds. diff --git a/spec/consensus/consensus-paper/IEEEtran.cls b/spec/consensus/consensus-paper/IEEEtran.cls new file mode 100644 index 0000000000..9c967d555f --- /dev/null +++ b/spec/consensus/consensus-paper/IEEEtran.cls @@ -0,0 +1,4733 @@ +%% +%% IEEEtran.cls 2011/11/03 version V1.8 based on +%% IEEEtran.cls 2007/03/05 version V1.7a +%% The changes in V1.8 are made with a single goal in mind: +%% to change the look of the output using the [conference] option +%% and the default font size (10pt) to match the Word template more closely. +%% These changes may well have undesired side effects when other options +%% are in force! +%% +%% +%% This is the official IEEE LaTeX class for authors of the Institute of +%% Electrical and Electronics Engineers (IEEE) Transactions journals and +%% conferences. +%% +%% Support sites: +%% http://www.michaelshell.org/tex/ieeetran/ +%% http://www.ctan.org/tex-archive/macros/latex/contrib/IEEEtran/ +%% and +%% http://www.ieee.org/ +%% +%% Based on the original 1993 IEEEtran.cls, but with many bug fixes +%% and enhancements (from both JVH and MDS) over the 1996/7 version. 
+%%
+%%
+%% Contributors:
+%% Gerry Murray (1993), Silvano Balemi (1993),
+%% Jon Dixon (1996), Peter N"uchter (1996),
+%% Juergen von Hagen (2000), and Michael Shell (2001-2007)
+%%
+%%
+%% Copyright (c) 1993-2000 by Gerry Murray, Silvano Balemi,
+%% Jon Dixon, Peter N"uchter,
+%% Juergen von Hagen
+%% and
+%% Copyright (c) 2001-2007 by Michael Shell
+%%
+%% Current maintainer (V1.3 to V1.7): Michael Shell
+%% See:
+%% http://www.michaelshell.org/
+%% for current contact information.
+%%
+%% Special thanks to Peter Wilson (CUA) and Donald Arseneau
+%% for allowing the inclusion of the \@ifmtarg command
+%% from their ifmtarg LaTeX package.
+%%
+%%*************************************************************************
+%% Legal Notice:
+%% This code is offered as-is without any warranty either expressed or
+%% implied; without even the implied warranty of MERCHANTABILITY or
+%% FITNESS FOR A PARTICULAR PURPOSE!
+%% User assumes all risk.
+%% In no event shall IEEE or any contributor to this code be liable for
+%% any damages or losses, including, but not limited to, incidental,
+%% consequential, or any other damages, resulting from the use or misuse
+%% of any information contained here.
+%%
+%% All comments are the opinions of their respective authors and are not
+%% necessarily endorsed by the IEEE.
+%%
+%% This work is distributed under the LaTeX Project Public License (LPPL)
+%% ( http://www.latex-project.org/ ) version 1.3, and may be freely used,
+%% distributed and modified. A copy of the LPPL, version 1.3, is included
+%% in the base LaTeX documentation of all distributions of LaTeX released
+%% 2003/12/01 or later.
+%% Retain all contribution notices and credits.
+%% ** Modified files should be clearly indicated as such, including **
+%% ** renaming them and changing author support contact information. **
+%%
+%% File list of work: IEEEtran.cls, IEEEtran_HOWTO.pdf, bare_adv.tex,
+%% bare_conf.tex, bare_jrnl.tex, bare_jrnl_compsoc.tex
+%%
+%% Major changes to the user interface should be indicated by an
+%% increase in the version numbers. If a version is a beta, it will
+%% be indicated with a BETA suffix, i.e., 1.4 BETA.
+%% Small changes can be indicated by appending letters to the version
+%% such as "IEEEtran_v14a.cls".
+%% In all cases, \ProvidesClass, any \typeout messages to the user,
+%% \IEEEtransversionmajor and \IEEEtransversionminor must reflect the
+%% correct version information.
+%% The changes should also be documented via source comments.
+%%*************************************************************************
+%%
+%
+% Available class options
+% e.g., \documentclass[10pt,conference]{IEEEtran}
+%
+% *** choose only one from each category ***
+%
+% 9pt, 10pt, 11pt, 12pt
+% Sets normal font size. The default is 10pt.
+%
+% conference, journal, technote, peerreview, peerreviewca
+% determines format mode - conference papers, journal papers,
+% correspondence papers (technotes), or peer review papers. The user
+% should also select 9pt when using technote. peerreview is like
+% journal mode, but provides for a single-column "cover" title page for
+% anonymous peer review. The paper title (without the author names) is
+% repeated at the top of the page after the cover page. For peer review
+% papers, the \IEEEpeerreviewmaketitle command must be executed (will
+% automatically be ignored for non-peerreview modes) at the place the
+% cover page is to end, usually just after the abstract (keywords are
+% not normally used with peer review papers). peerreviewca is like
+% peerreview, but allows the author names to be entered and formatted
+% as with conference mode so that author affiliation and contact
+% information can be easily seen on the cover page.
+% The default is journal.
+%
+% draft, draftcls, draftclsnofoot, final
+% determines if paper is formatted as a widely spaced draft (for
+% handwritten editor comments) or as a properly typeset final version.
+% draftcls restricts draft mode to the class file while all other LaTeX
+% packages (e.g., \usepackage{graphicx}) will behave as final - allows
+% for a draft paper with visible figures, etc. draftclsnofoot is like
+% draftcls, but does not display the date and the word "DRAFT" at the foot
+% of the pages. If using one of the draft modes, the user will probably
+% also want to select onecolumn.
+% The default is final.
+%
+% letterpaper, a4paper
+% determines paper size: 8.5in X 11in or 210mm X 297mm. CHANGING THE PAPER
+% SIZE WILL NOT ALTER THE TYPESETTING OF THE DOCUMENT - ONLY THE MARGINS
+% WILL BE AFFECTED. In particular, documents using the a4paper option will
+% have reduced side margins (A4 is narrower than US letter) and a longer
+% bottom margin (A4 is longer than US letter). For both cases, the top
+% margins will be the same and the text will be horizontally centered.
+% For final submission to IEEE, authors should use US letter (8.5 X 11in)
+% paper. Note that authors should ensure that all post-processing
+% (ps, pdf, etc.) uses the same paper specification as the .tex document.
+% Problems here are by far the number one reason for incorrect margins.
+% IEEEtran will automatically set the default paper size under pdflatex
+% (without requiring a change to pdftex.cfg), so this issue is more
+% important to dvips users. Fix config.ps, config.pdf, or ~/.dvipsrc for
+% dvips, or use the dvips -t papersize option instead as needed. See the
+% testflow documentation
+% http://www.ctan.org/tex-archive/macros/latex/contrib/IEEEtran/testflow
+% for more details on dvips paper size configuration.
+% The default is letterpaper.
+%
+% oneside, twoside
+% determines if layout follows single-sided or two-sided (duplex)
+% printing. The only notable change is with the headings at the top of
+% the pages.
+% The default is oneside.
+%
+% onecolumn, twocolumn
+% determines if text is organized into one or two columns per page. One
+% column mode is usually used only with draft papers.
+% The default is twocolumn.
+%
+% compsoc
+% Use the format of the IEEE Computer Society.
+%
+% romanappendices
+% Use the "Appendix I" convention when numbering appendices. IEEEtran.cls
+% now defaults to Alpha "Appendix A" convention - the opposite of what
+% v1.6b and earlier did.
+%
+% captionsoff
+% disables the display of the figure/table captions. Some IEEE journals
+% request that captions be removed and figures/tables be put on pages
+% of their own at the end of an initial paper submission. The endfloat
+% package can be used with this class option to achieve this format.
+%
+% nofonttune
+% turns off tuning of the font interword spacing. May be useful to those
+% not using the standard Times fonts or for those who have already "tuned"
+% their fonts.
+% The default is to enable IEEEtran to tune font parameters.
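+%
+% For example (an illustrative sketch; each bracketed list picks at most
+% one option from each category above, and unspecified categories keep
+% the stated defaults):
+% \documentclass[9pt,technote]{IEEEtran}
+% \documentclass[11pt,journal,a4paper]{IEEEtran}
+% \documentclass[conference,onecolumn,draftcls]{IEEEtran}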
+%
+%
+%----------
+% Available CLASSINPUTs provided (all are macros unless otherwise noted):
+% \CLASSINPUTbaselinestretch
+% \CLASSINPUTinnersidemargin
+% \CLASSINPUToutersidemargin
+% \CLASSINPUTtoptextmargin
+% \CLASSINPUTbottomtextmargin
+%
+% Available CLASSINFOs provided:
+% \ifCLASSINFOpdf (TeX if conditional)
+% \CLASSINFOpaperwidth (macro)
+% \CLASSINFOpaperheight (macro)
+% \CLASSINFOnormalsizebaselineskip (length)
+% \CLASSINFOnormalsizeunitybaselineskip (length)
+%
+% Available CLASSOPTIONs provided:
+% all class option flags (TeX if conditionals) unless otherwise noted,
+% e.g., \ifCLASSOPTIONcaptionsoff
+% point size options provided as a single macro:
+% \CLASSOPTIONpt
+% which will be defined as 9, 10, 11, or 12 depending on the document's
+% normalsize point size.
+% also, class option peerreviewca implies the use of class option peerreview
+% and class option draft implies the use of class option draftcls
+
+
+
+
+
+\ProvidesClass{IEEEtran}[2012/11/21 V1.8c by Harald Hanche-Olsen and Anders Christensen]
+\typeout{-- Based on V1.7a by Michael Shell}
+\typeout{-- See the "IEEEtran_HOWTO" manual for usage information.}
+\typeout{-- http://www.michaelshell.org/tex/ieeetran/}
+\NeedsTeXFormat{LaTeX2e}
+
+% IEEEtran.cls version numbers, provided as of V1.3
+% These values serve as a way a .tex file can
+% determine if the new features are provided.
+% The version number of this IEEEtran.cls can be obtained from
+% these values. i.e., V1.4
+% KEEP THESE AS INTEGERS! i.e., NO {4a} or anything like that-
+% (no need to enumerate "a" minor changes here)
+\def\IEEEtransversionmajor{1}
+\def\IEEEtransversionminor{7}
+
+% These do nothing, but provide them like in article.cls
+\newif\if@restonecol
+\newif\if@titlepage
+
+
+% class option conditionals
+\newif\ifCLASSOPTIONonecolumn \CLASSOPTIONonecolumnfalse
+\newif\ifCLASSOPTIONtwocolumn \CLASSOPTIONtwocolumntrue
+
+\newif\ifCLASSOPTIONoneside \CLASSOPTIONonesidetrue
+\newif\ifCLASSOPTIONtwoside \CLASSOPTIONtwosidefalse
+
+\newif\ifCLASSOPTIONfinal \CLASSOPTIONfinaltrue
+\newif\ifCLASSOPTIONdraft \CLASSOPTIONdraftfalse
+\newif\ifCLASSOPTIONdraftcls \CLASSOPTIONdraftclsfalse
+\newif\ifCLASSOPTIONdraftclsnofoot \CLASSOPTIONdraftclsnofootfalse
+
+\newif\ifCLASSOPTIONpeerreview \CLASSOPTIONpeerreviewfalse
+\newif\ifCLASSOPTIONpeerreviewca \CLASSOPTIONpeerreviewcafalse
+
+\newif\ifCLASSOPTIONjournal \CLASSOPTIONjournaltrue
+\newif\ifCLASSOPTIONconference \CLASSOPTIONconferencefalse
+\newif\ifCLASSOPTIONtechnote \CLASSOPTIONtechnotefalse
+
+\newif\ifCLASSOPTIONnofonttune \CLASSOPTIONnofonttunefalse
+
+\newif\ifCLASSOPTIONcaptionsoff \CLASSOPTIONcaptionsofffalse
+
+\newif\ifCLASSOPTIONcompsoc \CLASSOPTIONcompsocfalse
+
+\newif\ifCLASSOPTIONromanappendices \CLASSOPTIONromanappendicesfalse
+
+
+% class info conditionals
+
+% indicates if pdf (via pdflatex) output
+\newif\ifCLASSINFOpdf \CLASSINFOpdffalse
+
+
+% V1.6b internal flag to show if using a4paper
+\newif\if@IEEEusingAfourpaper \@IEEEusingAfourpaperfalse
+
+
+
+% IEEEtran class scratch pad registers
+% dimen
+\newdimen\@IEEEtrantmpdimenA
+\newdimen\@IEEEtrantmpdimenB
+% count
+\newcount\@IEEEtrantmpcountA
+\newcount\@IEEEtrantmpcountB
+% token list
+\newtoks\@IEEEtrantmptoksA
+
+% we use \CLASSOPTIONpt so that we can ID the point size (even for 9pt docs)
+% as well as LaTeX's \@ptsize to retain some compatibility with some
+% external packages
+\def\@ptsize{0}
+% LaTeX does not support 9pt, so we set \@ptsize to 0 - same as that of 10pt
+\DeclareOption{9pt}{\def\CLASSOPTIONpt{9}\def\@ptsize{0}}
+\DeclareOption{10pt}{\def\CLASSOPTIONpt{10}\def\@ptsize{0}}
+\DeclareOption{11pt}{\def\CLASSOPTIONpt{11}\def\@ptsize{1}}
+\DeclareOption{12pt}{\def\CLASSOPTIONpt{12}\def\@ptsize{2}}
+
+
+
+\DeclareOption{letterpaper}{\setlength{\paperheight}{11in}%
+ \setlength{\paperwidth}{8.5in}%
+ \@IEEEusingAfourpaperfalse
+ \def\CLASSOPTIONpaper{letter}%
+ \def\CLASSINFOpaperwidth{8.5in}%
+ \def\CLASSINFOpaperheight{11in}}
+
+
+\DeclareOption{a4paper}{\setlength{\paperheight}{297mm}%
+ \setlength{\paperwidth}{210mm}%
+ \@IEEEusingAfourpapertrue
+ \def\CLASSOPTIONpaper{a4}%
+ \def\CLASSINFOpaperwidth{210mm}%
+ \def\CLASSINFOpaperheight{297mm}}
+
+\DeclareOption{oneside}{\@twosidefalse\@mparswitchfalse
+ \CLASSOPTIONonesidetrue\CLASSOPTIONtwosidefalse}
+\DeclareOption{twoside}{\@twosidetrue\@mparswitchtrue
+ \CLASSOPTIONtwosidetrue\CLASSOPTIONonesidefalse}
+
+\DeclareOption{onecolumn}{\CLASSOPTIONonecolumntrue\CLASSOPTIONtwocolumnfalse}
+\DeclareOption{twocolumn}{\CLASSOPTIONtwocolumntrue\CLASSOPTIONonecolumnfalse}
+
+% If the user selects draft, then this class AND any packages
+% will go into draft mode.
+\DeclareOption{draft}{\CLASSOPTIONdrafttrue\CLASSOPTIONdraftclstrue
+ \CLASSOPTIONdraftclsnofootfalse}
+% draftcls is for a draft mode which will not affect any packages
+% used by the document.
+\DeclareOption{draftcls}{\CLASSOPTIONdraftfalse\CLASSOPTIONdraftclstrue
+ \CLASSOPTIONdraftclsnofootfalse}
+% draftclsnofoot is like draftcls, but without the footer.
+\DeclareOption{draftclsnofoot}{\CLASSOPTIONdraftfalse\CLASSOPTIONdraftclstrue
+ \CLASSOPTIONdraftclsnofoottrue}
+\DeclareOption{final}{\CLASSOPTIONdraftfalse\CLASSOPTIONdraftclsfalse
+ \CLASSOPTIONdraftclsnofootfalse}
+
+\DeclareOption{journal}{\CLASSOPTIONpeerreviewfalse\CLASSOPTIONpeerreviewcafalse
+ \CLASSOPTIONjournaltrue\CLASSOPTIONconferencefalse\CLASSOPTIONtechnotefalse}
+
+\DeclareOption{conference}{\CLASSOPTIONpeerreviewfalse\CLASSOPTIONpeerreviewcafalse
+ \CLASSOPTIONjournalfalse\CLASSOPTIONconferencetrue\CLASSOPTIONtechnotefalse}
+
+\DeclareOption{technote}{\CLASSOPTIONpeerreviewfalse\CLASSOPTIONpeerreviewcafalse
+ \CLASSOPTIONjournalfalse\CLASSOPTIONconferencefalse\CLASSOPTIONtechnotetrue}
+
+\DeclareOption{peerreview}{\CLASSOPTIONpeerreviewtrue\CLASSOPTIONpeerreviewcafalse
+ \CLASSOPTIONjournalfalse\CLASSOPTIONconferencefalse\CLASSOPTIONtechnotefalse}
+
+\DeclareOption{peerreviewca}{\CLASSOPTIONpeerreviewtrue\CLASSOPTIONpeerreviewcatrue
+ \CLASSOPTIONjournalfalse\CLASSOPTIONconferencefalse\CLASSOPTIONtechnotefalse}
+
+\DeclareOption{nofonttune}{\CLASSOPTIONnofonttunetrue}
+
+\DeclareOption{captionsoff}{\CLASSOPTIONcaptionsofftrue}
+
+\DeclareOption{compsoc}{\CLASSOPTIONcompsoctrue}
+
+\DeclareOption{romanappendices}{\CLASSOPTIONromanappendicestrue}
+
+
+% default to US letter paper, 10pt, twocolumn, one sided, final, journal
+\ExecuteOptions{letterpaper,10pt,twocolumn,oneside,final,journal}
+% override these defaults per user requests
+\ProcessOptions
+
+
+
+% Computer Society conditional execution command
+\long\def\@IEEEcompsoconly#1{\relax\ifCLASSOPTIONcompsoc\relax#1\relax\fi\relax}
+% inverse
+\long\def\@IEEEnotcompsoconly#1{\relax\ifCLASSOPTIONcompsoc\else\relax#1\relax\fi\relax}
+% compsoc conference
+\long\def\@IEEEcompsocconfonly#1{\relax\ifCLASSOPTIONcompsoc\ifCLASSOPTIONconference\relax#1\relax\fi\fi\relax}
+% compsoc not conference
+\long\def\@IEEEcompsocnotconfonly#1{\relax\ifCLASSOPTIONcompsoc\ifCLASSOPTIONconference\else\relax#1\relax\fi\fi\relax}
+
+
+% IEEE uses Times Roman font, so we'll default to Times.
+% These three commands make up the entire times.sty package.
+\renewcommand{\sfdefault}{phv}
+\renewcommand{\rmdefault}{ptm}
+\renewcommand{\ttdefault}{pcr}
+
+\@IEEEcompsoconly{\typeout{-- Using IEEE Computer Society mode.}}
+
+% V1.7 compsoc nonconference papers use Palatino/Palladio as the main text font,
+% not Times Roman.
+\@IEEEcompsocnotconfonly{\renewcommand{\rmdefault}{ppl}}
+
+% enable Times/Palatino main text font
+\normalfont\selectfont
+
+
+
+
+
+% V1.7 conference notice message hook
+\def\@IEEEconsolenoticeconference{\typeout{}%
+\typeout{** Conference Paper **}%
+\typeout{Before submitting the final camera ready copy, remember to:}%
+\typeout{}%
+\typeout{ 1. Manually equalize the lengths of two columns on the last page}%
+\typeout{ of your paper;}%
+\typeout{}%
+\typeout{ 2. Ensure that any PostScript and/or PDF output post-processing}%
+\typeout{ uses only Type 1 fonts and that every step in the generation}%
+\typeout{ process uses the appropriate paper size.}%
+\typeout{}}
+
+
+% we can send console reminder messages to the user here
+\AtEndDocument{\ifCLASSOPTIONconference\@IEEEconsolenoticeconference\fi}
+
+
+% warn about the use of single column other than for draft mode
+\ifCLASSOPTIONtwocolumn\else%
+ \ifCLASSOPTIONdraftcls\else%
+ \typeout{** ATTENTION: Single column mode is not typically used with IEEE publications.}%
+ \fi%
+\fi
+
+
+% V1.7 improved paper size setting code.
+% Set pdfpage and dvips paper sizes. Conditional tests are similar to that
+% of ifpdf.sty. Retain within {} to ensure tested macros are never altered,
+% even if only effect is to set them to \relax.
+% if \pdfoutput is undefined or equal to relax, output a dvips special
+{\@ifundefined{pdfoutput}{\AtBeginDvi{\special{papersize=\CLASSINFOpaperwidth,\CLASSINFOpaperheight}}}{%
+% pdfoutput is defined and not equal to \relax
+% check for pdfpageheight existence just in case someone sets pdfoutput
+% under non-pdflatex. If exists, set them regardless of value of \pdfoutput.
+\@ifundefined{pdfpageheight}{\relax}{\global\pdfpagewidth\paperwidth
+\global\pdfpageheight\paperheight}%
+% if using \pdfoutput=0 under pdflatex, send dvips papersize special
+\ifcase\pdfoutput
+\AtBeginDvi{\special{papersize=\CLASSINFOpaperwidth,\CLASSINFOpaperheight}}%
+\else
+% we are using pdf output, set CLASSINFOpdf flag
+\global\CLASSINFOpdftrue
+\fi}}
+
+% let the user know the selected papersize
+\typeout{-- Using \CLASSINFOpaperwidth\space x \CLASSINFOpaperheight\space
+(\CLASSOPTIONpaper)\space paper.}
+
+\ifCLASSINFOpdf
+\typeout{-- Using PDF output.}
+\else
+\typeout{-- Using DVI output.}
+\fi
+
+
+% The idea hinted here is for LaTeX to generate \markleft{} and \markright{}
+% automatically for you after you enter \author{}, \journal{},
+% \journaldate{}, \journalvol{}, \journalnum{}, etc.
+% However, there may be some backward compatibility issues here as
+% well as some special applications for IEEEtran.cls and special issues
+% that may require the flexible \markleft{}, \markright{} and/or \markboth{}.
+% We'll leave this as an open future suggestion.
+%\newcommand{\journal}[1]{\def\@journal{#1}} +%\def\@journal{} + + + +% pointsize values +% used with ifx to determine the document's normal size +\def\@IEEEptsizenine{9} +\def\@IEEEptsizeten{10} +\def\@IEEEptsizeeleven{11} +\def\@IEEEptsizetwelve{12} + + + +% FONT DEFINITIONS (No sizexx.clo file needed) +% V1.6 revised font sizes, displayskip values and +% revised normalsize baselineskip to reduce underfull vbox problems +% on the 58pc = 696pt = 9.5in text height we want +% normalsize #lines/column baselineskip (aka leading) +% 9pt 63 11.0476pt (truncated down) +% 10pt 58 12pt (exact) +% 11pt 52 13.3846pt (truncated down) +% 12pt 50 13.92pt (exact) +% + +% we need to store the nominal baselineskip for the given font size +% in case baselinestretch ever changes. +% this is a dimen, so it will not hold stretch or shrink +\newdimen\@IEEEnormalsizeunitybaselineskip +\@IEEEnormalsizeunitybaselineskip\baselineskip + +\ifx\CLASSOPTIONpt\@IEEEptsizenine +\typeout{-- This is a 9 point document.} +\def\normalsize{\@setfontsize{\normalsize}{9}{11.0476pt}}% +\setlength{\@IEEEnormalsizeunitybaselineskip}{11.0476pt}% +\normalsize +\abovedisplayskip 1.5ex plus3pt minus1pt% +\belowdisplayskip \abovedisplayskip% +\abovedisplayshortskip 0pt plus3pt% +\belowdisplayshortskip 1.5ex plus3pt minus1pt +\def\small{\@setfontsize{\small}{8.5}{10pt}} +\def\footnotesize{\@setfontsize{\footnotesize}{8}{9pt}} +\def\scriptsize{\@setfontsize{\scriptsize}{7}{8pt}} +\def\tiny{\@setfontsize{\tiny}{5}{6pt}} +% sublargesize is the same as large - 10pt +\def\sublargesize{\@setfontsize{\sublargesize}{10}{12pt}} +\def\large{\@setfontsize{\large}{10}{12pt}} +\def\Large{\@setfontsize{\Large}{12}{14pt}} +\def\LARGE{\@setfontsize{\LARGE}{14}{17pt}} +\def\huge{\@setfontsize{\huge}{17}{20pt}} +\def\Huge{\@setfontsize{\Huge}{20}{24pt}} +\fi + + +% Check if we have selected 10 points +\ifx\CLASSOPTIONpt\@IEEEptsizeten +\typeout{-- This is a 10 point document.} +\def\normalsize{\@setfontsize{\normalsize}{10}{11}}% +\setlength{\@IEEEnormalsizeunitybaselineskip}{11pt}% +\normalsize +\abovedisplayskip 1.5ex plus4pt minus2pt% +\belowdisplayskip \abovedisplayskip% +\abovedisplayshortskip 0pt plus4pt% +\belowdisplayshortskip 1.5ex plus4pt minus2pt +\def\small{\@setfontsize{\small}{9}{10pt}} +\def\footnotesize{\@setfontsize{\footnotesize}{8}{9pt}} +\def\scriptsize{\@setfontsize{\scriptsize}{7}{8pt}} +\def\tiny{\@setfontsize{\tiny}{5}{6pt}} +% sublargesize is a tad smaller than large - 11pt +\def\sublargesize{\@setfontsize{\sublargesize}{11}{13.4pt}} +\def\large{\@setfontsize{\large}{12}{14pt}} +\def\Large{\@setfontsize{\Large}{14}{17pt}} +\def\LARGE{\@setfontsize{\LARGE}{17}{20pt}} +\def\huge{\@setfontsize{\huge}{20}{24pt}} +\def\Huge{\@setfontsize{\Huge}{24}{28pt}} +\fi + + +% Check if we have selected 11 points +\ifx\CLASSOPTIONpt\@IEEEptsizeeleven +\typeout{-- This is an 11 point document.} +\def\normalsize{\@setfontsize{\normalsize}{11}{13.3846pt}}% +\setlength{\@IEEEnormalsizeunitybaselineskip}{13.3846pt}% +\normalsize +\abovedisplayskip 1.5ex plus5pt minus3pt% +\belowdisplayskip \abovedisplayskip% +\abovedisplayshortskip 0pt plus5pt% +\belowdisplayshortskip 1.5ex plus5pt minus3pt +\def\small{\@setfontsize{\small}{10}{12pt}} +\def\footnotesize{\@setfontsize{\footnotesize}{9}{10.5pt}} +\def\scriptsize{\@setfontsize{\scriptsize}{8}{9pt}} +\def\tiny{\@setfontsize{\tiny}{6}{7pt}} +% sublargesize is the same as large - 12pt +\def\sublargesize{\@setfontsize{\sublargesize}{12}{14pt}} +\def\large{\@setfontsize{\large}{12}{14pt}} 
+\def\Large{\@setfontsize{\Large}{14}{17pt}}
+\def\LARGE{\@setfontsize{\LARGE}{17}{20pt}}
+\def\huge{\@setfontsize{\huge}{20}{24pt}}
+\def\Huge{\@setfontsize{\Huge}{24}{28pt}}
+\fi
+
+
+% Check if we have selected 12 points
+\ifx\CLASSOPTIONpt\@IEEEptsizetwelve
+\typeout{-- This is a 12 point document.}
+\def\normalsize{\@setfontsize{\normalsize}{12}{13.92pt}}%
+\setlength{\@IEEEnormalsizeunitybaselineskip}{13.92pt}%
+\normalsize
+\abovedisplayskip 1.5ex plus6pt minus4pt%
+\belowdisplayskip \abovedisplayskip%
+\abovedisplayshortskip 0pt plus6pt%
+\belowdisplayshortskip 1.5ex plus6pt minus4pt
+\def\small{\@setfontsize{\small}{10}{12pt}}
+\def\footnotesize{\@setfontsize{\footnotesize}{9}{10.5pt}}
+\def\scriptsize{\@setfontsize{\scriptsize}{8}{9pt}}
+\def\tiny{\@setfontsize{\tiny}{6}{7pt}}
+% sublargesize is the same as large - 14pt
+\def\sublargesize{\@setfontsize{\sublargesize}{14}{17pt}}
+\def\large{\@setfontsize{\large}{14}{17pt}}
+\def\Large{\@setfontsize{\Large}{17}{20pt}}
+\def\LARGE{\@setfontsize{\LARGE}{20}{24pt}}
+\def\huge{\@setfontsize{\huge}{22}{26pt}}
+\def\Huge{\@setfontsize{\Huge}{24}{28pt}}
+\fi
+
+
+% V1.6 The Computer Modern Fonts will issue a substitution warning for
+% 24pt titles (24.88pt is used instead); increase the substitution
+% tolerance to turn off this warning.
+\def\fontsubfuzz{.9pt}
+% However, the default (and correct) Times font will scale exactly as needed.
+
+
+% warn the user in case they forget to use the 9pt option with
+% technote
+\ifCLASSOPTIONtechnote%
+ \ifx\CLASSOPTIONpt\@IEEEptsizenine\else%
+ \typeout{** ATTENTION: Technotes are normally 9pt documents.}%
+ \fi%
+\fi
+
+
+% V1.7
+% Improved \textunderscore to provide a much better fake _ when used with
+% OT1 encoding. Under OT1, detect use of pcr or cmtt \ttfamily and use
+% available true _ glyph for those two typewriter fonts.
+\def\@IEEEstringptm{ptm} % Times Roman family
+\def\@IEEEstringppl{ppl} % Palatino Roman family
+\def\@IEEEstringphv{phv} % Helvetica Sans Serif family
+\def\@IEEEstringpcr{pcr} % Courier typewriter family
+\def\@IEEEstringcmtt{cmtt} % Computer Modern typewriter family
+\DeclareTextCommandDefault{\textunderscore}{\leavevmode
+\ifx\f@family\@IEEEstringpcr\string_\else
+\ifx\f@family\@IEEEstringcmtt\string_\else
+\ifx\f@family\@IEEEstringptm\kern 0em\vbox{\hrule\@width 0.5em\@height 0.5pt\kern -0.3ex}\else
+\ifx\f@family\@IEEEstringppl\kern 0em\vbox{\hrule\@width 0.5em\@height 0.5pt\kern -0.3ex}\else
+\ifx\f@family\@IEEEstringphv\kern -0.03em\vbox{\hrule\@width 0.62em\@height 0.52pt\kern -0.33ex}\kern -0.03em\else
+\kern 0.09em\vbox{\hrule\@width 0.6em\@height 0.44pt\kern -0.63pt\kern -0.42ex}\kern 0.09em\fi\fi\fi\fi\fi\relax}
+
+
+
+
+% set the default \baselinestretch
+\def\baselinestretch{1}
+\ifCLASSOPTIONdraftcls
+ \def\baselinestretch{1.5}% default baselinestretch for draft modes
+\fi
+
+
+% process CLASSINPUT baselinestretch
+\ifx\CLASSINPUTbaselinestretch\@IEEEundefined
+\else
+ \edef\baselinestretch{\CLASSINPUTbaselinestretch} % user CLASSINPUT override
+ \typeout{** ATTENTION: Overriding \string\baselinestretch\space to
+ \baselinestretch\space via \string\CLASSINPUT.}
+\fi
+
+\normalsize % make \baselinestretch take effect
+
+
+
+
+% store the normalsize baselineskip
+\newdimen\CLASSINFOnormalsizebaselineskip
+\CLASSINFOnormalsizebaselineskip=\baselineskip\relax
+% and the normalsize unity (baselinestretch=1) baselineskip
+% we could save a register by giving the user access to
+% \@IEEEnormalsizeunitybaselineskip.
However, let's protect +% its read only internal status +\newdimen\CLASSINFOnormalsizeunitybaselineskip +\CLASSINFOnormalsizeunitybaselineskip=\@IEEEnormalsizeunitybaselineskip\relax +% store the nominal value of jot +\newdimen\IEEEnormaljot +\IEEEnormaljot=0.25\baselineskip\relax + +% set \jot +\jot=\IEEEnormaljot\relax + + + + +% V1.6, we are now going to fine tune the interword spacing +% The default interword glue for Times under TeX appears to use a +% nominal interword spacing of 25% (relative to the font size, i.e., 1em) +% a maximum of 40% and a minimum of 19%. +% For example, 10pt text uses an interword glue of: +% +% 2.5pt plus 1.49998pt minus 0.59998pt +% +% However, IEEE allows for a more generous range which reduces the need +% for hyphenation, especially for two column text. Furthermore, IEEE +% tends to use a little bit more nominal space between the words. +% IEEE's interword spacing percentages appear to be: +% 35% nominal +% 23% minimum +% 50% maximum +% (They may even be using a tad more for the largest fonts such as 24pt.) +% +% for bold text, IEEE increases the spacing a little more: +% 37.5% nominal +% 23% minimum +% 55% maximum + +% here are the interword spacing ratios we'll use +% for medium (normal weight) +\def\@IEEEinterspaceratioM{0.35} +\def\@IEEEinterspaceMINratioM{0.23} +\def\@IEEEinterspaceMAXratioM{0.50} + +% for bold +\def\@IEEEinterspaceratioB{0.375} +\def\@IEEEinterspaceMINratioB{0.23} +\def\@IEEEinterspaceMAXratioB{0.55} + + +% command to revise the interword spacing for the current font under TeX: +% \fontdimen2 = nominal interword space +% \fontdimen3 = interword stretch +% \fontdimen4 = interword shrink +% since all changes to the \fontdimen are global, we can enclose these commands +% in braces to confine any font attribute or length changes +\def\@@@IEEEsetfontdimens#1#2#3{{% +\setlength{\@IEEEtrantmpdimenB}{\f@size pt}% grab the font size in pt, could use 1em instead. +\setlength{\@IEEEtrantmpdimenA}{#1\@IEEEtrantmpdimenB}% +\fontdimen2\font=\@IEEEtrantmpdimenA\relax +\addtolength{\@IEEEtrantmpdimenA}{-#2\@IEEEtrantmpdimenB}% +\fontdimen3\font=-\@IEEEtrantmpdimenA\relax +\setlength{\@IEEEtrantmpdimenA}{#1\@IEEEtrantmpdimenB}% +\addtolength{\@IEEEtrantmpdimenA}{-#3\@IEEEtrantmpdimenB}% +\fontdimen4\font=\@IEEEtrantmpdimenA\relax}} + +% revise the interword spacing for each font weight +\def\@@IEEEsetfontdimens{{% +\mdseries +\@@@IEEEsetfontdimens{\@IEEEinterspaceratioM}{\@IEEEinterspaceMAXratioM}{\@IEEEinterspaceMINratioM}% +\bfseries +\@@@IEEEsetfontdimens{\@IEEEinterspaceratioB}{\@IEEEinterspaceMAXratioB}{\@IEEEinterspaceMINratioB}% +}} + +% revise the interword spacing for each font shape +% \slshape is not often used for IEEE work and is not altered here. The \scshape caps are +% already a tad too large in the free LaTeX fonts (as compared to what IEEE uses) so we +% won't alter these either. +\def\@IEEEsetfontdimens{{% +\normalfont +\@@IEEEsetfontdimens +\normalfont\itshape +\@@IEEEsetfontdimens +}} + +% command to revise the interword spacing for each font size (and shape +% and weight). Only the \rmfamily is done here as \ttfamily uses a +% fixed spacing and \sffamily is not used as the main text of IEEE papers. 
+\def\@IEEEtunefonts{{\selectfont\rmfamily
+\tiny\@IEEEsetfontdimens
+\scriptsize\@IEEEsetfontdimens
+\footnotesize\@IEEEsetfontdimens
+\small\@IEEEsetfontdimens
+\normalsize\@IEEEsetfontdimens
+\sublargesize\@IEEEsetfontdimens
+\large\@IEEEsetfontdimens
+\LARGE\@IEEEsetfontdimens
+\huge\@IEEEsetfontdimens
+\Huge\@IEEEsetfontdimens}}
+
+% if the nofonttune class option is not given, revise the interword spacing
+% now - in case IEEEtran makes any default length measurements, and make
+% sure all the default fonts are loaded
+\ifCLASSOPTIONnofonttune\else
+\@IEEEtunefonts
+\fi
+
+% and again at the start of the document in case the user loaded different fonts
+\AtBeginDocument{\ifCLASSOPTIONnofonttune\else\@IEEEtunefonts\fi}
+
+
+
+% V1.6
+% LaTeX is a little too quick to use hyphenations
+% So, we increase the penalty for their use and raise
+% the badness level that triggers an underfull hbox
+% warning. The author may still have to tweak things,
+% but the appearance will be much better "right out
+% of the box" than that under V1.5 and prior.
+% TeX default is 50
+\hyphenpenalty=750
+% If we didn't adjust the interword spacing, 2200 might be better.
+% The TeX default is 1000
+\hbadness=1350
+% IEEE does not use extra spacing after punctuation
+\frenchspacing
+
+% V1.7 increase this a tad to discourage equation breaks
+\binoppenalty=1000 % default 700
+\relpenalty=800 % default 500
+
+
+% margin note stuff
+\marginparsep 10pt
+\marginparwidth 20pt
+\marginparpush 25pt
+
+
+% if things get too close, go ahead and let them touch
+\lineskip 0pt
+\normallineskip 0pt
+\lineskiplimit 0pt
+\normallineskiplimit 0pt
+
+% The distance from the lower edge of the text body to the
+% footline
+\footskip 0.4in
+
+% normally zero, should be relative to font height.
+% put in a little rubber to help stop some bad breaks (underfull vboxes)
+\parskip 0ex plus 0.2ex minus 0.1ex
+\ifCLASSOPTIONconference
+\parskip 6pt plus 2pt minus 1pt
+\fi
+
+\parindent 1.0em
+\ifCLASSOPTIONconference
+\parindent 14.45pt
+\fi
+
+\topmargin -49.0pt
+\headheight 12pt
+\headsep 0.25in
+
+% use the normal font baselineskip
+% so that \topskip is unaffected by changes in \baselinestretch
+\topskip=\@IEEEnormalsizeunitybaselineskip
+\textheight 58pc % 9.63in, 696pt
+% Tweak textheight to a perfect integer number of lines/page.
+% The normal baselineskip for each document point size is used
+% to determine these values.
+\ifx\CLASSOPTIONpt\@IEEEptsizenine\textheight=63\@IEEEnormalsizeunitybaselineskip\fi % 63 lines/page
+\ifx\CLASSOPTIONpt\@IEEEptsizeten\textheight=58\@IEEEnormalsizeunitybaselineskip\fi % 58 lines/page
+\ifx\CLASSOPTIONpt\@IEEEptsizeeleven\textheight=52\@IEEEnormalsizeunitybaselineskip\fi % 52 lines/page
+\ifx\CLASSOPTIONpt\@IEEEptsizetwelve\textheight=50\@IEEEnormalsizeunitybaselineskip\fi % 50 lines/page
+
+
+\columnsep 1.5pc
+\textwidth 184.2mm
+
+
+% the default side margins are equal
+\if@IEEEusingAfourpaper
+\oddsidemargin 14.32mm
+\evensidemargin 14.32mm
+\else
+\oddsidemargin 0.680in
+\evensidemargin 0.680in
+\fi
+% compensate for LaTeX's 1in offset
+\addtolength{\oddsidemargin}{-1in}
+\addtolength{\evensidemargin}{-1in}
+
+
+
+% adjust margins for conference mode
+\ifCLASSOPTIONconference
+ \topmargin -0.25in
+ % we retain the reserved, but unused space for headers
+ \addtolength{\topmargin}{-\headheight}
+ \addtolength{\topmargin}{-\headsep}
+ \textheight 9.25in % The standard for conferences (668.4975pt)
+ % Tweak textheight to a perfect integer number of lines/page.
+ \ifx\CLASSOPTIONpt\@IEEEptsizenine\textheight=61\@IEEEnormalsizeunitybaselineskip\fi % 61 lines/page
+ \ifx\CLASSOPTIONpt\@IEEEptsizeten\textheight=62\@IEEEnormalsizeunitybaselineskip\fi % 62 lines/page
+ \ifx\CLASSOPTIONpt\@IEEEptsizeeleven\textheight=50\@IEEEnormalsizeunitybaselineskip\fi % 50 lines/page
+ \ifx\CLASSOPTIONpt\@IEEEptsizetwelve\textheight=48\@IEEEnormalsizeunitybaselineskip\fi % 48 lines/page
+\fi
+
+
+% compsoc conference
+\ifCLASSOPTIONcompsoc
+\ifCLASSOPTIONconference
+ % compsoc conferences use a larger value for columnsep
+ \columnsep 0.375in
+ % compsoc conferences want 1in top margin, 1.125in bottom margin
+ \topmargin 0in
+ \addtolength{\topmargin}{-6pt}% we tweak this a tad to better comply with top of line stuff
+ % we retain the reserved, but unused space for headers
+ \addtolength{\topmargin}{-\headheight}
+ \addtolength{\topmargin}{-\headsep}
+ \textheight 8.875in % (641.39625pt)
+ % Tweak textheight to a perfect integer number of lines/page.
+ \ifx\CLASSOPTIONpt\@IEEEptsizenine\textheight=58\@IEEEnormalsizeunitybaselineskip\fi % 58 lines/page
+ \ifx\CLASSOPTIONpt\@IEEEptsizeten\textheight=53\@IEEEnormalsizeunitybaselineskip\fi % 53 lines/page
+ \ifx\CLASSOPTIONpt\@IEEEptsizeeleven\textheight=48\@IEEEnormalsizeunitybaselineskip\fi % 48 lines/page
+ \ifx\CLASSOPTIONpt\@IEEEptsizetwelve\textheight=46\@IEEEnormalsizeunitybaselineskip\fi % 46 lines/page
+ \textwidth 6.5in
+ % the default side margins are equal
+ \if@IEEEusingAfourpaper
+ \oddsidemargin 22.45mm
+ \evensidemargin 22.45mm
+ \else
+ \oddsidemargin 1in
+ \evensidemargin 1in
+ \fi
+ % compensate for LaTeX's 1in offset
+ \addtolength{\oddsidemargin}{-1in}
+ \addtolength{\evensidemargin}{-1in}
+\fi\fi
+
+
+
+% draft mode settings override those of all other modes
+% provides a nice 1in margin all around the paper and extra
+% space between the lines for editor's comments
+\ifCLASSOPTIONdraftcls
+ % want 1in from top of paper to text
+ \setlength{\topmargin}{-\headsep}%
+ \addtolength{\topmargin}{-\headheight}%
+ % we want 1in side margins regardless of paper type
+ \oddsidemargin 0in
+ \evensidemargin 0in
+ % set the text width
+ \setlength{\textwidth}{\paperwidth}%
+ \addtolength{\textwidth}{-2.0in}%
+ \setlength{\textheight}{\paperheight}%
+ \addtolength{\textheight}{-2.0in}%
+ % digitize textheight to be an integer number of lines.
+ % this may cause the bottom margin to be off a tad
+ \addtolength{\textheight}{-1\topskip}%
+ \divide\textheight by \baselineskip%
+ \multiply\textheight by \baselineskip%
+ \addtolength{\textheight}{\topskip}%
+\fi
+
+
+
+% process CLASSINPUT inner/outer margin
+% if inner margin defined, but outer margin not, set outer to inner.
+\ifx\CLASSINPUTinnersidemargin\@IEEEundefined
+\else
+ \ifx\CLASSINPUToutersidemargin\@IEEEundefined
+ \edef\CLASSINPUToutersidemargin{\CLASSINPUTinnersidemargin}
+ \fi
+\fi
+
+\ifx\CLASSINPUToutersidemargin\@IEEEundefined
+\else
+ % if outer margin defined, but inner margin not, set inner to outer.
+ \ifx\CLASSINPUTinnersidemargin\@IEEEundefined + \edef\CLASSINPUTinnersidemargin{\CLASSINPUToutersidemargin} + \fi + \setlength{\oddsidemargin}{\CLASSINPUTinnersidemargin} + \ifCLASSOPTIONtwoside + \setlength{\evensidemargin}{\CLASSINPUToutersidemargin} + \else + \setlength{\evensidemargin}{\CLASSINPUTinnersidemargin} + \fi + \addtolength{\oddsidemargin}{-1in} + \addtolength{\evensidemargin}{-1in} + \setlength{\textwidth}{\paperwidth} + \addtolength{\textwidth}{-\CLASSINPUTinnersidemargin} + \addtolength{\textwidth}{-\CLASSINPUToutersidemargin} + \typeout{** ATTENTION: Overriding inner side margin to \CLASSINPUTinnersidemargin\space and + outer side margin to \CLASSINPUToutersidemargin\space via \string\CLASSINPUT.} +\fi + + + +% process CLASSINPUT top/bottom text margin +% if toptext margin defined, but bottomtext margin not, set bottomtext to toptext margin +\ifx\CLASSINPUTtoptextmargin\@IEEEundefined +\else + \ifx\CLASSINPUTbottomtextmargin\@IEEEundefined + \edef\CLASSINPUTbottomtextmargin{\CLASSINPUTtoptextmargin} + \fi +\fi + +\ifx\CLASSINPUTbottomtextmargin\@IEEEundefined +\else + % if bottomtext margin defined, but toptext margin not, set toptext to bottomtext margin + \ifx\CLASSINPUTtoptextmargin\@IEEEundefined + \edef\CLASSINPUTtoptextmargin{\CLASSINPUTbottomtextmargin} + \fi + \setlength{\topmargin}{\CLASSINPUTtoptextmargin} + \addtolength{\topmargin}{-1in} + \addtolength{\topmargin}{-\headheight} + \addtolength{\topmargin}{-\headsep} + \setlength{\textheight}{\paperheight} + \addtolength{\textheight}{-\CLASSINPUTtoptextmargin} + \addtolength{\textheight}{-\CLASSINPUTbottomtextmargin} + % in the default format we use the normal baselineskip as topskip + % we only need 0.7 of this to clear typical top text and we need + % an extra 0.3 spacing at the bottom for descenders. This will + % correct for both. + \addtolength{\topmargin}{-0.3\@IEEEnormalsizeunitybaselineskip} + \typeout{** ATTENTION: Overriding top text margin to \CLASSINPUTtoptextmargin\space and + bottom text margin to \CLASSINPUTbottomtextmargin\space via \string\CLASSINPUT.} +\fi + + + + + + + +% LIST SPACING CONTROLS + +% Controls the amount of EXTRA spacing +% above and below \trivlist +% Both \list and IED lists override this. +% However, \trivlist will use this as will most +% things built from \trivlist like the \center +% environment. +\topsep 0.5\baselineskip + +% Controls the additional spacing around lists preceded +% or followed by blank lines. IEEE does not increase +% spacing before or after paragraphs so it is set to zero. +% \z@ is the same as zero, but faster. +\partopsep \z@ + +% Controls the spacing between paragraphs in lists. +% IEEE does not increase spacing before or after paragraphs +% so this is also zero. +% With IEEEtran.cls, global changes to +% this value DO affect lists (but not IED lists). +\parsep \z@ + +% Controls the extra spacing between list items. +% IEEE does not put extra spacing between items. +% With IEEEtran.cls, global changes to this value DO affect +% lists (but not IED lists). +\itemsep \z@ + +% \itemindent is the amount to indent the FIRST line of a list +% item. It is auto set to zero within the \list environment. To alter +% it, you have to do so when you call the \list. +% However, IEEE uses this for the theorem environment +% There is an alternative value for this near \leftmargini below +\itemindent -1em + +% \leftmargin, the spacing from the left margin of the main text to +% the left of the main body of a list item is set by \list. 
+% Hence this statement does nothing for lists.
+% But, quote and verse do use it for indention.
+\leftmargin 2em
+
+% we retain this stuff from the older IEEEtran.cls so that \list
+% will work the same way as before. However, itemize, enumerate and
+% description (IED) couldn't care less about what these are as they
+% all are overridden.
+\leftmargini 2em
+%\itemindent 2em % Alternative values: sometimes used.
+%\leftmargini 0em
+\leftmarginii 1em
+\leftmarginiii 1.5em
+\leftmarginiv 1.5em
+\leftmarginv 1.0em
+\leftmarginvi 1.0em
+\labelsep 0.5em
+\labelwidth \z@
+
+
+% The old IEEEtran.cls behavior of \list is retained.
+% However, the new V1.3 IED list environments override all the
+% @list stuff (\@listX is called within \list for the
+% appropriate level just before the user's list_decl is called).
+% \topsep is now 2pt as IEEE puts a little extra space around
+% lists - used by those non-IED macros that depend on \list.
+% Note that \parsep and \itemsep are not redefined as in
+% the sizexx.clo \@listX (which article.cls uses) so global changes
+% of these values DO affect \list
+%
+\def\@listi{\leftmargin\leftmargini \topsep 2pt plus 1pt minus 1pt}
+\let\@listI\@listi
+\def\@listii{\leftmargin\leftmarginii\labelwidth\leftmarginii%
+ \advance\labelwidth-\labelsep \topsep 2pt}
+\def\@listiii{\leftmargin\leftmarginiii\labelwidth\leftmarginiii%
+ \advance\labelwidth-\labelsep \topsep 2pt}
+\def\@listiv{\leftmargin\leftmarginiv\labelwidth\leftmarginiv%
+ \advance\labelwidth-\labelsep \topsep 2pt}
+\def\@listv{\leftmargin\leftmarginv\labelwidth\leftmarginv%
+ \advance\labelwidth-\labelsep \topsep 2pt}
+\def\@listvi{\leftmargin\leftmarginvi\labelwidth\leftmarginvi%
+ \advance\labelwidth-\labelsep \topsep 2pt}
+
+
+% IEEE uses 5) not 5.
+\def\labelenumi{\theenumi)} \def\theenumi{\arabic{enumi}}
+
+% IEEE uses a) not (a)
+\def\labelenumii{\theenumii)} \def\theenumii{\alph{enumii}}
+
+% IEEE uses iii) not iii.
+\def\labelenumiii{\theenumiii)} \def\theenumiii{\roman{enumiii}}
+
+% IEEE uses A) not A.
+\def\labelenumiv{\theenumiv)} \def\theenumiv{\Alph{enumiv}}
+
+% exactly the same as in article.cls
+\def\p@enumii{\theenumi}
+\def\p@enumiii{\theenumi(\theenumii)}
+\def\p@enumiv{\p@enumiii\theenumiii}
+
+% itemized list label styles
+\def\labelitemi{$\bullet$}
+\def\labelitemii{$\circ$}
+\def\labelitemiii{\vrule height 0.8ex depth -0.2ex width 0.6ex}
+\def\labelitemiv{$\ast$}
+
+
+
+% **** V1.3 ENHANCEMENTS ****
+% Itemize, Enumerate and Description (IED) List Controls
+% ***************************
+%
+%
+% IEEE seems to use at least two different values by
+% which ITEMIZED list labels are indented to the right.
+% For The Journal of Lightwave Technology (JLT) and The Journal
+% on Selected Areas in Communications (JSAC), they tend to use
+% an indention equal to \parindent. For Transactions on Communications
+% they tend to indent ITEMIZED lists a little more--- 1.3\parindent.
+% We'll provide both values here for you so that you can choose
+% which one you like in your document using a command such as:
+% \setlength{\IEEEilabelindent}{\IEEEilabelindentB}
+\newdimen\IEEEilabelindentA
+\IEEEilabelindentA \parindent
+
+\newdimen\IEEEilabelindentB
+\IEEEilabelindentB 1.3\parindent
+% However, we'll default to using \parindent
+% which makes more sense to me
+\newdimen\IEEEilabelindent
+\IEEEilabelindent \IEEEilabelindentA
+
+
+% This controls the default amount the enumerated list labels
+% are indented to the right.
+% Normally, this is the same as the paragraph indention
+\newdimen\IEEEelabelindent
+\IEEEelabelindent \parindent
+
+% This controls the default amount the description list labels
+% are indented to the right.
+% Normally, this is the same as the paragraph indention
+\newdimen\IEEEdlabelindent
+\IEEEdlabelindent \parindent
+
+% This is the value actually used within the IED lists.
+% The IED environments automatically set its value to
+% one of the three values above, so global changes do
+% not have any effect
+\newdimen\IEEElabelindent
+\IEEElabelindent \parindent
+
+% The actual amount labels will be indented is
+% \IEEElabelindent multiplied by the factor below
+% corresponding to the level of nesting depth
+% This provides a means by which the user can
+% alter the effective \IEEElabelindent for deeper
+% levels
+% There may not be such a thing as correct "standard IEEE"
+% values. What IEEE actually does may depend on the specific
+% circumstances.
+% The first list level almost always has full indention.
+% The second levels I've seen have only 75% of the normal indentation.
+% Three-level or greater nestings are very rare. I am guessing
+% that they don't use any indentation.
+\def\IEEElabelindentfactori{1.0} % almost always one
+\def\IEEElabelindentfactorii{0.75} % 0.0 or 1.0 may be used in some cases
+\def\IEEElabelindentfactoriii{0.0} % 0.75? 0.5? 0.0?
+\def\IEEElabelindentfactoriv{0.0}
+\def\IEEElabelindentfactorv{0.0}
+\def\IEEElabelindentfactorvi{0.0}
+
+% value actually used within IED lists; it is auto
+% set to one of the 6 values above
+% global changes here have no effect
+\def\IEEElabelindentfactor{1.0}
+
+% This controls the default spacing between the end of the IED
+% list labels and the list text, when normal text is used for
+% the labels.
+\newdimen\IEEEiednormlabelsep
+\IEEEiednormlabelsep \parindent
+
+% This controls the default spacing between the end of the IED
+% list labels and the list text, when math symbols are used for
+% the labels (nomenclature lists). IEEE usually increases the
+% spacing in these cases
+\newdimen\IEEEiedmathlabelsep
+\IEEEiedmathlabelsep 1.2em
+
+% This controls the extra vertical separation put above and
+% below each IED list. IEEE usually puts a little extra spacing
+% around each list. However, this spacing is barely noticeable.
+\newskip\IEEEiedtopsep
+\IEEEiedtopsep 2pt plus 1pt minus 1pt
+
+
+% This command is executed within each IED list environment
+% at the beginning of the list. You can use this to set the
+% parameters for some/all your IED list(s) without disturbing
+% global parameters that affect things other than lists.
+% e.g., \renewcommand{\IEEEiedlistdecl}{\setlength{\labelsep}{5em}}
+% will alter the \labelsep for the next list(s) until
+% \IEEEiedlistdecl is redefined.
+\def\IEEEiedlistdecl{\relax}
+
+% This command provides an easy way to set \leftmargin based
+% on the \labelwidth, \labelsep and the argument \IEEElabelindent
+% Usage: \IEEEcalcleftmargin{width-to-indent-the-label}
+% output is in the \leftmargin variable, i.e., effectively:
+% \leftmargin = argument + \labelwidth + \labelsep
+% Note controlled spacing here, shield end of lines with %
+\def\IEEEcalcleftmargin#1{\setlength{\leftmargin}{#1}%
+\addtolength{\leftmargin}{\labelwidth}%
+\addtolength{\leftmargin}{\labelsep}}
+
+% This command provides an easy way to set \labelwidth to the
+% width of the given text. It is the same as
+% \settowidth{\labelwidth}{label-text}
+% and useful as a shorter alternative.
+% Typically used to set \labelwidth to be the width +% of the longest label in the list +\def\IEEEsetlabelwidth#1{\settowidth{\labelwidth}{#1}} + +% When this command is executed, IED lists will use the +% IEEEiedmathlabelsep label separation rather than the normal +% spacing. To have an effect, this command must be executed via +% the \IEEEiedlistdecl or within the option of the IED list +% environments. +\def\IEEEusemathlabelsep{\setlength{\labelsep}{\IEEEiedmathlabelsep}} + +% A flag which controls whether the IED lists automatically +% calculate \leftmargin from \IEEElabelindent, \labelwidth and \labelsep +% Useful if you want to specify your own \leftmargin +% This flag must be set (\IEEEnocalcleftmargintrue or \IEEEnocalcleftmarginfalse) +% via the \IEEEiedlistdecl or within the option of the IED list +% environments to have an effect. +\newif\ifIEEEnocalcleftmargin +\IEEEnocalcleftmarginfalse + +% A flag which controls whether \IEEElabelindent is multiplied by +% the \IEEElabelindentfactor for each list level. +% This flag must be set via the \IEEEiedlistdecl or within the option +% of the IED list environments to have an effect. +\newif\ifIEEEnolabelindentfactor +\IEEEnolabelindentfactorfalse + + +% internal variable to indicate type of IED label +% justification +% 0 - left; 1 - center; 2 - right +\def\@IEEEiedjustify{0} + + +% commands to allow the user to control IED +% label justifications. Use these commands within +% the IED environment option or in the \IEEEiedlistdecl +% Note that changing the normal list justifications +% is nonstandard and IEEE may not like it if you do so! +% I include these commands as they may be helpful to +% those who are using these enhanced list controls for +% other non-IEEE related LaTeX work. +% itemize and enumerate automatically default to right +% justification, description defaults to left. 
+\def\IEEEiedlabeljustifyl{\def\@IEEEiedjustify{0}}%left +\def\IEEEiedlabeljustifyc{\def\@IEEEiedjustify{1}}%center +\def\IEEEiedlabeljustifyr{\def\@IEEEiedjustify{2}}%right + + + + +% commands to save to and restore from the list parameter copies +% this allows us to set all the list parameters within +% the list_decl and prevent \list (and its \@list) +% from overriding any of our parameters +% V1.6 use \edefs instead of dimen's to conserve dimen registers +% Note controlled spacing here, shield end of lines with % +\def\@IEEEsavelistparams{\edef\@IEEEiedtopsep{\the\topsep}% +\edef\@IEEEiedlabelwidth{\the\labelwidth}% +\edef\@IEEEiedlabelsep{\the\labelsep}% +\edef\@IEEEiedleftmargin{\the\leftmargin}% +\edef\@IEEEiedpartopsep{\the\partopsep}% +\edef\@IEEEiedparsep{\the\parsep}% +\edef\@IEEEieditemsep{\the\itemsep}% +\edef\@IEEEiedrightmargin{\the\rightmargin}% +\edef\@IEEEiedlistparindent{\the\listparindent}% +\edef\@IEEEieditemindent{\the\itemindent}} + +% Note controlled spacing here +\def\@IEEErestorelistparams{\topsep\@IEEEiedtopsep\relax% +\labelwidth\@IEEEiedlabelwidth\relax% +\labelsep\@IEEEiedlabelsep\relax% +\leftmargin\@IEEEiedleftmargin\relax% +\partopsep\@IEEEiedpartopsep\relax% +\parsep\@IEEEiedparsep\relax% +\itemsep\@IEEEieditemsep\relax% +\rightmargin\@IEEEiedrightmargin\relax% +\listparindent\@IEEEiedlistparindent\relax% +\itemindent\@IEEEieditemindent\relax} + + +% v1.6b provide original LaTeX IED list environments +% note that latex.ltx defines \itemize and \enumerate, but not \description +% which must be created by the base classes +% save original LaTeX itemize and enumerate +\let\LaTeXitemize\itemize +\let\endLaTeXitemize\enditemize +\let\LaTeXenumerate\enumerate +\let\endLaTeXenumerate\endenumerate + +% provide original LaTeX description environment from article.cls +\newenvironment{LaTeXdescription} + {\list{}{\labelwidth\z@ \itemindent-\leftmargin + \let\makelabel\descriptionlabel}} + {\endlist} +\newcommand*\descriptionlabel[1]{\hspace\labelsep + \normalfont\bfseries #1} + + +% override LaTeX's default IED lists +\def\itemize{\@IEEEitemize} +\def\enditemize{\@endIEEEitemize} +\def\enumerate{\@IEEEenumerate} +\def\endenumerate{\@endIEEEenumerate} +\def\description{\@IEEEdescription} +\def\enddescription{\@endIEEEdescription} + +% provide the user with aliases - may help those using packages that +% override itemize, enumerate, or description +\def\IEEEitemize{\@IEEEitemize} +\def\endIEEEitemize{\@endIEEEitemize} +\def\IEEEenumerate{\@IEEEenumerate} +\def\endIEEEenumerate{\@endIEEEenumerate} +\def\IEEEdescription{\@IEEEdescription} +\def\endIEEEdescription{\@endIEEEdescription} + + +% V1.6 we want to keep the IEEEtran IED list definitions as our own internal +% commands so they are protected against redefinition +\def\@IEEEitemize{\@ifnextchar[{\@@IEEEitemize}{\@@IEEEitemize[\relax]}} +\def\@IEEEenumerate{\@ifnextchar[{\@@IEEEenumerate}{\@@IEEEenumerate[\relax]}} +\def\@IEEEdescription{\@ifnextchar[{\@@IEEEdescription}{\@@IEEEdescription[\relax]}} +\def\@endIEEEitemize{\endlist} +\def\@endIEEEenumerate{\endlist} +\def\@endIEEEdescription{\endlist} + + +% DO NOT ALLOW BLANK LINES TO BE IN THESE IED ENVIRONMENTS +% AS THIS WILL FORCE NEW PARAGRAPHS AFTER THE IED LISTS +% IEEEtran itemized list MDS 1/2001 +% Note controlled spacing here, shield end of lines with % +\def\@@IEEEitemize[#1]{% + \ifnum\@itemdepth>3\relax\@toodeep\else% + \ifnum\@listdepth>5\relax\@toodeep\else% + \advance\@itemdepth\@ne% + \edef\@itemitem{labelitem\romannumeral\the\@itemdepth}% + % get 
the labelindentfactor for this level
+ \advance\@listdepth\@ne% we need to know what the level WILL be
+ \edef\IEEElabelindentfactor{\csname IEEElabelindentfactor\romannumeral\the\@listdepth\endcsname}%
+ \advance\@listdepth-\@ne% undo our increment
+ \def\@IEEEiedjustify{2}% right justified labels are default
+ % set other defaults
+ \IEEEnocalcleftmarginfalse%
+ \IEEEnolabelindentfactorfalse%
+ \topsep\IEEEiedtopsep%
+ \IEEElabelindent\IEEEilabelindent%
+ \labelsep\IEEEiednormlabelsep%
+ \partopsep 0ex%
+ \parsep 0ex%
+ \itemsep \parskip%
+ \rightmargin 0em%
+ \listparindent 0em%
+ \itemindent 0em%
+ % calculate the label width
+ % the user can override this later if
+ % they specified a \labelwidth
+ \settowidth{\labelwidth}{\csname labelitem\romannumeral\the\@itemdepth\endcsname}%
+ \@IEEEsavelistparams% save our list parameters
+ \list{\csname\@itemitem\endcsname}{%
+ \@IEEErestorelistparams% override any list{} changes
+ % to our globals
+ \let\makelabel\@IEEEiedmakelabel% v1.6b setup \makelabel
+ \IEEEiedlistdecl% let user alter parameters
+ #1\relax%
+ % If the user has requested not to use the
+ % labelindent factor, don't revise \IEEElabelindent
+ \ifIEEEnolabelindentfactor\relax%
+ \else\IEEElabelindent=\IEEElabelindentfactor\IEEElabelindent%
+ \fi%
+ % Unless the user has requested otherwise,
+ % calculate our left margin based
+ % on \IEEElabelindent, \labelwidth and
+ % \labelsep
+ \ifIEEEnocalcleftmargin\relax%
+ \else\IEEEcalcleftmargin{\IEEElabelindent}%
+ \fi}\fi\fi}%
+
+
+% DO NOT ALLOW BLANK LINES TO BE IN THESE IED ENVIRONMENTS
+% AS THIS WILL FORCE NEW PARAGRAPHS AFTER THE IED LISTS
+% IEEEtran enumerate list MDS 1/2001
+% Note controlled spacing here, shield end of lines with %
+\def\@@IEEEenumerate[#1]{%
+ \ifnum\@enumdepth>3\relax\@toodeep\else%
+ \ifnum\@listdepth>5\relax\@toodeep\else%
+ \advance\@enumdepth\@ne%
+ \edef\@enumctr{enum\romannumeral\the\@enumdepth}%
+ % get the labelindentfactor for this level
+ \advance\@listdepth\@ne% we need to know what the level WILL be
+ \edef\IEEElabelindentfactor{\csname IEEElabelindentfactor\romannumeral\the\@listdepth\endcsname}%
+ \advance\@listdepth-\@ne% undo our increment
+ \def\@IEEEiedjustify{2}% right justified labels are default
+ % set other defaults
+ \IEEEnocalcleftmarginfalse%
+ \IEEEnolabelindentfactorfalse%
+ \topsep\IEEEiedtopsep%
+ \IEEElabelindent\IEEEelabelindent%
+ \labelsep\IEEEiednormlabelsep%
+ \partopsep 0ex%
+ \parsep 0ex%
+ \itemsep 0ex%
+ \rightmargin 0em%
+ \listparindent 0em%
+ \itemindent 0em%
+ % calculate the label width
+ % We'll set it to the width suitable for all labels using
+ % normalfont 1) to 9)
+ % The user can override this later
+ \settowidth{\labelwidth}{9)}%
+ \@IEEEsavelistparams% save our list parameters
+ \list{\csname label\@enumctr\endcsname}{\usecounter{\@enumctr}%
+ \@IEEErestorelistparams% override any list{} changes
+ % to our globals
+ \let\makelabel\@IEEEiedmakelabel% v1.6b setup \makelabel
+ \IEEEiedlistdecl% let user alter parameters
+ #1\relax%
+ % If the user has requested not to use the
+ % IEEElabelindent factor, don't revise \IEEElabelindent
+ \ifIEEEnolabelindentfactor\relax%
+ \else\IEEElabelindent=\IEEElabelindentfactor\IEEElabelindent%
+ \fi%
+ % Unless the user has requested otherwise,
+ % calculate our left margin based
+ % on \IEEElabelindent, \labelwidth and
+ % \labelsep
+ \ifIEEEnocalcleftmargin\relax%
+ \else\IEEEcalcleftmargin{\IEEElabelindent}%
+ \fi}\fi\fi}%
+
+
+% DO NOT ALLOW BLANK LINES TO BE IN THESE IED ENVIRONMENTS
+% AS THIS WILL FORCE NEW
PARAGRAPHS AFTER THE IED LISTS +% IEEEtran description list MDS 1/2001 +% Note controlled spacing here, shield end of lines with % +\def\@@IEEEdescription[#1]{% + \ifnum\@listdepth>5\relax\@toodeep\else% + % get the labelindentfactor for this level + \advance\@listdepth\@ne% we need to know what the level WILL be + \edef\IEEElabelindentfactor{\csname IEEElabelindentfactor\romannumeral\the\@listdepth\endcsname}% + \advance\@listdepth-\@ne% undo our increment + \def\@IEEEiedjustify{0}% left justified labels are default + % set other defaults + \IEEEnocalcleftmarginfalse% + \IEEEnolabelindentfactorfalse% + \topsep\IEEEiedtopsep% + \IEEElabelindent\IEEEdlabelindent% + % assume normal labelsep + \labelsep\IEEEiednormlabelsep% + \partopsep 0ex% + \parsep 0ex% + \itemsep 0ex% + \rightmargin 0em% + \listparindent 0em% + \itemindent 0em% + % Bogus label width in case the user forgets + % to set it. + % TIP: If you want to see what a variable's width is you + % can use the TeX command \showthe\width-variable to + % display it on the screen during compilation + % (This might be helpful to know when you need to find out + % which label is the widest) + \settowidth{\labelwidth}{Hello}% + \@IEEEsavelistparams% save our list parameters + \list{}{\@IEEErestorelistparams% override any list{} changes + % to our globals + \let\makelabel\@IEEEiedmakelabel% v1.6b setup \makelabel + \IEEEiedlistdecl% let user alter parameters + #1\relax% + % If the user has requested not to use the + % labelindent factor, don't revise \IEEElabelindent + \ifIEEEnolabelindentfactor\relax% + \else\IEEElabelindent=\IEEElabelindentfactor\IEEElabelindent% + \fi% + % Unless the user has requested otherwise, + % calculate our left margin based + % on \IEEElabelindent, \labelwidth and + % \labelsep + \ifIEEEnocalcleftmargin\relax% + \else\IEEEcalcleftmargin{\IEEElabelindent}\relax% + \fi}\fi} + +% v1.6b we use one makelabel that does justification as needed. +\def\@IEEEiedmakelabel#1{\relax\if\@IEEEiedjustify 0\relax +\makebox[\labelwidth][l]{\normalfont #1}\else +\if\@IEEEiedjustify 1\relax +\makebox[\labelwidth][c]{\normalfont #1}\else +\makebox[\labelwidth][r]{\normalfont #1}\fi\fi} + + +% VERSE and QUOTE +% V1.7 define environments with newenvironment +\newenvironment{verse}{\let\\=\@centercr + \list{}{\itemsep\z@ \itemindent -1.5em \listparindent \itemindent + \rightmargin\leftmargin\advance\leftmargin 1.5em}\item\relax} + {\endlist} +\newenvironment{quotation}{\list{}{\listparindent 1.5em \itemindent\listparindent + \rightmargin\leftmargin \parsep 0pt plus 1pt}\item\relax} + {\endlist} +\newenvironment{quote}{\list{}{\rightmargin\leftmargin}\item\relax} + {\endlist} + + +% \titlepage +% provided only for backward compatibility. \maketitle is the correct +% way to create the title page. +\newif\if@restonecol +\def\titlepage{\@restonecolfalse\if@twocolumn\@restonecoltrue\onecolumn + \else \newpage \fi \thispagestyle{empty}\c@page\z@} +\def\endtitlepage{\if@restonecol\twocolumn \else \newpage \fi} + +% standard values from article.cls +\arraycolsep 5pt +\arrayrulewidth .4pt +\doublerulesep 2pt + +\tabcolsep 6pt +\tabbingsep 0.5em + + +%% FOOTNOTES +% +%\skip\footins 10pt plus 4pt minus 2pt +% V1.6 respond to changes in font size +% space added above the footnotes (if present) +\skip\footins 0.9\baselineskip plus 0.4\baselineskip minus 0.2\baselineskip + +% V1.6, we need to make \footnotesep responsive to changes +% in \baselineskip or strange spacings will result when in +% draft mode. 
Here is a little LaTeX secret - \footnotesep +% determines the height of an invisible strut that is placed +% *above* the baseline of footnotes after the first. Since +% LaTeX considers the space for characters to be 0.7/baselineskip +% above the baseline and 0.3/baselineskip below it, we need to +% use 0.7/baselineskip as a \footnotesep to maintain equal spacing +% between all the lines of the footnotes. IEEE often uses a tad +% more, so use 0.8\baselineskip. This slightly larger value also helps +% the text to clear the footnote marks. Note that \thanks in IEEEtran +% uses its own value of \footnotesep which is set in \maketitle. +{\footnotesize +\global\footnotesep 0.8\baselineskip} + +\def\unnumberedfootnote{\gdef\@thefnmark{\quad}\@footnotetext} + +\skip\@mpfootins 0.3\baselineskip +\fboxsep = 3pt +\fboxrule = .4pt +% V1.6 use 1em, then use LaTeX2e's \@makefnmark +% Note that IEEE normally *left* aligns the footnote marks, so we don't need +% box resizing tricks here. +%\long\def\@makefnmark{\scriptsize\normalfont\@thefnmark} +\long\def\@makefntext#1{\parindent 1em\indent\hbox{\@makefnmark}#1}% V1.6 use 1em +\long\def\@maketablefntext#1{\raggedleft\leavevmode\hbox{\@makefnmark}#1} +% V1.7 compsoc does not use superscipts for footnote marks +\ifCLASSOPTIONcompsoc +\def\@IEEEcompsocmakefnmark{\hbox{\normalfont\@thefnmark.\ }} +\long\def\@makefntext#1{\parindent 1em\indent\hbox{\@IEEEcompsocmakefnmark}#1} +\fi + +% IEEE does not use footnote rules. Or do they? +\def\footnoterule{\vskip-2pt \hrule height 0.6pt depth \z@ \vskip1.6pt\relax} +\toks@\expandafter{\@setminipage\let\footnoterule\relax\footnotesep\z@} +\edef\@setminipage{\the\toks@} + +% V1.7 for compsoc, IEEE uses a footnote rule only for \thanks. We devise a "one-shot" +% system to implement this. +\newif\if@IEEEenableoneshotfootnoterule +\@IEEEenableoneshotfootnoterulefalse +\ifCLASSOPTIONcompsoc +\def\footnoterule{\relax\if@IEEEenableoneshotfootnoterule +\kern-5pt +\hbox to \columnwidth{\hfill\vrule width 0.5\columnwidth height 0.4pt\hfill} +\kern4.6pt +\global\@IEEEenableoneshotfootnoterulefalse +\else +\relax +\fi} +\fi + +% V1.6 do not allow LaTeX to break a footnote across multiple pages +\interfootnotelinepenalty=10000 + +% V1.6 discourage breaks within equations +% Note that amsmath normally sets this to 10000, +% but LaTeX2e normally uses 100. 
+\interdisplaylinepenalty=2500
+
+% default allows section depth up to \paragraph
+\setcounter{secnumdepth}{4}
+
+% technotes do not allow \paragraph
+\ifCLASSOPTIONtechnote
+ \setcounter{secnumdepth}{3}
+\fi
+% neither do compsoc conferences
+\@IEEEcompsocconfonly{\setcounter{secnumdepth}{3}}
+
+
+\newcounter{section}
+\newcounter{subsection}[section]
+\newcounter{subsubsection}[subsection]
+\newcounter{paragraph}[subsubsection]
+
+% used only by IEEEtran's IEEEeqnarray as other packages may
+% have their own, different, implementations
+\newcounter{IEEEsubequation}[equation]
+
+% as shown when called by user from \ref, \label and in table of contents
+\def\theequation{\arabic{equation}} % 1
+\def\theIEEEsubequation{\theequation\alph{IEEEsubequation}} % 1a (used only by IEEEtran's IEEEeqnarray)
+\ifCLASSOPTIONcompsoc
+% compsoc is all arabic
+\def\thesection{\arabic{section}}
+\def\thesubsection{\thesection.\arabic{subsection}}
+\def\thesubsubsection{\thesubsection.\arabic{subsubsection}}
+\def\theparagraph{\thesubsubsection.\arabic{paragraph}}
+\else
+\def\thesection{\Roman{section}} % I
+% V1.7, \mbox prevents breaks around -
+\def\thesubsection{\mbox{\thesection-\Alph{subsection}}} % I-A
+% V1.7 use I-A1 format used by IEEE rather than I-A.1
+\def\thesubsubsection{\thesubsection\arabic{subsubsection}} % I-A1
+\def\theparagraph{\thesubsubsection\alph{paragraph}} % I-A1a
+\fi
+
+% From Heiko Oberdiek. Because of the \mbox in \thesubsection, we need to
+% tell hyperref to disable the \mbox command when making PDF bookmarks.
+% This is done already with hyperref.sty version 6.74o and later, but
+% it will not hurt to do it here again for users of older versions.
+\@ifundefined{pdfstringdefPreHook}{\let\pdfstringdefPreHook\@empty}{}%
+\g@addto@macro\pdfstringdefPreHook{\let\mbox\relax}
+
+
+% Main text forms (how shown in main text headings)
+% V1.6, using \thesection in \thesectiondis allows changes
+% in the former to automatically appear in the latter
+\ifCLASSOPTIONcompsoc
+ \ifCLASSOPTIONconference% compsoc conference
+ \def\thesectiondis{\thesection.}
+ \def\thesubsectiondis{\thesectiondis\arabic{subsection}.}
+ \def\thesubsubsectiondis{\thesubsectiondis\arabic{subsubsection}.}
+ \def\theparagraphdis{\thesubsubsectiondis\arabic{paragraph}.}
+ \else% compsoc not conference
+ \def\thesectiondis{\thesection}
+ \def\thesubsectiondis{\thesectiondis.\arabic{subsection}}
+ \def\thesubsubsectiondis{\thesubsectiondis.\arabic{subsubsection}}
+ \def\theparagraphdis{\thesubsubsectiondis.\arabic{paragraph}}
+ \fi
+\else% not compsoc
+ \def\thesectiondis{\thesection.} % I.
+ \def\thesubsectiondis{\Alph{subsection}.} % B.
+ \def\thesubsubsectiondis{\arabic{subsubsection})} % 3) + \def\theparagraphdis{\alph{paragraph})} % d) +\fi + +% just like LaTeX2e's \@eqnnum +\def\theequationdis{{\normalfont \normalcolor (\theequation)}}% (1) +% IEEEsubequation used only by IEEEtran's IEEEeqnarray +\def\theIEEEsubequationdis{{\normalfont \normalcolor (\theIEEEsubequation)}}% (1a) +% redirect LaTeX2e's equation number display and all that depend on +% it, through IEEEtran's \theequationdis +\def\@eqnnum{\theequationdis} + + + +% V1.7 provide string macros as article.cls does +\def\contentsname{Contents} +\def\listfigurename{List of Figures} +\def\listtablename{List of Tables} +\def\refname{References} +\def\indexname{Index} +\def\figurename{Fig.} +\def\tablename{TABLE} +\@IEEEcompsocconfonly{\def\figurename{Figure}\def\tablename{Table}} +\def\partname{Part} +\def\appendixname{Appendix} +\def\abstractname{Abstract} +% IEEE specific names +\def\IEEEkeywordsname{Keywords} +\def\IEEEproofname{Proof} + + +% LIST OF FIGURES AND TABLES AND TABLE OF CONTENTS +% +\def\@pnumwidth{1.55em} +\def\@tocrmarg{2.55em} +\def\@dotsep{4.5} +\setcounter{tocdepth}{3} + +% adjusted some spacings here so that section numbers will not easily +% collide with the section titles. +% VIII; VIII-A; and VIII-A.1 are usually the worst offenders. +% MDS 1/2001 +\def\tableofcontents{\section*{\contentsname}\@starttoc{toc}} +\def\l@section#1#2{\addpenalty{\@secpenalty}\addvspace{1.0em plus 1pt}% + \@tempdima 2.75em \begingroup \parindent \z@ \rightskip \@pnumwidth% + \parfillskip-\@pnumwidth {\bfseries\leavevmode #1}\hfil\hbox to\@pnumwidth{\hss #2}\par% + \endgroup} +% argument format #1:level, #2:labelindent,#3:labelsep +\def\l@subsection{\@dottedtocline{2}{2.75em}{3.75em}} +\def\l@subsubsection{\@dottedtocline{3}{6.5em}{4.5em}} +% must provide \l@ defs for ALL sublevels EVEN if tocdepth +% is such as they will not appear in the table of contents +% these defs are how TOC knows what level these things are! +\def\l@paragraph{\@dottedtocline{4}{6.5em}{5.5em}} +\def\l@subparagraph{\@dottedtocline{5}{6.5em}{6.5em}} +\def\listoffigures{\section*{\listfigurename}\@starttoc{lof}} +\def\l@figure{\@dottedtocline{1}{0em}{2.75em}} +\def\listoftables{\section*{\listtablename}\@starttoc{lot}} +\let\l@table\l@figure + + +%% Definitions for floats +%% +%% Normal Floats +\floatsep 1\baselineskip plus 0.2\baselineskip minus 0.2\baselineskip +\textfloatsep 1.7\baselineskip plus 0.2\baselineskip minus 0.4\baselineskip +\@fptop 0pt plus 1fil +\@fpsep 0.75\baselineskip plus 2fil +\@fpbot 0pt plus 1fil +\def\topfraction{0.9} +\def\bottomfraction{0.4} +\def\floatpagefraction{0.8} +% V1.7, let top floats approach 90% of page +\def\textfraction{0.1} + +%% Double Column Floats +\dblfloatsep 1\baselineskip plus 0.2\baselineskip minus 0.2\baselineskip + +\dbltextfloatsep 1.7\baselineskip plus 0.2\baselineskip minus 0.4\baselineskip +% Note that it would be nice if the rubber here actually worked in LaTeX2e. +% There is a long standing limitation in LaTeX, first discovered (to the best +% of my knowledge) by Alan Jeffrey in 1992. LaTeX ignores the stretchable +% portion of \dbltextfloatsep, and as a result, double column figures can and +% do result in an non-integer number of lines in the main text columns with +% underfull vbox errors as a consequence. A post to comp.text.tex +% by Donald Arseneau confirms that this had not yet been fixed in 1998. +% IEEEtran V1.6 will fix this problem for you in the titles, but it doesn't +% protect you from other double floats. 
Happy vspace'ing.
+
+\@dblfptop 0pt plus 1fil
+\@dblfpsep 0.75\baselineskip plus 2fil
+\@dblfpbot 0pt plus 1fil
+\def\dbltopfraction{0.8}
+\def\dblfloatpagefraction{0.8}
+\setcounter{dbltopnumber}{4}
+
+\intextsep 1\baselineskip plus 0.2\baselineskip minus 0.2\baselineskip
+\setcounter{topnumber}{2}
+\setcounter{bottomnumber}{2}
+\setcounter{totalnumber}{4}
+
+
+
+% article class provides these, we should too.
+\newlength\abovecaptionskip
+\newlength\belowcaptionskip
+% but only \abovecaptionskip is used above figure captions and *below* table
+% captions
+\setlength\abovecaptionskip{0.65\baselineskip}
+\setlength\belowcaptionskip{0.75\baselineskip}
+% V1.6 create hooks in case the caption spacing ever needs to be
+% overridden by a user
+\def\@IEEEfigurecaptionsepspace{\vskip\abovecaptionskip\relax}%
+\def\@IEEEtablecaptionsepspace{\vskip\belowcaptionskip\relax}%
+
+
+% 1.6b revise caption system so that \@makecaption uses two arguments
+% as with LaTeX2e. Otherwise, there will be problems when using hyperref.
+\def\@IEEEtablestring{table}
+
+\ifCLASSOPTIONcompsoc
+% V1.7 compsoc \@makecaption
+\ifCLASSOPTIONconference% compsoc conference
+\long\def\@makecaption#1#2{%
+% test if this is for a figure or a table
+\ifx\@captype\@IEEEtablestring%
+% if a table, do table caption
+\normalsize\begin{center}{\normalfont\sffamily\normalsize {#1.}~ #2}\end{center}%
+\@IEEEtablecaptionsepspace
+% if not a table, format it as a figure
+\else
+\@IEEEfigurecaptionsepspace
+\setbox\@tempboxa\hbox{\normalfont\sffamily\normalsize {#1.}~ #2}%
+\ifdim \wd\@tempboxa >\hsize%
+% if caption is longer than a line, let it wrap around
+\setbox\@tempboxa\hbox{\normalfont\sffamily\normalsize {#1.}~ }%
+\parbox[t]{\hsize}{\normalfont\sffamily\normalsize \noindent\unhbox\@tempboxa#2}%
+% if caption is shorter than a line, center
+\else%
+\hbox to\hsize{\normalfont\sffamily\normalsize\hfil\box\@tempboxa\hfil}%
+\fi\fi}
+\else% nonconference compsoc
+\long\def\@makecaption#1#2{%
+% test if this is for a figure or a table
+\ifx\@captype\@IEEEtablestring%
+% if a table, do table caption
+\normalsize\begin{center}{\normalfont\sffamily\normalsize #1}\\{\normalfont\sffamily\normalsize #2}\end{center}%
+\@IEEEtablecaptionsepspace
+% if not a table, format it as a figure
+\else
+\@IEEEfigurecaptionsepspace
+\setbox\@tempboxa\hbox{\normalfont\sffamily\normalsize {#1.}~ #2}%
+\ifdim \wd\@tempboxa >\hsize%
+% if caption is longer than a line, let it wrap around
+\setbox\@tempboxa\hbox{\normalfont\sffamily\normalsize {#1.}~ }%
+\parbox[t]{\hsize}{\normalfont\sffamily\normalsize \noindent\unhbox\@tempboxa#2}%
+% if caption is shorter than a line, left justify
+\else%
+\hbox to\hsize{\normalfont\sffamily\normalsize\box\@tempboxa\hfil}%
+\fi\fi}
+\fi
+
+\else% traditional noncompsoc \@makecaption
+\long\def\@makecaption#1#2{%
+% test if this is for a figure or a table
+\ifx\@captype\@IEEEtablestring%
+% if a table, do table caption
+\footnotesize{\centering\normalfont\footnotesize#1.\qquad\scshape #2\par}%
+\@IEEEtablecaptionsepspace
+% if not a table, format it as a figure
+\else
+\@IEEEfigurecaptionsepspace
+% 3/2001 use footnotesize, not small; use two nonbreaking spaces, not one
+\setbox\@tempboxa\hbox{\normalfont\footnotesize {#1.}~~ #2}%
+\ifdim \wd\@tempboxa >\hsize%
+% if caption is longer than a line, let it wrap around
+\setbox\@tempboxa\hbox{\normalfont\footnotesize {#1.}~~ }%
+\parbox[t]{\hsize}{\normalfont\footnotesize\noindent\unhbox\@tempboxa#2}%
+% if caption is shorter than a line, center if conference, left justify otherwise
+\else%
+\ifCLASSOPTIONconference \hbox to\hsize{\normalfont\footnotesize\hfil\box\@tempboxa\hfil}%
+\else \hbox to\hsize{\normalfont\footnotesize\box\@tempboxa\hfil}%
+\fi\fi\fi}
+\fi
+
+
+
+% V1.7 disable captions class option, do so in a way that retains operation of \label
+% within \caption
+\ifCLASSOPTIONcaptionsoff
+\long\def\@makecaption#1#2{\vspace*{2em}\footnotesize\begin{center}{\footnotesize #1}\end{center}%
+\let\@IEEEtemporiglabeldefsave\label
+\let\@IEEEtemplabelargsave\relax
+\def\label##1{\gdef\@IEEEtemplabelargsave{##1}}%
+\setbox\@tempboxa\hbox{#2}%
+\let\label\@IEEEtemporiglabeldefsave
+\ifx\@IEEEtemplabelargsave\relax\else\label{\@IEEEtemplabelargsave}\fi}
+\fi
+
+
+% V1.7 define end environments with \def not \let so as to work OK with
+% preview-latex
+\newcounter{figure}
+\def\thefigure{\@arabic\c@figure}
+\def\fps@figure{tbp}
+\def\ftype@figure{1}
+\def\ext@figure{lof}
+\def\fnum@figure{\figurename~\thefigure}
+\def\figure{\@float{figure}}
+\def\endfigure{\end@float}
+\@namedef{figure*}{\@dblfloat{figure}}
+\@namedef{endfigure*}{\end@dblfloat}
+\newcounter{table}
+\ifCLASSOPTIONcompsoc
+\def\thetable{\arabic{table}}
+\else
+\def\thetable{\@Roman\c@table}
+\fi
+\def\fps@table{tbp}
+\def\ftype@table{2}
+\def\ext@table{lot}
+\def\fnum@table{\tablename~\thetable}
+% V1.6 IEEE uses 8pt text for tables
+% to default to footnotesize, we hack into LaTeX2e's \@floatboxreset and pray
+\def\table{\def\@floatboxreset{\reset@font\scriptsize\@setminipage}%
+ \let\@makefntext\@maketablefntext
+ \@float{table}}
+\def\endtable{\end@float}
+% v1.6b double column tables need to default to footnotesize as well.
+\@namedef{table*}{\def\@floatboxreset{\reset@font\scriptsize\@setminipage}\@dblfloat{table}}
+\@namedef{endtable*}{\end@dblfloat}
+
+
+
+
+%%
+%% START OF IEEEeqnarray DEFINITIONS
+%%
+%% Inspired by the concepts, examples, and previous works of LaTeX
+%% coders and developers such as Donald Arseneau, Fred Bartlett,
+%% David Carlisle, Tony Liu, Frank Mittelbach, Piet van Oostrum,
+%% Roland Winkler and Mark Wooding.
+%% I don't make the claim that my work here is even near their calibre. ;)
+
+
+% hook to allow easy changeover to IEEEtran.cls/tools.sty error reporting
+\def\@IEEEclspkgerror{\ClassError{IEEEtran}}
+
+\newif\if@IEEEeqnarraystarform% flag to indicate if the environment was called as the star form
+\@IEEEeqnarraystarformfalse
+
+\newif\if@advanceIEEEeqncolcnt% tracks if the environment should advance the col counter
+% allows a way to make an \IEEEeqnarraybox that can be used within an \IEEEeqnarray
+% used by IEEEeqnarraymulticol so that it can work properly in both
+\@advanceIEEEeqncolcnttrue
+
+\newcount\@IEEEeqnnumcols % tracks how many IEEEeqnarray cols are defined
+\newcount\@IEEEeqncolcnt % tracks how many IEEEeqnarray cols the user actually used
+
+
+% The default math style used by the columns
+\def\IEEEeqnarraymathstyle{\displaystyle}
+% The default text style used by the columns
+% default to using the current font
+\def\IEEEeqnarraytextstyle{\relax}
+
+% like the iedlistdecl but for \IEEEeqnarray
+\def\IEEEeqnarraydecl{\relax}
+\def\IEEEeqnarrayboxdecl{\relax}
+
+% \yesnumber is the opposite of \nonumber
+% a novel concept with the same def as the equationarray package
+% However, we give IEEE versions too since some LaTeX packages such as
+% the MDWtools mathenv.sty redefine \nonumber to something else.
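+% An illustrative sketch of how these per-line controls are typically used
+% in a document (the "rCl" column specification is only an example):
+%   \begin{IEEEeqnarray}{rCl}
+%     a & = & b + c \IEEEnonumber\\
+%       & = & d + e
+%   \end{IEEEeqnarray}
+% Here the first line is left unnumbered and the second line carries the
+% equation number.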
+\providecommand{\yesnumber}{\global\@eqnswtrue} +\def\IEEEyesnumber{\global\@eqnswtrue} +\def\IEEEnonumber{\global\@eqnswfalse} + + +\def\IEEEyessubnumber{\global\@IEEEissubequationtrue\global\@eqnswtrue% +\if@IEEEeqnarrayISinner% only do something inside an IEEEeqnarray +\if@IEEElastlinewassubequation\addtocounter{equation}{-1}\else\setcounter{IEEEsubequation}{1}\fi% +\def\@currentlabel{\p@IEEEsubequation\theIEEEsubequation}\fi} + +% flag to indicate that an equation is a sub equation +\newif\if@IEEEissubequation% +\@IEEEissubequationfalse + +% allows users to "push away" equations that get too close to the equation numbers +\def\IEEEeqnarraynumspace{\hphantom{\if@IEEEissubequation\theIEEEsubequationdis\else\theequationdis\fi}} + +% provides a way to span multiple columns within IEEEeqnarray environments +% will consider \if@advanceIEEEeqncolcnt before globally advancing the +% column counter - so as to work within \IEEEeqnarraybox +% usage: \IEEEeqnarraymulticol{number cols. to span}{col type}{cell text} +\long\def\IEEEeqnarraymulticol#1#2#3{\multispan{#1}% +% check if column is defined +\relax\expandafter\ifx\csname @IEEEeqnarraycolDEF#2\endcsname\@IEEEeqnarraycolisdefined% +\csname @IEEEeqnarraycolPRE#2\endcsname#3\relax\relax\relax\relax\relax% +\relax\relax\relax\relax\relax\csname @IEEEeqnarraycolPOST#2\endcsname% +\else% if not, error and use default type +\@IEEEclspkgerror{Invalid column type "#2" in \string\IEEEeqnarraymulticol.\MessageBreak +Using a default centering column instead}% +{You must define IEEEeqnarray column types before use.}% +\csname @IEEEeqnarraycolPRE@IEEEdefault\endcsname#3\relax\relax\relax\relax\relax% +\relax\relax\relax\relax\relax\csname @IEEEeqnarraycolPOST@IEEEdefault\endcsname% +\fi% +% advance column counter only if the IEEEeqnarray environment wants it +\if@advanceIEEEeqncolcnt\global\advance\@IEEEeqncolcnt by #1\relax\fi} + +% like \omit, but maintains track of the column counter for \IEEEeqnarray +\def\IEEEeqnarrayomit{\omit\if@advanceIEEEeqncolcnt\global\advance\@IEEEeqncolcnt by 1\relax\fi} + + +% provides a way to define a letter referenced column type +% usage: \IEEEeqnarraydefcol{col. type letter/name}{pre insertion text}{post insertion text} +\def\IEEEeqnarraydefcol#1#2#3{\expandafter\def\csname @IEEEeqnarraycolPRE#1\endcsname{#2}% +\expandafter\def\csname @IEEEeqnarraycolPOST#1\endcsname{#3}% +\expandafter\def\csname @IEEEeqnarraycolDEF#1\endcsname{1}} + + +% provides a way to define a numerically referenced inter-column glue types +% usage: \IEEEeqnarraydefcolsep{col. 
glue number}{glue definition} +\def\IEEEeqnarraydefcolsep#1#2{\expandafter\def\csname @IEEEeqnarraycolSEP\romannumeral #1\endcsname{#2}% +\expandafter\def\csname @IEEEeqnarraycolSEPDEF\romannumeral #1\endcsname{1}} + + +\def\@IEEEeqnarraycolisdefined{1}% just a macro for 1, used for checking undefined column types + + +% expands and appends the given argument to the \@IEEEtrantmptoksA token list +% used to build up the \halign preamble +\def\@IEEEappendtoksA#1{\edef\@@IEEEappendtoksA{\@IEEEtrantmptoksA={\the\@IEEEtrantmptoksA #1}}% +\@@IEEEappendtoksA} + +% also appends to \@IEEEtrantmptoksA, but does not expand the argument +% uses \toks8 as a scratchpad register +\def\@IEEEappendNOEXPANDtoksA#1{\toks8={#1}% +\edef\@@IEEEappendNOEXPANDtoksA{\@IEEEtrantmptoksA={\the\@IEEEtrantmptoksA\the\toks8}}% +\@@IEEEappendNOEXPANDtoksA} + +% define some common column types for the user +% math +\IEEEeqnarraydefcol{l}{$\IEEEeqnarraymathstyle}{$\hfil} +\IEEEeqnarraydefcol{c}{\hfil$\IEEEeqnarraymathstyle}{$\hfil} +\IEEEeqnarraydefcol{r}{\hfil$\IEEEeqnarraymathstyle}{$} +\IEEEeqnarraydefcol{L}{$\IEEEeqnarraymathstyle{}}{{}$\hfil} +\IEEEeqnarraydefcol{C}{\hfil$\IEEEeqnarraymathstyle{}}{{}$\hfil} +\IEEEeqnarraydefcol{R}{\hfil$\IEEEeqnarraymathstyle{}}{{}$} +% text +\IEEEeqnarraydefcol{s}{\IEEEeqnarraytextstyle}{\hfil} +\IEEEeqnarraydefcol{t}{\hfil\IEEEeqnarraytextstyle}{\hfil} +\IEEEeqnarraydefcol{u}{\hfil\IEEEeqnarraytextstyle}{} + +% vertical rules +\IEEEeqnarraydefcol{v}{}{\vrule width\arrayrulewidth} +\IEEEeqnarraydefcol{vv}{\vrule width\arrayrulewidth\hfil}{\hfil\vrule width\arrayrulewidth} +\IEEEeqnarraydefcol{V}{}{\vrule width\arrayrulewidth\hskip\doublerulesep\vrule width\arrayrulewidth} +\IEEEeqnarraydefcol{VV}{\vrule width\arrayrulewidth\hskip\doublerulesep\vrule width\arrayrulewidth\hfil}% +{\hfil\vrule width\arrayrulewidth\hskip\doublerulesep\vrule width\arrayrulewidth} + +% horizontal rules +\IEEEeqnarraydefcol{h}{}{\leaders\hrule height\arrayrulewidth\hfil} +\IEEEeqnarraydefcol{H}{}{\leaders\vbox{\hrule width\arrayrulewidth\vskip\doublerulesep\hrule width\arrayrulewidth}\hfil} + +% plain +\IEEEeqnarraydefcol{x}{}{} +\IEEEeqnarraydefcol{X}{$}{$} + +% the default column type to use in the event a column type is not defined +\IEEEeqnarraydefcol{@IEEEdefault}{\hfil$\IEEEeqnarraymathstyle}{$\hfil} + + +% a zero tabskip (used for "-" col types) +\def\@IEEEeqnarraycolSEPzero{0pt plus 0pt minus 0pt} +% a centering tabskip (used for "+" col types) +\def\@IEEEeqnarraycolSEPcenter{1000pt plus 0pt minus 1000pt} + +% top level default tabskip glues for the start, end, and inter-column +% may be reset within environments not always at the top level, e.g., \IEEEeqnarraybox +\edef\@IEEEeqnarraycolSEPdefaultstart{\@IEEEeqnarraycolSEPcenter}% default start glue +\edef\@IEEEeqnarraycolSEPdefaultend{\@IEEEeqnarraycolSEPcenter}% default end glue +\edef\@IEEEeqnarraycolSEPdefaultmid{\@IEEEeqnarraycolSEPzero}% default inter-column glue + + + +% creates a vertical rule that extends from the bottom to the top a a cell +% Provided in case other packages redefine \vline some other way. +% usage: \IEEEeqnarrayvrule[rule thickness] +% If no argument is provided, \arrayrulewidth will be used for the rule thickness. 
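+% For example (a sketch only), writing "a & \IEEEeqnarrayvrule[1.5pt] & b"
+% within an \IEEEeqnarraybox row would separate the two cells with a
+% 1.5pt-wide vertical rule.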
+\newcommand\IEEEeqnarrayvrule[1][\arrayrulewidth]{\vrule\@width#1\relax} + +% creates a blank separator row +% usage: \IEEEeqnarrayseprow[separation length][font size commands] +% default is \IEEEeqnarrayseprow[0.25\normalbaselineskip][\relax] +% blank arguments inherit the default values +% uses \skip5 as a scratch register - calls \@IEEEeqnarraystrutsize which uses more scratch registers +\def\IEEEeqnarrayseprow{\relax\@ifnextchar[{\@IEEEeqnarrayseprow}{\@IEEEeqnarrayseprow[0.25\normalbaselineskip]}} +\def\@IEEEeqnarrayseprow[#1]{\relax\@ifnextchar[{\@@IEEEeqnarrayseprow[#1]}{\@@IEEEeqnarrayseprow[#1][\relax]}} +\def\@@IEEEeqnarrayseprow[#1][#2]{\def\@IEEEeqnarrayseprowARGONE{#1}% +\ifx\@IEEEeqnarrayseprowARGONE\@empty% +% get the skip value, based on the font commands +% use skip5 because \IEEEeqnarraystrutsize uses \skip0, \skip2, \skip3 +% assign within a bogus box to confine the font changes +{\setbox0=\hbox{#2\relax\global\skip5=0.25\normalbaselineskip}}% +\else% +{\setbox0=\hbox{#2\relax\global\skip5=#1}}% +\fi% +\@IEEEeqnarrayhoptolastcolumn\IEEEeqnarraystrutsize{\skip5}{0pt}[\relax]\relax} + +% creates a blank separator row, but omits all the column templates +% usage: \IEEEeqnarrayseprowcut[separation length][font size commands] +% default is \IEEEeqnarrayseprowcut[0.25\normalbaselineskip][\relax] +% blank arguments inherit the default values +% uses \skip5 as a scratch register - calls \@IEEEeqnarraystrutsize which uses more scratch registers +\def\IEEEeqnarrayseprowcut{\multispan{\@IEEEeqnnumcols}\relax% span all the cols +% advance column counter only if the IEEEeqnarray environment wants it +\if@advanceIEEEeqncolcnt\global\advance\@IEEEeqncolcnt by \@IEEEeqnnumcols\relax\fi% +\@ifnextchar[{\@IEEEeqnarrayseprowcut}{\@IEEEeqnarrayseprowcut[0.25\normalbaselineskip]}} +\def\@IEEEeqnarrayseprowcut[#1]{\relax\@ifnextchar[{\@@IEEEeqnarrayseprowcut[#1]}{\@@IEEEeqnarrayseprowcut[#1][\relax]}} +\def\@@IEEEeqnarrayseprowcut[#1][#2]{\def\@IEEEeqnarrayseprowARGONE{#1}% +\ifx\@IEEEeqnarrayseprowARGONE\@empty% +% get the skip value, based on the font commands +% use skip5 because \IEEEeqnarraystrutsize uses \skip0, \skip2, \skip3 +% assign within a bogus box to confine the font changes +{\setbox0=\hbox{#2\relax\global\skip5=0.25\normalbaselineskip}}% +\else% +{\setbox0=\hbox{#2\relax\global\skip5=#1}}% +\fi% +\IEEEeqnarraystrutsize{\skip5}{0pt}[\relax]\relax} + + + +% draws a single rule across all the columns optional +% argument determines the rule width, \arrayrulewidth is the default +% updates column counter as needed and turns off struts +% usage: \IEEEeqnarrayrulerow[rule line thickness] +\def\IEEEeqnarrayrulerow{\multispan{\@IEEEeqnnumcols}\relax% span all the cols +% advance column counter only if the IEEEeqnarray environment wants it +\if@advanceIEEEeqncolcnt\global\advance\@IEEEeqncolcnt by \@IEEEeqnnumcols\relax\fi% +\@ifnextchar[{\@IEEEeqnarrayrulerow}{\@IEEEeqnarrayrulerow[\arrayrulewidth]}} +\def\@IEEEeqnarrayrulerow[#1]{\leaders\hrule height#1\hfil\relax% put in our rule +% turn off any struts +\IEEEeqnarraystrutsize{0pt}{0pt}[\relax]\relax} + + +% draws a double rule by using a single rule row, a separator row, and then +% another single rule row +% first optional argument determines the rule thicknesses, \arrayrulewidth is the default +% second optional argument determines the rule spacing, \doublerulesep is the default +% usage: \IEEEeqnarraydblrulerow[rule line thickness][rule spacing] +\def\IEEEeqnarraydblrulerow{\multispan{\@IEEEeqnnumcols}\relax% span all the cols 
+% advance column counter only if the IEEEeqnarray environment wants it +\if@advanceIEEEeqncolcnt\global\advance\@IEEEeqncolcnt by \@IEEEeqnnumcols\relax\fi% +\@ifnextchar[{\@IEEEeqnarraydblrulerow}{\@IEEEeqnarraydblrulerow[\arrayrulewidth]}} +\def\@IEEEeqnarraydblrulerow[#1]{\relax\@ifnextchar[{\@@IEEEeqnarraydblrulerow[#1]}% +{\@@IEEEeqnarraydblrulerow[#1][\doublerulesep]}} +\def\@@IEEEeqnarraydblrulerow[#1][#2]{\def\@IEEEeqnarraydblrulerowARG{#1}% +% we allow the user to say \IEEEeqnarraydblrulerow[][] +\ifx\@IEEEeqnarraydblrulerowARG\@empty% +\@IEEEeqnarrayrulerow[\arrayrulewidth]% +\else% +\@IEEEeqnarrayrulerow[#1]\relax% +\fi% +\def\@IEEEeqnarraydblrulerowARG{#2}% +\ifx\@IEEEeqnarraydblrulerowARG\@empty% +\\\IEEEeqnarrayseprow[\doublerulesep][\relax]% +\else% +\\\IEEEeqnarrayseprow[#2][\relax]% +\fi% +\\\multispan{\@IEEEeqnnumcols}% +% advance column counter only if the IEEEeqnarray environment wants it +\if@advanceIEEEeqncolcnt\global\advance\@IEEEeqncolcnt by \@IEEEeqnnumcols\relax\fi% +\def\@IEEEeqnarraydblrulerowARG{#1}% +\ifx\@IEEEeqnarraydblrulerowARG\@empty% +\@IEEEeqnarrayrulerow[\arrayrulewidth]% +\else% +\@IEEEeqnarrayrulerow[#1]% +\fi% +} + +% draws a double rule by using a single rule row, a separator (cutting) row, and then +% another single rule row +% first optional argument determines the rule thicknesses, \arrayrulewidth is the default +% second optional argument determines the rule spacing, \doublerulesep is the default +% usage: \IEEEeqnarraydblrulerow[rule line thickness][rule spacing] +\def\IEEEeqnarraydblrulerowcut{\multispan{\@IEEEeqnnumcols}\relax% span all the cols +% advance column counter only if the IEEEeqnarray environment wants it +\if@advanceIEEEeqncolcnt\global\advance\@IEEEeqncolcnt by \@IEEEeqnnumcols\relax\fi% +\@ifnextchar[{\@IEEEeqnarraydblrulerowcut}{\@IEEEeqnarraydblrulerowcut[\arrayrulewidth]}} +\def\@IEEEeqnarraydblrulerowcut[#1]{\relax\@ifnextchar[{\@@IEEEeqnarraydblrulerowcut[#1]}% +{\@@IEEEeqnarraydblrulerowcut[#1][\doublerulesep]}} +\def\@@IEEEeqnarraydblrulerowcut[#1][#2]{\def\@IEEEeqnarraydblrulerowARG{#1}% +% we allow the user to say \IEEEeqnarraydblrulerow[][] +\ifx\@IEEEeqnarraydblrulerowARG\@empty% +\@IEEEeqnarrayrulerow[\arrayrulewidth]% +\else% +\@IEEEeqnarrayrulerow[#1]% +\fi% +\def\@IEEEeqnarraydblrulerowARG{#2}% +\ifx\@IEEEeqnarraydblrulerowARG\@empty% +\\\IEEEeqnarrayseprowcut[\doublerulesep][\relax]% +\else% +\\\IEEEeqnarrayseprowcut[#2][\relax]% +\fi% +\\\multispan{\@IEEEeqnnumcols}% +% advance column counter only if the IEEEeqnarray environment wants it +\if@advanceIEEEeqncolcnt\global\advance\@IEEEeqncolcnt by \@IEEEeqnnumcols\relax\fi% +\def\@IEEEeqnarraydblrulerowARG{#1}% +\ifx\@IEEEeqnarraydblrulerowARG\@empty% +\@IEEEeqnarrayrulerow[\arrayrulewidth]% +\else% +\@IEEEeqnarrayrulerow[#1]% +\fi% +} + + + +% inserts a full row's worth of &'s +% relies on \@IEEEeqnnumcols to provide the correct number of columns +% uses \@IEEEtrantmptoksA, \count0 as scratch registers +\def\@IEEEeqnarrayhoptolastcolumn{\@IEEEtrantmptoksA={}\count0=1\relax% +\loop% add cols if the user did not use them all +\ifnum\count0<\@IEEEeqnnumcols\relax% +\@IEEEappendtoksA{&}% +\advance\count0 by 1\relax% update the col count +\repeat% +\the\@IEEEtrantmptoksA%execute the &'s +} + + + +\newif\if@IEEEeqnarrayISinner % flag to indicate if we are within the lines +\@IEEEeqnarrayISinnerfalse % of an IEEEeqnarray - after the IEEEeqnarraydecl + +\edef\@IEEEeqnarrayTHEstrutheight{0pt} % height and depth of IEEEeqnarray struts 
+\edef\@IEEEeqnarrayTHEstrutdepth{0pt} + +\edef\@IEEEeqnarrayTHEmasterstrutheight{0pt} % default height and depth of +\edef\@IEEEeqnarrayTHEmasterstrutdepth{0pt} % struts within an IEEEeqnarray + +\edef\@IEEEeqnarrayTHEmasterstrutHSAVE{0pt} % saved master strut height +\edef\@IEEEeqnarrayTHEmasterstrutDSAVE{0pt} % and depth + +\newif\if@IEEEeqnarrayusemasterstrut % flag to indicate that the master strut value +\@IEEEeqnarrayusemasterstruttrue % is to be used + + + +% saves the strut height and depth of the master strut +\def\@IEEEeqnarraymasterstrutsave{\relax% +\expandafter\skip0=\@IEEEeqnarrayTHEmasterstrutheight\relax% +\expandafter\skip2=\@IEEEeqnarrayTHEmasterstrutdepth\relax% +% remove stretchability +\dimen0\skip0\relax% +\dimen2\skip2\relax% +% save values +\edef\@IEEEeqnarrayTHEmasterstrutHSAVE{\the\dimen0}% +\edef\@IEEEeqnarrayTHEmasterstrutDSAVE{\the\dimen2}} + +% restores the strut height and depth of the master strut +\def\@IEEEeqnarraymasterstrutrestore{\relax% +\expandafter\skip0=\@IEEEeqnarrayTHEmasterstrutHSAVE\relax% +\expandafter\skip2=\@IEEEeqnarrayTHEmasterstrutDSAVE\relax% +% remove stretchability +\dimen0\skip0\relax% +\dimen2\skip2\relax% +% restore values +\edef\@IEEEeqnarrayTHEmasterstrutheight{\the\dimen0}% +\edef\@IEEEeqnarrayTHEmasterstrutdepth{\the\dimen2}} + + +% globally restores the strut height and depth to the +% master values and sets the master strut flag to true +\def\@IEEEeqnarraystrutreset{\relax% +\expandafter\skip0=\@IEEEeqnarrayTHEmasterstrutheight\relax% +\expandafter\skip2=\@IEEEeqnarrayTHEmasterstrutdepth\relax% +% remove stretchability +\dimen0\skip0\relax% +\dimen2\skip2\relax% +% restore values +\xdef\@IEEEeqnarrayTHEstrutheight{\the\dimen0}% +\xdef\@IEEEeqnarrayTHEstrutdepth{\the\dimen2}% +\global\@IEEEeqnarrayusemasterstruttrue} + + +% if the master strut is not to be used, make the current +% values of \@IEEEeqnarrayTHEstrutheight, \@IEEEeqnarrayTHEstrutdepth +% and the use master strut flag, global +% this allows user strut commands issued in the last column to be carried +% into the isolation/strut column +\def\@IEEEeqnarrayglobalizestrutstatus{\relax% +\if@IEEEeqnarrayusemasterstrut\else% +\xdef\@IEEEeqnarrayTHEstrutheight{\@IEEEeqnarrayTHEstrutheight}% +\xdef\@IEEEeqnarrayTHEstrutdepth{\@IEEEeqnarrayTHEstrutdepth}% +\global\@IEEEeqnarrayusemasterstrutfalse% +\fi} + + + +% usage: \IEEEeqnarraystrutsize{height}{depth}[font size commands] +% If called outside the lines of an IEEEeqnarray, sets the height +% and depth of both the master and local struts. If called inside +% an IEEEeqnarray line, sets the height and depth of the local strut +% only and sets the flag to indicate the use of the local strut +% values. If the height or depth is left blank, 0.7\normalbaselineskip +% and 0.3\normalbaselineskip will be used, respectively. +% The optional argument can be used to evaluate the lengths under +% a different font size and styles. If none is specified, the current +% font is used. 
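+% For example (an illustrative sketch):
+%   \IEEEeqnarraystrutsize{0.9\normalbaselineskip}{0.4\normalbaselineskip}[\small]
+% evaluates both lengths under \small; given within a line of an
+% IEEEeqnarray, it would alter the strut of that line only.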
+% uses scratch registers \skip0, \skip2, \skip3, \dimen0, \dimen2 +\def\IEEEeqnarraystrutsize#1#2{\relax\@ifnextchar[{\@IEEEeqnarraystrutsize{#1}{#2}}{\@IEEEeqnarraystrutsize{#1}{#2}[\relax]}} +\def\@IEEEeqnarraystrutsize#1#2[#3]{\def\@IEEEeqnarraystrutsizeARG{#1}% +\ifx\@IEEEeqnarraystrutsizeARG\@empty% +{\setbox0=\hbox{#3\relax\global\skip3=0.7\normalbaselineskip}}% +\skip0=\skip3\relax% +\else% arg one present +{\setbox0=\hbox{#3\relax\global\skip3=#1\relax}}% +\skip0=\skip3\relax% +\fi% if null arg +\def\@IEEEeqnarraystrutsizeARG{#2}% +\ifx\@IEEEeqnarraystrutsizeARG\@empty% +{\setbox0=\hbox{#3\relax\global\skip3=0.3\normalbaselineskip}}% +\skip2=\skip3\relax% +\else% arg two present +{\setbox0=\hbox{#3\relax\global\skip3=#2\relax}}% +\skip2=\skip3\relax% +\fi% if null arg +% remove stretchability, just to be safe +\dimen0\skip0\relax% +\dimen2\skip2\relax% +% dimen0 = height, dimen2 = depth +\if@IEEEeqnarrayISinner% inner does not touch master strut size +\edef\@IEEEeqnarrayTHEstrutheight{\the\dimen0}% +\edef\@IEEEeqnarrayTHEstrutdepth{\the\dimen2}% +\@IEEEeqnarrayusemasterstrutfalse% do not use master +\else% outer, have to set master strut too +\edef\@IEEEeqnarrayTHEmasterstrutheight{\the\dimen0}% +\edef\@IEEEeqnarrayTHEmasterstrutdepth{\the\dimen2}% +\edef\@IEEEeqnarrayTHEstrutheight{\the\dimen0}% +\edef\@IEEEeqnarrayTHEstrutdepth{\the\dimen2}% +\@IEEEeqnarrayusemasterstruttrue% use master strut +\fi} + + +% usage: \IEEEeqnarraystrutsizeadd{added height}{added depth}[font size commands] +% If called outside the lines of an IEEEeqnarray, adds the given height +% and depth to both the master and local struts. +% If called inside an IEEEeqnarray line, adds the given height and depth +% to the local strut only and sets the flag to indicate the use +% of the local strut values. +% In both cases, if a height or depth is left blank, 0pt is used instead. +% The optional argument can be used to evaluate the lengths under +% a different font size and styles. If none is specified, the current +% font is used. 
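+% For example (sketch): \IEEEeqnarraystrutsizeadd{2pt}{} enlarges the strut
+% height by 2pt, while the blank second argument leaves the depth unchanged.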
+% uses scratch registers \skip0, \skip2, \skip3, \dimen0, \dimen2 +\def\IEEEeqnarraystrutsizeadd#1#2{\relax\@ifnextchar[{\@IEEEeqnarraystrutsizeadd{#1}{#2}}{\@IEEEeqnarraystrutsizeadd{#1}{#2}[\relax]}} +\def\@IEEEeqnarraystrutsizeadd#1#2[#3]{\def\@IEEEeqnarraystrutsizearg{#1}% +\ifx\@IEEEeqnarraystrutsizearg\@empty% +\skip0=0pt\relax% +\else% arg one present +{\setbox0=\hbox{#3\relax\global\skip3=#1}}% +\skip0=\skip3\relax% +\fi% if null arg +\def\@IEEEeqnarraystrutsizearg{#2}% +\ifx\@IEEEeqnarraystrutsizearg\@empty% +\skip2=0pt\relax% +\else% arg two present +{\setbox0=\hbox{#3\relax\global\skip3=#2}}% +\skip2=\skip3\relax% +\fi% if null arg +% remove stretchability, just to be safe +\dimen0\skip0\relax% +\dimen2\skip2\relax% +% dimen0 = height, dimen2 = depth +\if@IEEEeqnarrayISinner% inner does not touch master strut size +% get local strut size +\expandafter\skip0=\@IEEEeqnarrayTHEstrutheight\relax% +\expandafter\skip2=\@IEEEeqnarrayTHEstrutdepth\relax% +% add it to the user supplied values +\advance\dimen0 by \skip0\relax% +\advance\dimen2 by \skip2\relax% +% update the local strut size +\edef\@IEEEeqnarrayTHEstrutheight{\the\dimen0}% +\edef\@IEEEeqnarrayTHEstrutdepth{\the\dimen2}% +\@IEEEeqnarrayusemasterstrutfalse% do not use master +\else% outer, have to set master strut too +% get master strut size +\expandafter\skip0=\@IEEEeqnarrayTHEmasterstrutheight\relax% +\expandafter\skip2=\@IEEEeqnarrayTHEmasterstrutdepth\relax% +% add it to the user supplied values +\advance\dimen0 by \skip0\relax% +\advance\dimen2 by \skip2\relax% +% update the local and master strut sizes +\edef\@IEEEeqnarrayTHEmasterstrutheight{\the\dimen0}% +\edef\@IEEEeqnarrayTHEmasterstrutdepth{\the\dimen2}% +\edef\@IEEEeqnarrayTHEstrutheight{\the\dimen0}% +\edef\@IEEEeqnarrayTHEstrutdepth{\the\dimen2}% +\@IEEEeqnarrayusemasterstruttrue% use master strut +\fi} + + +% allow user a way to see the struts +\newif\ifIEEEvisiblestruts +\IEEEvisiblestrutsfalse + +% inserts an invisible strut using the master or local strut values +% uses scratch registers \skip0, \skip2, \dimen0, \dimen2 +\def\@IEEEeqnarrayinsertstrut{\relax% +\if@IEEEeqnarrayusemasterstrut +% get master strut size +\expandafter\skip0=\@IEEEeqnarrayTHEmasterstrutheight\relax% +\expandafter\skip2=\@IEEEeqnarrayTHEmasterstrutdepth\relax% +\else% +% get local strut size +\expandafter\skip0=\@IEEEeqnarrayTHEstrutheight\relax% +\expandafter\skip2=\@IEEEeqnarrayTHEstrutdepth\relax% +\fi% +% remove stretchability, probably not needed +\dimen0\skip0\relax% +\dimen2\skip2\relax% +% dimen0 = height, dimen2 = depth +% allow user to see struts if desired +\ifIEEEvisiblestruts% +\vrule width0.2pt height\dimen0 depth\dimen2\relax% +\else% +\vrule width0pt height\dimen0 depth\dimen2\relax\fi} + + +% creates an invisible strut, useable even outside \IEEEeqnarray +% if \IEEEvisiblestrutstrue, the strut will be visible and 0.2pt wide. 
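+% (e.g., an illustrative sketch: \IEEEstrut[2.5ex][1ex] props open 2.5ex
+% above and 1ex below the baseline at the point where it is placed)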
+% usage: \IEEEstrut[height][depth][font size commands] +% default is \IEEEstrut[0.7\normalbaselineskip][0.3\normalbaselineskip][\relax] +% blank arguments inherit the default values +% uses \dimen0, \dimen2, \skip0, \skip2 +\def\IEEEstrut{\relax\@ifnextchar[{\@IEEEstrut}{\@IEEEstrut[0.7\normalbaselineskip]}} +\def\@IEEEstrut[#1]{\relax\@ifnextchar[{\@@IEEEstrut[#1]}{\@@IEEEstrut[#1][0.3\normalbaselineskip]}} +\def\@@IEEEstrut[#1][#2]{\relax\@ifnextchar[{\@@@IEEEstrut[#1][#2]}{\@@@IEEEstrut[#1][#2][\relax]}} +\def\@@@IEEEstrut[#1][#2][#3]{\mbox{#3\relax% +\def\@IEEEstrutARG{#1}% +\ifx\@IEEEstrutARG\@empty% +\skip0=0.7\normalbaselineskip\relax% +\else% +\skip0=#1\relax% +\fi% +\def\@IEEEstrutARG{#2}% +\ifx\@IEEEstrutARG\@empty% +\skip2=0.3\normalbaselineskip\relax% +\else% +\skip2=#2\relax% +\fi% +% remove stretchability, probably not needed +\dimen0\skip0\relax% +\dimen2\skip2\relax% +\ifIEEEvisiblestruts% +\vrule width0.2pt height\dimen0 depth\dimen2\relax% +\else% +\vrule width0.0pt height\dimen0 depth\dimen2\relax\fi}} + + +% enables strut mode by setting a default strut size and then zeroing the +% \baselineskip, \lineskip, \lineskiplimit and \jot +\def\IEEEeqnarraystrutmode{\IEEEeqnarraystrutsize{0.7\normalbaselineskip}{0.3\normalbaselineskip}[\relax]% +\baselineskip=0pt\lineskip=0pt\lineskiplimit=0pt\jot=0pt} + + + +\def\IEEEeqnarray{\@IEEEeqnarraystarformfalse\@IEEEeqnarray} +\def\endIEEEeqnarray{\end@IEEEeqnarray} + +\@namedef{IEEEeqnarray*}{\@IEEEeqnarraystarformtrue\@IEEEeqnarray} +\@namedef{endIEEEeqnarray*}{\end@IEEEeqnarray} + + +% \IEEEeqnarray is an enhanced \eqnarray. +% The star form defaults to not putting equation numbers at the end of each row. +% usage: \IEEEeqnarray[decl]{cols} +\def\@IEEEeqnarray{\relax\@ifnextchar[{\@@IEEEeqnarray}{\@@IEEEeqnarray[\relax]}} +\def\@@IEEEeqnarray[#1]#2{% + % default to showing the equation number or not based on whether or not + % the star form was involked + \if@IEEEeqnarraystarform\global\@eqnswfalse + \else% not the star form + \global\@eqnswtrue + \fi% if star form + \@IEEEissubequationfalse% default to no subequations + \@IEEElastlinewassubequationfalse% assume last line is not a sub equation + \@IEEEeqnarrayISinnerfalse% not yet within the lines of the halign + \@IEEEeqnarraystrutsize{0pt}{0pt}[\relax]% turn off struts by default + \@IEEEeqnarrayusemasterstruttrue% use master strut till user asks otherwise + \IEEEvisiblestrutsfalse% diagnostic mode defaults to off + % no extra space unless the user specifically requests it + \lineskip=0pt\relax + \lineskiplimit=0pt\relax + \baselineskip=\normalbaselineskip\relax% + \jot=\IEEEnormaljot\relax% + \mathsurround\z@\relax% no extra spacing around math + \@advanceIEEEeqncolcnttrue% advance the col counter for each col the user uses, + % used in \IEEEeqnarraymulticol and in the preamble build + \stepcounter{equation}% advance equation counter before first line + \setcounter{IEEEsubequation}{0}% no subequation yet + \def\@currentlabel{\p@equation\theequation}% redefine the ref label + \IEEEeqnarraydecl\relax% allow a way for the user to make global overrides + #1\relax% allow user to override defaults + \let\\\@IEEEeqnarraycr% replace newline with one that can put in eqn. numbers + \global\@IEEEeqncolcnt\z@% col. 
count = 0 for first line
+ \@IEEEbuildpreamble #2\end\relax% build the preamble and put it into \@IEEEtrantmptoksA
+ % put in the column for the equation number
+ \ifnum\@IEEEeqnnumcols>0\relax\@IEEEappendtoksA{&}\fi% col separator for those after the first
+ \toks0={##}%
+ % advance the \@IEEEeqncolcnt for the isolation col, this helps with error checking
+ \@IEEEappendtoksA{\global\advance\@IEEEeqncolcnt by 1\relax}%
+ % add the isolation column
+ \@IEEEappendtoksA{\tabskip\z@skip\bgroup\the\toks0\egroup}%
+ % advance the \@IEEEeqncolcnt for the equation number col, this helps with error checking
+ \@IEEEappendtoksA{&\global\advance\@IEEEeqncolcnt by 1\relax}%
+ % add the equation number col to the preamble
+ \@IEEEappendtoksA{\tabskip\z@skip\hb@xt@\z@\bgroup\hss\the\toks0\egroup}%
+ % note \@IEEEeqnnumcols does not count the equation col or isolation col
+ % set the starting tabskip glue as determined by the preamble build
+ \tabskip=\@IEEEBPstartglue\relax
+ % begin the display alignment
+ \@IEEEeqnarrayISinnertrue% commands are now within the lines
+ $$\everycr{}\halign to\displaywidth\bgroup
+ % "expand" the preamble
+ \span\the\@IEEEtrantmptoksA\cr}
+
+% enter isolation/strut column (or the next column if the user did not use
+% every column), record the strut status, complete the columns, do the strut if needed,
+% restore counters to correct values and exit
+\def\end@IEEEeqnarray{\@IEEEeqnarrayglobalizestrutstatus&\@@IEEEeqnarraycr\egroup%
+\if@IEEElastlinewassubequation\global\advance\c@IEEEsubequation\m@ne\fi%
+\global\advance\c@equation\m@ne%
+$$\@ignoretrue}
+
+% need a way to remember if last line is a subequation
+\newif\if@IEEElastlinewassubequation%
+\@IEEElastlinewassubequationfalse
+
+% IEEEeqnarray uses a modified \\ instead of the plain \cr to end rows.
+% This allows for things like \\*[vskip amount]
+% These "cr" macros are modified versions of those for LaTeX2e's eqnarray
+% the {\ifnum0=`} braces must be kept away from the last column to avoid
+% altering spacing of its math, so we use & to advance to the next column
+% as there is an isolation/strut column after the user's columns
+\def\@IEEEeqnarraycr{\@IEEEeqnarrayglobalizestrutstatus&% save strut status and advance to next column
+ {\ifnum0=`}\fi
+ \@ifstar{%
+ \global\@eqpen\@M\@IEEEeqnarrayYCR
+ }{%
+ \global\@eqpen\interdisplaylinepenalty \@IEEEeqnarrayYCR
+ }%
+}
+
+\def\@IEEEeqnarrayYCR{\@testopt\@IEEEeqnarrayXCR\z@skip}
+
+\def\@IEEEeqnarrayXCR[#1]{%
+ \ifnum0=`{\fi}%
+ \@@IEEEeqnarraycr
+ \noalign{\penalty\@eqpen\vskip\jot\vskip #1\relax}}%
+
+\def\@@IEEEeqnarraycr{\@IEEEtrantmptoksA={}% clear token register
+ \advance\@IEEEeqncolcnt by -1\relax% adjust col count because of the isolation column
+ \ifnum\@IEEEeqncolcnt>\@IEEEeqnnumcols\relax
+ \@IEEEclspkgerror{Too many columns within the IEEEeqnarray\MessageBreak
+ environment}%
+ {Use fewer \string &'s or put more columns in the IEEEeqnarray column\MessageBreak
+ specifications.}\relax%
+ \else
+ \loop% add cols if the user did not use them all
+ \ifnum\@IEEEeqncolcnt<\@IEEEeqnnumcols\relax
+ \@IEEEappendtoksA{&}%
+ \advance\@IEEEeqncolcnt by 1\relax% update the col count
+ \repeat
+ % this number of &'s will take us to the isolation column
+ \fi
+ % execute the &'s
+ \the\@IEEEtrantmptoksA%
+ % handle the strut/isolation column
+ \@IEEEeqnarrayinsertstrut% do the strut if needed
+ \@IEEEeqnarraystrutreset% reset the strut system for next line or IEEEeqnarray
+ &% and enter the equation number column
+ % if this line needs an equation number, display it and advance the
+ % (sub)equation counters, record what type this line was
+ \if@eqnsw%
+ \if@IEEEissubequation\theIEEEsubequationdis\addtocounter{equation}{1}\stepcounter{IEEEsubequation}%
+ \global\@IEEElastlinewassubequationtrue%
+ \else% display a standard equation number, initialize the IEEEsubequation counter
+ \theequationdis\stepcounter{equation}\setcounter{IEEEsubequation}{0}%
+ \global\@IEEElastlinewassubequationfalse\fi%
+ \fi%
+ % reset the eqnsw flag to indicate default preference of the display of equation numbers
+ \if@IEEEeqnarraystarform\global\@eqnswfalse\else\global\@eqnswtrue\fi
+ \global\@IEEEissubequationfalse% reset the subequation flag
+ % reset the number of columns the user actually used
+ \global\@IEEEeqncolcnt\z@\relax
+ % the real end of the line
+ \cr}
+
+
+
+
+
+% \IEEEeqnarraybox is like \IEEEeqnarray except the box form puts everything
+% inside a vtop, vbox, or vcenter box depending on the letter in the second
+% optional argument (t,b,c). Vbox is the default. Unlike \IEEEeqnarray,
+% equation numbers are not displayed and \IEEEeqnarraybox can be nested.
+% \IEEEeqnarrayboxm is for math mode (like \array) and does not put the vbox
+% within an hbox.
+% \IEEEeqnarrayboxt is for text mode (like \tabular) and puts the vbox within
+% a \hbox{$ $} construct.
+% \IEEEeqnarraybox will auto detect whether to use \IEEEeqnarrayboxm or
+% \IEEEeqnarrayboxt depending on the math mode.
+% The third optional argument specifies the width this box is to be set to - natural width is the default.
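+% As an illustrative sketch (the "rCl" columns are only an example), a
+% small top-aligned alignment could be set in running text with
+%   \begin{IEEEeqnarraybox}[][t]{rCl}
+%     x & = & y\\
+%     z & = & w
+%   \end{IEEEeqnarraybox}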
+% The * forms do not add \jot line spacing +% usage: \IEEEeqnarraybox[decl][pos][width]{cols} +\def\IEEEeqnarrayboxm{\@IEEEeqnarraystarformfalse\@IEEEeqnarrayboxHBOXSWfalse\@IEEEeqnarraybox} +\def\endIEEEeqnarrayboxm{\end@IEEEeqnarraybox} +\@namedef{IEEEeqnarrayboxm*}{\@IEEEeqnarraystarformtrue\@IEEEeqnarrayboxHBOXSWfalse\@IEEEeqnarraybox} +\@namedef{endIEEEeqnarrayboxm*}{\end@IEEEeqnarraybox} + +\def\IEEEeqnarrayboxt{\@IEEEeqnarraystarformfalse\@IEEEeqnarrayboxHBOXSWtrue\@IEEEeqnarraybox} +\def\endIEEEeqnarrayboxt{\end@IEEEeqnarraybox} +\@namedef{IEEEeqnarrayboxt*}{\@IEEEeqnarraystarformtrue\@IEEEeqnarrayboxHBOXSWtrue\@IEEEeqnarraybox} +\@namedef{endIEEEeqnarrayboxt*}{\end@IEEEeqnarraybox} + +\def\IEEEeqnarraybox{\@IEEEeqnarraystarformfalse\ifmmode\@IEEEeqnarrayboxHBOXSWfalse\else\@IEEEeqnarrayboxHBOXSWtrue\fi% +\@IEEEeqnarraybox} +\def\endIEEEeqnarraybox{\end@IEEEeqnarraybox} + +\@namedef{IEEEeqnarraybox*}{\@IEEEeqnarraystarformtrue\ifmmode\@IEEEeqnarrayboxHBOXSWfalse\else\@IEEEeqnarrayboxHBOXSWtrue\fi% +\@IEEEeqnarraybox} +\@namedef{endIEEEeqnarraybox*}{\end@IEEEeqnarraybox} + +% flag to indicate if the \IEEEeqnarraybox needs to put things into an hbox{$ $} +% for \vcenter in non-math mode +\newif\if@IEEEeqnarrayboxHBOXSW% +\@IEEEeqnarrayboxHBOXSWfalse + +\def\@IEEEeqnarraybox{\relax\@ifnextchar[{\@@IEEEeqnarraybox}{\@@IEEEeqnarraybox[\relax]}} +\def\@@IEEEeqnarraybox[#1]{\relax\@ifnextchar[{\@@@IEEEeqnarraybox[#1]}{\@@@IEEEeqnarraybox[#1][b]}} +\def\@@@IEEEeqnarraybox[#1][#2]{\relax\@ifnextchar[{\@@@@IEEEeqnarraybox[#1][#2]}{\@@@@IEEEeqnarraybox[#1][#2][\relax]}} + +% #1 = decl; #2 = t,b,c; #3 = width, #4 = col specs +\def\@@@@IEEEeqnarraybox[#1][#2][#3]#4{\@IEEEeqnarrayISinnerfalse % not yet within the lines of the halign + \@IEEEeqnarraymasterstrutsave% save current master strut values + \@IEEEeqnarraystrutsize{0pt}{0pt}[\relax]% turn off struts by default + \@IEEEeqnarrayusemasterstruttrue% use master strut till user asks otherwise + \IEEEvisiblestrutsfalse% diagnostic mode defaults to off + % no extra space unless the user specifically requests it + \lineskip=0pt\relax% + \lineskiplimit=0pt\relax% + \baselineskip=\normalbaselineskip\relax% + \jot=\IEEEnormaljot\relax% + \mathsurround\z@\relax% no extra spacing around math + % the default end glues are zero for an \IEEEeqnarraybox + \edef\@IEEEeqnarraycolSEPdefaultstart{\@IEEEeqnarraycolSEPzero}% default start glue + \edef\@IEEEeqnarraycolSEPdefaultend{\@IEEEeqnarraycolSEPzero}% default end glue + \edef\@IEEEeqnarraycolSEPdefaultmid{\@IEEEeqnarraycolSEPzero}% default inter-column glue + \@advanceIEEEeqncolcntfalse% do not advance the col counter for each col the user uses, + % used in \IEEEeqnarraymulticol and in the preamble build + \IEEEeqnarrayboxdecl\relax% allow a way for the user to make global overrides + #1\relax% allow user to override defaults + \let\\\@IEEEeqnarrayboxcr% replace newline with one that allows optional spacing + \@IEEEbuildpreamble #4\end\relax% build the preamble and put it into \@IEEEtrantmptoksA + % add an isolation column to the preamble to stop \\'s {} from getting into the last col + \ifnum\@IEEEeqnnumcols>0\relax\@IEEEappendtoksA{&}\fi% col separator for those after the first + \toks0={##}% + % add the isolation column to the preamble + \@IEEEappendtoksA{\tabskip\z@skip\bgroup\the\toks0\egroup}% + % set the starting tabskip glue as determined by the preamble build + \tabskip=\@IEEEBPstartglue\relax + % begin the alignment + \everycr{}% + % use only the very first token to determine the positioning + 
% this stops some problems when the user uses more than one letter,
+ % but is probably not worth the effort
+ % \noindent is used as a delimiter
+ \def\@IEEEgrabfirstoken##1##2\noindent{\let\@IEEEgrabbedfirstoken=##1}%
+ \@IEEEgrabfirstoken#2\relax\relax\noindent
+ % \@IEEEgrabbedfirstoken has the first token, the rest are discarded
+ % if we need to put things into an hbox and go into math mode, do so now
+ \if@IEEEeqnarrayboxHBOXSW \leavevmode \hbox \bgroup $\fi%
+ % use the appropriate vbox type
+ \if\@IEEEgrabbedfirstoken t\relax\vtop\else\if\@IEEEgrabbedfirstoken c\relax%
+ \vcenter\else\vbox\fi\fi\bgroup%
+ \@IEEEeqnarrayISinnertrue% commands are now within the lines
+ \ifx#3\relax\halign\else\halign to #3\relax\fi%
+ \bgroup
+ % "expand" the preamble
+ \span\the\@IEEEtrantmptoksA\cr}
+
+% carry strut status and enter the isolation/strut column,
+% exit from math mode if needed, and exit
+\def\end@IEEEeqnarraybox{\@IEEEeqnarrayglobalizestrutstatus% carry strut status
+&% enter isolation/strut column
+\@IEEEeqnarrayinsertstrut% do strut if needed
+\@IEEEeqnarraymasterstrutrestore% restore the previous master strut values
+% reset the strut system for next IEEEeqnarray
+% (sets local strut values back to previous master strut values)
+\@IEEEeqnarraystrutreset%
+% ensure last line, exit from halign, close vbox
+\crcr\egroup\egroup%
+% exit from math mode and close hbox if needed
+\if@IEEEeqnarrayboxHBOXSW $\egroup\fi}
+
+
+
+% IEEEeqnarraybox uses a modified \\ instead of the plain \cr to
+% end rows. This allows for things like \\[vskip amount]
+% These "cr" macros are modified versions of those for LaTeX2e's eqnarray
+% For IEEEeqnarraybox, \\* is the same as \\
+% the {\ifnum0=`} braces must be kept away from the last column to avoid
+% altering spacing of its math, so we use & to advance to the isolation/strut column
+% carry strut status into isolation/strut column
+\def\@IEEEeqnarrayboxcr{\@IEEEeqnarrayglobalizestrutstatus% carry strut status
+&% enter isolation/strut column
+\@IEEEeqnarrayinsertstrut% do strut if needed
+% reset the strut system for next line or IEEEeqnarray
+\@IEEEeqnarraystrutreset%
+{\ifnum0=`}\fi%
+\@ifstar{\@IEEEeqnarrayboxYCR}{\@IEEEeqnarrayboxYCR}}
+
+% test and set up the optional argument to \\[]
+\def\@IEEEeqnarrayboxYCR{\@testopt\@IEEEeqnarrayboxXCR\z@skip}
+
+% IEEEeqnarraybox does not automatically increase line spacing by \jot
+\def\@IEEEeqnarrayboxXCR[#1]{\ifnum0=`{\fi}%
+\cr\noalign{\if@IEEEeqnarraystarform\else\vskip\jot\fi\vskip#1\relax}}
+
+
+
+% starts the halign preamble build
+\def\@IEEEbuildpreamble{\@IEEEtrantmptoksA={}% clear token register
+\let\@IEEEBPcurtype=u%current column type is not yet known
+\let\@IEEEBPprevtype=s%the previous column type was the start
+\let\@IEEEBPnexttype=u%next column type is not yet known
+% ensure these are valid
+\def\@IEEEBPcurglue={0pt plus 0pt minus 0pt}%
+\def\@IEEEBPcurcolname{@IEEEdefault}% name of current column definition
+% currently acquired numerically referenced glue
+% use a name that is easier to remember
+\let\@IEEEBPcurnum=\@IEEEtrantmpcountA%
+\@IEEEBPcurnum=0%
+% tracks number of columns in the preamble
+\@IEEEeqnnumcols=0%
+% record the default end glues
+\edef\@IEEEBPstartglue{\@IEEEeqnarraycolSEPdefaultstart}%
+\edef\@IEEEBPendglue{\@IEEEeqnarraycolSEPdefaultend}%
+% now parse the user's column specifications
+\@@IEEEbuildpreamble}
+
+
+% parses and builds the halign preamble
+\def\@@IEEEbuildpreamble#1#2{\let\@@nextIEEEbuildpreamble=\@@IEEEbuildpreamble%
+% use only the very first token
to check the end +% \noindent is used as a delimiter as \end can be present here +\def\@IEEEgrabfirstoken##1##2\noindent{\let\@IEEEgrabbedfirstoken=##1}% +\@IEEEgrabfirstoken#1\relax\relax\noindent +\ifx\@IEEEgrabbedfirstoken\end\let\@@nextIEEEbuildpreamble=\@@IEEEfinishpreamble\else% +% identify current and next token type +\@IEEEgetcoltype{#1}{\@IEEEBPcurtype}{1}% current, error on invalid +\@IEEEgetcoltype{#2}{\@IEEEBPnexttype}{0}% next, no error on invalid next +% if curtype is a glue, get the glue def +\if\@IEEEBPcurtype g\@IEEEgetcurglue{#1}{\@IEEEBPcurglue}\fi% +% if curtype is a column, get the column def and set the current column name +\if\@IEEEBPcurtype c\@IEEEgetcurcol{#1}\fi% +% if curtype is a numeral, acquire the user defined glue +\if\@IEEEBPcurtype n\@IEEEprocessNcol{#1}\fi% +% process the acquired glue +\if\@IEEEBPcurtype g\@IEEEprocessGcol\fi% +% process the acquired col +\if\@IEEEBPcurtype c\@IEEEprocessCcol\fi% +% ready prevtype for next col spec. +\let\@IEEEBPprevtype=\@IEEEBPcurtype% +% be sure and put back the future token(s) as a group +\fi\@@nextIEEEbuildpreamble{#2}} + + +% executed just after preamble build is completed +% warn about zero cols, and if prevtype type = u, put in end tabskip glue +\def\@@IEEEfinishpreamble#1{\ifnum\@IEEEeqnnumcols<1\relax +\@IEEEclspkgerror{No column specifiers declared for IEEEeqnarray}% +{At least one column type must be declared for each IEEEeqnarray.}% +\fi%num cols less than 1 +%if last type undefined, set default end tabskip glue +\if\@IEEEBPprevtype u\@IEEEappendtoksA{\tabskip=\@IEEEBPendglue}\fi} + + +% Identify and return the column specifier's type code +\def\@IEEEgetcoltype#1#2#3{% +% use only the very first token to determine the type +% \noindent is used as a delimiter as \end can be present here +\def\@IEEEgrabfirstoken##1##2\noindent{\let\@IEEEgrabbedfirstoken=##1}% +\@IEEEgrabfirstoken#1\relax\relax\noindent +% \@IEEEgrabfirstoken has the first token, the rest are discarded +% n = number +% g = glue (any other char in catagory 12) +% c = letter +% e = \end +% u = undefined +% third argument: 0 = no error message, 1 = error on invalid char +\let#2=u\relax% assume invalid until know otherwise +\ifx\@IEEEgrabbedfirstoken\end\let#2=e\else +\ifcat\@IEEEgrabbedfirstoken\relax\else% screen out control sequences +\if0\@IEEEgrabbedfirstoken\let#2=n\else +\if1\@IEEEgrabbedfirstoken\let#2=n\else +\if2\@IEEEgrabbedfirstoken\let#2=n\else +\if3\@IEEEgrabbedfirstoken\let#2=n\else +\if4\@IEEEgrabbedfirstoken\let#2=n\else +\if5\@IEEEgrabbedfirstoken\let#2=n\else +\if6\@IEEEgrabbedfirstoken\let#2=n\else +\if7\@IEEEgrabbedfirstoken\let#2=n\else +\if8\@IEEEgrabbedfirstoken\let#2=n\else +\if9\@IEEEgrabbedfirstoken\let#2=n\else +\ifcat,\@IEEEgrabbedfirstoken\let#2=g\relax +\else\ifcat a\@IEEEgrabbedfirstoken\let#2=c\relax\fi\fi\fi\fi\fi\fi\fi\fi\fi\fi\fi\fi\fi\fi +\if#2u\relax +\if0\noexpand#3\relax\else\@IEEEclspkgerror{Invalid character in column specifications}% +{Only letters, numerals and certain other symbols are allowed \MessageBreak +as IEEEeqnarray column specifiers.}\fi\fi} + + +% identify the current letter referenced column +% if invalid, use a default column +\def\@IEEEgetcurcol#1{\expandafter\ifx\csname @IEEEeqnarraycolDEF#1\endcsname\@IEEEeqnarraycolisdefined% +\def\@IEEEBPcurcolname{#1}\else% invalid column name +\@IEEEclspkgerror{Invalid column type "#1" in column specifications.\MessageBreak +Using a default centering column instead}% +{You must define IEEEeqnarray column types before use.}% 
+\def\@IEEEBPcurcolname{@IEEEdefault}\fi} + + +% identify and return the predefined (punctuation) glue value +\def\@IEEEgetcurglue#1#2{% +% ! = \! (neg small) -0.16667em (-3/18 em) +% , = \, (small) 0.16667em ( 3/18 em) +% : = \: (med) 0.22222em ( 4/18 em) +% ; = \; (large) 0.27778em ( 5/18 em) +% ' = \quad 1em +% " = \qquad 2em +% . = 0.5\arraycolsep +% / = \arraycolsep +% ? = 2\arraycolsep +% * = 1fil +% + = \@IEEEeqnarraycolSEPcenter +% - = \@IEEEeqnarraycolSEPzero +% Note that all em values are referenced to the math font (textfont2) fontdimen6 +% value for 1em. +% +% use only the very first token to determine the type +% this prevents errant tokens from getting in the main text +% \noindent is used as a delimiter here +\def\@IEEEgrabfirstoken##1##2\noindent{\let\@IEEEgrabbedfirstoken=##1}% +\@IEEEgrabfirstoken#1\relax\relax\noindent +% get the math font 1em value +% LaTeX2e's NFSS2 does not preload the fonts, but \IEEEeqnarray needs +% to gain access to the math (\textfont2) font's spacing parameters. +% So we create a bogus box here that uses the math font to ensure +% that \textfont2 is loaded and ready. If this is not done, +% the \textfont2 stuff here may not work. +% Thanks to Bernd Raichle for his 1997 post on this topic. +{\setbox0=\hbox{$\displaystyle\relax$}}% +% fontdimen6 has the width of 1em (a quad). +\@IEEEtrantmpdimenA=\fontdimen6\textfont2\relax% +% identify the glue value based on the first token +% we discard anything after the first +\if!\@IEEEgrabbedfirstoken\@IEEEtrantmpdimenA=-0.16667\@IEEEtrantmpdimenA\edef#2{\the\@IEEEtrantmpdimenA}\else +\if,\@IEEEgrabbedfirstoken\@IEEEtrantmpdimenA=0.16667\@IEEEtrantmpdimenA\edef#2{\the\@IEEEtrantmpdimenA}\else +\if:\@IEEEgrabbedfirstoken\@IEEEtrantmpdimenA=0.22222\@IEEEtrantmpdimenA\edef#2{\the\@IEEEtrantmpdimenA}\else +\if;\@IEEEgrabbedfirstoken\@IEEEtrantmpdimenA=0.27778\@IEEEtrantmpdimenA\edef#2{\the\@IEEEtrantmpdimenA}\else +\if'\@IEEEgrabbedfirstoken\@IEEEtrantmpdimenA=1\@IEEEtrantmpdimenA\edef#2{\the\@IEEEtrantmpdimenA}\else +\if"\@IEEEgrabbedfirstoken\@IEEEtrantmpdimenA=2\@IEEEtrantmpdimenA\edef#2{\the\@IEEEtrantmpdimenA}\else +\if.\@IEEEgrabbedfirstoken\@IEEEtrantmpdimenA=0.5\arraycolsep\edef#2{\the\@IEEEtrantmpdimenA}\else +\if/\@IEEEgrabbedfirstoken\edef#2{\the\arraycolsep}\else +\if?\@IEEEgrabbedfirstoken\@IEEEtrantmpdimenA=2\arraycolsep\edef#2{\the\@IEEEtrantmpdimenA}\else +\if *\@IEEEgrabbedfirstoken\edef#2{0pt plus 1fil minus 0pt}\else +\if+\@IEEEgrabbedfirstoken\edef#2{\@IEEEeqnarraycolSEPcenter}\else +\if-\@IEEEgrabbedfirstoken\edef#2{\@IEEEeqnarraycolSEPzero}\else +\edef#2{\@IEEEeqnarraycolSEPzero}% +\@IEEEclspkgerror{Invalid predefined inter-column glue type "#1" in\MessageBreak +column specifications. Using a default value of\MessageBreak +0pt instead}% +{Only !,:;'"./?*+ and - are valid predefined glue types in the\MessageBreak +IEEEeqnarray column specifications.}\fi\fi\fi\fi\fi\fi\fi\fi\fi\fi\fi\fi} + + + +% process a numerical digit from the column specification +% and look up the corresponding user defined glue value +% can transform current type from n to g or a as the user defined glue is acquired +\def\@IEEEprocessNcol#1{\if\@IEEEBPprevtype g% +\@IEEEclspkgerror{Back-to-back inter-column glue specifiers in column\MessageBreak +specifications. 
Ignoring consecutive glue specifiers\MessageBreak +after the first}% +{You cannot have two or more glue types next to each other\MessageBreak +in the IEEEeqnarray column specifications.}% +\let\@IEEEBPcurtype=a% abort this glue, future digits will be discarded +\@IEEEBPcurnum=0\relax% +\else% if we previously aborted a glue +\if\@IEEEBPprevtype a\@IEEEBPcurnum=0\let\@IEEEBPcurtype=a%maintain digit abortion +\else%acquire this number +% save the previous type before the numerical digits started +\if\@IEEEBPprevtype n\else\let\@IEEEBPprevsavedtype=\@IEEEBPprevtype\fi% +\multiply\@IEEEBPcurnum by 10\relax% +\advance\@IEEEBPcurnum by #1\relax% add in number, \relax is needed to stop TeX's number scan +\if\@IEEEBPnexttype n\else%close acquisition +\expandafter\ifx\csname @IEEEeqnarraycolSEPDEF\expandafter\romannumeral\number\@IEEEBPcurnum\endcsname\@IEEEeqnarraycolisdefined% +\edef\@IEEEBPcurglue{\csname @IEEEeqnarraycolSEP\expandafter\romannumeral\number\@IEEEBPcurnum\endcsname}% +\else%user glue not defined +\@IEEEclspkgerror{Invalid user defined inter-column glue type "\number\@IEEEBPcurnum" in\MessageBreak +column specifications. Using a default value of\MessageBreak +0pt instead}% +{You must define all IEEEeqnarray numerical inter-column glue types via\MessageBreak +\string\IEEEeqnarraydefcolsep \space before they are used in column specifications.}% +\edef\@IEEEBPcurglue{\@IEEEeqnarraycolSEPzero}% +\fi% glue defined or not +\let\@IEEEBPcurtype=g% change the type to reflect the acquired glue +\let\@IEEEBPprevtype=\@IEEEBPprevsavedtype% restore the prev type before this number glue +\@IEEEBPcurnum=0\relax%ready for next acquisition +\fi%close acquisition, get glue +\fi%discard or acquire number +\fi%prevtype glue or not +} + + +% process an acquired glue +% add any acquired column/glue pair to the preamble +\def\@IEEEprocessGcol{\if\@IEEEBPprevtype a\let\@IEEEBPcurtype=a%maintain previous glue abortions +\else +% if this is the start glue, save it, but do nothing else +% as this is not used in the preamble, but before +\if\@IEEEBPprevtype s\edef\@IEEEBPstartglue{\@IEEEBPcurglue}% +\else%not the start glue +\if\@IEEEBPprevtype g%ignore if back to back glues +\@IEEEclspkgerror{Back-to-back inter-column glue specifiers in column\MessageBreak +specifications. 
Ignoring consecutive glue specifiers\MessageBreak
+after the first}%
+{You cannot have two or more glue types next to each other\MessageBreak
+in the IEEEeqnarray column specifications.}%
+\let\@IEEEBPcurtype=a% abort this glue
+\else% not a back to back glue
+\if\@IEEEBPprevtype c\relax% if the previous type was a col, add column/glue pair to preamble
+\ifnum\@IEEEeqnnumcols>0\relax\@IEEEappendtoksA{&}\fi
+\toks0={##}%
+% make preamble advance col counter if this environment needs this
+\if@advanceIEEEeqncolcnt\@IEEEappendtoksA{\global\advance\@IEEEeqncolcnt by 1\relax}\fi
+% insert the column definition into the preamble, being careful not to expand
+% the column definition
+\@IEEEappendtoksA{\tabskip=\@IEEEBPcurglue}%
+\@IEEEappendNOEXPANDtoksA{\begingroup\csname @IEEEeqnarraycolPRE}%
+\@IEEEappendtoksA{\@IEEEBPcurcolname}%
+\@IEEEappendNOEXPANDtoksA{\endcsname}%
+\@IEEEappendtoksA{\the\toks0}%
+\@IEEEappendNOEXPANDtoksA{\relax\relax\relax\relax\relax%
+\relax\relax\relax\relax\relax\csname @IEEEeqnarraycolPOST}%
+\@IEEEappendtoksA{\@IEEEBPcurcolname}%
+\@IEEEappendNOEXPANDtoksA{\endcsname\relax\relax\relax\relax\relax%
+\relax\relax\relax\relax\relax\endgroup}%
+\advance\@IEEEeqnnumcols by 1\relax%one more column in the preamble
+\else% error: non-start glue with no pending column
+\@IEEEclspkgerror{Inter-column glue specifier without a prior column\MessageBreak
+type in the column specifications. Ignoring this glue\MessageBreak
+specifier}%
+{Except for the first and last positions, glue can be placed only\MessageBreak
+between column types.}%
+\let\@IEEEBPcurtype=a% abort this glue
+\fi% previous was a column
+\fi% back-to-back glues
+\fi% is start column glue
+\fi% prev type not a
+}
+
+
+% process an acquired letter referenced column and, if necessary, add it to the preamble
+\def\@IEEEprocessCcol{\if\@IEEEBPnexttype g\else
+\if\@IEEEBPnexttype n\else
+% we have a column followed by something other than a glue (or numeral glue)
+% so we must add this column to the preamble now
+\ifnum\@IEEEeqnnumcols>0\relax\@IEEEappendtoksA{&}\fi%col separator for those after the first
+\if\@IEEEBPnexttype e\@IEEEappendtoksA{\tabskip=\@IEEEBPendglue\relax}\else%put in end glue
+\@IEEEappendtoksA{\tabskip=\@IEEEeqnarraycolSEPdefaultmid\relax}\fi% or default mid glue
+\toks0={##}%
+% make preamble advance col counter if this environment needs this
+\if@advanceIEEEeqncolcnt\@IEEEappendtoksA{\global\advance\@IEEEeqncolcnt by 1\relax}\fi
+% insert the column definition into the preamble, being careful not to expand
+% the column definition
+\@IEEEappendNOEXPANDtoksA{\begingroup\csname @IEEEeqnarraycolPRE}%
+\@IEEEappendtoksA{\@IEEEBPcurcolname}%
+\@IEEEappendNOEXPANDtoksA{\endcsname}%
+\@IEEEappendtoksA{\the\toks0}%
+\@IEEEappendNOEXPANDtoksA{\relax\relax\relax\relax\relax%
+\relax\relax\relax\relax\relax\csname @IEEEeqnarraycolPOST}%
+\@IEEEappendtoksA{\@IEEEBPcurcolname}%
+\@IEEEappendNOEXPANDtoksA{\endcsname\relax\relax\relax\relax\relax%
+\relax\relax\relax\relax\relax\endgroup}%
+\advance\@IEEEeqnnumcols by 1\relax%one more column in the preamble
+\fi%next type not numeral
+\fi%next type not glue
+}
+
+
+%%
+%% END OF IEEEeqnarray DEFINITIONS
+%%
+
+
+
+
+% set up the running headings; this is complex because of all the different
+% modes IEEEtran supports
+\if@twoside
+  \ifCLASSOPTIONtechnote
+    \def\ps@headings{%
+      \def\@oddhead{\hbox{}\scriptsize\leftmark \hfil \thepage}
+      \def\@evenhead{\scriptsize\thepage \hfil \leftmark\hbox{}}
+      \ifCLASSOPTIONdraftcls
+        \ifCLASSOPTIONdraftclsnofoot
+          
\def\@oddfoot{}\def\@evenfoot{}% + \else + \def\@oddfoot{\scriptsize\@date\hfil DRAFT} + \def\@evenfoot{\scriptsize DRAFT\hfil\@date} + \fi + \else + \def\@oddfoot{}\def\@evenfoot{} + \fi} + \else % not a technote + \def\ps@headings{% + \ifCLASSOPTIONconference + \def\@oddhead{} + \def\@evenhead{} + \else + \def\@oddhead{\hbox{}\scriptsize\rightmark \hfil \thepage} + \def\@evenhead{\scriptsize\thepage \hfil \leftmark\hbox{}} + \fi + \ifCLASSOPTIONdraftcls + \def\@oddhead{\hbox{}\scriptsize\rightmark \hfil \thepage} + \def\@evenhead{\scriptsize\thepage \hfil \leftmark\hbox{}} + \ifCLASSOPTIONdraftclsnofoot + \def\@oddfoot{}\def\@evenfoot{}% + \else + \def\@oddfoot{\scriptsize\@date\hfil DRAFT} + \def\@evenfoot{\scriptsize DRAFT\hfil\@date} + \fi + \else + \def\@oddfoot{}\def\@evenfoot{}% + \fi} + \fi +\else % single side +\def\ps@headings{% + \ifCLASSOPTIONconference + \def\@oddhead{} + \def\@evenhead{} + \else + \def\@oddhead{\hbox{}\scriptsize\leftmark \hfil \thepage} + \def\@evenhead{} + \fi + \ifCLASSOPTIONdraftcls + \def\@oddhead{\hbox{}\scriptsize\leftmark \hfil \thepage} + \def\@evenhead{} + \ifCLASSOPTIONdraftclsnofoot + \def\@oddfoot{} + \else + \def\@oddfoot{\scriptsize \@date \hfil DRAFT} + \fi + \else + \def\@oddfoot{} + \fi + \def\@evenfoot{}} +\fi + + +% title page style +\def\ps@IEEEtitlepagestyle{\def\@oddfoot{}\def\@evenfoot{}% +\ifCLASSOPTIONconference + \def\@oddhead{}% + \def\@evenhead{}% +\else + \def\@oddhead{\hbox{}\scriptsize\leftmark \hfil \thepage}% + \def\@evenhead{\scriptsize\thepage \hfil \leftmark\hbox{}}% +\fi +\ifCLASSOPTIONdraftcls + \def\@oddhead{\hbox{}\scriptsize\leftmark \hfil \thepage}% + \def\@evenhead{\scriptsize\thepage \hfil \leftmark\hbox{}}% + \ifCLASSOPTIONdraftclsnofoot\else + \def\@oddfoot{\scriptsize \@date\hfil DRAFT}% + \def\@evenfoot{\scriptsize DRAFT\hfil \@date}% + \fi +\else + % all non-draft mode footers + \if@IEEEusingpubid + % for title pages that are using a pubid + % do not repeat pubid if using peer review option + \ifCLASSOPTIONpeerreview + \else + \footskip 0pt% + \ifCLASSOPTIONcompsoc + \def\@oddfoot{\hss\normalfont\scriptsize\raisebox{-1.5\@IEEEnormalsizeunitybaselineskip}[0ex][0ex]{\@IEEEpubid}\hss}% + \def\@evenfoot{\hss\normalfont\scriptsize\raisebox{-1.5\@IEEEnormalsizeunitybaselineskip}[0ex][0ex]{\@IEEEpubid}\hss}% + \else + \def\@oddfoot{\hss\normalfont\footnotesize\raisebox{1.5ex}[1.5ex]{\@IEEEpubid}\hss}% + \def\@evenfoot{\hss\normalfont\footnotesize\raisebox{1.5ex}[1.5ex]{\@IEEEpubid}\hss}% + \fi + \fi + \fi +\fi} + + +% peer review cover page style +\def\ps@IEEEpeerreviewcoverpagestyle{% +\def\@oddhead{}\def\@evenhead{}% +\def\@oddfoot{}\def\@evenfoot{}% +\ifCLASSOPTIONdraftcls + \ifCLASSOPTIONdraftclsnofoot\else + \def\@oddfoot{\scriptsize \@date\hfil DRAFT}% + \def\@evenfoot{\scriptsize DRAFT\hfil \@date}% + \fi +\else + % non-draft mode footers + \if@IEEEusingpubid + \footskip 0pt% + \ifCLASSOPTIONcompsoc + \def\@oddfoot{\hss\normalfont\scriptsize\raisebox{-1.5\@IEEEnormalsizeunitybaselineskip}[0ex][0ex]{\@IEEEpubid}\hss}% + \def\@evenfoot{\hss\normalfont\scriptsize\raisebox{-1.5\@IEEEnormalsizeunitybaselineskip}[0ex][0ex]{\@IEEEpubid}\hss}% + \else + \def\@oddfoot{\hss\normalfont\footnotesize\raisebox{1.5ex}[1.5ex]{\@IEEEpubid}\hss}% + \def\@evenfoot{\hss\normalfont\footnotesize\raisebox{1.5ex}[1.5ex]{\@IEEEpubid}\hss}% + \fi + \fi +\fi} + + +% start with empty headings +\def\rightmark{}\def\leftmark{} + + +%% Defines the command for putting the header. \footernote{TEXT} is the same +%% as \markboth{TEXT}{TEXT}. 
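+%% A typical journal usage sketch (the journal name, date, and author names
+%% here are placeholders, not class defaults):
+%%   \markboth{Journal of Widgets, Vol.~1, No.~1, January 2007}%
+%%   {Author \MakeLowercase{\textit{et al.}}: A Sample Paper Title}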
+%% Note that all the text is forced into uppercase. If you have some text
+%% that needs to be in lower case, for instance et al., then either manually
+%% set \leftmark and \rightmark or use \MakeLowercase{et al.} within the
+%% arguments to \markboth.
+\def\markboth#1#2{\def\leftmark{\@IEEEcompsoconly{\sffamily}\MakeUppercase{#1}}%
+\def\rightmark{\@IEEEcompsoconly{\sffamily}\MakeUppercase{#2}}}
+\def\footernote#1{\markboth{#1}{#1}}
+
+\def\today{\ifcase\month\or
+  January\or February\or March\or April\or May\or June\or
+  July\or August\or September\or October\or November\or December\fi
+  \space\number\day, \number\year}
+
+
+
+
+%% CITATION AND BIBLIOGRAPHY COMMANDS
+%%
+%% V1.6 no longer supports the older, nonstandard \shortcite and \citename setup stuff
+%
+%
+% Modify LaTeX2e \@citex to separate citations with "], ["
+\def\@citex[#1]#2{%
+  \let\@citea\@empty
+  \@cite{\@for\@citeb:=#2\do
+    {\@citea\def\@citea{], [}%
+     \edef\@citeb{\expandafter\@firstofone\@citeb\@empty}%
+     \if@filesw\immediate\write\@auxout{\string\citation{\@citeb}}\fi
+     \@ifundefined{b@\@citeb}{\mbox{\reset@font\bfseries ?}%
+       \G@refundefinedtrue
+       \@latex@warning
+         {Citation `\@citeb' on page \thepage \space undefined}}%
+       {\hbox{\csname b@\@citeb\endcsname}}}}{#1}}
+
+% V1.6 we create hooks for the optional use of Donald Arseneau's
+% cite.sty package. cite.sty is "smart" and will notice that the
+% following format controls are already defined and will not
+% redefine them. The result will be the proper sorting of the
+% citation numbers and auto detection of 3 or more entry "ranges" -
+% all in IEEE style:  [1], [2], [5]--[7], [12]
+% This also allows for an optional note, i.e., \cite[mynote]{..}.
+% If the \cite with note has more than one reference, the note will
+% be applied to the last of the listed references. It is generally
+% desired that if a note is given, only one reference is listed in
+% that \cite.
+% Thanks to Mr. Arseneau for providing the required format arguments
+% to produce the IEEE style.
+\def\citepunct{], [}
+\def\citedash{]--[}
+
+% V1.7 default to using same font for urls made by url.sty
+\AtBeginDocument{\csname url@samestyle\endcsname}
+
+% V1.6 class files should always provide these
+\def\newblock{\hskip .11em\@plus.33em\@minus.07em}
+\let\@openbib@code\@empty
+
+
+% Provide support for the control entries of IEEEtran.bst V1.00 and later.
+% V1.7 optional argument allows for a different aux file to be specified in
+% order to handle multiple bibliographies. 
For example, with multibib.sty: +% \newcites{sec}{Secondary Literature} +% \bstctlcite[@auxoutsec]{BSTcontrolhak} +\def\bstctlcite{\@ifnextchar[{\@bstctlcite}{\@bstctlcite[@auxout]}} +\def\@bstctlcite[#1]#2{\@bsphack + \@for\@citeb:=#2\do{% + \edef\@citeb{\expandafter\@firstofone\@citeb}% + \if@filesw\immediate\write\csname #1\endcsname{\string\citation{\@citeb}}\fi}% + \@esphack} + +% V1.6 provide a way for a user to execute a command just before +% a given reference number - used to insert a \newpage to balance +% the columns on the last page +\edef\@IEEEtriggerrefnum{0} % the default of zero means that + % the command is not executed +\def\@IEEEtriggercmd{\newpage} + +% allow the user to alter the triggered command +\long\def\IEEEtriggercmd#1{\long\def\@IEEEtriggercmd{#1}} + +% allow user a way to specify the reference number just before the +% command is executed +\def\IEEEtriggeratref#1{\@IEEEtrantmpcountA=#1% +\edef\@IEEEtriggerrefnum{\the\@IEEEtrantmpcountA}}% + +% trigger command at the given reference +\def\@IEEEbibitemprefix{\@IEEEtrantmpcountA=\@IEEEtriggerrefnum\relax% +\advance\@IEEEtrantmpcountA by -1\relax% +\ifnum\c@enumiv=\@IEEEtrantmpcountA\relax\@IEEEtriggercmd\relax\fi} + + +\def\@biblabel#1{[#1]} + +% compsoc journals left align the reference numbers +\@IEEEcompsocnotconfonly{\def\@biblabel#1{[#1]\hfill}} + +% controls bib item spacing +\def\IEEEbibitemsep{2.5pt plus .5pt} + +\@IEEEcompsocconfonly{\def\IEEEbibitemsep{1\baselineskip plus 0.25\baselineskip minus 0.25\baselineskip}} + + +\def\thebibliography#1{\section*{\refname}% + \addcontentsline{toc}{section}{\refname}% + % V1.6 add some rubber space here and provide a command trigger + \footnotesize\@IEEEcompsocconfonly{\small}\vskip 0.3\baselineskip plus 0.1\baselineskip minus 0.1\baselineskip% + \list{\@biblabel{\@arabic\c@enumiv}}% + {\settowidth\labelwidth{\@biblabel{#1}}% + \leftmargin\labelwidth + \labelsep 1em + \advance\leftmargin\labelsep\relax + \itemsep \IEEEbibitemsep\relax + \usecounter{enumiv}% + \let\p@enumiv\@empty + \renewcommand\theenumiv{\@arabic\c@enumiv}}% + \let\@IEEElatexbibitem\bibitem% + \def\bibitem{\@IEEEbibitemprefix\@IEEElatexbibitem}% +\def\newblock{\hskip .11em plus .33em minus .07em}% +% originally: +% \sloppy\clubpenalty4000\widowpenalty4000% +% by adding the \interlinepenalty here, we make it more +% difficult, but not impossible, for LaTeX to break within a reference. +% IEEE almost never breaks a reference (but they do it more often with +% technotes). You may get an underfull vbox warning around the bibliography, +% but the final result will be much more like what IEEE will publish. +% MDS 11/2000 +\ifCLASSOPTIONtechnote\sloppy\clubpenalty4000\widowpenalty4000\interlinepenalty100% +\else\sloppy\clubpenalty4000\widowpenalty4000\interlinepenalty500\fi% + \sfcode`\.=1000\relax} +\let\endthebibliography=\endlist + + + + +% TITLE PAGE COMMANDS +% +% +% \IEEEmembership is used to produce the sublargesize italic font used to indicate author +% IEEE membership. compsoc uses a large size sans slant font +\def\IEEEmembership#1{{\@IEEEnotcompsoconly{\sublargesize}\normalfont\@IEEEcompsoconly{\sffamily}\textit{#1}}} + + +% \IEEEauthorrefmark{} produces a footnote type symbol to indicate author affiliation. +% When given an argument of 1 to 9, \IEEEauthorrefmark{} follows the standard LaTeX footnote +% symbol sequence convention. However, for arguments 10 and above, \IEEEauthorrefmark{} +% reverts to using lower case roman numerals, so it cannot overflow. 
Do note that you
+% cannot use \footnotemark[] in place of \IEEEauthorrefmark{} within \author as the footnote
+% symbols will have been turned off to prevent \thanks from creating footnote marks.
+% \IEEEauthorrefmark{} produces a symbol that appears to LaTeX as having zero vertical
+% height - this allows for a more compact line packing, but the user must ensure that
+% the interline spacing is large enough to prevent \IEEEauthorrefmark{} from colliding
+% with the text above.
+% V1.7 make this a robust command
+\DeclareRobustCommand*{\IEEEauthorrefmark}[1]{\raisebox{0pt}[0pt][0pt]{\textsuperscript{\footnotesize\ensuremath{\ifcase#1\or *\or \dagger\or \ddagger\or%
+    \mathsection\or \mathparagraph\or \|\or **\or \dagger\dagger%
+    \or \ddagger\ddagger \else\textsuperscript{\expandafter\romannumeral#1}\fi}}}}
+
+
+% FONT CONTROLS AND SPACINGS FOR CONFERENCE MODE AUTHOR NAME AND AFFILIATION BLOCKS
+%
+% The default font styles for the author name and affiliation blocks (confmode)
+\def\@IEEEauthorblockNstyle{\normalfont\@IEEEcompsocnotconfonly{\sffamily}\sublargesize\@IEEEcompsocconfonly{\large}}
+\def\@IEEEauthorblockAstyle{\normalfont\@IEEEcompsocnotconfonly{\sffamily}\@IEEEcompsocconfonly{\itshape}\normalsize\@IEEEcompsocconfonly{\large}}
+% The default if the user does not use an author block
+\def\@IEEEauthordefaulttextstyle{\normalfont\@IEEEcompsocnotconfonly{\sffamily}\sublargesize}
+
+% spacing from title (or special paper notice) to author name blocks (confmode)
+% can be negative
+\def\@IEEEauthorblockconfadjspace{-0.25em}
+% compsoc conferences need more space here
+\@IEEEcompsocconfonly{\def\@IEEEauthorblockconfadjspace{0.75\@IEEEnormalsizeunitybaselineskip}}
+\ifCLASSOPTIONconference\def\@IEEEauthorblockconfadjspace{20pt}\fi
+
+% spacing between name and affiliation blocks (confmode)
+% This can be negative.
+% IEEE doesn't want any added spacing here, but I will leave these
+% controls in place in case they ever change their mind.
+% Personally, I like 0.75ex.
+%\def\@IEEEauthorblockNtopspace{0.75ex}
+%\def\@IEEEauthorblockAtopspace{0.75ex}
+\def\@IEEEauthorblockNtopspace{0.0ex}
+\def\@IEEEauthorblockAtopspace{0.0ex}
+% baseline spacing within name and affiliation blocks (confmode)
+% must be positive, spacings below certain values will make
+% the position of a line of text sensitive to the contents of the
+% line above it, i.e., whether or not the prior line has descenders,
+% subscripts, etc. For this reason it is a good idea to keep
+% these above 2.6ex
+\def\@IEEEauthorblockNinterlinespace{2.6ex}
+\def\@IEEEauthorblockAinterlinespace{2.75ex}
+
+% This tracks the required strut size.
+% See the \@IEEEauthorhalign command for the actual default value used. 
+\def\@IEEEauthorblockXinterlinespace{2.7ex}
+
+% variables to retain font size and style across groups
+% values given here have no effect as they will be overwritten later
+\gdef\@IEEESAVESTATEfontsize{10}
+\gdef\@IEEESAVESTATEfontbaselineskip{12}
+\gdef\@IEEESAVESTATEfontencoding{OT1}
+\gdef\@IEEESAVESTATEfontfamily{ptm}
+\gdef\@IEEESAVESTATEfontseries{m}
+\gdef\@IEEESAVESTATEfontshape{n}
+
+% saves the current font attributes
+\def\@IEEEcurfontSAVE{\global\let\@IEEESAVESTATEfontsize\f@size%
+\global\let\@IEEESAVESTATEfontbaselineskip\f@baselineskip%
+\global\let\@IEEESAVESTATEfontencoding\f@encoding%
+\global\let\@IEEESAVESTATEfontfamily\f@family%
+\global\let\@IEEESAVESTATEfontseries\f@series%
+\global\let\@IEEESAVESTATEfontshape\f@shape}
+
+% restores the saved font attributes
+\def\@IEEEcurfontRESTORE{\fontsize{\@IEEESAVESTATEfontsize}{\@IEEESAVESTATEfontbaselineskip}%
+\fontencoding{\@IEEESAVESTATEfontencoding}%
+\fontfamily{\@IEEESAVESTATEfontfamily}%
+\fontseries{\@IEEESAVESTATEfontseries}%
+\fontshape{\@IEEESAVESTATEfontshape}%
+\selectfont}
+
+
+% variable to indicate if the current block is the first block in the column
+\newif\if@IEEEprevauthorblockincol \@IEEEprevauthorblockincolfalse
+
+
+% the command places a strut with height and depth = \@IEEEauthorblockXinterlinespace
+% we use this technique to have complete manual control over the spacing of the lines
+% within the halign environment.
+% We set the below baseline portion at 30%, the above
+% baseline portion at 70% of the total length.
+% Responds to changes in the document's \baselinestretch
+\def\@IEEEauthorstrutrule{\@IEEEtrantmpdimenA\@IEEEauthorblockXinterlinespace%
+\@IEEEtrantmpdimenA=\baselinestretch\@IEEEtrantmpdimenA%
+\rule[-0.3\@IEEEtrantmpdimenA]{0pt}{\@IEEEtrantmpdimenA}}
+
+
+% blocks to hold the authors' names and affiliations. 
+% Makes formatting easy for conferences +% +% use real definitions in conference mode +% name block +\def\IEEEauthorblockN#1{\relax\@IEEEauthorblockNstyle% set the default text style +\gdef\@IEEEauthorblockXinterlinespace{0pt}% disable strut for spacer row +% the \expandafter hides the \cr in conditional tex, see the array.sty docs +% for details, probably not needed here as the \cr is in a macro +% do a spacer row if needed +\if@IEEEprevauthorblockincol\expandafter\@IEEEauthorblockNtopspaceline\fi +\global\@IEEEprevauthorblockincoltrue% we now have a block in this column +%restore the correct strut value +\gdef\@IEEEauthorblockXinterlinespace{\@IEEEauthorblockNinterlinespace}% +% input the author names +#1% +% end the row if the user did not already +\crcr} +% spacer row for names +\def\@IEEEauthorblockNtopspaceline{\cr\noalign{\vskip\@IEEEauthorblockNtopspace}} +% +% affiliation block +\def\IEEEauthorblockA#1{\relax\@IEEEauthorblockAstyle% set the default text style +\gdef\@IEEEauthorblockXinterlinespace{0pt}%disable strut for spacer row +% the \expandafter hides the \cr in conditional tex, see the array.sty docs +% for details, probably not needed here as the \cr is in a macro +% do a spacer row if needed +\if@IEEEprevauthorblockincol\expandafter\@IEEEauthorblockAtopspaceline\fi +\global\@IEEEprevauthorblockincoltrue% we now have a block in this column +%restore the correct strut value +\gdef\@IEEEauthorblockXinterlinespace{\@IEEEauthorblockAinterlinespace}% +% input the author affiliations +#1% +% end the row if the user did not already +\crcr} +% spacer row for affiliations +\def\@IEEEauthorblockAtopspaceline{\cr\noalign{\vskip\@IEEEauthorblockAtopspace}} + + +% allow papers to compile even if author blocks are used in modes other +% than conference or peerreviewca. For such cases, we provide dummy blocks. +\ifCLASSOPTIONconference +\else + \ifCLASSOPTIONpeerreviewca\else + % not conference or peerreviewca mode + \def\IEEEauthorblockN#1{#1}% + \def\IEEEauthorblockA#1{#1}% + \fi +\fi + + + +% we provide our own halign so as not to have to depend on tabular +\def\@IEEEauthorhalign{\@IEEEauthordefaulttextstyle% default text style + \lineskip=0pt\relax% disable line spacing + \lineskiplimit=0pt\relax% + \baselineskip=0pt\relax% + \@IEEEcurfontSAVE% save the current font + \mathsurround\z@\relax% no extra spacing around math + \let\\\@IEEEauthorhaligncr% replace newline with halign friendly one + \tabskip=0pt\relax% no column spacing + \everycr{}% ensure no problems here + \@IEEEprevauthorblockincolfalse% no author blocks yet + \def\@IEEEauthorblockXinterlinespace{2.7ex}% default interline space + \vtop\bgroup%vtop box + \halign\bgroup&\relax\hfil\@IEEEcurfontRESTORE\relax ##\relax + \hfil\@IEEEcurfontSAVE\@IEEEauthorstrutrule\cr} + +% ensure last line, exit from halign, close vbox +\def\end@IEEEauthorhalign{\crcr\egroup\egroup} + +% handle bogus star form +\def\@IEEEauthorhaligncr{{\ifnum0=`}\fi\@ifstar{\@@IEEEauthorhaligncr}{\@@IEEEauthorhaligncr}} + +% test and setup the optional argument to \\[] +\def\@@IEEEauthorhaligncr{\@testopt\@@@IEEEauthorhaligncr\z@skip} + +% end the line and do the optional spacer +\def\@@@IEEEauthorhaligncr[#1]{\ifnum0=`{\fi}\cr\noalign{\vskip#1\relax}} + + + +% flag to prevent multiple \and warning messages +\newif\if@IEEEWARNand +\@IEEEWARNandtrue + +% if in conference or peerreviewca modes, we support the use of \and as \author is a +% tabular environment, otherwise we warn the user that \and is invalid +% outside of conference or peerreviewca modes. 
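+% For example, a two-author conference title block could be entered as
+% (a sketch; the names and affiliations are placeholders):
+% \author{\IEEEauthorblockN{A. Author}\IEEEauthorblockA{First University}
+% \and
+% \IEEEauthorblockN{B. Author}\IEEEauthorblockA{Second University}}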
+\def\and{\relax} % provide a bogus \and that we will then override + +\renewcommand{\and}[1][\relax]{\if@IEEEWARNand\typeout{** WARNING: \noexpand\and is valid only + when in conference or peerreviewca}\typeout{modes (line \the\inputlineno).}\fi\global\@IEEEWARNandfalse} + +\ifCLASSOPTIONconference% +\renewcommand{\and}[1][\hfill]{\end{@IEEEauthorhalign}#1\begin{@IEEEauthorhalign}}% +\fi +\ifCLASSOPTIONpeerreviewca +\renewcommand{\and}[1][\hfill]{\end{@IEEEauthorhalign}#1\begin{@IEEEauthorhalign}}% +\fi + + +% page clearing command +% based on LaTeX2e's \cleardoublepage, but allows different page styles +% for the inserted blank pages +\def\@IEEEcleardoublepage#1{\clearpage\if@twoside\ifodd\c@page\else +\hbox{}\thispagestyle{#1}\newpage\if@twocolumn\hbox{}\thispagestyle{#1}\newpage\fi\fi\fi} + + +% user command to invoke the title page +\def\maketitle{\par% + \begingroup% + \normalfont% + \def\thefootnote{}% the \thanks{} mark type is empty + \def\footnotemark{}% and kill space from \thanks within author + \let\@makefnmark\relax% V1.7, must *really* kill footnotemark to remove all \textsuperscript spacing as well. + \footnotesize% equal spacing between thanks lines + \footnotesep 0.7\baselineskip%see global setting of \footnotesep for more info + % V1.7 disable \thanks note indention for compsoc + \@IEEEcompsoconly{\long\def\@makefntext##1{\parindent 1em\noindent\hbox{\@makefnmark}##1}}% + \normalsize% + \ifCLASSOPTIONpeerreview + \newpage\global\@topnum\z@ \@maketitle\@IEEEstatictitlevskip\@IEEEaftertitletext% + \thispagestyle{IEEEpeerreviewcoverpagestyle}\@thanks% + \else + \if@twocolumn% + \ifCLASSOPTIONtechnote% + \newpage\global\@topnum\z@ \@maketitle\@IEEEstatictitlevskip\@IEEEaftertitletext% + \else + \twocolumn[\@maketitle\@IEEEstatictitlevskip\@IEEEaftertitletext]% + \fi + \else + \newpage\global\@topnum\z@ \@maketitle\@IEEEstatictitlevskip\@IEEEaftertitletext% + \fi + \thispagestyle{IEEEtitlepagestyle}\@thanks% + \fi + % pullup page for pubid if used. + \if@IEEEusingpubid + \enlargethispage{-\@IEEEpubidpullup}% + \fi + \endgroup + \setcounter{footnote}{0}\let\maketitle\relax\let\@maketitle\relax + \gdef\@thanks{}% + % v1.6b do not clear these as we will need the title again for peer review papers + % \gdef\@author{}\gdef\@title{}% + \let\thanks\relax} + + + +% V1.7 parbox to format \@IEEEcompsoctitleabstractindextext +\long\def\@IEEEcompsoctitleabstractindextextbox#1{\parbox{0.915\textwidth}{#1}} + +% formats the Title, authors names, affiliations and special paper notice +% THIS IS A CONTROLLED SPACING COMMAND! 
Do not allow blank lines or unintentional
+% spaces to enter the definition - use % at the end of each line
+\def\@maketitle{\newpage
+\begingroup\centering
+\ifCLASSOPTIONtechnote% technotes
+ {\bfseries\large\@IEEEcompsoconly{\sffamily}\@title\par}\vskip 1.3em{\lineskip .5em\@IEEEcompsoconly{\sffamily}\@author
+ \@IEEEspecialpapernotice\par{\@IEEEcompsoconly{\vskip 1.5em\relax
+ \@IEEEcompsoctitleabstractindextextbox{\@IEEEcompsoctitleabstractindextext}\par
+ \hfill\@IEEEcompsocdiamondline\hfill\hbox{}\par}}}\relax
+\else% not a technote
+ \vskip0.2em{\Huge\@IEEEcompsoconly{\sffamily}\@IEEEcompsocconfonly{\normalfont\normalsize\vskip 2\@IEEEnormalsizeunitybaselineskip
+ \bfseries\Large}\@title\par}\vskip1.0em\par%
+ % V1.6 handle \author differently if in conference mode
+ \ifCLASSOPTIONconference%
+  {\@IEEEspecialpapernotice\mbox{}\vskip\@IEEEauthorblockconfadjspace%
+  \mbox{}\hfill\begin{@IEEEauthorhalign}\@author\end{@IEEEauthorhalign}\hfill\mbox{}\par}\relax
+ \else% peerreviewca, peerreview or journal
+  \ifCLASSOPTIONpeerreviewca
+   % peerreviewca handles author names just like conference mode
+   {\@IEEEcompsoconly{\sffamily}\@IEEEspecialpapernotice\mbox{}\vskip\@IEEEauthorblockconfadjspace%
+   \mbox{}\hfill\begin{@IEEEauthorhalign}\@author\end{@IEEEauthorhalign}\hfill\mbox{}\par
+   {\@IEEEcompsoconly{\vskip 1.5em\relax
+   \@IEEEcompsoctitleabstractindextextbox{\@IEEEcompsoctitleabstractindextext}\par\hfill
+   \@IEEEcompsocdiamondline\hfill\hbox{}\par}}}\relax
+  \else% journal or peerreview
+   {\lineskip.5em\@IEEEcompsoconly{\sffamily}\sublargesize\@author\@IEEEspecialpapernotice\par
+   {\@IEEEcompsoconly{\vskip 1.5em\relax
+   \@IEEEcompsoctitleabstractindextextbox{\@IEEEcompsoctitleabstractindextext}\par\hfill
+   \@IEEEcompsocdiamondline\hfill\hbox{}\par}}}\relax
+  \fi
+ \fi
+\fi\par\endgroup}
+
+
+
+% V1.7 Computer Society "diamond line" which follows index terms for nonconference papers
+\def\@IEEEcompsocdiamondline{\vrule depth 0pt height 0.5pt width 4cm\hspace{7.5pt}%
+\raisebox{-3.5pt}{\fontfamily{pzd}\fontencoding{U}\fontseries{m}\fontshape{n}\fontsize{11}{12}\selectfont\char70}%
+\hspace{7.5pt}\vrule depth 0pt height 0.5pt width 4cm\relax}
+
+% V1.7 standard LaTeX2e \thanks, but with \itshape under compsoc. Also make it a \long\def
+% We also need to trigger the one-shot footnote rule
+\def\@IEEEtriggeroneshotfootnoterule{\global\@IEEEenableoneshotfootnoteruletrue}
+
+
+\long\def\thanks#1{\footnotemark
+    \protected@xdef\@thanks{\@thanks
+        \protect\footnotetext[\the\c@footnote]{\@IEEEcompsoconly{\itshape
+        \protect\@IEEEtriggeroneshotfootnoterule\relax}\ignorespaces#1}}}
+\let\@thanks\@empty
+
+% V1.7 allow \author to contain \par's. This is needed to allow \thanks to contain \par.
+\long\def\author#1{\gdef\@author{#1}}
+
+
+% in addition to setting up IEEEitemize, we need to remove a baselineskip space above and
+% below it because \list's \pars introduce blank lines because of the footnote struts. 
+\def\@IEEEsetupcompsocitemizelist{\def\labelitemi{$\bullet$}% +\setlength{\IEEElabelindent}{0pt}\setlength{\parskip}{0pt}% +\setlength{\partopsep}{0pt}\setlength{\topsep}{0.5\baselineskip}\vspace{-1\baselineskip}\relax} + + +% flag for fake non-compsoc \IEEEcompsocthanksitem - prevents line break on very first item +\newif\if@IEEEbreakcompsocthanksitem \@IEEEbreakcompsocthanksitemfalse + +\ifCLASSOPTIONcompsoc +% V1.7 compsoc bullet item \thanks +% also, we need to redefine this to destroy the argument in \@IEEEdynamictitlevspace +\long\def\IEEEcompsocitemizethanks#1{\relax\@IEEEbreakcompsocthanksitemfalse\footnotemark + \protected@xdef\@thanks{\@thanks + \protect\footnotetext[\the\c@footnote]{\itshape\protect\@IEEEtriggeroneshotfootnoterule + {\let\IEEEiedlistdecl\relax\protect\begin{IEEEitemize}[\protect\@IEEEsetupcompsocitemizelist]\ignorespaces#1\relax + \protect\end{IEEEitemize}}\protect\vspace{-1\baselineskip}}}} +\DeclareRobustCommand*{\IEEEcompsocthanksitem}{\item} +\else +% non-compsoc, allow for dual compilation via rerouting to normal \thanks +\long\def\IEEEcompsocitemizethanks#1{\thanks{#1}} +% redirect to "pseudo-par" \hfil\break\indent after swallowing [] from \IEEEcompsocthanksitem[] +\DeclareRobustCommand{\IEEEcompsocthanksitem}{\@ifnextchar [{\@IEEEthanksswallowoptionalarg}% +{\@IEEEthanksswallowoptionalarg[\relax]}} +% be sure and break only after first item, be sure and ignore spaces after optional argument +\def\@IEEEthanksswallowoptionalarg[#1]{\relax\if@IEEEbreakcompsocthanksitem\hfil\break +\indent\fi\@IEEEbreakcompsocthanksitemtrue\ignorespaces} +\fi + + +% V1.6b define the \IEEEpeerreviewmaketitle as needed +\ifCLASSOPTIONpeerreview +\def\IEEEpeerreviewmaketitle{\@IEEEcleardoublepage{empty}% +\ifCLASSOPTIONtwocolumn +\twocolumn[\@IEEEpeerreviewmaketitle\@IEEEdynamictitlevspace] +\else +\newpage\@IEEEpeerreviewmaketitle\@IEEEstatictitlevskip +\fi +\thispagestyle{IEEEtitlepagestyle}} +\else +% \IEEEpeerreviewmaketitle does nothing if peer review option has not been selected +\def\IEEEpeerreviewmaketitle{\relax} +\fi + +% peerreview formats the repeated title like the title in journal papers. +\def\@IEEEpeerreviewmaketitle{\begin{center}\@IEEEcompsoconly{\sffamily}% +\normalfont\normalsize\vskip0.2em{\Huge\@title\par}\vskip1.0em\par +\end{center}} + + + +% V1.6 +% this is a static rubber spacer between the title/authors and the main text +% used for single column text, or when the title appears in the first column +% of two column text (technotes). +\def\@IEEEstatictitlevskip{{\normalfont\normalsize +% adjust spacing to next text +% v1.6b handle peer review papers +\ifCLASSOPTIONpeerreview +% for peer review papers, the same value is used for both title pages +% regardless of the other paper modes + \vskip 1\baselineskip plus 0.375\baselineskip minus 0.1875\baselineskip +\else + \ifCLASSOPTIONconference% conference + \vskip 0.6\baselineskip + \else% + \ifCLASSOPTIONtechnote% technote + \vskip 1\baselineskip plus 0.375\baselineskip minus 0.1875\baselineskip% + \else% journal uses more space + \vskip 2.5\baselineskip plus 0.75\baselineskip minus 0.375\baselineskip% + \fi + \fi +\fi}} + + +% V1.6 +% This is a dynamically determined rigid spacer between the title/authors +% and the main text. 
This is used only for single column titles over two
+% column text (most common)
+% This is a bit tricky because we have to ensure that the textheight of the
+% main text is an integer multiple of \baselineskip
+% otherwise underfull vbox problems may develop in the second column of the
+% text on the titlepage
+% The possible use of \IEEEpubid must also be taken into account.
+\def\@IEEEdynamictitlevspace{{%
+  % we run within a group so that all the macros can be forgotten when we are done
+  \long\def\thanks##1{\relax}%don't allow \thanks to run when we evaluate the vbox height
+  \long\def\IEEEcompsocitemizethanks##1{\relax}%don't allow \IEEEcompsocitemizethanks to run when we evaluate the vbox height
+  \normalfont\normalsize% we declare more descriptive variable names
+  \let\@IEEEmaintextheight=\@IEEEtrantmpdimenA%height of the main text columns
+  \let\@IEEEINTmaintextheight=\@IEEEtrantmpdimenB%height of the main text columns with integer # lines
+  % set the nominal and minimum values for the title spacer
+  % the dynamic algorithm will not allow the spacer size to
+  % become less than \@IEEEMINtitlevspace - instead it will be
+  % lengthened
+  % default to journal values
+  \def\@IEEENORMtitlevspace{2.5\baselineskip}%
+  \def\@IEEEMINtitlevspace{2\baselineskip}%
+  % conferences and technotes need tighter spacing
+  \ifCLASSOPTIONconference%conference
+    \def\@IEEENORMtitlevspace{1\baselineskip}%
+    \def\@IEEEMINtitlevspace{0.75\baselineskip}%
+  \fi
+  \ifCLASSOPTIONtechnote%technote
+    \def\@IEEENORMtitlevspace{1\baselineskip}%
+    \def\@IEEEMINtitlevspace{0.75\baselineskip}%
+  \fi%
+  % get the height that the title will take up
+  \ifCLASSOPTIONpeerreview
+    \settoheight{\@IEEEmaintextheight}{\vbox{\hsize\textwidth \@IEEEpeerreviewmaketitle}}%
+  \else
+    \settoheight{\@IEEEmaintextheight}{\vbox{\hsize\textwidth \@maketitle}}%
+  \fi
+  \@IEEEmaintextheight=-\@IEEEmaintextheight% title takes away from maintext, so reverse sign
+  % add the height of the page textheight
+  \advance\@IEEEmaintextheight by \textheight%
+  % correct for title pages using pubid
+  \ifCLASSOPTIONpeerreview\else
+    % peerreview papers use the pubid on the cover page only.
+    % And the cover page uses a static spacer.
+    \if@IEEEusingpubid\advance\@IEEEmaintextheight by -\@IEEEpubidpullup\fi
+  \fi%
+  % subtract off the nominal value of the title bottom spacer
+  \advance\@IEEEmaintextheight by -\@IEEENORMtitlevspace%
+  % \topskip takes away some too
+  \advance\@IEEEmaintextheight by -\topskip%
+  % calculate the column height of the main text for lines
+  % now we calculate the main text height as if holding
+  % an integer number of \normalsize lines after the first
+  % and discard any excess fractional remainder
+  % we subtracted the first line, because the first line
+  % is placed \topskip into the maintext, not \baselineskip like the
+  % rest of the lines. 
+  \@IEEEINTmaintextheight=\@IEEEmaintextheight%
+  \divide\@IEEEINTmaintextheight by \baselineskip%
+  \multiply\@IEEEINTmaintextheight by \baselineskip%
+  % now we calculate how much the title spacer height will
+  % have to be reduced from nominal (\@IEEEREDUCEmaintextheight is always
+  % a positive value) so that the maintext area will contain an integer
+  % number of normal size lines
+  % we change variable names here (to avoid confusion) as we no longer
+  % need \@IEEEINTmaintextheight and can reuse its dimen register
+  \let\@IEEEREDUCEmaintextheight=\@IEEEINTmaintextheight%
+  \advance\@IEEEREDUCEmaintextheight by -\@IEEEmaintextheight%
+  \advance\@IEEEREDUCEmaintextheight by \baselineskip%
+  % this is the calculated height of the spacer
+  % we change variable names here (to avoid confusion) as we no longer
+  % need \@IEEEmaintextheight and can reuse its dimen register
+  \let\@IEEECOMPENSATElen=\@IEEEmaintextheight%
+  \@IEEECOMPENSATElen=\@IEEENORMtitlevspace% set the nominal value
+  % we go with the reduced length if it is smaller than an increase
+  \ifdim\@IEEEREDUCEmaintextheight < 0.5\baselineskip\relax%
+    \advance\@IEEECOMPENSATElen by -\@IEEEREDUCEmaintextheight%
+    % if the resulting spacer is too small back out and go with an increase instead
+    \ifdim\@IEEECOMPENSATElen<\@IEEEMINtitlevspace\relax%
+      \advance\@IEEECOMPENSATElen by \baselineskip%
+    \fi%
+  \else%
+    % go with an increase because it is closer to the nominal than a decrease
+    \advance\@IEEECOMPENSATElen by -\@IEEEREDUCEmaintextheight%
+    \advance\@IEEECOMPENSATElen by \baselineskip%
+  \fi%
+  % set the calculated rigid spacer
+  \vspace{\@IEEECOMPENSATElen}}}
+
+
+
+% V1.6
+% we allow the user access to the last part of the title area
+% useful in emergencies such as when a different spacing is needed
+% This text is NOT compensated for in the dynamic sizer.
+\let\@IEEEaftertitletext=\relax
+\long\def\IEEEaftertitletext#1{\def\@IEEEaftertitletext{#1}}
+
+% V1.7 provide a way for users to enter abstract and keywords
+% into the onecolumn title area. This text is compensated for
+% in the dynamic sizer.
+\let\@IEEEcompsoctitleabstractindextext=\relax
+\long\def\IEEEcompsoctitleabstractindextext#1{\def\@IEEEcompsoctitleabstractindextext{#1}}
+% V1.7 provide a way for users to get the \@IEEEcompsoctitleabstractindextext if
+% not in compsoc journal mode - this way abstract and keywords can be placed
+% in their conventional position if not in compsoc mode.
+\def\IEEEdisplaynotcompsoctitleabstractindextext{%
+\ifCLASSOPTIONcompsoc% display if compsoc conf
+\ifCLASSOPTIONconference\@IEEEcompsoctitleabstractindextext\fi
+\else% or if not compsoc
+\@IEEEcompsoctitleabstractindextext\fi}
+
+
+% command to allow alteration of baselinestretch, but only if the current
+% baselineskip is unity. Used to tweak the compsoc abstract and keywords line spacing. 
+\def\@IEEEtweakunitybaselinestretch#1{{\def\baselinestretch{1}\selectfont +\global\@tempskipa\baselineskip}\ifnum\@tempskipa=\baselineskip% +\def\baselinestretch{#1}\selectfont\fi\relax} + + +% abstract and keywords are in \small, except +% for 9pt docs in which they are in \footnotesize +% Because 9pt docs use an 8pt footnotesize, \small +% becomes a rather awkward 8.5pt +\def\@IEEEabskeysecsize{\small} +\ifx\CLASSOPTIONpt\@IEEEptsizenine + \def\@IEEEabskeysecsize{\footnotesize} +\fi + +% compsoc journals use \footnotesize, compsoc conferences use normalsize +\@IEEEcompsoconly{\def\@IEEEabskeysecsize{\footnotesize}} +\@IEEEcompsocconfonly{\def\@IEEEabskeysecsize{\normalsize}} + + + + +% V1.6 have abstract and keywords strip leading spaces, pars and newlines +% so that spacing is more tightly controlled. +\def\abstract{\normalfont + \if@twocolumn + \par\@IEEEabskeysecsize\bfseries\leavevmode\kern-1pt\textit{\abstractname}---\relax + \else + \begin{center}\vspace{-1.78ex}\@IEEEabskeysecsize\textbf{\abstractname}\end{center}\quotation\@IEEEabskeysecsize + \fi\@IEEEgobbleleadPARNLSP} +% V1.6 IEEE wants only 1 pica from end of abstract to introduction heading when in +% conference mode (the heading already has this much above it) +\def\endabstract{\relax\ifCLASSOPTIONconference\vspace{0ex}\else\vspace{1.34ex}\fi\par\if@twocolumn\else\endquotation\fi + \normalfont\normalsize} + +\def\IEEEkeywords{\normalfont + \if@twocolumn + \@IEEEabskeysecsize\bfseries\leavevmode\kern-1pt\textit{\IEEEkeywordsname}---\relax + \else + \begin{center}\@IEEEabskeysecsize\textbf{\IEEEkeywordsname}\end{center}\quotation\@IEEEabskeysecsize + \fi\itshape\@IEEEgobbleleadPARNLSP} +\def\endIEEEkeywords{\relax\ifCLASSOPTIONtechnote\vspace{1.34ex}\else\vspace{0.5ex}\fi + \par\if@twocolumn\else\endquotation\fi% + \normalfont\normalsize} + +% V1.7 compsoc keywords index terms +\ifCLASSOPTIONcompsoc + \ifCLASSOPTIONconference% compsoc conference +\def\abstract{\normalfont + \begin{center}\@IEEEabskeysecsize\textbf{\large\abstractname}\end{center}\vskip 0.5\baselineskip plus 0.1\baselineskip minus 0.1\baselineskip + \if@twocolumn\else\quotation\fi\itshape\@IEEEabskeysecsize% + \par\@IEEEgobbleleadPARNLSP} +\def\IEEEkeywords{\normalfont\vskip 1.5\baselineskip plus 0.25\baselineskip minus 0.25\baselineskip + \begin{center}\@IEEEabskeysecsize\textbf{\large\IEEEkeywordsname}\end{center}\vskip 0.5\baselineskip plus 0.1\baselineskip minus 0.1\baselineskip + \if@twocolumn\else\quotation\fi\itshape\@IEEEabskeysecsize% + \par\@IEEEgobbleleadPARNLSP} + \else% compsoc not conference +\def\abstract{\normalfont\@IEEEtweakunitybaselinestretch{1.15}\sffamily + \if@twocolumn + \@IEEEabskeysecsize\noindent\textbf{\abstractname}---\relax + \else + \begin{center}\vspace{-1.78ex}\@IEEEabskeysecsize\textbf{\abstractname}\end{center}\quotation\@IEEEabskeysecsize% + \fi\@IEEEgobbleleadPARNLSP} +\def\IEEEkeywords{\normalfont\@IEEEtweakunitybaselinestretch{1.15}\sffamily + \if@twocolumn + \@IEEEabskeysecsize\vskip 0.5\baselineskip plus 0.25\baselineskip minus 0.25\baselineskip\noindent + \textbf{\IEEEkeywordsname}---\relax + \else + \begin{center}\@IEEEabskeysecsize\textbf{\IEEEkeywordsname}\end{center}\quotation\@IEEEabskeysecsize% + \fi\@IEEEgobbleleadPARNLSP} + \fi +\fi + + + +% gobbles all leading \, \\ and \par, upon finding first token that +% is not a \ , \\ or a \par, it ceases and returns that token +% +% used to strip leading \, \\ and \par from the input +% so that such things in the beginning of an environment will not +% affect the 
formatting of the text
+\long\def\@IEEEgobbleleadPARNLSP#1{\let\@IEEEswallowthistoken=0%
+\let\@IEEEgobbleleadPARNLSPtoken#1%
+\let\@IEEEgobbleleadPARtoken=\par%
+\let\@IEEEgobbleleadNLtoken=\\%
+\let\@IEEEgobbleleadSPtoken=\ %
+\def\@IEEEgobbleleadSPMACRO{\ }%
+\ifx\@IEEEgobbleleadPARNLSPtoken\@IEEEgobbleleadPARtoken%
+\let\@IEEEswallowthistoken=1%
+\fi%
+\ifx\@IEEEgobbleleadPARNLSPtoken\@IEEEgobbleleadNLtoken%
+\let\@IEEEswallowthistoken=1%
+\fi%
+\ifx\@IEEEgobbleleadPARNLSPtoken\@IEEEgobbleleadSPtoken%
+\let\@IEEEswallowthistoken=1%
+\fi%
+% a control space will come in as a macro
+% when it is the last one on a line
+\ifx\@IEEEgobbleleadPARNLSPtoken\@IEEEgobbleleadSPMACRO%
+\let\@IEEEswallowthistoken=1%
+\fi%
+% if we have to swallow this token, do so and taste the next one
+% else spit it out and stop gobbling
+\ifx\@IEEEswallowthistoken 1\let\@IEEEnextgobbleleadPARNLSP=\@IEEEgobbleleadPARNLSP\else%
+\let\@IEEEnextgobbleleadPARNLSP=#1\fi%
+\@IEEEnextgobbleleadPARNLSP}%
+
+
+
+
+% TITLING OF SECTIONS
+\def\@IEEEsectpunct{:\ \,}   % Punctuation after run-in section heading (headings which are
+                             % part of the paragraphs), need a little bit more than a single space
+                             % spacing from section number to title
+% compsoc conferences use regular period/space punctuation
+\ifCLASSOPTIONcompsoc
+\ifCLASSOPTIONconference
+\def\@IEEEsectpunct{.\ }
+\fi\fi
+
+\def\@seccntformat#1{\hb@xt@ 1.4em{\csname the#1dis\endcsname\hss\relax}}
+\def\@seccntformatinl#1{\hb@xt@ 1.1em{\csname the#1dis\endcsname\hss\relax}}
+\def\@seccntformatch#1{\csname the#1dis\endcsname\hskip 1em\relax}
+
+\ifCLASSOPTIONcompsoc
+% compsoc journals need extra spacing
+\ifCLASSOPTIONconference\else
+\def\@seccntformat#1{\csname the#1dis\endcsname\hskip 1em\relax}
+\fi\fi
+
+%v1.7 put {} after #6 to allow for some types of user font control
+%and use \@@par rather than \par
+\def\@sect#1#2#3#4#5#6[#7]#8{%
+  \ifnum #2>\c@secnumdepth
+    \let\@svsec\@empty
+  \else
+    \refstepcounter{#1}%
+    % load section label and spacer into \@svsec
+    \ifnum #2=1
+      \protected@edef\@svsec{\@seccntformatch{#1}\relax}%
+    \else
+      \ifnum #2>2
+        \protected@edef\@svsec{\@seccntformatinl{#1}\relax}%
+      \else
+        \protected@edef\@svsec{\@seccntformat{#1}\relax}%
+      \fi
+    \fi
+  \fi%
+  \@tempskipa #5\relax
+  \ifdim \@tempskipa>\z@% tempskipa determines whether it is treated as a high
+    \begingroup #6{\relax% or low level heading
+    \noindent % subsections are NOT indented
+    % print top level headings. 
\@svsec is label, #8 is heading title + % IEEE does not block indent the section title text, it flows like normal + {\hskip #3\relax\@svsec}{\interlinepenalty \@M #8\@@par}}% + \endgroup + \addcontentsline{toc}{#1}{\ifnum #2>\c@secnumdepth\relax\else + \protect\numberline{\csname the#1\endcsname}\fi#7}% + \else % printout low level headings + % svsechd seems to swallow the trailing space, protect it with \mbox{} + % got rid of sectionmark stuff + \def\@svsechd{#6{\hskip #3\relax\@svsec #8\@IEEEsectpunct\mbox{}}% + \addcontentsline{toc}{#1}{\ifnum #2>\c@secnumdepth\relax\else + \protect\numberline{\csname the#1\endcsname}\fi#7}}% + \fi%skip down + \@xsect{#5}} + + +% section* handler +%v1.7 put {} after #4 to allow for some types of user font control +%and use \@@par rather than \par +\def\@ssect#1#2#3#4#5{\@tempskipa #3\relax + \ifdim \@tempskipa>\z@ + %\begingroup #4\@hangfrom{\hskip #1}{\interlinepenalty \@M #5\par}\endgroup + % IEEE does not block indent the section title text, it flows like normal + \begingroup \noindent #4{\relax{\hskip #1}{\interlinepenalty \@M #5\@@par}}\endgroup + % svsechd swallows the trailing space, protect it with \mbox{} + \else \def\@svsechd{#4{\hskip #1\relax #5\@IEEEsectpunct\mbox{}}}\fi + \@xsect{#3}} + + +%% SECTION heading spacing and font +%% +% arguments are: #1 - sectiontype name +% (for \@sect) #2 - section level +% #3 - section heading indent +% #4 - top separation (absolute value used, neg indicates not to indent main text) +% If negative, make stretch parts negative too! +% #5 - (absolute value used) positive: bottom separation after heading, +% negative: amount to indent main text after heading +% Both #4 and #5 negative means to indent main text and use negative top separation +% #6 - font control +% You've got to have \normalfont\normalsize in the font specs below to prevent +% trouble when you do something like: +% \section{Note}{\ttfamily TT-TEXT} is known to ... +% IEEE sometimes REALLY stretches the area before a section +% heading by up to about 0.5in. However, it may not be a good +% idea to let LaTeX have quite this much rubber. 
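+% As a sketch of how these arguments fit together, a user could tighten the
+% journal \section spacing from the document (between \makeatletter and
+% \makeatother) with something like:
+% \def\section{\@startsection{section}{1}{\z@}{2.0ex plus 1ex minus 1ex}%
+% {0.5ex plus 0.5ex minus 0ex}{\normalfont\normalsize\centering\scshape}}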
+\ifCLASSOPTIONconference% +% IEEE wants section heading spacing to decrease for conference mode +\def\section{\@startsection{section}{1}{\z@}{1.5ex plus 1.5ex minus 0.5ex}% +{1sp}{\normalfont\normalsize\centering\scshape}}% +\def\subsection{\@startsection{subsection}{2}{\z@}{1.5ex plus 1.5ex minus 0.5ex}% +{1sp}{\normalfont\normalsize\itshape}}% +\else % for journals +\def\section{\@startsection{section}{1}{\z@}{3.0ex plus 1.5ex minus 1.5ex}% V1.6 3.0ex from 3.5ex +{0.7ex plus 1ex minus 0ex}{\normalfont\normalsize\centering\scshape}}% +\def\subsection{\@startsection{subsection}{2}{\z@}{3.5ex plus 1.5ex minus 1.5ex}% +{0.7ex plus .5ex minus 0ex}{\normalfont\normalsize\itshape}}% +\fi + +% for both journals and conferences +% decided to put in a little rubber above the section, might help somebody +\def\subsubsection{\@startsection{subsubsection}{3}{\parindent}{0ex plus 0.1ex minus 0.1ex}% +{0ex}{\normalfont\normalsize\itshape}}% +\def\paragraph{\@startsection{paragraph}{4}{2\parindent}{0ex plus 0.1ex minus 0.1ex}% +{0ex}{\normalfont\normalsize\itshape}}% + + +% compsoc +\ifCLASSOPTIONcompsoc +\ifCLASSOPTIONconference +% compsoc conference +\def\section{\@startsection{section}{1}{\z@}{1\baselineskip plus 0.25\baselineskip minus 0.25\baselineskip}% +{1\baselineskip plus 0.25\baselineskip minus 0.25\baselineskip}{\normalfont\large\bfseries}}% +\def\subsection{\@startsection{subsection}{2}{\z@}{1\baselineskip plus 0.25\baselineskip minus 0.25\baselineskip}% +{1\baselineskip plus 0.25\baselineskip minus 0.25\baselineskip}{\normalfont\sublargesize\bfseries}}% +\def\subsubsection{\@startsection{subsubsection}{3}{\z@}{1\baselineskip plus 0.25\baselineskip minus 0.25\baselineskip}% +{0ex}{\normalfont\normalsize\bfseries}}% +\def\paragraph{\@startsection{paragraph}{4}{2\parindent}{0ex plus 0.1ex minus 0.1ex}% +{0ex}{\normalfont\normalsize}}% +\else% compsoc journals +% use negative top separation as compsoc journals do not indent paragraphs after section titles +\def\section{\@startsection{section}{1}{\z@}{-3ex plus -2ex minus -1.5ex}% +{0.7ex plus 1ex minus 0ex}{\normalfont\large\sffamily\bfseries\scshape}}% +% Note that subsection and smaller may not be correct for the Computer Society, +% I have to look up an example. +\def\subsection{\@startsection{subsection}{2}{\z@}{-3.5ex plus -1.5ex minus -1.5ex}% +{0.7ex plus .5ex minus 0ex}{\normalfont\normalsize\sffamily\bfseries}}% +\def\subsubsection{\@startsection{subsubsection}{3}{\z@}{-2.5ex plus -1ex minus -1ex}% +{0.5ex plus 0.5ex minus 0ex}{\normalfont\normalsize\sffamily\itshape}}% +\def\paragraph{\@startsection{paragraph}{4}{2\parindent}{-0ex plus -0.1ex minus -0.1ex}% +{0ex}{\normalfont\normalsize}}% +\fi\fi + + + + +%% ENVIRONMENTS +% "box" symbols at end of proofs +\def\IEEEQEDclosed{\mbox{\rule[0pt]{1.3ex}{1.3ex}}} % for a filled box +% V1.6 some journals use an open box instead that will just fit around a closed one +\def\IEEEQEDopen{{\setlength{\fboxsep}{0pt}\setlength{\fboxrule}{0.2pt}\fbox{\rule[0pt]{0pt}{1.3ex}\rule[0pt]{1.3ex}{0pt}}}} +\ifCLASSOPTIONcompsoc +\def\IEEEQED{\IEEEQEDopen} % default to open for compsoc +\else +\def\IEEEQED{\IEEEQEDclosed} % otherwise default to closed +\fi + +% v1.7 name change to avoid namespace collision with amsthm. Also add support +% for an optional argument. 
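+% Typical document usage:
+% \begin{IEEEproof}[Proof of the First Zonklar Equation]
+% ... proof text ...
+% \end{IEEEproof}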
+\def\IEEEproof{\@ifnextchar[{\@IEEEproof}{\@IEEEproof[\IEEEproofname]}} +\def\@IEEEproof[#1]{\par\noindent\hspace{2em}{\itshape #1: }} +\def\endIEEEproof{\hspace*{\fill}~\IEEEQED\par} + + +%\itemindent is set to \z@ by list, so define new temporary variable +\newdimen\@IEEEtmpitemindent +\def\@begintheorem#1#2{\@IEEEtmpitemindent\itemindent\topsep 0pt\rmfamily\trivlist% + \item[\hskip \labelsep{\indent\itshape #1\ #2:}]\itemindent\@IEEEtmpitemindent} +\def\@opargbegintheorem#1#2#3{\@IEEEtmpitemindent\itemindent\topsep 0pt\rmfamily \trivlist% +% V1.6 IEEE is back to using () around theorem names which are also in italics +% Thanks to Christian Peel for reporting this. + \item[\hskip\labelsep{\indent\itshape #1\ #2\ (#3):}]\itemindent\@IEEEtmpitemindent} +% V1.7 remove bogus \unskip that caused equations in theorems to collide with +% lines below. +\def\@endtheorem{\endtrivlist} + +% V1.6 +% display command for the section the theorem is in - so that \thesection +% is not used as this will be in Roman numerals when we want arabic. +% LaTeX2e uses \def\@thmcounter#1{\noexpand\arabic{#1}} for the theorem number +% (second part) display and \def\@thmcountersep{.} as a separator. +% V1.7 intercept calls to the section counter and reroute to \@IEEEthmcounterinsection +% to allow \appendix(ices} to override as needed. +% +% special handler for sections, allows appendix(ices) to override +\gdef\@IEEEthmcounterinsection#1{\arabic{#1}} +% string macro +\edef\@IEEEstringsection{section} + +% redefine the #1#2[#3] form of newtheorem to use a hook to \@IEEEthmcounterinsection +% if section in_counter is used +\def\@xnthm#1#2[#3]{% + \expandafter\@ifdefinable\csname #1\endcsname + {\@definecounter{#1}\@newctr{#1}[#3]% + \edef\@IEEEstringtmp{#3} + \ifx\@IEEEstringtmp\@IEEEstringsection + \expandafter\xdef\csname the#1\endcsname{% + \noexpand\@IEEEthmcounterinsection{#3}\@thmcountersep + \@thmcounter{#1}}% + \else + \expandafter\xdef\csname the#1\endcsname{% + \expandafter\noexpand\csname the#3\endcsname \@thmcountersep + \@thmcounter{#1}}% + \fi + \global\@namedef{#1}{\@thm{#1}{#2}}% + \global\@namedef{end#1}{\@endtheorem}}} + + + +%% SET UP THE DEFAULT PAGESTYLE +\ps@headings +\pagenumbering{arabic} + +% normally the page counter starts at 1 +\setcounter{page}{1} +% however, for peerreview the cover sheet is page 0 or page -1 +% (for duplex printing) +\ifCLASSOPTIONpeerreview + \if@twoside + \setcounter{page}{-1} + \else + \setcounter{page}{0} + \fi +\fi + +% standard book class behavior - let bottom line float up and down as +% needed when single sided +\ifCLASSOPTIONtwoside\else\raggedbottom\fi +% if two column - turn on twocolumn, allow word spacings to stretch more and +% enforce a rigid position for the last lines +\ifCLASSOPTIONtwocolumn +% the peer review option delays invoking twocolumn + \ifCLASSOPTIONpeerreview\else + \twocolumn + \fi +\sloppy +\flushbottom +\fi + + + + +% \APPENDIX and \APPENDICES definitions + +% This is the \@ifmtarg command from the LaTeX ifmtarg package +% by Peter Wilson (CUA) and Donald Arseneau +% \@ifmtarg is used to determine if an argument to a command +% is present or not. +% For instance: +% \@ifmtarg{#1}{\typeout{empty}}{\typeout{has something}} +% \@ifmtarg is used with our redefined \section command if +% \appendices is invoked. 
+% The command \section will behave slightly differently depending +% on whether the user specifies a title: +% \section{My appendix title} +% or not: +% \section{} +% This way, we can eliminate the blank lines where the title +% would be, and the unneeded : after Appendix in the table of +% contents +\begingroup +\catcode`\Q=3 +\long\gdef\@ifmtarg#1{\@xifmtarg#1QQ\@secondoftwo\@firstoftwo\@nil} +\long\gdef\@xifmtarg#1#2Q#3#4#5\@nil{#4} +\endgroup +% end of \@ifmtarg defs + + +% V1.7 +% command that allows the one time saving of the original definition +% of section to \@IEEEappendixsavesection for \appendix or \appendices +% we don't save \section here as it may be redefined later by other +% packages (hyperref.sty, etc.) +\def\@IEEEsaveoriginalsectiononce{\let\@IEEEappendixsavesection\section +\let\@IEEEsaveoriginalsectiononce\relax} + +% neat trick to grab and process the argument from \section{argument} +% we process differently if the user invoked \section{} with no +% argument (title) +% note we reroute the call to the old \section* +\def\@IEEEprocessthesectionargument#1{% +\@ifmtarg{#1}{% +\@IEEEappendixsavesection*{\appendixname~\thesectiondis}% +\addcontentsline{toc}{section}{\appendixname~\thesection}}{% +\@IEEEappendixsavesection*{\appendixname~\thesectiondis \\* #1}% +\addcontentsline{toc}{section}{\appendixname~\thesection: #1}}} + +% we use this if the user calls \section{} after +% \appendix-- which has no meaning. So, we ignore the +% command and its argument. Then, warn the user. +\def\@IEEEdestroythesectionargument#1{\typeout{** WARNING: Ignoring useless +\protect\section\space in Appendix (line \the\inputlineno).}} + + +% remember \thesection forms will be displayed in \ref calls +% and in the Table of Contents. +% The \sectiondis form is used in the actual heading itself + +% appendix command for one single appendix +% normally has no heading. 
However, if you want a +% heading, you can do so via the optional argument: +% \appendix[Optional Heading] +\def\appendix{\relax} +\renewcommand{\appendix}[1][]{\@IEEEsaveoriginalsectiononce\par + % v1.6 keep hyperref's identifiers unique + \gdef\theHsection{Appendix.A}% + % v1.6 adjust hyperref's string name for the section + \xdef\Hy@chapapp{appendix}% + \setcounter{section}{0}% + \setcounter{subsection}{0}% + \setcounter{subsubsection}{0}% + \setcounter{paragraph}{0}% + \gdef\thesection{A}% + \gdef\thesectiondis{}% + \gdef\thesubsection{\Alph{subsection}}% + \gdef\@IEEEthmcounterinsection##1{A} + \refstepcounter{section}% update the \ref counter + \@ifmtarg{#1}{\@IEEEappendixsavesection*{\appendixname}% + \addcontentsline{toc}{section}{\appendixname}}{% + \@IEEEappendixsavesection*{\appendixname~\\* #1}% + \addcontentsline{toc}{section}{\appendixname: #1}}% + % redefine \section command for appendix + % leave \section* as is + \def\section{\@ifstar{\@IEEEappendixsavesection*}{% + \@IEEEdestroythesectionargument}}% throw out the argument + % of the normal form +} + + + +% appendices command for multiple appendices +% user then calls \section with an argument (possibly empty) to +% declare the individual appendices +\def\appendices{\@IEEEsaveoriginalsectiononce\par + % v1.6 keep hyperref's identifiers unique + \gdef\theHsection{Appendix.\Alph{section}}% + % v1.6 adjust hyperref's string name for the section + \xdef\Hy@chapapp{appendix}% + \setcounter{section}{-1}% we want \refstepcounter to use section 0 + \setcounter{subsection}{0}% + \setcounter{subsubsection}{0}% + \setcounter{paragraph}{0}% + \ifCLASSOPTIONromanappendices% + \gdef\thesection{\Roman{section}}% + \gdef\thesectiondis{\Roman{section}}% + \@IEEEcompsocconfonly{\gdef\thesectiondis{\Roman{section}.}}% + \gdef\@IEEEthmcounterinsection##1{A\arabic{##1}} + \else% + \gdef\thesection{\Alph{section}}% + \gdef\thesectiondis{\Alph{section}}% + \@IEEEcompsocconfonly{\gdef\thesectiondis{\Alph{section}.}}% + \gdef\@IEEEthmcounterinsection##1{\Alph{##1}} + \fi% + \refstepcounter{section}% update the \ref counter + \setcounter{section}{0}% NEXT \section will be the FIRST appendix + % redefine \section command for appendices + % leave \section* as is + \def\section{\@ifstar{\@IEEEappendixsavesection*}{% process the *-form + \refstepcounter{section}% or is a new section so, + \@IEEEprocessthesectionargument}}% process the argument + % of the normal form +} + + + +% \IEEEPARstart +% Definition for the big two line drop cap letter at the beginning of the +% first paragraph of journal papers. The first argument is the first letter +% of the first word, the second argument is the remaining letters of the +% first word which will be rendered in upper case. +% In V1.6 this has been completely rewritten to: +% +% 1. no longer have problems when the user begins an environment +% within the paragraph that uses \IEEEPARstart. +% 2. auto-detect and use the current font family +% 3. revise handling of the space at the end of the first word so that +% interword glue will now work as normal. +% 4. produce correctly aligned edges for the (two) indented lines. +% +% We generalize things via control macros - playing with these is fun too. +% +% V1.7 added more control macros to make it easy for IEEEtrantools.sty users +% to change the font style. 
+%
+% the number of lines that are indented to clear it
+% may need to increase if using descenders
+\def\@IEEEPARstartDROPLINES{2}
+% minimum number of lines left on a page to allow a \@IEEEPARstart
+% Does not take into consideration rubber shrink, so it tends to
+% be overly cautious
+\def\@IEEEPARstartMINPAGELINES{2}
+% V1.7 the height of the drop cap is adjusted to match the height of this text
+% in the current font (when \IEEEPARstart is called).
+\def\@IEEEPARstartHEIGHTTEXT{T}
+% the depth the letter is lowered below the baseline
+% the height (and size) of the letter is determined by the sum
+% of this value and the height of the \@IEEEPARstartHEIGHTTEXT in the current
+% font. It is a good idea to set this value in terms of the baselineskip
+% so that it can respond to changes therein.
+\def\@IEEEPARstartDROPDEPTH{1.1\baselineskip}
+% V1.7 the font the drop cap will be rendered in,
+% can take zero or one argument.
+\def\@IEEEPARstartFONTSTYLE{\bfseries}
+% V1.7 any additional, non-font related commands needed to modify
+% the drop cap letter, can take zero or one argument.
+\def\@IEEEPARstartCAPSTYLE{\MakeUppercase}
+% V1.7 the font that will be used to render the rest of the word,
+% can take zero or one argument.
+\def\@IEEEPARstartWORDFONTSTYLE{\relax}
+% V1.7 any additional, non-font related commands needed to modify
+% the rest of the word, can take zero or one argument.
+\def\@IEEEPARstartWORDCAPSTYLE{\MakeUppercase}
+% This is the horizontal separation distance from the drop letter to the main text.
+% Lengths that depend on the font (e.g., ex, em, etc.) will be referenced
+% to the font that is active when \IEEEPARstart is called.
+\def\@IEEEPARstartSEP{0.15em}
+% V1.7 horizontal offset applied to the left of the drop cap.
+\def\@IEEEPARstartHOFFSET{0em}
+% V1.7 Italic correction command applied at the end of the drop cap.
+\def\@IEEEPARstartITLCORRECT{\/}
+
+% V1.7 compsoc uses nonbold drop cap and small caps word style
+\ifCLASSOPTIONcompsoc
+\def\@IEEEPARstartFONTSTYLE{\mdseries}
+\def\@IEEEPARstartWORDFONTSTYLE{\scshape}
+\def\@IEEEPARstartWORDCAPSTYLE{\relax}
+\fi
+
+% definition of \IEEEPARstart
+% THIS IS A CONTROLLED SPACING AREA, DO NOT ALLOW SPACES WITHIN THESE LINES
+%
+% The token \@IEEEPARstartfont will be globally defined after the first use
+% of \IEEEPARstart and will be a font command which creates the big letter
+% The first argument is the first letter of the first word and the second
+% argument is the rest of the first word(s). 
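+% e.g., \IEEEPARstart{T}{his} begins the paragraph with a two-line drop cap
+% "T" followed by "HIS" (capitalization governed by \@IEEEPARstartWORDCAPSTYLE).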
+\def\IEEEPARstart#1#2{\par{%
+% if this page does not have enough space, break it and let's start
+% on a new one
+\@IEEEtranneedspace{\@IEEEPARstartMINPAGELINES\baselineskip}{\relax}%
+% V1.7 move this up here in case user uses \textbf for \@IEEEPARstartFONTSTYLE
+% which uses command \leavevmode which causes an unwanted \indent to be issued
+\noindent
+% calculate the desired height of the big letter
+% it extends from the top of \@IEEEPARstartHEIGHTTEXT in the current font
+% down to \@IEEEPARstartDROPDEPTH below the current baseline
+\settoheight{\@IEEEtrantmpdimenA}{\@IEEEPARstartHEIGHTTEXT}%
+\addtolength{\@IEEEtrantmpdimenA}{\@IEEEPARstartDROPDEPTH}%
+% extract the name of the current font in bold
+% and place it in \@IEEEPARstartFONTNAME
+\def\@IEEEPARstartGETFIRSTWORD##1 ##2\relax{##1}%
+{\@IEEEPARstartFONTSTYLE{\selectfont\edef\@IEEEPARstartFONTNAMESPACE{\fontname\font\space}%
+\xdef\@IEEEPARstartFONTNAME{\expandafter\@IEEEPARstartGETFIRSTWORD\@IEEEPARstartFONTNAMESPACE\relax}}}%
+% define a font based on this name with a point size equal to the desired
+% height of the drop letter
+\font\@IEEEPARstartsubfont\@IEEEPARstartFONTNAME\space at \@IEEEtrantmpdimenA\relax%
+% save this value as a counter (integer) value (sp points)
+\@IEEEtrantmpcountA=\@IEEEtrantmpdimenA%
+% now get the height of the actual letter produced by this font size
+\settoheight{\@IEEEtrantmpdimenB}{\@IEEEPARstartsubfont\@IEEEPARstartCAPSTYLE{#1}}%
+% If something bogus happens like the first argument is empty or the
+% current font is strange, do not allow a zero height.
+\ifdim\@IEEEtrantmpdimenB=0pt\relax%
+\typeout{** WARNING: IEEEPARstart drop letter has zero height! (line \the\inputlineno)}%
+\typeout{ Forcing the drop letter font size to 10pt.}%
+\@IEEEtrantmpdimenB=10pt%
+\fi%
+% and store it as a counter
+\@IEEEtrantmpcountB=\@IEEEtrantmpdimenB%
+% Since a font size doesn't exactly correspond to the height of the capital
+% letters in that font, the actual height of the letter, \@IEEEtrantmpcountB,
+% will be less than that desired, \@IEEEtrantmpcountA
+% we need to raise the font size, \@IEEEtrantmpdimenA
+% by \@IEEEtrantmpcountA / \@IEEEtrantmpcountB
+% But, TeX doesn't have floating point division, so we have to use integer
+% division. Hence the use of the counters.
+% We need to reduce the denominator so that the loss of the remainder will
+% have minimal effect on the accuracy of the result
+\divide\@IEEEtrantmpcountB by 200%
+\divide\@IEEEtrantmpcountA by \@IEEEtrantmpcountB%
+% Then reequalize things when we use TeX's ability to multiply by
+% floating point values
+\@IEEEtrantmpdimenB=0.005\@IEEEtrantmpdimenA%
+\multiply\@IEEEtrantmpdimenB by \@IEEEtrantmpcountA%
+% \@IEEEPARstartfont is globally set to the calculated font of the big letter
+% We need to carry this out of the local calculation area to create the
+% big letter.
+\global\font\@IEEEPARstartfont\@IEEEPARstartFONTNAME\space at \@IEEEtrantmpdimenB% +% Now set \@IEEEtrantmpdimenA to the width of the big letter +% We need to carry this out of the local calculation area to set the +% hanging indent +\settowidth{\global\@IEEEtrantmpdimenA}{\@IEEEPARstartfont +\@IEEEPARstartCAPSTYLE{#1\@IEEEPARstartITLCORRECT}}}% +% end of the isolated calculation environment +% add in the extra clearance we want +\advance\@IEEEtrantmpdimenA by \@IEEEPARstartSEP\relax% +% add in the optional offset +\advance\@IEEEtrantmpdimenA by \@IEEEPARstartHOFFSET\relax% +% V1.7 don't allow negative offsets to produce negative hanging indents +\@IEEEtrantmpdimenB\@IEEEtrantmpdimenA +\ifnum\@IEEEtrantmpdimenB < 0 \@IEEEtrantmpdimenB 0pt\fi +% \@IEEEtrantmpdimenA has the width of the big letter plus the +% separation space and \@IEEEPARstartfont is the font we need to use +% Now, we make the letter and issue the hanging indent command +% The letter is placed in a box of zero width and height so that other +% text won't be displaced by it. +\hangindent\@IEEEtrantmpdimenB\hangafter=-\@IEEEPARstartDROPLINES% +\makebox[0pt][l]{\hspace{-\@IEEEtrantmpdimenA}% +\raisebox{-\@IEEEPARstartDROPDEPTH}[0pt][0pt]{\hspace{\@IEEEPARstartHOFFSET}% +\@IEEEPARstartfont\@IEEEPARstartCAPSTYLE{#1\@IEEEPARstartITLCORRECT}% +\hspace{\@IEEEPARstartSEP}}}% +{\@IEEEPARstartWORDFONTSTYLE{\@IEEEPARstartWORDCAPSTYLE{\selectfont#2}}}} + + + + + + +% determines if the space remaining on a given page is equal to or greater +% than the specified space of argument one +% if not, execute argument two (only if the remaining space is greater than zero) +% and issue a \newpage +% +% example: \@IEEEtranneedspace{2in}{\vfill} +% +% Does not take into consideration rubber shrinkage, so it tends to +% be overly cautious +% Based on an example posted by Donald Arseneau +% Note this macro uses \@IEEEtrantmpdimenB internally for calculations, +% so DO NOT PASS \@IEEEtrantmpdimenB to this routine +% if you need a dimen register, import with \@IEEEtrantmpdimenA instead +\def\@IEEEtranneedspace#1#2{\penalty-100\begingroup%shield temp variable +\@IEEEtrantmpdimenB\pagegoal\advance\@IEEEtrantmpdimenB-\pagetotal% space left +\ifdim #1>\@IEEEtrantmpdimenB\relax% not enough space left +\ifdim\@IEEEtrantmpdimenB>\z@\relax #2\fi% +\newpage% +\fi\endgroup} + + + +% IEEEbiography ENVIRONMENT +% Allows user to enter biography leaving place for picture (adapts to font size) +% As of V1.5, a new optional argument allows you to have a real graphic! +% V1.5 and later also fixes the "colliding biographies" which could happen when a +% biography's text was shorter than the space for the photo. 
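+%
+% Illustrative usage (the graphics file name is a placeholder):
+%   \begin{IEEEbiography}[{\includegraphics[width=1in,height=1.25in,clip,keepaspectratio]{photo}}]{Jane Doe}
+%   Biography text here.
+%   \end{IEEEbiography}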
+% MDS 7/2001
+% V1.6 prevent multiple biographies from making multiple TOC entries
+\newif\if@IEEEbiographyTOCentrynotmade
+\global\@IEEEbiographyTOCentrynotmadetrue
+
+% biography counter so hyperref can jump directly to the biographies
+% and not just the previous section
+\newcounter{IEEEbiography}
+\setcounter{IEEEbiography}{0}
+
+% photo area size
+\def\@IEEEBIOphotowidth{1.0in}    % width of the biography photo area
+\def\@IEEEBIOphotodepth{1.25in}   % depth (height) of the biography photo area
+% area cleared for photo
+\def\@IEEEBIOhangwidth{1.14in}    % width cleared for the biography photo area
+\def\@IEEEBIOhangdepth{1.25in}    % depth cleared for the biography photo area
+                                  % actual depth will be a multiple of
+                                  % \baselineskip, rounded up
+\def\@IEEEBIOskipN{4\baselineskip}% nominal value of the vskip above the biography
+
+\newenvironment{IEEEbiography}[2][]{\normalfont\@IEEEcompsoconly{\sffamily}\footnotesize%
+\unitlength 1in\parskip=0pt\par\parindent 1em\interlinepenalty500%
+% we need enough space to support the hanging indent
+% the nominal value of the spacer
+% and one extra line for good measure
+\@IEEEtrantmpdimenA=\@IEEEBIOhangdepth%
+\advance\@IEEEtrantmpdimenA by \@IEEEBIOskipN%
+\advance\@IEEEtrantmpdimenA by 1\baselineskip%
+% if this page does not have enough space, break it and let's start
+% with a new one
+\@IEEEtranneedspace{\@IEEEtrantmpdimenA}{\relax}%
+% nominal spacer can stretch, not shrink; use 1fil so the user can out-stretch it with \vfill
+\vskip \@IEEEBIOskipN plus 1fil minus 0\baselineskip%
+% the default box for where the photo goes
+\def\@IEEEtempbiographybox{{\setlength{\fboxsep}{0pt}\framebox{%
+\begin{minipage}[b][\@IEEEBIOphotodepth][c]{\@IEEEBIOphotowidth}\centering PLACE\\ PHOTO\\ HERE \end{minipage}}}}%
+%
+% detect if the optional argument was supplied; this requires the
+% \@ifmtarg command as defined in the appendix section above
+% and if so, override the default box with what they want
+\@ifmtarg{#1}{\relax}{\def\@IEEEtempbiographybox{\mbox{\begin{minipage}[b][\@IEEEBIOphotodepth][c]{\@IEEEBIOphotowidth}%
+\centering%
+#1%
+\end{minipage}}}}% end if optional argument supplied
+% Make an entry into the table of contents only if we have not done so before
+\if@IEEEbiographyTOCentrynotmade%
+% link labels to the biography counter so hyperref will jump
+% to the biography, not the previous section
+\setcounter{IEEEbiography}{-1}%
+\refstepcounter{IEEEbiography}%
+\addcontentsline{toc}{section}{Biographies}%
+\global\@IEEEbiographyTOCentrynotmadefalse%
+\fi%
+% one more biography
+\refstepcounter{IEEEbiography}%
+% Make an entry for this name into the table of contents
+\addcontentsline{toc}{subsection}{#2}%
+% V1.6 properly handle if a new paragraph should occur while the
+% hanging indent is still active. Do this by redefining \par so
+% that it will not start a new paragraph. (But it will appear to the
+% user as if it did.) Also, strip any leading pars, newlines, or spaces.
+\let\@IEEEBIOORGparCMD=\par% save the original \par command +\edef\par{\hfil\break\indent}% the new \par will not be a "real" \par +\settoheight{\@IEEEtrantmpdimenA}{\@IEEEtempbiographybox}% get height of biography box +\@IEEEtrantmpdimenB=\@IEEEBIOhangdepth% +\@IEEEtrantmpcountA=\@IEEEtrantmpdimenB% countA has the hang depth +\divide\@IEEEtrantmpcountA by \baselineskip% calculates lines needed to produce the hang depth +\advance\@IEEEtrantmpcountA by 1% ensure we overestimate +% set the hanging indent +\hangindent\@IEEEBIOhangwidth% +\hangafter-\@IEEEtrantmpcountA% +% reference the top of the photo area to the top of a capital T +\settoheight{\@IEEEtrantmpdimenB}{\mbox{T}}% +% set the photo box, give it zero width and height so as not to disturb anything +\noindent\makebox[0pt][l]{\hspace{-\@IEEEBIOhangwidth}\raisebox{\@IEEEtrantmpdimenB}[0pt][0pt]{% +\raisebox{-\@IEEEBIOphotodepth}[0pt][0pt]{\@IEEEtempbiographybox}}}% +% now place the author name and begin the bio text +\noindent\textbf{#2\ }\@IEEEgobbleleadPARNLSP}{\relax\let\par=\@IEEEBIOORGparCMD\par% +% 7/2001 V1.5 detect when the biography text is shorter than the photo area +% and pad the unused area - preventing a collision from the next biography entry +% MDS +\ifnum \prevgraf <\@IEEEtrantmpcountA\relax% detect when the biography text is shorter than the photo + \advance\@IEEEtrantmpcountA by -\prevgraf% calculate how many lines we need to pad + \advance\@IEEEtrantmpcountA by -1\relax% we compensate for the fact that we indented an extra line + \@IEEEtrantmpdimenA=\baselineskip% calculate the length of the padding + \multiply\@IEEEtrantmpdimenA by \@IEEEtrantmpcountA% + \noindent\rule{0pt}{\@IEEEtrantmpdimenA}% insert an invisible support strut +\fi% +\par\normalfont} + + + +% V1.6 +% added biography without a photo environment +\newenvironment{IEEEbiographynophoto}[1]{% +% Make an entry into the table of contents only if we have not done so before +\if@IEEEbiographyTOCentrynotmade% +% link labels to the biography counter so hyperref will jump +% to the biography, not the previous section +\setcounter{IEEEbiography}{-1}% +\refstepcounter{IEEEbiography}% +\addcontentsline{toc}{section}{Biographies}% +\global\@IEEEbiographyTOCentrynotmadefalse% +\fi% +% one more biography +\refstepcounter{IEEEbiography}% +% Make an entry for this name into the table of contents +\addcontentsline{toc}{subsection}{#1}% +\normalfont\@IEEEcompsoconly{\sffamily}\footnotesize\interlinepenalty500% +\vskip 4\baselineskip plus 1fil minus 0\baselineskip% +\parskip=0pt\par% +\noindent\textbf{#1\ }\@IEEEgobbleleadPARNLSP}{\relax\par\normalfont} + + +% provide the user with some old font commands +% got this from article.cls +\DeclareOldFontCommand{\rm}{\normalfont\rmfamily}{\mathrm} +\DeclareOldFontCommand{\sf}{\normalfont\sffamily}{\mathsf} +\DeclareOldFontCommand{\tt}{\normalfont\ttfamily}{\mathtt} +\DeclareOldFontCommand{\bf}{\normalfont\bfseries}{\mathbf} +\DeclareOldFontCommand{\it}{\normalfont\itshape}{\mathit} +\DeclareOldFontCommand{\sl}{\normalfont\slshape}{\@nomath\sl} +\DeclareOldFontCommand{\sc}{\normalfont\scshape}{\@nomath\sc} +\DeclareRobustCommand*\cal{\@fontswitch\relax\mathcal} +\DeclareRobustCommand*\mit{\@fontswitch\relax\mathnormal} + + +% SPECIAL PAPER NOTICE COMMANDS +% +% holds the special notice text +\def\@IEEEspecialpapernotice{\relax} + +% for special papers, like invited papers, the user can do: +% \IEEEspecialpapernotice{(Invited Paper)} before \maketitle +\def\IEEEspecialpapernotice#1{\ifCLASSOPTIONconference% 
+\def\@IEEEspecialpapernotice{{\Large#1\vspace*{1em}}}%
+\else%
+\def\@IEEEspecialpapernotice{{\\*[1.5ex]\sublargesize\textit{#1}}\vspace*{-2ex}}%
+\fi}
+
+
+
+
+% PUBLISHER ID COMMANDS
+% to insert a publisher's ID footer
+% V1.6 \IEEEpubid has been changed so that the change in page size and style
+% occurs in \maketitle. \IEEEpubid must now be issued prior to \maketitle
+% use \IEEEpubidadjcol as before - in the second column of the title page
+% These changes allow \maketitle to take the reduced page height into
+% consideration when dynamically setting the space between the author
+% names and the maintext.
+%
+% the amount the main text is pulled up to make room for the
+% publisher's ID footer
+% IEEE uses about 1.3\baselineskip for journals,
+% dynamic title spacing will clean up the fraction
+\def\@IEEEpubidpullup{1.3\baselineskip}
+\ifCLASSOPTIONtechnote
+% for technotes it must be an integer multiple of \baselineskip as there can be no
+% dynamic title spacing for two column mode technotes (the title is in the
+% first column) and we should maintain an integer number of lines in the
+% second column
+% There are some examples (such as older issues of "Transactions on
+% Information Theory") in which IEEE really pulls the text off the ID for
+% technotes - about 0.55in (or 4\baselineskip). We'll use 2\baselineskip
+% and call it even.
+\def\@IEEEpubidpullup{2\baselineskip}
+\fi
+
+% V1.7 compsoc does not use a pullup
+\ifCLASSOPTIONcompsoc
+\def\@IEEEpubidpullup{0pt}
+\fi
+
+% holds the ID text
+\def\@IEEEpubid{\relax}
+
+% flag so \maketitle can tell if \IEEEpubid was called
+\newif\if@IEEEusingpubid
+\global\@IEEEusingpubidfalse
+% issue this command in the page to have the ID at the bottom
+% V1.6 use before \maketitle
+\def\IEEEpubid#1{\def\@IEEEpubid{#1}\global\@IEEEusingpubidtrue}
+
+
+% command which will pull up (shorten) the column it is executed in
+% to make room for the publisher ID. Place in the second column of
+% the title page when using \IEEEpubid
+% Is smart enough not to do anything when in single column text or
+% if the user hasn't called \IEEEpubid
+% currently needed for the second column of a page with the
+% publisher ID. If not needed in future releases, please provide this
+% command and define it as \relax for backward compatibility
+% v1.6b do not allow command to operate if the peer review option has been
+% selected because \IEEEpubidadjcol will not be on the cover page.
+% V1.7 do nothing if compsoc
+\def\IEEEpubidadjcol{\ifCLASSOPTIONcompsoc\else\ifCLASSOPTIONpeerreview\else
+\if@twocolumn\if@IEEEusingpubid\enlargethispage{-\@IEEEpubidpullup}\fi\fi\fi\fi}
+
+% Special thanks to Peter Wilson, Daniel Luecking, and the other
+% gurus at comp.text.tex, for helping me to understand how best to
+% implement the IEEEpubid command in LaTeX.
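+%
+% Illustrative usage (the ID string is a placeholder):
+%   \IEEEpubid{0000--0000/00\$00.00~\copyright~20XX IEEE}
+% issued before \maketitle, with \IEEEpubidadjcol placed in the second
+% column of the title page.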
+ + + +%% Lockout some commands under various conditions + +% general purpose bit bucket +\newsavebox{\@IEEEtranrubishbin} + +% flags to prevent multiple warning messages +\newif\if@IEEEWARNthanks +\newif\if@IEEEWARNIEEEPARstart +\newif\if@IEEEWARNIEEEbiography +\newif\if@IEEEWARNIEEEbiographynophoto +\newif\if@IEEEWARNIEEEpubid +\newif\if@IEEEWARNIEEEpubidadjcol +\newif\if@IEEEWARNIEEEmembership +\newif\if@IEEEWARNIEEEaftertitletext +\@IEEEWARNthankstrue +\@IEEEWARNIEEEPARstarttrue +\@IEEEWARNIEEEbiographytrue +\@IEEEWARNIEEEbiographynophototrue +\@IEEEWARNIEEEpubidtrue +\@IEEEWARNIEEEpubidadjcoltrue +\@IEEEWARNIEEEmembershiptrue +\@IEEEWARNIEEEaftertitletexttrue + + +%% Lockout some commands when in various modes, but allow them to be restored if needed +%% +% save commands which might be locked out +% so that the user can later restore them if needed +\let\@IEEESAVECMDthanks\thanks +\let\@IEEESAVECMDIEEEPARstart\IEEEPARstart +\let\@IEEESAVECMDIEEEbiography\IEEEbiography +\let\@IEEESAVECMDendIEEEbiography\endIEEEbiography +\let\@IEEESAVECMDIEEEbiographynophoto\IEEEbiographynophoto +\let\@IEEESAVECMDendIEEEbiographynophoto\endIEEEbiographynophoto +\let\@IEEESAVECMDIEEEpubid\IEEEpubid +\let\@IEEESAVECMDIEEEpubidadjcol\IEEEpubidadjcol +\let\@IEEESAVECMDIEEEmembership\IEEEmembership +\let\@IEEESAVECMDIEEEaftertitletext\IEEEaftertitletext + + +% disable \IEEEPARstart when in draft mode +% This may have originally been done because the pre-V1.6 drop letter +% algorithm had problems with a non-unity baselinestretch +% At any rate, it seems too formal to have a drop letter in a draft +% paper. +\ifCLASSOPTIONdraftcls +\def\IEEEPARstart#1#2{#1#2\if@IEEEWARNIEEEPARstart\typeout{** ATTENTION: \noexpand\IEEEPARstart + is disabled in draft mode (line \the\inputlineno).}\fi\global\@IEEEWARNIEEEPARstartfalse} +\fi +% and for technotes +\ifCLASSOPTIONtechnote +\def\IEEEPARstart#1#2{#1#2\if@IEEEWARNIEEEPARstart\typeout{** WARNING: \noexpand\IEEEPARstart + is locked out for technotes (line \the\inputlineno).}\fi\global\@IEEEWARNIEEEPARstartfalse} +\fi + + +% lockout unneeded commands when in conference mode +\ifCLASSOPTIONconference +% when locked out, \thanks, \IEEEbiography, \IEEEbiographynophoto, \IEEEpubid, +% \IEEEmembership and \IEEEaftertitletext will all swallow their given text. +% \IEEEPARstart will output a normal character instead +% warn the user about these commands only once to prevent the console screen +% from filling up with redundant messages +\def\thanks#1{\if@IEEEWARNthanks\typeout{** WARNING: \noexpand\thanks + is locked out when in conference mode (line \the\inputlineno).}\fi\global\@IEEEWARNthanksfalse} +\def\IEEEPARstart#1#2{#1#2\if@IEEEWARNIEEEPARstart\typeout{** WARNING: \noexpand\IEEEPARstart + is locked out when in conference mode (line \the\inputlineno).}\fi\global\@IEEEWARNIEEEPARstartfalse} + + +% LaTeX treats environments and commands with optional arguments differently. +% the actual ("internal") command is stored as \\commandname +% (accessed via \csname\string\commandname\endcsname ) +% the "external" command \commandname is a macro with code to determine +% whether or not the optional argument is presented and to provide the +% default if it is absent. So, in order to save and restore such a command +% we would have to save and restore \\commandname as well. But, if LaTeX +% ever changes the way it names the internal names, the trick would break. +% Instead let us just define a new environment so that the internal +% name can be left undisturbed. 
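+% (Concretely, for an environment with an optional argument such as
+% IEEEbiography, that internal form would be reached as
+% \csname\string\IEEEbiography\endcsname -- exactly the fragile naming
+% detail we avoid relying on here.)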
+\newenvironment{@IEEEbogusbiography}[2][]{\if@IEEEWARNIEEEbiography\typeout{** WARNING: \noexpand\IEEEbiography + is locked out when in conference mode (line \the\inputlineno).}\fi\global\@IEEEWARNIEEEbiographyfalse% +\setbox\@IEEEtranrubishbin\vbox\bgroup}{\egroup\relax} +% and make biography point to our bogus biography +\let\IEEEbiography=\@IEEEbogusbiography +\let\endIEEEbiography=\end@IEEEbogusbiography + +\renewenvironment{IEEEbiographynophoto}[1]{\if@IEEEWARNIEEEbiographynophoto\typeout{** WARNING: \noexpand\IEEEbiographynophoto + is locked out when in conference mode (line \the\inputlineno).}\fi\global\@IEEEWARNIEEEbiographynophotofalse% +\setbox\@IEEEtranrubishbin\vbox\bgroup}{\egroup\relax} + +\def\IEEEpubid#1{\if@IEEEWARNIEEEpubid\typeout{** WARNING: \noexpand\IEEEpubid + is locked out when in conference mode (line \the\inputlineno).}\fi\global\@IEEEWARNIEEEpubidfalse} +\def\IEEEpubidadjcol{\if@IEEEWARNIEEEpubidadjcol\typeout{** WARNING: \noexpand\IEEEpubidadjcol + is locked out when in conference mode (line \the\inputlineno).}\fi\global\@IEEEWARNIEEEpubidadjcolfalse} +\def\IEEEmembership#1{\if@IEEEWARNIEEEmembership\typeout{** WARNING: \noexpand\IEEEmembership + is locked out when in conference mode (line \the\inputlineno).}\fi\global\@IEEEWARNIEEEmembershipfalse} +\def\IEEEaftertitletext#1{\if@IEEEWARNIEEEaftertitletext\typeout{** WARNING: \noexpand\IEEEaftertitletext + is locked out when in conference mode (line \the\inputlineno).}\fi\global\@IEEEWARNIEEEaftertitletextfalse} +\fi + + +% provide a way to restore the commands that are locked out +\def\IEEEoverridecommandlockouts{% +\typeout{** ATTENTION: Overriding command lockouts (line \the\inputlineno).}% +\let\thanks\@IEEESAVECMDthanks% +\let\IEEEPARstart\@IEEESAVECMDIEEEPARstart% +\let\IEEEbiography\@IEEESAVECMDIEEEbiography% +\let\endIEEEbiography\@IEEESAVECMDendIEEEbiography% +\let\IEEEbiographynophoto\@IEEESAVECMDIEEEbiographynophoto% +\let\endIEEEbiographynophoto\@IEEESAVECMDendIEEEbiographynophoto% +\let\IEEEpubid\@IEEESAVECMDIEEEpubid% +\let\IEEEpubidadjcol\@IEEESAVECMDIEEEpubidadjcol% +\let\IEEEmembership\@IEEESAVECMDIEEEmembership% +\let\IEEEaftertitletext\@IEEESAVECMDIEEEaftertitletext} + + + +% need a backslash character for typeout output +{\catcode`\|=0 \catcode`\\=12 +|xdef|@IEEEbackslash{\}} + + +% hook to allow easy disabling of all legacy warnings +\def\@IEEElegacywarn#1#2{\typeout{** ATTENTION: \@IEEEbackslash #1 is deprecated (line \the\inputlineno). 
+Use \@IEEEbackslash #2 instead.}}
+
+
+% provide for legacy commands
+\def\authorblockA{\@IEEElegacywarn{authorblockA}{IEEEauthorblockA}\IEEEauthorblockA}
+\def\authorblockN{\@IEEElegacywarn{authorblockN}{IEEEauthorblockN}\IEEEauthorblockN}
+\def\authorrefmark{\@IEEElegacywarn{authorrefmark}{IEEEauthorrefmark}\IEEEauthorrefmark}
+\def\PARstart{\@IEEElegacywarn{PARstart}{IEEEPARstart}\IEEEPARstart}
+\def\pubid{\@IEEElegacywarn{pubid}{IEEEpubid}\IEEEpubid}
+\def\pubidadjcol{\@IEEElegacywarn{pubidadjcol}{IEEEpubidadjcol}\IEEEpubidadjcol}
+\def\QED{\@IEEElegacywarn{QED}{IEEEQED}\IEEEQED}
+\def\QEDclosed{\@IEEElegacywarn{QEDclosed}{IEEEQEDclosed}\IEEEQEDclosed}
+\def\QEDopen{\@IEEElegacywarn{QEDopen}{IEEEQEDopen}\IEEEQEDopen}
+\def\specialpapernotice{\@IEEElegacywarn{specialpapernotice}{IEEEspecialpapernotice}\IEEEspecialpapernotice}
+
+
+
+% provide for legacy environments
+\def\biography{\@IEEElegacywarn{biography}{IEEEbiography}\IEEEbiography}
+\def\biographynophoto{\@IEEElegacywarn{biographynophoto}{IEEEbiographynophoto}\IEEEbiographynophoto}
+\def\keywords{\@IEEElegacywarn{keywords}{IEEEkeywords}\IEEEkeywords}
+\def\endbiography{\endIEEEbiography}
+\def\endbiographynophoto{\endIEEEbiographynophoto}
+\def\endkeywords{\endIEEEkeywords}
+
+
+% provide for legacy IED commands/lengths when possible
+\let\labelindent\IEEElabelindent
+\def\calcleftmargin{\@IEEElegacywarn{calcleftmargin}{IEEEcalcleftmargin}\IEEEcalcleftmargin}
+\def\setlabelwidth{\@IEEElegacywarn{setlabelwidth}{IEEEsetlabelwidth}\IEEEsetlabelwidth}
+\def\usemathlabelsep{\@IEEElegacywarn{usemathlabelsep}{IEEEusemathlabelsep}\IEEEusemathlabelsep}
+\def\iedlabeljustifyc{\@IEEElegacywarn{iedlabeljustifyc}{IEEEiedlabeljustifyc}\IEEEiedlabeljustifyc}
+\def\iedlabeljustifyl{\@IEEElegacywarn{iedlabeljustifyl}{IEEEiedlabeljustifyl}\IEEEiedlabeljustifyl}
+\def\iedlabeljustifyr{\@IEEElegacywarn{iedlabeljustifyr}{IEEEiedlabeljustifyr}\IEEEiedlabeljustifyr}
+
+
+
+% let \proof use the IEEEtran version even after amsthm is loaded
+% \proof is now deprecated in favor of \IEEEproof
+\AtBeginDocument{\def\proof{\@IEEElegacywarn{proof}{IEEEproof}\IEEEproof}\def\endproof{\endIEEEproof}}
+
+% V1.7 \overrideIEEEmargins is no longer supported.
+\def\overrideIEEEmargins{%
+\typeout{** WARNING: \string\overrideIEEEmargins \space no longer supported (line \the\inputlineno).}%
+\typeout{** Use the \string\CLASSINPUTinnersidemargin, \string\CLASSINPUToutersidemargin \space controls instead.}}
+
+
+\endinput
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%% End of IEEEtran.cls %%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% That's all folks!
+
diff --git a/spec/consensus/consensus-paper/README.md b/spec/consensus/consensus-paper/README.md
new file mode 100644
index 0000000000..33e3958061
--- /dev/null
+++ b/spec/consensus/consensus-paper/README.md
@@ -0,0 +1,24 @@
+# Tendermint-spec
+
+This repository contains the specification (and the proofs) of the Tendermint
+consensus protocol.
+
+## How to install LaTeX on Mac OS
+
+MacTeX is a LaTeX distribution for Mac OS. You can download it [here](http://www.tug.org/mactex/mactex-download.html).
+
+A popular IDE for LaTeX-based projects is TeXstudio. It can be downloaded
+[here](https://www.texstudio.org/).
+
+## How to build the project
+
+To compile the LaTeX files and build the bibliography (the repeated `pdflatex` runs resolve citations and cross-references), execute
+
+`$ pdflatex paper`
+`$ bibtex paper`
+`$ pdflatex paper`
+`$ pdflatex paper`
+ +The generated file is paper.pdf. You can open it with + +`$ open paper.pdf` diff --git a/spec/consensus/consensus-paper/algorithmicplus.sty b/spec/consensus/consensus-paper/algorithmicplus.sty new file mode 100644 index 0000000000..de7ca01ea2 --- /dev/null +++ b/spec/consensus/consensus-paper/algorithmicplus.sty @@ -0,0 +1,195 @@ +% ALGORITHMICPLUS STYLE +% for LaTeX version 2e +% Original ``algorithmic.sty'' by -- 1994 Peter Williams +% Bug fix (13 July 2004) by Arnaud Giersch +% Includes ideas from 'algorithmicext' by Martin Biely +% and 'distribalgo' by Xavier Defago +% Modifications: Martin Hutle +% +% This style file is free software; you can redistribute it and/or +% modify it under the terms of the GNU Lesser General Public +% License as published by the Free Software Foundation; either +% version 2 of the License, or (at your option) any later version. +% +% This style file is distributed in the hope that it will be useful, +% but WITHOUT ANY WARRANTY; without even the implied warranty of +% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +% Lesser General Public License for more details. +% +% You should have received a copy of the GNU Lesser General Public +% License along with this style file; if not, write to the +% Free Software Foundation, Inc., 59 Temple Place - Suite 330, +% Boston, MA 02111-1307, USA. +% +\NeedsTeXFormat{LaTeX2e} +\ProvidesPackage{algorithmicplus} +\typeout{Document Style `algorithmicplus' - environment, replaces `algorithmic'} +% +\RequirePackage{ifthen} +\RequirePackage{calc} +\newboolean{ALC@noend} +\setboolean{ALC@noend}{false} +\newcounter{ALC@line} +\newcounter{ALC@rem} +\newcounter{ALC@depth} +\newcounter{ALCPLUS@lastline} +\newlength{\ALC@tlm} +% +\DeclareOption{noend}{\setboolean{ALC@noend}{true}} +% +\ProcessOptions +% +% ALGORITHMIC +\newcommand{\algorithmiclnosize}{\small} +\newcommand{\algorithmiclnofont}{\tt} +\newcommand{\algorithmiclnodelimiter}{:} +% +\newcommand{\algorithmicrequire}{\textbf{Require:}} +\newcommand{\algorithmicensure}{\textbf{Ensure:}} +\newcommand{\algorithmiccomment}[1]{\{#1\}} +\newcommand{\algorithmicend}{\textbf{end}} +\newcommand{\algorithmicif}{\textbf{if}} +\newcommand{\algorithmicthen}{\textbf{then}} +\newcommand{\algorithmicelse}{\textbf{else}} +\newcommand{\algorithmicelsif}{\algorithmicelse\ \algorithmicif} +\newcommand{\algorithmicendif}{\algorithmicend\ \algorithmicif} +\newcommand{\algorithmicfor}{\textbf{for}} +\newcommand{\algorithmicforall}{\textbf{for all}} +\newcommand{\algorithmicdo}{\textbf{do}} +\newcommand{\algorithmicendfor}{\algorithmicend\ \algorithmicfor} +\newcommand{\algorithmicwhile}{\textbf{while}} +\newcommand{\algorithmicendwhile}{\algorithmicend\ \algorithmicwhile} +\newcommand{\algorithmicloop}{\textbf{loop}} +\newcommand{\algorithmicendloop}{\algorithmicend\ \algorithmicloop} +\newcommand{\algorithmicrepeat}{\textbf{repeat}} +\newcommand{\algorithmicuntil}{\textbf{until}} +\def\ALC@item[#1]{% +\if@noparitem \@donoparitem + \else \if@inlabel \indent \par \fi + \ifhmode \unskip\unskip \par \fi + \if@newlist \if@nobreak \@nbitem \else + \addpenalty\@beginparpenalty + \addvspace\@topsep \addvspace{-\parskip}\fi + \else \addpenalty\@itempenalty \addvspace\itemsep + \fi + \global\@inlabeltrue +\fi +\everypar{\global\@minipagefalse\global\@newlistfalse + \if@inlabel\global\@inlabelfalse \hskip -\parindent \box\@labels + \penalty\z@ \fi + \everypar{}}\global\@nobreakfalse +\if@noitemarg \@noitemargfalse \if@nmbrlist \refstepcounter{\@listctr}\fi \fi 
+\sbox\@tempboxa{\makelabel{#1}}% +\global\setbox\@labels + \hbox{\unhbox\@labels \hskip \itemindent + \hskip -\labelwidth \hskip -\ALC@tlm + \ifdim \wd\@tempboxa >\labelwidth + \box\@tempboxa + \else \hbox to\labelwidth {\unhbox\@tempboxa}\fi + \hskip \ALC@tlm}\ignorespaces} +% +\newenvironment{algorithmic}[1][0]{ +\setcounter{ALC@depth}{\@listdepth}% +\let\@listdepth\c@ALC@depth% +\let\@item\ALC@item + \newcommand{\ALC@lno}{% +\ifthenelse{\equal{\arabic{ALC@rem}}{0}} +{{\algorithmiclnosize\algorithmiclnofont \arabic{ALC@line}\algorithmiclnodelimiter}}{}% +} +\let\@listii\@listi +\let\@listiii\@listi +\let\@listiv\@listi +\let\@listv\@listi +\let\@listvi\@listi +\let\@listvii\@listi + \newenvironment{ALC@g}{ + \begin{list}{\ALC@lno}{ \itemsep\z@ \itemindent\z@ + \listparindent\z@ \rightmargin\z@ + \topsep\z@ \partopsep\z@ \parskip\z@\parsep\z@ + \leftmargin 1em + \addtolength{\ALC@tlm}{\leftmargin} + } + } + {\end{list}} + \newcommand{\ALC@it}{\refstepcounter{ALC@line}\addtocounter{ALC@rem}{1}\ifthenelse{\equal{\arabic{ALC@rem}}{#1}}{\setcounter{ALC@rem}{0}}{}\item} + \newcommand{\ALC@com}[1]{\ifthenelse{\equal{##1}{default}}% +{}{\ \algorithmiccomment{##1}}} + \newcommand{\REQUIRE}{\item[\algorithmicrequire]} + \newcommand{\ENSURE}{\item[\algorithmicensure]} + \newcommand{\STATE}{\ALC@it} + \newcommand{\COMMENT}[1]{\algorithmiccomment{##1}} + \newenvironment{ALC@if}{\begin{ALC@g}}{\end{ALC@g}} + \newenvironment{ALC@for}{\begin{ALC@g}}{\end{ALC@g}} + \newenvironment{ALC@whl}{\begin{ALC@g}}{\end{ALC@g}} + \newenvironment{ALC@loop}{\begin{ALC@g}}{\end{ALC@g}} + \newenvironment{ALC@rpt}{\begin{ALC@g}}{\end{ALC@g}} + \renewcommand{\\}{\@centercr} + \newcommand{\IF}[2][default]{\ALC@it\algorithmicif\ ##2\ \algorithmicthen% +\ALC@com{##1}\begin{ALC@if}} + \newcommand{\ELSE}[1][default]{\end{ALC@if}\ALC@it\algorithmicelse% +\ALC@com{##1}\begin{ALC@if}} + \newcommand{\ELSIF}[2][default]% +{\end{ALC@if}\ALC@it\algorithmicelsif\ ##2\ \algorithmicthen% +\ALC@com{##1}\begin{ALC@if}} + \newcommand{\FOR}[2][default]{\ALC@it\algorithmicfor\ ##2\ \algorithmicdo% +\ALC@com{##1}\begin{ALC@for}} + \newcommand{\FORALL}[2][default]{\ALC@it\algorithmicforall\ ##2\ % +\algorithmicdo% +\ALC@com{##1}\begin{ALC@for}} + \newcommand{\WHILE}[2][default]{\ALC@it\algorithmicwhile\ ##2\ % +\algorithmicdo% +\ALC@com{##1}\begin{ALC@whl}} + \newcommand{\LOOP}[1][default]{\ALC@it\algorithmicloop% +\ALC@com{##1}\begin{ALC@loop}} + \newcommand{\REPEAT}[1][default]{\ALC@it\algorithmicrepeat% +\ALC@com{##1}\begin{ALC@rpt}} + \newcommand{\UNTIL}[1]{\end{ALC@rpt}\ALC@it\algorithmicuntil\ ##1} + \ifthenelse{\boolean{ALC@noend}}{ + \newcommand{\ENDIF}{\end{ALC@if}} + \newcommand{\ENDFOR}{\end{ALC@for}} + \newcommand{\ENDWHILE}{\end{ALC@whl}} + \newcommand{\ENDLOOP}{\end{ALC@loop}} + }{ + \newcommand{\ENDIF}{\end{ALC@if}\ALC@it\algorithmicendif} + \newcommand{\ENDFOR}{\end{ALC@for}\ALC@it\algorithmicendfor} + \newcommand{\ENDWHILE}{\end{ALC@whl}\ALC@it\algorithmicendwhile} + \newcommand{\ENDLOOP}{\end{ALC@loop}\ALC@it\algorithmicendloop} + } + \renewcommand{\@toodeep}{} + \begin{list}{\ALC@lno}{\setcounter{ALC@line}{0}\setcounter{ALC@rem}{0}% + \itemsep\z@ \itemindent\z@ \listparindent\z@% + \partopsep\z@ \parskip\z@ \parsep\z@% + \labelsep 0.5em \topsep 0.2em% +\ifthenelse{\equal{#1}{0}} + {\labelwidth 0.5em } + {\labelwidth 1.2em } +\leftmargin\labelwidth \addtolength{\leftmargin}{\labelsep} + \ALC@tlm\labelsep + } +} +{% +\setcounter{ALCPLUS@lastline}{\value{ALC@line}}% +\end{list}} + 
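+
+% Illustrative usage (a sketch): the optional argument selects how often a
+% line number is printed; [1] numbers every line, as used by the paper.
+% \begin{algorithmic}[1]
+%   \STATE $i \gets 0$
+%   \WHILE{$i < n$}
+%     \STATE $i \gets i + 1$
+%   \ENDWHILE
+% \end{algorithmic}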
+\newcommand{\continuecounting}{\setcounter{ALC@line}{\value{ALCPLUS@lastline}}}
+\newcommand{\startcounting}[1]{\setcounter{ALC@line}{#1}\addtocounter{ALC@line}{-1}}
+
+\newcommand{\EMPTY}{\item[]}
+\newcommand{\SPACE}{\vspace{3mm}}
+\newcommand{\SHORTSPACE}{\vspace{1mm}}
+\newcommand{\newlinetag}[3]{\newcommand{#1}[#2]{\item[#3]}}
+\newcommand{\newconstruct}[5]{%
+  \newenvironment{ALC@\string#1}{\begin{ALC@g}}{\end{ALC@g}}
+  \newcommand{#1}[2][default]{\ALC@it#2\ ##2\ #3%
+  \ALC@com{##1}\begin{ALC@\string#1}}
+  \ifthenelse{\boolean{ALC@noend}}{
+    \newcommand{#4}{\end{ALC@\string#1}}
+  }{
+    \newcommand{#4}{\end{ALC@\string#1}\ALC@it#5}
+  }
+}
+
+\newconstruct{\INDENT}{}{}{\ENDINDENT}{}
+
+\newcommand{\setlinenosize}[1]{\renewcommand{\algorithmiclnosize}{#1}}
+\newcommand{\setlinenofont}[1]{\renewcommand{\algorithmiclnofont}{#1}}
diff --git a/spec/consensus/consensus-paper/conclusion.tex b/spec/consensus/consensus-paper/conclusion.tex
new file mode 100644
index 0000000000..dd17ccf44d
--- /dev/null
+++ b/spec/consensus/consensus-paper/conclusion.tex
@@ -0,0 +1,16 @@
+\section{Conclusion} \label{sec:conclusion}
+
+We have proposed a new Byzantine fault-tolerant consensus algorithm that is the
+core of the Tendermint BFT SMR platform. The algorithm is designed for wide
+area networks with a large number of mutually distrustful nodes that communicate
+over a gossip-based peer-to-peer network. It has only a single mode of execution,
+and its communication pattern is very similar to the "normal" case of the
+state-of-the-art PBFT algorithm. The algorithm ensures termination with a novel
+mechanism that takes advantage of the gossip-based communication between nodes.
+The proposed algorithm and the proofs are simple and elegant, and we believe
+that this makes them easier to understand and implement correctly.
+
+\section*{Acknowledgment}
+
+We would like to thank Anton Kaliaev, Ismail Khoffi and Dahlia Malkhi for comments on an earlier version of the paper. We also want to thank Marko Vukolic, Ming Chuan Lin, Maria Potop-Butucaru, Sara Tucci, Antonella Del Pozzo and Yackolley Amoussou-Guenou for pointing out the liveness issues
+in the previous version of the algorithm. Finally, we want to thank the Tendermint team members and all project contributors for making Tendermint such a great platform.
diff --git a/spec/consensus/consensus-paper/consensus.tex b/spec/consensus/consensus-paper/consensus.tex new file mode 100644 index 0000000000..3265b61c75 --- /dev/null +++ b/spec/consensus/consensus-paper/consensus.tex @@ -0,0 +1,397 @@ + +\section{Tendermint consensus algorithm} \label{sec:tendermint} + +\newcommand\Disseminate{\textbf{Disseminate}} + +\newcommand\Proposal{\mathsf{PROPOSAL}} +\newcommand\ProposalPart{\mathsf{PROPOSAL\mbox{-}PART}} +\newcommand\PrePrepare{\mathsf{INIT}} \newcommand\Prevote{\mathsf{PREVOTE}} +\newcommand\Precommit{\mathsf{PRECOMMIT}} +\newcommand\Decision{\mathsf{DECISION}} + +\newcommand\ViewChange{\mathsf{VC}} +\newcommand\ViewChangeAck{\mathsf{VC\mbox{-}ACK}} +\newcommand\NewPrePrepare{\mathsf{VC\mbox{-}INIT}} +\newcommand\coord{\mathsf{proposer}} + +\newcommand\newHeight{newHeight} \newcommand\newRound{newRound} +\newcommand\nil{nil} \newcommand\id{id} \newcommand{\propose}{propose} +\newcommand\prevote{prevote} \newcommand\prevoteWait{prevoteWait} +\newcommand\precommit{precommit} \newcommand\precommitWait{precommitWait} +\newcommand\commit{commit} + +\newcommand\timeoutPropose{timeoutPropose} +\newcommand\timeoutPrevote{timeoutPrevote} +\newcommand\timeoutPrecommit{timeoutPrecommit} +\newcommand\proofOfLocking{proof\mbox{-}of\mbox{-}locking} + +\begin{algorithm}[htb!] \def\baselinestretch{1} \scriptsize\raggedright + \begin{algorithmic}[1] + \SHORTSPACE + \INIT{} + \STATE $h_p := 0$ + \COMMENT{current height, or consensus instance we are currently executing} + \STATE $round_p := 0$ \COMMENT{current round number} + \STATE $step_p \in \set{\propose, \prevote, \precommit}$ + \STATE $decision_p[] := nil$ + \STATE $lockedValue_p := nil$ + \STATE $lockedRound_p := -1$ + \STATE $validValue_p := nil$ + \STATE $validRound_p := -1$ + \ENDINIT + \SHORTSPACE + \STATE \textbf{upon} start \textbf{do} $StartRound(0)$ + \SHORTSPACE + \FUNCTION{$StartRound(round)$} \label{line:tab:startRound} + \STATE $round_p \assign round$ + \STATE $step_p \assign \propose$ + \IF{$\coord(h_p, round_p) = p$} + \IF{$validValue_p \neq \nil$} \label{line:tab:isThereLockedValue} + \STATE $proposal \assign validValue_p$ \ELSE \STATE $proposal \assign + getValue()$ + \label{line:tab:getValidValue} + \ENDIF + \STATE \Broadcast\ $\li{\Proposal,h_p, round_p, proposal, validRound_p}$ + \label{line:tab:send-proposal} + \ELSE + \STATE \textbf{schedule} $OnTimeoutPropose(h_p, + round_p)$ to be executed \textbf{after} $\timeoutPropose(round_p)$ + \ENDIF + \ENDFUNCTION + + \SPACE + \UPON{$\li{\Proposal,h_p,round_p, v, -1}$ \From\ $\coord(h_p,round_p)$ + \With\ $step_p = \propose$} \label{line:tab:recvProposal} + \IF{$valid(v) \wedge (lockedRound_p = -1 \vee lockedValue_p = v$)} + \label{line:tab:accept-proposal-2} + \STATE \Broadcast \ $\li{\Prevote,h_p,round_p,id(v)}$ + \label{line:tab:prevote-proposal} + \ELSE + \label{line:tab:acceptProposal1} + \STATE \Broadcast \ $\li{\Prevote,h_p,round_p,\nil}$ + \label{line:tab:prevote-nil} + \ENDIF + \STATE $step_p \assign \prevote$ \label{line:tab:setStateToPrevote1} + \ENDUPON + + \SPACE + \UPON{$\li{\Proposal,h_p,round_p, v, vr}$ \From\ $\coord(h_p,round_p)$ + \textbf{AND} $2f+1$ $\li{\Prevote,h_p, vr,id(v)}$ \With\ $step_p = \propose \wedge (vr \ge 0 \wedge vr < round_p)$} + \label{line:tab:acceptProposal} + \IF{$valid(v) \wedge (lockedRound_p \le vr + \vee lockedValue_p = v)$} \label{line:tab:cond-prevote-higher-proposal} + \STATE \Broadcast \ $\li{\Prevote,h_p,round_p,id(v)}$ + \label{line:tab:prevote-higher-proposal} + \ELSE + 
\label{line:tab:acceptProposal2}
+    \STATE \Broadcast \ $\li{\Prevote,h_p,round_p,\nil}$
+    \label{line:tab:prevote-nil2}
+    \ENDIF
+    \STATE $step_p \assign \prevote$ \label{line:tab:setStateToPrevote3}
+  \ENDUPON
+
+  \SPACE
+  \UPON{$2f+1$ $\li{\Prevote,h_p, round_p,*}$ \With\ $step_p = \prevote$ for the first time}
+  \label{line:tab:recvAny2/3Prevote}
+    \STATE \textbf{schedule} $OnTimeoutPrevote(h_p, round_p)$ to be executed \textbf{after} $\timeoutPrevote(round_p)$ \label{line:tab:timeoutPrevote}
+  \ENDUPON
+
+  \SPACE
+  \UPON{$\li{\Proposal,h_p,round_p, v, *}$ \From\ $\coord(h_p,round_p)$
+  \textbf{AND} $2f+1$ $\li{\Prevote,h_p, round_p,id(v)}$ \With\ $valid(v) \wedge step_p \ge \prevote$ for the first time}
+  \label{line:tab:recvPrevote}
+    \IF{$step_p = \prevote$}
+      \STATE $lockedValue_p \assign v$ \label{line:tab:setLockedValue}
+      \STATE $lockedRound_p \assign round_p$ \label{line:tab:setLockedRound}
+      \STATE \Broadcast \ $\li{\Precommit,h_p,round_p,id(v)}$
+      \label{line:tab:precommit-v}
+      \STATE $step_p \assign \precommit$ \label{line:tab:setStateToCommit}
+    \ENDIF
+    \STATE $validValue_p \assign v$ \label{line:tab:setValidRound}
+    \STATE $validRound_p \assign round_p$ \label{line:tab:setValidValue}
+  \ENDUPON
+
+  \SHORTSPACE
+  \UPON{$2f+1$ $\li{\Prevote,h_p,round_p, \nil}$
+  \With\ $step_p = \prevote$}
+    \STATE \Broadcast \ $\li{\Precommit,h_p,round_p, \nil}$
+    \label{line:tab:precommit-v-1}
+    \STATE $step_p \assign \precommit$
+  \ENDUPON
+
+  \SPACE
+  \UPON{$2f+1$ $\li{\Precommit,h_p,round_p,*}$ for the first time}
+  \label{line:tab:startTimeoutPrecommit}
+    \STATE \textbf{schedule} $OnTimeoutPrecommit(h_p, round_p)$ to be executed \textbf{after} $\timeoutPrecommit(round_p)$
+
+  \ENDUPON
+
+  \SPACE
+  \UPON{$\li{\Proposal,h_p,r, v, *}$ \From\ $\coord(h_p,r)$ \textbf{AND}
+  $2f+1$ $\li{\Precommit,h_p,r,id(v)}$ \With\ $decision_p[h_p] = \nil$}
+  \label{line:tab:onDecideRule}
+    \IF{$valid(v)$} \label{line:tab:validDecisionValue}
+      \STATE $decision_p[h_p] = v$ \label{line:tab:decide}
+      \STATE $h_p \assign h_p + 1$ \label{line:tab:increaseHeight}
+      \STATE reset $lockedRound_p$, $lockedValue_p$, $validRound_p$ and $validValue_p$ to initial values
+      and empty message log
+      \STATE $StartRound(0)$
+    \ENDIF
+  \ENDUPON
+
+  \SHORTSPACE
+  \UPON{$f+1$ $\li{*,h_p,round, *, *}$ \textbf{with} $round > round_p$}
+  \label{line:tab:skipRounds}
+    \STATE $StartRound(round)$ \label{line:tab:nextRound2}
+  \ENDUPON
+
+  \SHORTSPACE
+  \FUNCTION{$OnTimeoutPropose(height,round)$} \label{line:tab:onTimeoutPropose}
+    \IF{$height = h_p \wedge round = round_p \wedge step_p = \propose$}
+      \STATE \Broadcast \ $\li{\Prevote,h_p,round_p, \nil}$
+      \label{line:tab:prevote-nil-on-timeout}
+      \STATE $step_p \assign \prevote$
+    \ENDIF
+  \ENDFUNCTION
+
+  \SHORTSPACE
+  \FUNCTION{$OnTimeoutPrevote(height,round)$} \label{line:tab:onTimeoutPrevote}
+    \IF{$height = h_p \wedge round = round_p \wedge step_p = \prevote$}
+      \STATE \Broadcast \ $\li{\Precommit,h_p,round_p,\nil}$
+      \label{line:tab:precommit-nil-onTimeout}
+      \STATE $step_p \assign \precommit$
+    \ENDIF
+  \ENDFUNCTION
+
+  \SHORTSPACE
+  \FUNCTION{$OnTimeoutPrecommit(height,round)$} \label{line:tab:onTimeoutPrecommit}
+    \IF{$height = h_p \wedge round = round_p$}
+      \STATE $StartRound(round_p + 1)$ \label{line:tab:nextRound}
+    \ENDIF
+  \ENDFUNCTION
+  \end{algorithmic} \caption{Tendermint consensus algorithm}
+  \label{alg:tendermint}
+\end{algorithm}
+
+In this section we present the Tendermint Byzantine fault-tolerant consensus
+algorithm. 
The algorithm is specified by the pseudo-code shown in
+Algorithm~\ref{alg:tendermint}. We present the algorithm as a set of \emph{upon
+rules} that are executed atomically\footnote{In case several rules are active
+at the same time, the first rule to be executed is picked randomly. The
+correctness of the algorithm does not depend on the order in which rules are
+executed.}. We assume that processes exchange protocol messages using a gossip
+protocol and that both sent and received messages are stored in a local message
+log for every process. An upon rule is triggered once the message log contains
+messages such that the corresponding condition evaluates to $\tt{true}$. A
+condition that assumes reception of $X$ messages of a particular type and
+content denotes reception of messages whose senders have aggregate voting power at
+least equal to $X$. For example, the condition $2f+1$ $\li{\Precommit,h_p,r,id(v)}$
+evaluates to $\tt{true}$ upon reception of $\Precommit$ messages for height $h_p$,
+a round $r$, and with a value equal to $id(v)$, whose senders have aggregate voting
+power at least equal to $2f+1$. Some of the rules end with a "for the first time"
+constraint to denote that they are triggered only the first time the corresponding
+condition evaluates to $\tt{true}$. This is because those rules do not always
+change the state of the algorithm variables, so without this constraint the
+algorithm could keep executing those rules forever. The variables with index $p$
+are process-local state variables, while variables without index $p$ are value
+placeholders. The sign $*$ denotes any value.
+
+We denote by $n$ the total voting power of processes in the system, and we
+assume that the total voting power of faulty processes in the system is bounded
+by a system parameter $f$. The algorithm assumes that $n > 3f$, i.e., it
+requires that the total voting power of faulty processes is smaller than one
+third of the total voting power. For simplicity we present the algorithm for
+the case $n = 3f + 1$.
+
+The algorithm proceeds in rounds, where each round has a dedicated
+\emph{proposer}. The mapping of rounds to proposers is known to all processes
+and is given as a function $\coord(h, round)$, returning the proposer for
+the round $round$ in the consensus instance $h$. We
+assume that the proposer selection function is weighted round-robin, where
+processes are rotated proportionally to their voting power\footnote{A validator
+with more voting power is selected more frequently, proportionally to its power.
+More precisely, during a sequence of rounds of size $n$, every process is
+proposer in a number of rounds equal to its voting power.}.
+The internal protocol state transitions are triggered by message reception and
+by expiration of timeouts. There are three timeouts in Algorithm \ref{alg:tendermint}:
+$\timeoutPropose$, $\timeoutPrevote$ and $\timeoutPrecommit$.
+The timeouts prevent the algorithm from blocking and
+waiting forever for some condition to be true, ensure that processes continuously
+transition between rounds, and guarantee that eventually (after GST) communication
+between correct processes is timely and reliable so they can decide.
+The last role is achieved by increasing the timeouts with every new round $r$,
+i.e., $timeoutX(r) = initTimeoutX + r \cdot timeoutDelta$;
+they are reset for every new height (consensus
+instance).
+
+Processes exchange the following messages in Tendermint: $\Proposal$,
+$\Prevote$ and $\Precommit$. 
The $\Proposal$ message is used by the proposer of
+the current round to suggest a potential decision value, while $\Prevote$ and
+$\Precommit$ are votes for a proposed value. According to the classification of
+consensus algorithms from \cite{RMS10:dsn}, Tendermint, like PBFT
+\cite{CL02:tcs} and DLS \cite{DLS88:jacm}, belongs to class 3, so it requires
+two voting steps (three communication exchanges in total) to decide a value.
+The Tendermint consensus algorithm is designed for the blockchain context, where
+the value to decide is a block of transactions (i.e., it is potentially quite
+large, consisting of many transactions). Therefore, in Algorithm
+\ref{alg:tendermint} (similarly to \cite{CL02:tcs}) we are explicit about
+sending a value (block of transactions) and a small, constant-size value id (a
+unique value identifier, normally a hash of the value, i.e., if $\id(v) =
+\id(v')$, then $v=v'$). The $\Proposal$ message is the only one carrying the
+value; $\Prevote$ and $\Precommit$ messages carry the value id. A correct
+process decides on a value $v$ in Tendermint upon receiving the $\Proposal$ for
+$v$ and $2f+1$ voting-power equivalent $\Precommit$ messages for $\id(v)$ in
+some round $r$. In order to send a $\Precommit$ message for $v$ in a round $r$, a
+correct process must first receive the $\Proposal$ and $2f+1$ of the
+corresponding $\Prevote$ messages in the round $r$; otherwise,
+it sends a $\Precommit$ message with the special $\nil$ value.
+This ensures that correct processes can $\Precommit$ only a
+single value (or $\nil$) in a round. As
+proposers may be faulty, the proposed value is treated by correct processes as
+a suggestion (it is not blindly accepted), and a correct process tells others
+if it accepted the $\Proposal$ for value $v$ by sending a $\Prevote$ message for
+$\id(v)$; otherwise it sends a $\Prevote$ message with the special $\nil$ value.
+
+Every process maintains the following variables in Algorithm
+\ref{alg:tendermint}: $step$, $lockedValue$, $lockedRound$, $validValue$ and
+$validRound$. The $step$ denotes the current state of the internal Tendermint
+state machine, i.e., it reflects the stage of the algorithm execution in the
+current round. The $lockedValue$ stores the most recent value (with respect to
+a round number) for which a $\Precommit$ message has been sent. The
+$lockedRound$ is the last round in which the process sent a $\Precommit$
+message that is not $\nil$. We also say that a correct process locks a value
+$v$ in a round $r$ by setting $lockedValue = v$ and $lockedRound = r$ before
+sending a $\Precommit$ message for $\id(v)$. As a correct process can decide a
+value $v$ only if $2f+1$ $\Precommit$ messages for $\id(v)$ are received, this
+implies that a possible decision value is a value that is locked by correct
+processes holding at least $f+1$ voting power. Therefore, any value $v$
+for which $\Proposal$ and $2f+1$ of the corresponding $\Prevote$ messages are
+received in some round $r$ is a \emph{possible decision} value. The role of the
+$validValue$ variable is to store the most recent possible decision value; the
+$validRound$ is the last round in which $validValue$ was updated. Apart from
+those variables, a process also stores the current consensus instance ($h_p$,
+called \emph{height} in Tendermint) and the current round number ($round_p$),
+and attaches them to every message. Finally, a process also stores an array of
+decisions, $decision_p$ (Tendermint assumes a sequence of consensus instances,
+one for each height).
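+
+The bound $f+1$ here follows from simple quorum arithmetic (a sketch of the
+argument; the formal treatment is in Section~\ref{sec:proof}): a decision
+requires $\Precommit$ messages from processes holding $2f+1$ voting power in
+total, and faulty processes hold at most $f$ of it, so correct processes
+holding at least $(2f+1) - f = f + 1$ voting power must have locked the
+decided value.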
+
+Every round starts with the proposer suggesting a value with the $\Proposal$
+message (see line \ref{line:tab:send-proposal}). In the initial round of each
+height, the proposer is free to choose the value to suggest. In
+Algorithm~\ref{alg:tendermint}, a correct process obtains a value to propose
+using an external function $getValue()$ that returns a valid value to
+propose. In the following rounds, a correct proposer will suggest a new value
+only if $validValue = \nil$; otherwise $validValue$ is proposed (see
+lines~\ref{line:tab:isThereLockedValue}-\ref{line:tab:getValidValue}).
+In addition to the value proposed, the $\Proposal$ message also
+contains the $validRound$, so other processes are informed about the last round
+in which the proposer observed $validValue$ as a possible decision value.
+Note that if a correct proposer $p$ sends $validValue$ with the $validRound$ in the
+$\Proposal$, this implies that the process $p$ received the $\Proposal$ and the
+corresponding $2f+1$ $\Prevote$ messages for $validValue$ in the round
+$validRound$.
+If a correct process sends a $\Proposal$ message with $validValue$ ($validRound > -1$)
+at time $t > GST$, then by the \emph{Gossip communication} property, the
+corresponding $\Proposal$ and $\Prevote$ messages will be received by all
+correct processes before time $t+\Delta$. Therefore, all correct processes will
+be able to verify the correctness of the suggested value, as it is supported by
+the $\Proposal$ and the corresponding $2f+1$ voting-power equivalent $\Prevote$
+messages.
+
+A correct process $p$ accepts the proposal for a value $v$ (sends a $\Prevote$
+for $id(v)$) if an external \emph{valid} function returns $true$ for the value
+$v$, and if $p$ hasn't locked any value ($lockedRound = -1$) or $p$ has locked
+the value $v$ ($lockedValue = v$); see the line
+\ref{line:tab:accept-proposal-2}. In case the proposed pair is $(v,vr \ge 0)$ and a
+correct process $p$ has locked some value, it will accept
+$v$ if it is a more recent possible decision value\footnote{As
+explained above, the possible decision value in a round $r$ is the one for
+which $\Proposal$ and the corresponding $2f+1$ $\Prevote$ messages are received
+for the round $r$.}, $vr > lockedRound_p$, or if $lockedValue = v$
+(see line~\ref{line:tab:cond-prevote-higher-proposal}). Otherwise, a correct
+process will reject the proposal by sending a $\Prevote$ message with the $\nil$
+value. A correct process will also send a $\Prevote$ message with the $\nil$ value
+in case $\timeoutPropose$ expired (it is scheduled when a correct process starts a
+new round) and the process has not yet sent a $\Prevote$ message in the current
+round (see the line \ref{line:tab:onTimeoutPropose}).
+
+If a correct process receives a $\Proposal$ message for some value $v$ and $2f+1$
+$\Prevote$ messages for $\id(v)$, then it sends a $\Precommit$ message with
+$\id(v)$. Otherwise, it sends $\Precommit$ $\nil$. A correct process will also send
+a $\Precommit$ message with the $\nil$ value in case $\timeoutPrevote$ expired
+(it is scheduled when a correct process has sent a $\Prevote$ message and received
+any $2f+1$ $\Prevote$ messages) and the process has not yet sent a $\Precommit$
+message in the current round (see the line \ref{line:tab:onTimeoutPrevote}). A
+correct process decides on some value $v$ if it receives in some round $r$ a
+$\Proposal$ message for $v$ and $2f+1$ $\Precommit$ messages with $\id(v)$ (see
+the line \ref{line:tab:decide}). 
To prevent the algorithm from blocking and
+waiting forever for this condition to be true, Algorithm
+\ref{alg:tendermint} relies on $\timeoutPrecommit$. It is scheduled once a
+process receives any set of $2f+1$ $\Precommit$ messages for the current round.
+If the $\timeoutPrecommit$ expires and a process has not decided yet, the
+process starts the next round (see the line \ref{line:tab:onTimeoutPrecommit}).
+When a correct process $p$ decides, it starts the next consensus instance
+(for the next height). The \emph{Gossip communication} property ensures
+that the $\Proposal$ and $2f+1$ $\Prevote$ messages that led $p$ to decide
+are eventually received by all correct processes, so they will also decide.
+
+\subsection{Termination mechanism}
+
+Tendermint ensures termination by a novel mechanism that benefits from the
+gossip-based nature of communication (see the \emph{Gossip communication}
+property). It requires managing two additional variables, $validValue$ and
+$validRound$, which are then used by the proposer during the propose step as
+explained above. The $validValue$ and $validRound$ are updated to $v$ and $r$
+by a correct process in a round $r$ when the process receives a valid $\Proposal$
+message for the value $v$ and the corresponding $2f+1$ $\Prevote$ messages for
+$id(v)$ in the round $r$ (see the rule at line~\ref{line:tab:recvPrevote}).
+
+We now briefly give the intuition for how managing and proposing $validValue$
+and $validRound$ ensures termination. Formal treatment is left for
+Section~\ref{sec:proof}.
+
+The first thing to note is that during a good period, because of the
+\emph{Gossip communication} property, if a correct process $p$ locks a value
+$v$ in some round $r$, all correct processes will update $validValue$ to $v$
+and $validRound$ to $r$ before the end of the round $r$ (we prove this formally
+in Section~\ref{sec:proof}). The intuition is that the messages that led to $p$
+locking a value $v$ in the round $r$ will be gossiped to all correct processes
+before the end of the round $r$, so they will update $validValue$ and
+$validRound$ (see the line~\ref{line:tab:recvPrevote}). Therefore, if a correct
+process locks some value during a good period, $validValue$ and $validRound$ are
+updated by all correct processes, so that the value proposed in the following
+rounds will be acceptable to all correct processes. Note
+that it could happen that during a good period, no correct process locks a value,
+but some correct process $q$ updates $validValue$ and $validRound$ during some
+round. As no correct process locks a value in this case, $validValue_q$ and
+$validRound_q$ will also be acceptable to all correct processes, as
+$validRound_q > lockedRound_c$ for every correct process $c$, and as the
+\emph{Gossip communication} property ensures that the corresponding $\Prevote$
+messages that $q$ received in the round $validRound_q$ are received by all
+correct processes $\Delta$ time later.
+
+Finally, it could happen that after GST there is a long sequence of rounds in which
+no correct process either locks a value or updates $validValue$ and $validRound$.
+In this case, during this sequence of rounds, the values suggested by correct
+proposers were not accepted by all correct processes. This sequence of rounds
+is always finite, as at the beginning of every
+round there is at least one correct process $c$ such that $validValue_c$
+and $validRound_c$ are acceptable to every correct process. 
This is true as
+there exists a correct process $c$ such that for every other correct process
+$p$, $validRound_c > lockedRound_p$ or $validValue_c = lockedValue_p$. This
+holds because $c$ is the process that has locked a value in the most recent round
+among all correct processes (or no correct process locked any value). Therefore,
+eventually $c$ will be the proposer in some round, and the proposed value will be
+accepted by all correct processes, thereby terminating this sequence of
+rounds.
+
+Therefore, updating the $validValue$ and $validRound$ variables, and the
+\emph{Gossip communication} property, together ensure that eventually, during
+the good period, there exists a round with a correct proposer whose proposed
+value will be accepted by all correct processes, and all correct processes will
+terminate in that round. Note that this mechanism, contrary to the common
+termination mechanism illustrated in
+Figure~\ref{ch3:fig:coordinator-change}, does not require exchanging any
+information beyond the messages already sent as part of what is
+normally called the "normal" case.
+
diff --git a/spec/consensus/consensus-paper/definitions.tex b/spec/consensus/consensus-paper/definitions.tex
new file mode 100644
index 0000000000..454dd445df
--- /dev/null
+++ b/spec/consensus/consensus-paper/definitions.tex
@@ -0,0 +1,126 @@
+\section{Definitions} \label{sec:definitions}
+
+\subsection{Model}
+
+We consider a system of processes that communicate by exchanging messages.
+Processes can be correct or faulty, where a faulty process can behave in an
+arbitrary way, i.e., we consider Byzantine faults. We assume that each process
+has some amount of voting power (the voting power of a process can be $0$).
+Processes in our model are not part of a single administrative domain;
+therefore we cannot enforce direct network connectivity between all
+processes. Instead, we assume that each process is connected to a subset of
+processes called peers, such that there is an indirect communication channel
+between all correct processes. Communication between processes is established
+using a gossip protocol \cite{Dem1987:gossip}.
+
+Formally, we model the network communication using a variant of the \emph{partially
+synchronous system model}~\cite{DLS88:jacm}: in all executions of the system
+there is a bound $\Delta$ and an instant GST (Global Stabilization Time) such
+that all communication among correct processes after GST is reliable and
+$\Delta$-timely, i.e., if a correct process $p$ sends a message $m$ at time $t
+\ge GST$ to a correct process $q$, then $q$ will receive $m$ before $t +
+\Delta$\footnote{Note that as we do not assume direct communication channels
+  among all correct processes, this implies that before the message $m$
+  reaches $q$, it might pass through a number of correct processes that will
+forward the message $m$ using the gossip protocol towards $q$.}.
+In addition to the standard \emph{partially
+  synchronous system model}~\cite{DLS88:jacm}, we assume an auxiliary property
+that captures the gossip-based nature of communication\footnote{The details of the Tendermint gossip protocol will be discussed in a separate
+  technical report. }:
+
+
+\begin{itemize} \item \emph{Gossip communication:} If a correct process $p$
+  sends some message $m$ at time $t$, all correct processes will receive
+  $m$ before $\max\{t, GST\} + \Delta$. 
+
+
+The bound $\Delta$ and GST are system
+parameters whose values are not required to be known for the safety of our
+algorithm. Termination of the algorithm is guaranteed within a bounded duration
+after GST. In practice, the algorithm will work correctly in the slightly
+weaker variant of the model where the system alternates between (long enough)
+good periods (corresponding to the period \emph{after} GST, where the system is
+reliable and $\Delta$-timely) and bad periods (corresponding to the period
+\emph{before} GST, during which the system is asynchronous and messages can be
+lost), but consideration of the GST model simplifies the discussion.
+
+We assume that process steps (which might include sending and receiving
+messages) take zero time. Processes are equipped with clocks so they can
+measure local timeouts.
+Spoofing/impersonation attacks are assumed to be impossible at all times due
+to the use of public-key cryptography, i.e., we assume that all protocol
+messages contain a digital signature. Therefore, when a correct process $q$
+receives a signed message $m$ from its peer, the process $q$ can verify who
+the original sender of the message $m$ was and whether the message signature
+is valid. We do not explicitly state a signature verification step in the
+pseudo-code of the algorithm to improve readability; we assume that only
+messages with a valid signature are considered at that level (and messages
+with invalid signatures are dropped).
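To illustrate this filtering assumption, here is a minimal Go sketch of such a check built on the standard `crypto/ed25519` package; the `SignedMsg` wire format is an assumption made up for this example, not Tendermint's message encoding:

```go
package transport

import "crypto/ed25519"

// SignedMsg is a hypothetical wire format: a payload plus the sender's
// public key and a signature over the payload.
type SignedMsg struct {
	Payload []byte
	Sender  ed25519.PublicKey
	Sig     []byte
}

// Accept returns the payload only if the signature verifies; otherwise
// the message is dropped, so the consensus layer above only ever sees
// messages with valid signatures, as assumed in the model.
func Accept(m SignedMsg) ([]byte, bool) {
	if !ed25519.Verify(m.Sender, m.Payload, m.Sig) {
		return nil, false
	}
	return m.Payload, true
}
```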
+
+
+%Messages that are being gossiped are created by the consensus layer. We can
+ %think about consensus protocol as a content creator, which %defines what
+ %messages should be disseminated using the gossip protocol. A correct
+ %process creates the message for dissemination either i) %explicitly, by
+ %invoking \emph{send} function as part of the consensus protocol or ii)
+ %implicitly, by receiving a message from some other %process. Note that in
+ %the case ii) gossiping of messages is implicit, i.e., it happens without
+ %explicit send clause in the consensus algorithm %whenever a correct
+ %process receives some messages in the consensus algorithm\footnote{If a
+ %message is received by a correct process at %the consensus level then it
+ %is considered valid from the protocol point of view, i.e., it has a
+ %correct signature, a proper message structure %and a valid height and
+ %round number.}.
+
+%\item Processes keep resending messages (in case of failures or message loss)
+ %until all its peers get them. This ensures that every message %sent or
+ %received by a correct process is eventually received by all correct
+ %processes.
+
+\subsection{State Machine Replication}
+
+State machine replication (SMR) is a general approach for replicating services
+modeled as a deterministic state machine~\cite{Lam78:cacm,Sch90:survey}. The
+key idea of this approach is to guarantee that all replicas start in the same
+state and then apply requests from clients in the same order, thereby
+guaranteeing that the replicas' states will not diverge. Following
+Schneider~\cite{Sch90:survey}, we note that the following is key for
+implementing a replicated state machine tolerant to (Byzantine) faults:
+
+\begin{itemize} \item \emph{Replica Coordination.} All [non-faulty] replicas
+  receive and process the same sequence of requests. \end{itemize}
+
+Moreover, as Schneider also notes, this property can be decomposed into two
+parts, \emph{Agreement} and \emph{Order}: Agreement requires all (non-faulty)
+replicas to receive all requests, and Order requires that the order of
+received requests is the same at all replicas.
+
+There is an additional requirement that needs to be ensured by
+Byzantine-tolerant state machine replication: only requests (called
+transactions in Tendermint terminology) proposed by clients are executed. In
+Tendermint, transaction verification is the responsibility of the service that
+is being replicated; upon receiving a transaction from a client, the
+Tendermint process will ask the service if the request is valid, and only
+valid requests will be processed.
+
+\subsection{Consensus} \label{sec:consensus}
+
+Tendermint solves state machine replication by sequentially executing
+consensus instances to agree on each block of transactions that are then
+executed by the service being replicated. We consider a variant of the
+Byzantine consensus problem called Validity Predicate-based Byzantine
+consensus that is motivated by blockchain systems~\cite{GLR17:red-belly-bc}.
+The problem is defined by an agreement, a termination, and a validity
+property.
+
+\begin{itemize} \item \emph{Agreement:} No two correct processes decide on
+  different values. \item \emph{Termination:} All correct processes
+  eventually decide on a value. \item \emph{Validity:} A decided value
+  is valid, i.e., it satisfies the predefined predicate denoted
+  \emph{valid()}. \end{itemize}
+
+This variant of the Byzantine consensus problem has an application-specific
+\emph{valid()} predicate to indicate whether a value is valid. In the context
+of blockchain systems, for example, a value is not valid if it does not
+contain an appropriate hash of the last value (block) added to the blockchain.
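As a concrete illustration, here is a minimal Go sketch of such a blockchain-style valid() predicate; the `Block` layout and the hashing scheme are assumptions of this example, not Tendermint's actual block format:

```go
package blockchain

import (
	"bytes"
	"crypto/sha256"
)

// Block is a hypothetical block layout: PrevHash chains each value to
// its predecessor, and Txs carries the batch of transactions.
type Block struct {
	PrevHash [32]byte
	Txs      [][]byte
}

// Hash computes the block's identifier (id(v) in the paper's notation).
func (b *Block) Hash() [32]byte {
	var buf bytes.Buffer
	buf.Write(b.PrevHash[:])
	for _, tx := range b.Txs {
		buf.Write(tx)
	}
	return sha256.Sum256(buf.Bytes())
}

// Valid is the application-specific valid() predicate: a proposed value
// is acceptable only if it extends the last block added to the chain.
func Valid(proposed, lastAdded *Block) bool {
	h := lastAdded.Hash()
	return bytes.Equal(proposed.PrevHash[:], h[:])
}
```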
diff --git a/spec/consensus/consensus-paper/homodel.sty b/spec/consensus/consensus-paper/homodel.sty
new file mode 100644
index 0000000000..19f83e926e
--- /dev/null
+++ b/spec/consensus/consensus-paper/homodel.sty
@@ -0,0 +1,32 @@
+\newcommand{\NC}{\mbox{\it NC}}
+\newcommand{\HO}{\mbox{\it HO}}
+\newcommand{\AS}{\mbox{\it AS}}
+\newcommand{\SK}{\mbox{\it SK}}
+\newcommand{\SHO}{\mbox{\it SHO}}
+\newcommand{\AHO}{\mbox{\it AHO}}
+\newcommand{\CONS}{\mbox{\it CONS}}
+\newcommand{\K}{\mbox{\it K}}
+
+\newcommand{\Alg}{\mathcal{A}}
+\newcommand{\Pred}{\mathcal{P}}
+\newcommand{\Spr}{S_p^r}
+\newcommand{\Tpr}{T_p^r}
+\newcommand{\mupr}{\vec{\mu}_p^{\,r}}
+
+\newcommand{\MSpr}{S_p^{\rho}}
+\newcommand{\MTpr}{T_p^{\rho}}
+
+
+
+\newconstruct{\SEND}{$\Spr$:}{}{\ENDSEND}{}
+\newconstruct{\TRAN}{$\Tpr$:}{}{\ENDTRAN}{}
+\newconstruct{\ROUND}{\textbf{Round}}{\!\textbf{:}}{\ENDROUND}{}
+\newconstruct{\VARIABLES}{\textbf{Variables:}}{}{\ENDVARIABLES}{}
+\newconstruct{\INIT}{\textbf{Initialization:}}{}{\ENDINIT}{}
+
+\newconstruct{\MSEND}{$\MSpr$:}{}{\ENDMSEND}{}
+\newconstruct{\MTRAN}{$\MTpr$:}{}{\ENDMTRAN}{}
+
+\newconstruct{\SROUND}{\textbf{Selection Round}}{\!\textbf{:}}{\ENDSROUND}{}
+\newconstruct{\VROUND}{\textbf{Validation Round}}{\!\textbf{:}}{\ENDVROUND}{}
+\newconstruct{\DROUND}{\textbf{Decision Round}}{\!\textbf{:}}{\ENDDROUND}{}
diff --git a/spec/consensus/consensus-paper/intro.tex b/spec/consensus/consensus-paper/intro.tex
new file mode 100644
index 0000000000..493b509e91
--- /dev/null
+++ b/spec/consensus/consensus-paper/intro.tex
@@ -0,0 +1,138 @@
+\section{Introduction} \label{sec:intro}
+
+Consensus is a fundamental problem in distributed computing. It
+is important because of its role in State Machine Replication (SMR), a generic
+approach for replicating services that can be modeled as a deterministic state
+machine~\cite{Lam78:cacm, Sch90:survey}. The key idea of this approach is that
+service replicas start in the same initial state and then execute requests
+(also called transactions) in the same order, thereby guaranteeing that
+replicas stay in sync with each other. The role of consensus in the SMR
+approach is ensuring that all replicas receive transactions in the same order.
+Traditionally, deployments of SMR-based systems are in data-center settings
+(local area network), have a small number of replicas (three to seven), and are
+typically part of a single administrative domain (e.g., Chubby
+\cite{Bur:osdi06}); therefore they handle benign (crash) failures only, as more
+general forms of failure (in particular, malicious or Byzantine faults) are
+considered to occur with only negligible probability.
+
+The success of cryptocurrencies and blockchain systems in recent years (e.g.,
+\cite{Nak2012:bitcoin, But2014:ethereum}) poses a whole new set of challenges
+for the design and deployment of SMR-based systems: reaching agreement over a
+wide area network, among a large number of nodes (hundreds or thousands) that
+are not part of the same administrative domain, and where a subset of nodes
+can behave maliciously (Byzantine faults). Furthermore, contrary to the
+previous data-center deployments where nodes are fully connected to each
+other, in blockchain systems a node is only connected to a subset of other
+nodes, so communication is achieved by gossip-based peer-to-peer protocols.
+The new requirements demand designs and algorithms that are not necessarily
+present in the classical academic literature on Byzantine fault-tolerant
+consensus (or SMR) systems (e.g., \cite{DLS88:jacm, CL02:tcs}), as their
+primary focus was a different setup.
+
+In this paper we describe a novel Byzantine fault-tolerant consensus algorithm
+that is the core of the BFT SMR platform called Tendermint\footnote{The
+  Tendermint platform is available open source at
+  https://github.com/tendermint/tendermint.}. The Tendermint platform consists of
+a high-performance BFT SMR implementation written in Go, a flexible interface
+for building arbitrary deterministic applications above the consensus, and a
+suite of tools for deployment and management.
+
+The Tendermint consensus algorithm is inspired by the PBFT SMR
+algorithm~\cite{CL99:osdi} and the DLS algorithm for authenticated faults
+(Algorithm 2 from \cite{DLS88:jacm}). Similar to the DLS algorithm, Tendermint
+proceeds in rounds\footnote{Tendermint is not presented in the basic round
+  model of \cite{DLS88:jacm}. Furthermore, we use the term round differently
+  than in \cite{DLS88:jacm}; in Tendermint a round denotes a sequence of
+  communication steps instead of a single communication step as in
+  \cite{DLS88:jacm}.}, where each round has a dedicated proposer (also called
+coordinator or leader), and a process proceeds to a new round as part of
+normal processing (not only in case the proposer is faulty or suspected of
+being faulty by enough processes, as in PBFT).
+The communication pattern of each round is very similar to the "normal" case
+of PBFT. Therefore, under favorable conditions (correct proposer, timely and
+reliable communication between correct processes), Tendermint decides in three
+communication steps (the same as PBFT).
+
+The major novelty and contribution of the Tendermint consensus algorithm is a
+new termination mechanism. As explained in \cite{MHS09:opodis, RMS10:dsn}, the
+existing BFT consensus (and SMR) algorithms for the partially synchronous
+system model (for example PBFT~\cite{CL99:osdi}, \cite{DLS88:jacm},
+\cite{MA06:tdsc}) typically rely on the communication pattern illustrated in
+Figure~\ref{ch3:fig:coordinator-change} for termination.
+Figure~\ref{ch3:fig:coordinator-change} illustrates the messages exchanged
+during a proposer change, when processes start a new round\footnote{There is
+  no consistent terminology in the distributed computing literature for the
+  sequence of communication steps that corresponds to a logical unit. It is
+  sometimes called a round, a phase, or a view.}. It guarantees that
+eventually (i.e., after some Global Stabilization Time, GST), there exists a
+round with a correct proposer that will bring the system into a univalent
+configuration. Intuitively, in a round in which the proposed value is accepted
+by all correct processes, and communication between correct processes is
+timely and reliable, all correct processes decide.
+
+
+\begin{figure}[tbh!] \def\rdstretch{5} \def\ystretch{3} \centering
+  \begin{rounddiag}{4}{2} \round{1}{~} \rdmessage{1}{1}{$v_1$}
+  \rdmessage{2}{1}{$v_2$} \rdmessage{3}{1}{$v_3$} \rdmessage{4}{1}{$v_4$}
+  \round{2}{~} \rdmessage{1}{1}{$x, [v_{1..4}]$}
+  \rdmessage{1}{2}{$~~~~~~x, [v_{1..4}]$} \rdmessage{1}{3}{$~~~~~~~~x,
+  [v_{1..4}]$} \rdmessage{1}{4}{$~~~~~~~x, [v_{1..4}]$} \end{rounddiag}
+  \vspace{-5mm} \caption{\boldmath Proposer (coordinator) change: $p_1$ is the
+  new proposer.} \label{ch3:fig:coordinator-change} \end{figure}
+
+To ensure that a proposed value is accepted by all correct
+processes\footnote{The proposed value is not blindly accepted by correct
+  processes in BFT algorithms. A correct process always verifies whether the
+  proposed value is safe to accept, so that the safety properties of consensus
+  are not violated.}, a proposer will 1) build the global state by receiving
+messages from other processes, 2) select a safe value to propose, and 3) send
+the selected value together with the signed messages received in the first
+step that support it. The value $v_i$ that a correct process sends to the next
+proposer normally corresponds to a value the process considers acceptable for
+a decision:
+
+\begin{itemize} \item in PBFT~\cite{CL99:osdi} and DLS~\cite{DLS88:jacm} it is
+  not the value itself but a set of $2f+1$ signed messages with the same
+  value id, \item in Fast Byzantine Paxos~\cite{MA06:tdsc} the value
+  itself is sent. \end{itemize}
+
+In both cases, using this mechanism in our system model (i.e., a large number
+of nodes communicating over a gossip-based network) would incur high
+communication complexity that increases with the number of processes: in the
+first case because the message sent depends on the total number of processes,
+and in the second case because the value (a block of transactions) is sent by
+each process. The set of messages received in the first step is normally
+piggybacked on the proposal message (denoted $[v_{1..4}]$ in
+Figure~\ref{ch3:fig:coordinator-change}) to justify the choice of the selected
+value $x$. Note that sending this message also does not scale with the number
+of processes in the system.
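To make the complexity argument concrete, the following toy Go declarations sketch the classic justified proposal just described; all names here are hypothetical, invented for illustration. With $n = 3f+1$ processes, the piggybacked justification grows linearly in $n$, which is exactly the cost Tendermint's termination mechanism avoids:

```go
package classic

// SignedValue is one process's report v_i together with its signature,
// as gathered by the new proposer in the first communication step.
type SignedValue struct {
	Value []byte
	From  int
	Sig   []byte
}

// Proposal is the message the new proposer broadcasts: the selected
// safe value x plus the signed reports [v_1..4] that justify the
// choice. Its size is linear in the number of processes, since
// Justification must carry 2f+1 = O(n) signed messages.
type Proposal struct {
	Value         []byte
	Justification []SignedValue
}
```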
+We designed a novel termination mechanism for Tendermint that better suits the
+system model we consider. It does not require additional communication
+(neither sending new messages nor piggybacking information on the existing
+messages), and it is fully based on a communication pattern that is very
+similar to the normal case of PBFT \cite{CL99:osdi}. Therefore, there is only
+a single mode of execution in Tendermint, i.e., there is no separation between
+the normal and the recovery mode, as there is in other PBFT-like protocols
+(e.g., \cite{CL99:osdi}, \cite{Ver09:spinning} or \cite{Cle09:aardvark}). We
+believe this makes Tendermint simpler to understand and implement correctly.
+
+Note that an orthogonal approach to reducing message complexity, in order to
+improve the scalability and decentralization (number of processes) of BFT
+consensus algorithms, is the use of advanced cryptography (for example,
+Boneh-Lynn-Shacham (BLS) signatures \cite{BLS2001:crypto}), as done for
+example in SBFT \cite{Gue2018:sbft}.
+
+The remainder of the paper is organized as follows:
+Section~\ref{sec:definitions} defines the system model and gives the problem
+definitions. The Tendermint consensus algorithm is presented in
+Section~\ref{sec:tendermint}, and the proofs are given in
+Section~\ref{sec:proof}. We conclude in Section~\ref{sec:conclusion}.
+ + + + diff --git a/spec/consensus/consensus-paper/latex8.bst b/spec/consensus/consensus-paper/latex8.bst new file mode 100644 index 0000000000..2c7af56479 --- /dev/null +++ b/spec/consensus/consensus-paper/latex8.bst @@ -0,0 +1,1124 @@ + +% --------------------------------------------------------------- +% +% $Id: latex8.bst,v 1.1 1995/09/15 15:13:49 ienne Exp $ +% +% by Paolo.Ienne@di.epfl.ch +% + +% --------------------------------------------------------------- +% +% no guarantee is given that the format corresponds perfectly to +% IEEE 8.5" x 11" Proceedings, but most features should be ok. +% +% --------------------------------------------------------------- +% +% `latex8' from BibTeX standard bibliography style `abbrv' +% version 0.99a for BibTeX versions 0.99a or later, LaTeX version 2.09. +% Copyright (C) 1985, all rights reserved. +% Copying of this file is authorized only if either +% (1) you make absolutely no changes to your copy, including name, or +% (2) if you do make changes, you name it something other than +% btxbst.doc, plain.bst, unsrt.bst, alpha.bst, and abbrv.bst. +% This restriction helps ensure that all standard styles are identical. +% The file btxbst.doc has the documentation for this style. + +ENTRY + { address + author + booktitle + chapter + edition + editor + howpublished + institution + journal + key + month + note + number + organization + pages + publisher + school + series + title + type + volume + year + } + {} + { label } + +INTEGERS { output.state before.all mid.sentence after.sentence after.block } + +FUNCTION {init.state.consts} +{ #0 'before.all := + #1 'mid.sentence := + #2 'after.sentence := + #3 'after.block := +} + +STRINGS { s t } + +FUNCTION {output.nonnull} +{ 's := + output.state mid.sentence = + { ", " * write$ } + { output.state after.block = + { add.period$ write$ + newline$ + "\newblock " write$ + } + { output.state before.all = + 'write$ + { add.period$ " " * write$ } + if$ + } + if$ + mid.sentence 'output.state := + } + if$ + s +} + +FUNCTION {output} +{ duplicate$ empty$ + 'pop$ + 'output.nonnull + if$ +} + +FUNCTION {output.check} +{ 't := + duplicate$ empty$ + { pop$ "empty " t * " in " * cite$ * warning$ } + 'output.nonnull + if$ +} + +FUNCTION {output.bibitem} +{ newline$ + "\bibitem{" write$ + cite$ write$ + "}" write$ + newline$ + "" + before.all 'output.state := +} + +FUNCTION {fin.entry} +{ add.period$ + write$ + newline$ +} + +FUNCTION {new.block} +{ output.state before.all = + 'skip$ + { after.block 'output.state := } + if$ +} + +FUNCTION {new.sentence} +{ output.state after.block = + 'skip$ + { output.state before.all = + 'skip$ + { after.sentence 'output.state := } + if$ + } + if$ +} + +FUNCTION {not} +{ { #0 } + { #1 } + if$ +} + +FUNCTION {and} +{ 'skip$ + { pop$ #0 } + if$ +} + +FUNCTION {or} +{ { pop$ #1 } + 'skip$ + if$ +} + +FUNCTION {new.block.checka} +{ empty$ + 'skip$ + 'new.block + if$ +} + +FUNCTION {new.block.checkb} +{ empty$ + swap$ empty$ + and + 'skip$ + 'new.block + if$ +} + +FUNCTION {new.sentence.checka} +{ empty$ + 'skip$ + 'new.sentence + if$ +} + +FUNCTION {new.sentence.checkb} +{ empty$ + swap$ empty$ + and + 'skip$ + 'new.sentence + if$ +} + +FUNCTION {field.or.null} +{ duplicate$ empty$ + { pop$ "" } + 'skip$ + if$ +} + +FUNCTION {emphasize} +{ duplicate$ empty$ + { pop$ "" } + { "{\em " swap$ * "}" * } + if$ +} + +INTEGERS { nameptr namesleft numnames } + +FUNCTION {format.names} +{ 's := + #1 'nameptr := + s num.names$ 'numnames := + numnames 'namesleft := + { namesleft #0 > } + { s nameptr 
"{f.~}{vv~}{ll}{, jj}" format.name$ 't := + nameptr #1 > + { namesleft #1 > + { ", " * t * } + { numnames #2 > + { "," * } + 'skip$ + if$ + t "others" = + { " et~al." * } + { " and " * t * } + if$ + } + if$ + } + 't + if$ + nameptr #1 + 'nameptr := + + namesleft #1 - 'namesleft := + } + while$ +} + +FUNCTION {format.authors} +{ author empty$ + { "" } + { author format.names } + if$ +} + +FUNCTION {format.editors} +{ editor empty$ + { "" } + { editor format.names + editor num.names$ #1 > + { ", editors" * } + { ", editor" * } + if$ + } + if$ +} + +FUNCTION {format.title} +{ title empty$ + { "" } + { title "t" change.case$ } + if$ +} + +FUNCTION {n.dashify} +{ 't := + "" + { t empty$ not } + { t #1 #1 substring$ "-" = + { t #1 #2 substring$ "--" = not + { "--" * + t #2 global.max$ substring$ 't := + } + { { t #1 #1 substring$ "-" = } + { "-" * + t #2 global.max$ substring$ 't := + } + while$ + } + if$ + } + { t #1 #1 substring$ * + t #2 global.max$ substring$ 't := + } + if$ + } + while$ +} + +FUNCTION {format.date} +{ year empty$ + { month empty$ + { "" } + { "there's a month but no year in " cite$ * warning$ + month + } + if$ + } + { month empty$ + 'year + { month " " * year * } + if$ + } + if$ +} + +FUNCTION {format.btitle} +{ title emphasize +} + +FUNCTION {tie.or.space.connect} +{ duplicate$ text.length$ #3 < + { "~" } + { " " } + if$ + swap$ * * +} + +FUNCTION {either.or.check} +{ empty$ + 'pop$ + { "can't use both " swap$ * " fields in " * cite$ * warning$ } + if$ +} + +FUNCTION {format.bvolume} +{ volume empty$ + { "" } + { "volume" volume tie.or.space.connect + series empty$ + 'skip$ + { " of " * series emphasize * } + if$ + "volume and number" number either.or.check + } + if$ +} + +FUNCTION {format.number.series} +{ volume empty$ + { number empty$ + { series field.or.null } + { output.state mid.sentence = + { "number" } + { "Number" } + if$ + number tie.or.space.connect + series empty$ + { "there's a number but no series in " cite$ * warning$ } + { " in " * series * } + if$ + } + if$ + } + { "" } + if$ +} + +FUNCTION {format.edition} +{ edition empty$ + { "" } + { output.state mid.sentence = + { edition "l" change.case$ " edition" * } + { edition "t" change.case$ " edition" * } + if$ + } + if$ +} + +INTEGERS { multiresult } + +FUNCTION {multi.page.check} +{ 't := + #0 'multiresult := + { multiresult not + t empty$ not + and + } + { t #1 #1 substring$ + duplicate$ "-" = + swap$ duplicate$ "," = + swap$ "+" = + or or + { #1 'multiresult := } + { t #2 global.max$ substring$ 't := } + if$ + } + while$ + multiresult +} + +FUNCTION {format.pages} +{ pages empty$ + { "" } + { pages multi.page.check + { "pages" pages n.dashify tie.or.space.connect } + { "page" pages tie.or.space.connect } + if$ + } + if$ +} + +FUNCTION {format.vol.num.pages} +{ volume field.or.null + number empty$ + 'skip$ + { "(" number * ")" * * + volume empty$ + { "there's a number but no volume in " cite$ * warning$ } + 'skip$ + if$ + } + if$ + pages empty$ + 'skip$ + { duplicate$ empty$ + { pop$ format.pages } + { ":" * pages n.dashify * } + if$ + } + if$ +} + +FUNCTION {format.chapter.pages} +{ chapter empty$ + 'format.pages + { type empty$ + { "chapter" } + { type "l" change.case$ } + if$ + chapter tie.or.space.connect + pages empty$ + 'skip$ + { ", " * format.pages * } + if$ + } + if$ +} + +FUNCTION {format.in.ed.booktitle} +{ booktitle empty$ + { "" } + { editor empty$ + { "In " booktitle emphasize * } + { "In " format.editors * ", " * booktitle emphasize * } + if$ + } + if$ +} + +FUNCTION {empty.misc.check} + +{ 
author empty$ title empty$ howpublished empty$ + month empty$ year empty$ note empty$ + and and and and and + key empty$ not and + { "all relevant fields are empty in " cite$ * warning$ } + 'skip$ + if$ +} + +FUNCTION {format.thesis.type} +{ type empty$ + 'skip$ + { pop$ + type "t" change.case$ + } + if$ +} + +FUNCTION {format.tr.number} +{ type empty$ + { "Technical Report" } + 'type + if$ + number empty$ + { "t" change.case$ } + { number tie.or.space.connect } + if$ +} + +FUNCTION {format.article.crossref} +{ key empty$ + { journal empty$ + { "need key or journal for " cite$ * " to crossref " * crossref * + warning$ + "" + } + { "In {\em " journal * "\/}" * } + if$ + } + { "In " key * } + if$ + " \cite{" * crossref * "}" * +} + +FUNCTION {format.crossref.editor} +{ editor #1 "{vv~}{ll}" format.name$ + editor num.names$ duplicate$ + #2 > + { pop$ " et~al." * } + { #2 < + 'skip$ + { editor #2 "{ff }{vv }{ll}{ jj}" format.name$ "others" = + { " et~al." * } + { " and " * editor #2 "{vv~}{ll}" format.name$ * } + if$ + } + if$ + } + if$ +} + +FUNCTION {format.book.crossref} +{ volume empty$ + { "empty volume in " cite$ * "'s crossref of " * crossref * warning$ + "In " + } + { "Volume" volume tie.or.space.connect + " of " * + } + if$ + editor empty$ + editor field.or.null author field.or.null = + or + { key empty$ + { series empty$ + { "need editor, key, or series for " cite$ * " to crossref " * + crossref * warning$ + "" * + } + { "{\em " * series * "\/}" * } + if$ + } + { key * } + if$ + } + { format.crossref.editor * } + if$ + " \cite{" * crossref * "}" * +} + +FUNCTION {format.incoll.inproc.crossref} +{ editor empty$ + editor field.or.null author field.or.null = + or + { key empty$ + { booktitle empty$ + { "need editor, key, or booktitle for " cite$ * " to crossref " * + crossref * warning$ + "" + } + { "In {\em " booktitle * "\/}" * } + if$ + } + { "In " key * } + if$ + } + { "In " format.crossref.editor * } + if$ + " \cite{" * crossref * "}" * +} + +FUNCTION {article} +{ output.bibitem + format.authors "author" output.check + new.block + format.title "title" output.check + new.block + crossref missing$ + { journal emphasize "journal" output.check + format.vol.num.pages output + format.date "year" output.check + } + { format.article.crossref output.nonnull + format.pages output + } + if$ + new.block + note output + fin.entry +} + +FUNCTION {book} +{ output.bibitem + author empty$ + { format.editors "author and editor" output.check } + { format.authors output.nonnull + crossref missing$ + { "author and editor" editor either.or.check } + 'skip$ + if$ + } + if$ + new.block + format.btitle "title" output.check + crossref missing$ + { format.bvolume output + new.block + format.number.series output + new.sentence + publisher "publisher" output.check + address output + } + { new.block + format.book.crossref output.nonnull + } + if$ + format.edition output + format.date "year" output.check + new.block + note output + fin.entry +} + +FUNCTION {booklet} +{ output.bibitem + format.authors output + new.block + format.title "title" output.check + howpublished address new.block.checkb + howpublished output + address output + format.date output + new.block + note output + fin.entry +} + +FUNCTION {inbook} +{ output.bibitem + author empty$ + { format.editors "author and editor" output.check } + { format.authors output.nonnull + + crossref missing$ + { "author and editor" editor either.or.check } + 'skip$ + if$ + } + if$ + new.block + format.btitle "title" output.check + crossref missing$ + { 
format.bvolume output + format.chapter.pages "chapter and pages" output.check + new.block + format.number.series output + new.sentence + publisher "publisher" output.check + address output + } + { format.chapter.pages "chapter and pages" output.check + new.block + format.book.crossref output.nonnull + } + if$ + format.edition output + format.date "year" output.check + new.block + note output + fin.entry +} + +FUNCTION {incollection} +{ output.bibitem + format.authors "author" output.check + new.block + format.title "title" output.check + new.block + crossref missing$ + { format.in.ed.booktitle "booktitle" output.check + format.bvolume output + format.number.series output + format.chapter.pages output + new.sentence + publisher "publisher" output.check + address output + format.edition output + format.date "year" output.check + } + { format.incoll.inproc.crossref output.nonnull + format.chapter.pages output + } + if$ + new.block + note output + fin.entry +} + +FUNCTION {inproceedings} +{ output.bibitem + format.authors "author" output.check + new.block + format.title "title" output.check + new.block + crossref missing$ + { format.in.ed.booktitle "booktitle" output.check + format.bvolume output + format.number.series output + format.pages output + address empty$ + { organization publisher new.sentence.checkb + organization output + publisher output + format.date "year" output.check + } + { address output.nonnull + format.date "year" output.check + new.sentence + organization output + publisher output + } + if$ + } + { format.incoll.inproc.crossref output.nonnull + format.pages output + } + if$ + new.block + note output + fin.entry +} + +FUNCTION {conference} { inproceedings } + +FUNCTION {manual} +{ output.bibitem + author empty$ + { organization empty$ + 'skip$ + { organization output.nonnull + address output + } + if$ + } + { format.authors output.nonnull } + if$ + new.block + format.btitle "title" output.check + author empty$ + { organization empty$ + { address new.block.checka + address output + } + 'skip$ + if$ + } + { organization address new.block.checkb + organization output + address output + } + if$ + format.edition output + format.date output + new.block + note output + fin.entry +} + +FUNCTION {mastersthesis} +{ output.bibitem + format.authors "author" output.check + new.block + format.title "title" output.check + new.block + "Master's thesis" format.thesis.type output.nonnull + school "school" output.check + address output + format.date "year" output.check + new.block + note output + fin.entry +} + +FUNCTION {misc} +{ output.bibitem + format.authors output + title howpublished new.block.checkb + format.title output + howpublished new.block.checka + howpublished output + format.date output + new.block + note output + fin.entry + empty.misc.check +} + +FUNCTION {phdthesis} +{ output.bibitem + format.authors "author" output.check + new.block + format.btitle "title" output.check + new.block + "PhD thesis" format.thesis.type output.nonnull + school "school" output.check + address output + format.date "year" output.check + new.block + note output + fin.entry +} + +FUNCTION {proceedings} +{ output.bibitem + editor empty$ + { organization output } + { format.editors output.nonnull } + + if$ + new.block + format.btitle "title" output.check + format.bvolume output + format.number.series output + address empty$ + { editor empty$ + { publisher new.sentence.checka } + { organization publisher new.sentence.checkb + organization output + } + if$ + publisher output + format.date "year" 
output.check + } + { address output.nonnull + format.date "year" output.check + new.sentence + editor empty$ + 'skip$ + { organization output } + if$ + publisher output + } + if$ + new.block + note output + fin.entry +} + +FUNCTION {techreport} +{ output.bibitem + format.authors "author" output.check + new.block + format.title "title" output.check + new.block + format.tr.number output.nonnull + institution "institution" output.check + address output + format.date "year" output.check + new.block + note output + fin.entry +} + +FUNCTION {unpublished} +{ output.bibitem + format.authors "author" output.check + new.block + format.title "title" output.check + new.block + note "note" output.check + format.date output + fin.entry +} + +FUNCTION {default.type} { misc } + +MACRO {jan} {"Jan."} + +MACRO {feb} {"Feb."} + +MACRO {mar} {"Mar."} + +MACRO {apr} {"Apr."} + +MACRO {may} {"May"} + +MACRO {jun} {"June"} + +MACRO {jul} {"July"} + +MACRO {aug} {"Aug."} + +MACRO {sep} {"Sept."} + +MACRO {oct} {"Oct."} + +MACRO {nov} {"Nov."} + +MACRO {dec} {"Dec."} + +MACRO {acmcs} {"ACM Comput. Surv."} + +MACRO {acta} {"Acta Inf."} + +MACRO {cacm} {"Commun. ACM"} + +MACRO {ibmjrd} {"IBM J. Res. Dev."} + +MACRO {ibmsj} {"IBM Syst.~J."} + +MACRO {ieeese} {"IEEE Trans. Softw. Eng."} + +MACRO {ieeetc} {"IEEE Trans. Comput."} + +MACRO {ieeetcad} + {"IEEE Trans. Comput.-Aided Design Integrated Circuits"} + +MACRO {ipl} {"Inf. Process. Lett."} + +MACRO {jacm} {"J.~ACM"} + +MACRO {jcss} {"J.~Comput. Syst. Sci."} + +MACRO {scp} {"Sci. Comput. Programming"} + +MACRO {sicomp} {"SIAM J. Comput."} + +MACRO {tocs} {"ACM Trans. Comput. Syst."} + +MACRO {tods} {"ACM Trans. Database Syst."} + +MACRO {tog} {"ACM Trans. Gr."} + +MACRO {toms} {"ACM Trans. Math. Softw."} + +MACRO {toois} {"ACM Trans. Office Inf. Syst."} + +MACRO {toplas} {"ACM Trans. Prog. Lang. Syst."} + +MACRO {tcs} {"Theoretical Comput. 
Sci."} + +READ + +FUNCTION {sortify} +{ purify$ + "l" change.case$ +} + +INTEGERS { len } + +FUNCTION {chop.word} +{ 's := + 'len := + s #1 len substring$ = + { s len #1 + global.max$ substring$ } + 's + if$ +} + +FUNCTION {sort.format.names} +{ 's := + #1 'nameptr := + "" + s num.names$ 'numnames := + numnames 'namesleft := + { namesleft #0 > } + { nameptr #1 > + { " " * } + 'skip$ + if$ + s nameptr "{vv{ } }{ll{ }}{ f{ }}{ jj{ }}" format.name$ 't := + nameptr numnames = t "others" = and + { "et al" * } + { t sortify * } + if$ + nameptr #1 + 'nameptr := + namesleft #1 - 'namesleft := + } + while$ +} + +FUNCTION {sort.format.title} +{ 't := + "A " #2 + "An " #3 + "The " #4 t chop.word + chop.word + chop.word + sortify + #1 global.max$ substring$ +} + +FUNCTION {author.sort} +{ author empty$ + { key empty$ + { "to sort, need author or key in " cite$ * warning$ + "" + } + { key sortify } + if$ + } + { author sort.format.names } + if$ +} + +FUNCTION {author.editor.sort} +{ author empty$ + { editor empty$ + { key empty$ + { "to sort, need author, editor, or key in " cite$ * warning$ + "" + } + { key sortify } + if$ + } + { editor sort.format.names } + if$ + } + { author sort.format.names } + if$ +} + +FUNCTION {author.organization.sort} +{ author empty$ + + { organization empty$ + { key empty$ + { "to sort, need author, organization, or key in " cite$ * warning$ + "" + } + { key sortify } + if$ + } + { "The " #4 organization chop.word sortify } + if$ + } + { author sort.format.names } + if$ +} + +FUNCTION {editor.organization.sort} +{ editor empty$ + { organization empty$ + { key empty$ + { "to sort, need editor, organization, or key in " cite$ * warning$ + "" + } + { key sortify } + if$ + } + { "The " #4 organization chop.word sortify } + if$ + } + { editor sort.format.names } + if$ +} + +FUNCTION {presort} +{ type$ "book" = + type$ "inbook" = + or + 'author.editor.sort + { type$ "proceedings" = + 'editor.organization.sort + { type$ "manual" = + 'author.organization.sort + 'author.sort + if$ + } + if$ + } + if$ + " " + * + year field.or.null sortify + * + " " + * + title field.or.null + sort.format.title + * + #1 entry.max$ substring$ + 'sort.key$ := +} + +ITERATE {presort} + +SORT + +STRINGS { longest.label } + +INTEGERS { number.label longest.label.width } + +FUNCTION {initialize.longest.label} +{ "" 'longest.label := + #1 'number.label := + #0 'longest.label.width := +} + +FUNCTION {longest.label.pass} +{ number.label int.to.str$ 'label := + number.label #1 + 'number.label := + label width$ longest.label.width > + { label 'longest.label := + label width$ 'longest.label.width := + } + 'skip$ + if$ +} + +EXECUTE {initialize.longest.label} + +ITERATE {longest.label.pass} + +FUNCTION {begin.bib} +{ preamble$ empty$ + 'skip$ + { preamble$ write$ newline$ } + if$ + "\begin{thebibliography}{" longest.label * + "}\setlength{\itemsep}{-1ex}\small" * write$ newline$ +} + +EXECUTE {begin.bib} + +EXECUTE {init.state.consts} + +ITERATE {call.type$} + +FUNCTION {end.bib} +{ newline$ + "\end{thebibliography}" write$ newline$ +} + +EXECUTE {end.bib} + +% end of file latex8.bst +% --------------------------------------------------------------- + + + diff --git a/spec/consensus/consensus-paper/latex8.sty b/spec/consensus/consensus-paper/latex8.sty new file mode 100644 index 0000000000..1e6b0dc7e6 --- /dev/null +++ b/spec/consensus/consensus-paper/latex8.sty @@ -0,0 +1,168 @@ +% --------------------------------------------------------------- +% +% $Id: latex8.sty,v 1.2 1995/09/15 15:31:13 ienne Exp $ +% +% 
by Paolo.Ienne@di.epfl.ch +% +% --------------------------------------------------------------- +% +% no guarantee is given that the format corresponds perfectly to +% IEEE 8.5" x 11" Proceedings, but most features should be ok. +% +% --------------------------------------------------------------- +% with LaTeX2e: +% ============= +% +% use as +% \documentclass[times,10pt,twocolumn]{article} +% \usepackage{latex8} +% \usepackage{times} +% +% --------------------------------------------------------------- + +% with LaTeX 2.09: +% ================ +% +% use as +% \documentstyle[times,art10,twocolumn,latex8]{article} +% +% --------------------------------------------------------------- +% with both versions: +% =================== +% +% specify \pagestyle{empty} to omit page numbers in the final +% version +% +% specify references as +% \bibliographystyle{latex8} +% \bibliography{...your files...} +% +% use Section{} and SubSection{} instead of standard section{} +% and subsection{} to obtain headings in the form +% "1.3. My heading" +% +% --------------------------------------------------------------- + +\typeout{IEEE 8.5 x 11-Inch Proceedings Style `latex8.sty'.} + +% ten point helvetica bold required for captions +% in some sites the name of the helvetica bold font may differ, +% change the name here: +\font\tenhv = phvb at 10pt +%\font\tenhv = phvb7t at 10pt + +% eleven point times bold required for second-order headings +% \font\elvbf = cmbx10 scaled 1100 +\font\elvbf = ptmb scaled 1100 + +% set dimensions of columns, gap between columns, and paragraph indent +\setlength{\textheight}{8.875in} +\setlength{\textwidth}{6.875in} +\setlength{\columnsep}{0.3125in} +\setlength{\topmargin}{0in} +\setlength{\headheight}{0in} +\setlength{\headsep}{0in} +\setlength{\parindent}{1pc} +\setlength{\oddsidemargin}{-.304in} +\setlength{\evensidemargin}{-.304in} + +% memento from size10.clo +% \normalsize{\@setfontsize\normalsize\@xpt\@xiipt} +% \small{\@setfontsize\small\@ixpt{11}} +% \footnotesize{\@setfontsize\footnotesize\@viiipt{9.5}} +% \scriptsize{\@setfontsize\scriptsize\@viipt\@viiipt} +% \tiny{\@setfontsize\tiny\@vpt\@vipt} +% \large{\@setfontsize\large\@xiipt{14}} +% \Large{\@setfontsize\Large\@xivpt{18}} +% \LARGE{\@setfontsize\LARGE\@xviipt{22}} +% \huge{\@setfontsize\huge\@xxpt{25}} +% \Huge{\@setfontsize\Huge\@xxvpt{30}} + +\def\@maketitle + { + \newpage + \null + \vskip .375in + \begin{center} + {\Large \bf \@title \par} + % additional two empty lines at the end of the title + \vspace*{24pt} + { + \large + \lineskip .5em + \begin{tabular}[t]{c} + \@author + \end{tabular} + \par + } + % additional small space at the end of the author name + \vskip .5em + { + \large + \begin{tabular}[t]{c} + \@affiliation + \end{tabular} + \par + \ifx \@empty \@email + \else + \begin{tabular}{r@{~}l} + E-mail: & {\tt \@email} + \end{tabular} + \par + \fi + } + % additional empty line at the end of the title block + \vspace*{12pt} + \end{center} + } + +\def\abstract + {% + \centerline{\large\bf Abstract}% + \vspace*{12pt}% + \it% + } + +\def\endabstract + { + % additional empty line at the end of the abstract + \vspace*{12pt} + } + +\def\affiliation#1{\gdef\@affiliation{#1}} \gdef\@affiliation{} + +\def\email#1{\gdef\@email{#1}} +\gdef\@email{} + +\newlength{\@ctmp} +\newlength{\@figindent} +\setlength{\@figindent}{1pc} + +\long\def\@makecaption#1#2{ + \vskip 10pt + \setbox\@tempboxa\hbox{\tenhv\noindent #1.~#2} + \setlength{\@ctmp}{\hsize} + 
\addtolength{\@ctmp}{-\@figindent}\addtolength{\@ctmp}{-\@figindent} + % IF longer than one indented paragraph line + \ifdim \wd\@tempboxa >\@ctmp + % THEN set as an indented paragraph + \begin{list}{}{\leftmargin\@figindent \rightmargin\leftmargin} + \item[]\tenhv #1.~#2\par + \end{list} + \else + % ELSE center + \hbox to\hsize{\hfil\box\@tempboxa\hfil} + \fi} + +% correct heading spacing and type +\def\section{\@startsection {section}{1}{\z@} + {14pt plus 2pt minus 2pt}{14pt plus 2pt minus 2pt} {\large\bf}} +\def\subsection{\@startsection {subsection}{2}{\z@} + {13pt plus 2pt minus 2pt}{13pt plus 2pt minus 2pt} {\elvbf}} + +% add the period after section numbers +\newcommand{\Section}[1]{\section{\hskip -1em.~#1}} +\newcommand{\SubSection}[1]{\subsection{\hskip -1em.~#1}} + +% end of file latex8.sty +% --------------------------------------------------------------- diff --git a/spec/consensus/consensus-paper/lit.bib b/spec/consensus/consensus-paper/lit.bib new file mode 100644 index 0000000000..4abc83e70c --- /dev/null +++ b/spec/consensus/consensus-paper/lit.bib @@ -0,0 +1,1659 @@ +%--- conferences -------------------------------------------------- +@STRING{WDAG96 = "Proceedings of the 10th International Workshop + on Distributed Algorithms (WDAG'96)"} +@STRING{WDAG97 = "Proceedings of the 11th International Workshop + on Distributed Algorithms (WDAG'97)"} +@STRING{DISC98 = "Proceedings of the 12th International Conference + on Distributed Computing ({DISC}'98)"} +@STRING{DISC99 = "Proceedings of the 13th International Conference + on Distributed Computing ({DISC}'99)"} +@STRING{DISC98 = "Proceedings of the 13th International Conference + on Distributed Computing ({DISC}'98)"} +@STRING{DISC99 = "Proceedings of the 13th International Conference + on Distributed Computing ({DISC}'99)"} +@STRING{DISC00 = "Proceedings of the 14th International Conference + on Distributed Computing ({DISC}'00)"} +@STRING{DISC01 = "Proceedings of the 15th International Conference + on Distributed Computing ({DISC}'01)"} +@STRING{DISC02 = "Proceedings of the 16th International Conference + on Distributed Computing ({DISC}'02)"} +@STRING{DISC03 = "Proceedings of the 17th International Conference + on Distributed Computing ({DISC}'03)"} +@STRING{DISC04 = "Proceedings of the 18th International Conference + on Distributed Computing ({DISC}'04)"} +@STRING{DISC05 = "Proceedings of the 19th International Conference + on Distributed Computing ({DISC}'05)"} +@STRING{PODC83 = "Proceeding of the 1st Annual {ACM} Symposium on + Principles of Distributed Computing ({PODC}'83)"} +@STRING{PODC91 = "Proceeding of the 9th Annual {ACM} Symposium on + Principles of Distributed Computing ({PODC}'91)"} +@STRING{PODC94 = "Proceeding of the 12th Annual {ACM} Symposium on + Principles of Distributed Computing ({PODC}'94)"} +@STRING{PODC95 = "Proceeding of the 13th Annual {ACM} Symposium on + Principles of Distributed Computing ({PODC}'95)"} +@STRING{PODC96 = "Proceeding of the 14th Annual {ACM} Symposium on + Principles of Distributed Computing ({PODC}'96)"} +@STRING{PODC97 = "Proceeding of the 15th Annual {ACM} Symposium on + Principles of Distributed Computing ({PODC}'97)"} +@STRING{PODC98 = "Proceeding of the 16th Annual {ACM} Symposium on + Principles of Distributed Computing ({PODC}'98)"} +@STRING{PODC99 = "Proceeding of the 17th Annual {ACM} Symposium on + Principles of Distributed Computing ({PODC}'99)"} +@STRING{PODC00 = "Proceeding of the 18th Annual {ACM} Symposium on + Principles of Distributed Computing ({PODC}'00)"} 
+@STRING{PODC01 = "Proceeding of the 19th Annual {ACM} Symposium on + Principles of Distributed Computing ({PODC}'01)"} +@STRING{PODC02 = "Proceeding of the 20th Annual {ACM} Symposium on + Principles of Distributed Computing ({PODC}'02)"} +@STRING{PODC03 = "Proceeding of the 21st Annual {ACM} Symposium on + Principles of Distributed Computing ({PODC}'03)"} +@STRING{PODC03 = "Proceeding of the 22nd Annual {ACM} Symposium on + Principles of Distributed Computing ({PODC}'03)"} +@STRING{PODC04 = "Proceeding of the 23rd Annual {ACM} Symposium on + Principles of Distributed Computing ({PODC}'04)"} +@STRING{PODC05 = "Proceeding of the 24th Annual {ACM} Symposium on + Principles of Distributed Computing ({PODC}'05)"} +@STRING{PODC06 = "Proceedings of the 25th Annual {ACM} Symposium on + Principles of Distributed Computing ({PODC}'06)"} +@STRING{PODC07 = "Proceedings of the 26th Annual {ACM} Symposium on + Principles of Distributed Computing ({PODC}'07)"} +@STRING{STOC91 = "Proceedings of the 23rd Annual {ACM} Symposium on + Theory of Computing ({STOC}'91)"} +@STRING{WSS01 = "Proceedings of the 5th International Workshop on + Self-Stabilizing Systems ({WSS} '01)"} +@STRING{SSS06 = "Proceedings of the 8th International Symposium on + Stabilization, Safety, and Security of Distributed + Systems ({SSS} '06)"} +@STRING{DSN00 = "Dependable Systems and Networks ({DSN} 2000)"} +@STRING{DSN05 = "Dependable Systems and Networks ({DSN} 2005)"} +@STRING{DSN06 = "Dependable Systems and Networks ({DSN} 2006)"} +@STRING{DSN07 = "Dependable Systems and Networks ({DSN} 2007)"} + +%--- journals ----------------------------------------------------- +@STRING{PPL = "Parallel Processing Letters"} +@STRING{IPL = "Information Processing Letters"} +@STRING{DC = "Distributed Computing"} +@STRING{JACM = "Journal of the ACM"} +@STRING{IC = "Information and Control"} +@STRING{TCS = "Theoretical Computer Science"} +@STRING{ACMTCS = "ACM Transactions on Computer Systems"} +@STRING{TDSC = "Transactions on Dependable and Secure Computing"} +@STRING{TPLS = "ACM Trans. Program. Lang. 
Syst."} + +%--- publisher ---------------------------------------------------- +@STRING{ACM = "ACM Press"} +@STRING{IEEE = "IEEE"} +@STRING{SPR = "Springer-Verlag"} + +%--- institution -------------------------------------------------- +@STRING{TUAuto = {Technische Universit\"at Wien, Department of + Automation}} +@STRING{TUECS = {Technische Universit\"at Wien, Embedded Computing + Systems Group}} + + +%------------------------------------------------------------------ +@article{ABND+90:jacm, + author = {Hagit Attiya and Amotz Bar-Noy and Danny Dolev and + David Peleg and R{\"u}diger Reischuk}, + title = {Renaming in an asynchronous environment}, + journal = JACM, + volume = {37}, + number = {3}, + year = {1990}, + pages = {524--548}, + publisher = ACM, + address = {New York, NY, USA}, +} + +@article{ABND95:jacm, + author = {Hagit Attiya and Amotz Bar-Noy and Danny Dolev}, + title = {Sharing memory robustly in message-passing systems}, + journal = JACM, + volume = {42}, + number = {1}, + year = {1995}, + pages = {124--142}, + publisher = ACM, + address = {New York, NY, USA}, +} + +@inproceedings{ACKM04:podc, + author = {Ittai Abraham and Gregory Chockler and Idit Keidar + and Dahlia Malkhi}, + title = {Byzantine disk paxos: optimal resilience with + byzantine shared memory.}, + booktitle = PODC04, + year = {2004}, + pages = {226-235} +} + +@article{ACKM05:dc, + author = {Ittai Abraham and Gregory Chockler and Idit Keidar + and Dahlia Malkhi}, + title = {Byzantine disk paxos: optimal resilience with + byzantine shared memory.}, + journal = DC, + volume = {18}, + number = {5}, + year = {2006}, + pages = {387-408} +} + +@article{ACT00:dc, + author = "Marcos Kawazoe Aguilera and Wei Chen and Sam Toueg", + title = "Failure Detection and Consensus in the + Crash-Recovery Model", + journal = DC, + year = 2000, + month = apr, + volume = 13, + number = 2, + pages = "99--125", + url = + "http://www.cs.cornell.edu/home/sam/FDpapers/crash-recovery-finaldcversion.ps" +} + +@article{ACT00:siam, + author = "Marcos Kawazoe Aguilera and Wei Chen and Sam Toueg", + title = "On quiescent reliable communication", + journal = "SIAM Journal of Computing", + year = 2000, + volume = 29, + number = 6, + pages = "2040--2073", + month = apr +} + +@inproceedings{ACT97:wdag, + author = "Marcos Kawazoe Aguilera and Wei Chen and Sam Toueg", + title = "Heartbeat: A Timeout-Free Failure Detector for + Quiescent Reliable Communication", + booktitle = WDAG97, + year = 1997, + pages = "126--140", + url = + "http://simon.cs.cornell.edu/Info/People/weichen/research/mypapers/wdag97final.ps" +} + +@article{ACT98:disc, + author = "Marcos Kawazoe Aguilera and Wei Chen and Sam Toueg", + title = "Failure Detection and Consensus in the + Crash-Recovery Model", + journal = DISC98, + year = 1998, + pages = "231--245", + publisher = SPR +} + +@article{ACT99:tcs, + author = "Marcos Kawazoe Aguilera and Wei Chen and Sam Toueg", + title = "Using the Heartbeat Failure Detector for Quiescent + Reliable Communication and Consensus in + Partitionable Networks", + journal = "Theoretical Computer Science", + year = 1999, + month = jun, + volume = 220, + number = 1, + pages = "3--30", + url = + "http://www.cs.cornell.edu/home/sam/FDpapers/TCS98final.ps" +} + +@inproceedings{ADGF+04:ispdc, + author = {Anceaume, Emmanuelle and Delporte-Gallet, Carole and + Fauconnier, Hugues and Hurfin, Michel and Le Lann, + G{\'e}rard }, + title = {Designing Modular Services in the Scattered + Byzantine Failure Model.}, + booktitle = {ISPDC/HeteroPar}, + year = 
{2004}, + pages = {262-269} +} + +@inproceedings{ADGF+06:dsn, + author = {Marcos Kawazoe Aguilera and Carole Delporte-Gallet + and Hugues Fauconnier and Sam Toueg}, + title = {Consensus with Byzantine Failures and Little System + Synchrony.}, + booktitle = DSN06, + year = {2006}, + pages = {147-155} +} + +@inproceedings{ADGFT01:disc, + author = "Marcos Kawazoe Aguilera and Carole Delporte-Gallet + and Hugues Fauconnier and Sam Toueg", + title = "Stable Leader Election", + booktitle = DISC01, + year = 2001, + pages = "108--122", + publisher = SPR +} + +@inproceedings{ADGFT03:podc, + author = "Marcos K. Aguilera and Carole Delporte-Gallet and + Hugues Fauconnier and Sam Toueg", + title = "On implementing {O}mega with weak reliability and + synchrony assumptions", + booktitle = PODC03, + year = 2003, + publisher = ACM +} + +@inproceedings{ADGFT04:podc, + author = {Marcos K. Aguilera and Carole Delporte-Gallet and + Hugues Fauconnier and Sam Toueg}, + title = {Communication-efficient leader election and + consensus with limited link synchrony}, + booktitle = PODC04, + year = 2004, + pages = {328--337}, + address = {St. John's, Newfoundland, Canada}, + publisher = ACM +} + +@inproceedings{ADGFT06:dsn, + author = {Marcos Kawazoe Aguilera and Carole Delporte-Gallet + and Hugues Fauconnier and Sam Toueg}, + title = {Consensus with Byzantine Failures and Little System + Synchrony.}, + booktitle = DSN06, + year = 2006, + pages = {147-155}, + ee = + {http://doi.ieeecomputersociety.org/10.1109/DSN.2006.22}, + bibsource = {DBLP, http://dblp.uni-trier.de} +} + +@inproceedings{ADLS91:stoc, + author = "Hagit Attiya and Cynthia Dwork and Nancy A. Lynch + and Larry J. Stockmeyer", + title = "Bounds on the Time to Reach Agreement in the + Presence of Timing Uncertainty", + booktitle = STOC91, + year = 1991, + pages = "359--369", +} + +@article{AT99:ipl, + author = "Marcos Kawazoe Aguilera and Sam Toueg", + title = "A Simple Bivalency Proof that t -Resilient Consensus + Requires t + 1 Rounds", + journal = IPL, + volume = "71", + number = "3-4", + pages = "155--158", + year = "1999" +} + +@Book{AW04:book, + author = {Attiya, Hagit and Welch, Jennifer}, + title = {Distributed Computing}, + publisher = {John Wiley {\&} Sons}, + edition = {2nd}, + year = {2004} +} + +@Book{AW98:book, + author = {Hagit Attiya and Jennifer Welch}, + title = {Distributed Computing}, + publisher = {McGraw-Hill Publishing Company}, + year = {1998} +} + +@InBook{AW98:book:chap12, + author = {Hagit Attiya and Jennifer Welch}, + title = {Distributed Computing}, + publisher = {McGraw-Hill Publishing Company}, + year = {1998}, + chapter = {12, "Improving the fault-tolerance of algorithms"} +} + +@inproceedings{ABHMS11:disc, + author = {Hagit Attiya and + Fatemeh Borran and + Martin Hutle and + Zarko Milosevic and + Andr{\'e} Schiper}, + title = {Structured Derivation of Semi-Synchronous Algorithms}, + booktitle = {DISC}, + year = {2011}, + pages = {374-388} +} + +@inproceedings{BCBG+07:podc, + author = {Martin Biely and Bernadette Charron-Bost and Antoine + Gaillard and Martin Hutle and Andr{\'e} Schiper and + Josef Widder}, + title = {Tolerating Corrupted Communication}, + publisher = ACM, + booktitle = PODC07, + year = {2007} +} + +@InProceedings{BCBT96:wdag, + author = {Anindya Basu and Bernadette Charron-Bost and Sam + Toueg}, + title = {Simulating Reliable Links with Unreliable Links in + the Presence of Process Crashes}, + pages = {105--122}, + booktitle = {WDAG 1996}, + editor = {Babao{\u g}lu, {\"O}zalp}, + year = {1996}, + month = 
{Oct}, + volume = {1151}, + ISBN = {3-540-61769-8}, + pubisher = {Springer}, + series = {Lecture Notes in Computer Science}, +} + +@article{BDFG03:sigact, + author = "R. Boichat and P. Dutta and S. Frolund and + R. Guerraoui", + title = "Reconstructing {P}axos", + journal = "ACM SIGACT News", + year = "2003", + volume = "34", + number = "1", + pages = "47-67" +} + +@unpublished{BHR+06:note, + author = "Martin Biely and Martin Hutle and Sergio Rajsbaum + and Ulrich Schmid and Corentin Travers and Josef + Widder", + title = "Discussion note on moving timely links", + note = "Unpublished", + month = apr, + year = 2006 +} + +@article{BHRT03:jda, + author = {Roberto Baldoni and Jean-Michel H{\'e}lary and + Michel Raynal and L{\'e}naick Tanguy}, + title = {Consensus in Byzantine asynchronous systems.}, + journal = {J. Discrete Algorithms}, + volume = {1}, + number = {2}, + year = {2003}, + pages = {185-210}, + ee = {http://dx.doi.org/10.1016/S1570-8667(03)00025-X}, + bibsource = {DBLP, http://dblp.uni-trier.de} +} + +@unpublished{BHSS08:tdsc, + author = {Fatemeh Borran and Martin Hutle and Nuno Santos and + Andr{\'e} Schiper}, + title = {Solving Consensus with Communication Predicates: + A~Quantitative Approach}, + note = {Under submission}, + year = {2008} +} + +@inproceedings{Ben83:podc, + author = {Michael Ben-Or}, + title = {Another Advantage of Free Choice: Completely + Asynchronous Agreement Protocols}, + booktitle = {PODC}, + year = {1983}, +} + +@inproceedings{Bra04:podc, + author = {Bracha, Gabriel}, + title = {An asynchronous [(n - 1)/3]-resilient consensus protocol}, + booktitle = {PODC '84: Proceedings of the third annual ACM symposium on Principles of distributed computing}, + year = {1984}, + isbn = {0-89791-143-1}, + pages = {154--162}, + location = {Vancouver, British Columbia, Canada}, + doi = {http://doi.acm.org/10.1145/800222.806743}, + publisher = {ACM}, + address = {New York, NY, USA}, + } + + +@inproceedings{CBGS00:dsn, + author = "Bernadette Charron-Bost and Rachid Guerraoui and + Andr{\'{e}} Schiper", + title = "Synchronous System and Perfect Failure Detector: + {S}olvability and efficiency issues", + booktitle = DSN00, + publisher = "{IEEE} Computer Society", + address = "New York, {USA}", + pages = "523--532", + year = "2000" +} + +@inproceedings{CBS06:prdc, + author = {Bernadette Charron-Bost and Andr{\'e} Schiper}, + title = {Improving Fast Paxos: being optimistic with no + overhead}, + booktitle = {Pacific Rim Dependable Computing, Proceedings}, + year = {2006} +} + +@article{CBS09, + author = {B. Charron-Bost and A. Schiper}, + title = {The {H}eard-{O}f model: computing in distributed systems with benign failures}, + journal ={Distributed Computing}, + number = {1}, + volume = {22}, + pages = {49-71}, + year ={2009} + } + + +@article{CBS07:sigact, + author = {Bernadette Charron-Bost and Andr\'{e} Schiper}, + title = {Harmful dogmas in fault tolerant distributed + computing}, + journal = {SIGACT News}, + volume = {38}, + number = {1}, + year = {2007}, + pages = {53--61}, +} + +@techreport{CBS07:tr, + author = {Charron-Bost, Bernadette and Schiper, Andr{\'{e}}}, + title = {The Heard-Of Model: Unifying all Benign Failures}, + institution = {EPFL}, + year = 2007, + OPTnumber = {LSR-REPORT-2006-004} +} + +@article{CELT00:jacm, + author = {Soma Chaudhuri and Maurice Erlihy and Nancy A. Lynch + and Mark R. 
Tuttle}, + title = {Tight bounds for k-set agreement}, + journal = JACM, + volume = {47}, + number = {5}, + year = {2000}, + pages = {912--943}, + publisher = ACM, + address = {New York, NY, USA}, +} + +@article{CF99:tpds, + author = "Flaviu Cristian and Christof Fetzer", + title = "The Timed Asynchronous Distributed System Model", + journal = "IEEE Transactions on Parallel and Distributed + Systems", + volume = "10", + number = "6", + pages = "642--657", + year = "1999" +} + +@article{CHT96:jacm, + author = "Tushar Deepak Chandra and Vassos Hadzilacos and Sam + Toueg", + title = "The Weakest Failure Detector for Solving Consensus", + journal = {JACM}, + year = {1996}, +} + +@article{CL02:tcs, + author = {Miguel Castro and Barbara Liskov}, + title = {Practical byzantine fault tolerance and proactive + recovery}, + journal = {ACMTCS}, + year = {2002}, +} + +@inproceedings{CL99:osdi, + author = {Miguel Castro and Barbara Liskov}, + title = {Practical byzantine fault tolerance and proactive + recovery}, + booktitle = {Proceedings of the 3rd Symposium on Operating + Systems Design and Implementation}, + year = {1999}, + month = feb +} + +@inproceedings{CT91:podc, + author = {Tushar Deepak Chandra and Sam Toueg}, + title = {Unreliable Failure Detectors for Asynchronous + Systems (Preliminary Version)}, + booktitle = PODC91, + year = {1991}, + pages = {325-340} +} + +@article{CT96:jacm1, + author = "Tushar Deepak Chandra and Sam Toueg", + title = "Unreliable Failure Detectors for Reliable + Distributed Systems", + journal = {JACM}, + year = {1996}, +} + +@inproceedings{CTA00:dsn, + author = "Wei Chen and Sam Toueg and Marcos Kawazoe Aguilera", + title = "On the Quality of Service of Failure Detectors", + booktitle = "Proceedings IEEE International Conference on + Dependable Systems and Networks (DSN / FTCS'30)", + address = "New York City, USA", + year = 2000 +} + +@TechReport{DFKM96:tr, + author = {Danny Dolev and Roy Friedman and Idit Keidar and + Dahlia Malkhi}, + title = {Failure detectors in omission failure environments}, + institution = {Department of Computer Science, Cornell University}, + year = {1996}, + type = {Technical Report}, + number = {96-1608} +} + +@inproceedings{DG02:podc, + author = {Partha Dutta and Rachid Guerraoui}, + title = {The inherent price of indulgence}, + booktitle = PODC02, + year = 2002, + pages = {88--97}, + location = {Monterey, California}, + publisher = ACM, + address = {New York, NY, USA}, +} + +@inproceedings{DGFG+04:podc, + author = {Carole Delporte-Gallet and Hugues Fauconnier and + Rachid Guerraoui and Vassos Hadzilacos and Petr + Kouznetsov and Sam Toueg}, + title = {The weakest failure detectors to solve certain + fundamental problems in distributed computing}, + booktitle = PODC04, + year = 2004, + pages = {338--346}, + location = {St. 
John's, Newfoundland, Canada}, + publisher = ACM, + address = {New York, NY, USA} +} + +@inproceedings{DGL05:dsn, + author = {Partha Dutta and Rachid Guerraoui and Leslie + Lamport}, + title = {How Fast Can Eventual Synchrony Lead to Consensus?}, + booktitle = {Proceedings of the 2005 International Conference on + Dependable Systems and Networks (DSN'05)}, + pages = {22--27}, + year = {2005}, + address = {Los Alamitos, CA, USA} +} + +@article{DLS88:jacm, + author = "Cynthia Dwork and Nancy Lynch and Larry Stockmeyer", + title = "Consensus in the Presence of Partial Synchrony", + journal = {JACM}, + year = {1988}, +} + +@article{DPLL00:tcs, + author = "De Prisco, Roberto and Butler Lampson and Nancy + Lynch", + title = "Revisiting the {PAXOS} algorithm", + journal = TCS, + volume = "243", + number = "1--2", + pages = "35--91", + year = "2000" +} + +@techreport{DS97:tr, + author = {A. Doudou and A. Schiper}, + title = {Muteness Failure Detectors for Consensus with + {B}yzantine Processes}, + institution = {EPFL, Dept d'Informatique}, + year = {1997}, + type = {TR}, + month = {October}, + number = {97/230}, +} + +@inproceedings{DS98:podc, + author = {A. Doudou and A. Schiper}, + title = {Muteness Detectors for Consensus with {B}yzantine + Processes ({B}rief {A}nnouncement)}, + booktitle = {PODC}, + month = jul, + year = {1998} +} + +@article{DSU04:survey, + author = {D{\'e}fago, Xavier and Schiper, Andr{\'e} and Urb\'{a}n, P{\'e}ter}, + title = {Total order broadcast and multicast algorithms: Taxonomy and survey}, + journal = {ACM Comput. Surv.}, + issue_date = {December 2004}, + volume = {36}, + number = {4}, + month = dec, + year = {2004}, + issn = {0360-0300}, + pages = {372--421}, + numpages = {50}, + publisher = {ACM}, + address = {New York, NY, USA}, + keywords = {Distributed systems, agreement problems, atomic broadcast, atomic multicast, classification, distributed algorithms, fault-tolerance, global ordering, group communication, message passing, survey, taxonomy, total ordering}, +} + +@article{DeCandia07:dynamo, + author = {DeCandia, Giuseppe and Hastorun, Deniz and Jampani, Madan and Kakulapati, Gunavardhan and Lakshman, Avinash and Pilchin, Alex and Sivasubramanian, Swaminathan and Vosshall, Peter and Vogels, Werner}, + title = {Dynamo: amazon's highly available key-value store}, + journal = {SIGOPS Oper. Syst. Rev.}, + issue_date = {December 2007}, + volume = {41}, + number = {6}, + month = oct, + year = {2007}, + issn = {0163-5980}, + pages = {205--220}, + numpages = {16}, + publisher = {ACM}, + address = {New York, NY, USA}, + keywords = {performance, reliability, scalability}, +} + + +@book{Dol00:book, + author = {Shlomi Dolev}, + title = {Self-Stabilization}, + publisher = {The MIT Press}, + year = {2000} +} + +@inproceedings{FC95:podc, + author = "Christof Fetzer and Flaviu Cristian", + title = "Lower Bounds for Convergence Function Based Clock + Synchronization", + booktitle = PODC95, + year = 1995, + pages = "137--143" +} + +@article{FLP85:jacm, + author = "Michael J. Fischer and Nancy A. Lynch and + M. S. 
Paterson", + title = "Impossibility of Distributed Consensus with one + Faulty Process", + journal = {JACM}, + year = {1985}, +} + +@article{FMR05:tdsc, + author = {Roy Friedman and Achour Most{\'e}faoui and Michel + Raynal}, + title = {Simple and Efficient Oracle-Based Consensus + Protocols for Asynchronous Byzantine Systems.}, + journal = TDSC, + volume = {2}, + number = {1}, + year = {2005}, + pages = {46-56}, + ee = {http://dx.doi.org/10.1109/TDSC.2005.13}, + bibsource = {DBLP, http://dblp.uni-trier.de} +} + +@inproceedings{FS04:podc, + author = "Christof Fetzer and Ulrich Schmid", + title = "Brief announcement: on the possibility of consensus + in asynchronous systems with finite average response + times.", + booktitle = PODC04, + year = 2004, + pages = 402 +} + +@InProceedings{GL00:disc, + author = {Eli Gafni and Lesli Lamport}, + title = {Disk Paxos}, + booktitle = DISC00, + pages = {330--344}, + year = {2000}, +} + +@Article{GL03:dc, + author = {Eli Gafni and Lesli Lamport}, + title = {Disk Paxos}, + journal = DC, + year = 2003, + volume = {16}, + number = {1}, + pages = {1--20} +} + +@inproceedings{GP01:wss, + author = "Felix C. G{\"a}rtner and Stefan Pleisch", + title = "({I}m)Possibilities of Predicate Detection in + Crash-Affected Systems", + booktitle = WSS01, + year = 2001, + pages = "98--113" +} + +@inproceedings{GP02:disc, + author = "Felix C. G{\"a}rtner and Stefan Pleisch", + title = "Failure Detection Sequencers: Necessary and + Sufficient Information about Failures to Solve + Predicate Detection", + booktitle = DISC02, + year = 2002, + pages = "280--294" +} + +@inproceedings{GS96:wdag, + author = {Rachid Guerraoui and Andr{\'e} Schiper}, + title = {{``Gamma-Accurate''} Failure Detectors}, + booktitle = WDAG96, + year = {1996}, + pages = {269--286}, + publisher = SPR, + address = {London, UK} +} + +@inproceedings{Gaf98:podc, + author = {Eli Gafni}, + title = {Round-by-round fault detectors (extended abstract): + unifying synchrony and asynchrony}, + booktitle = PODC98, + year = {1998}, + pages = {143--152}, + address = {Puerto Vallarta, Mexico}, + publisher = ACM +} + +@incollection{Gra78:book, + author = {Jim N. Gray}, + title = {Notes on data base operating systems}, + booktitle = {Operating Systems: An Advanced Course}, + chapter = {3.F}, + publisher = {Springer}, + year = {1978}, + editor = {R. Bayer, R.M. Graham, G. Seegm\"uller}, + volume = {60}, + series = {Lecture Notes in Computer Science}, + address = {New York}, + pages = {465}, +} + +@InProceedings{HMR98:srds, + author = {Hurfin, M. and Mostefaoui, A. and Raynal, M.}, + title = {Consensus in asynchronous systems where processes + can crash and recover}, + booktitle = {Seventeenth IEEE Symposium on Reliable Distributed + Systems, Proceedings. 
}, + pages = { 280--286}, + year = {1998}, + address = {West Lafayette, IN}, + month = oct, + organization = {IEEE} +} + +@inproceedings{HMSZ06:sss, + author = "Martin Hutle and Dahlia Malkhi and Ulrich Schmid and + Lidong Zhou", + title = "Brief Announcement: Chasing the Weakest System Model + for Implementing {$\Omega$} and Consensus", + booktitle = SSS06, + year = 2006 +} + +@incollection{HT93:ds, + author = {Hadzilacos, Vassos and Toueg, Sam}, + title = {Fault-tolerant broadcasts and related problems}, + booktitle = {Distributed systems (2nd Ed.)}, + editor = {Mullender, Sape}, + year = {1993}, + isbn = {0-201-62427-3}, + pages = {97--145}, + numpages = {49} +} + + +@inproceedings{HS06:opodis, + author = {Heinrich Moser and Ulrich Schmid}, + title = {Optimal Clock Synchronization Revisited: Upper and + Lower Bounds in Real-Time Systems}, + booktitle = { Principles of Distributed Systems}, + pages = {94--109}, + year = {2006}, + volume = {4305}, + series = {Lecture Notes in Computer Science}, + publisher = SPR +} + +@techreport{HS06:tr, + author = {Martin Hutle and Andr{\'e} Schiper}, + title = { Communication predicates: A high-level abstraction + for coping with transient and dynamic faults}, + institution = {EPFL}, + number = { LSR-REPORT-2006-006 }, + year = {2006} +} + +@inproceedings{HS07:dsn, + author = {Martin Hutle and Andr{\'e} Schiper}, + title = { Communication predicates: A high-level abstraction + for coping with transient and dynamic faults}, + year = 2007, + booktitle = DSN07, + publisher = IEEE, + location = {Edinburgh,UK}, + pages = {92--10}, + month = jun +} + +@article{Her91:tpls, + author = {Maurice Herlihy}, + title = {Wait-free synchronization}, + journal = TPLS, + volume = {13}, + number = {1}, + year = {1991}, + pages = {124--149}, + publisher = ACM, + address = {New York, NY, USA}, +} + +@article{Kot09:zyzzyva, + author = {Kotla, Ramakrishna and Alvisi, Lorenzo and Dahlin, Mike and Clement, Allen and Wong, Edmund}, + title = {Zyzzyva: Speculative Byzantine fault tolerance}, + journal = {ACM Trans. Comput. Syst.}, + issue_date = {December 2009}, + volume = {27}, + number = {4}, + month = jan, + year = {2010}, + issn = {0734-2071}, + pages = {7:1--7:39}, + articleno = {7}, + numpages = {39}, + publisher = {ACM}, + address = {New York, NY, USA}, + keywords = {Byzantine fault tolerance, output commit, replication, speculative execution}, +} + + +@inproceedings{KMMS97:opodis, + author = "Kim Potter Kihlstrom and Louise E. Moser and + P. M. 
Melliar-Smith", + title = "Solving Consensus in a Byzantine Environment Using + an Unreliable Fault Detector", + booktitle = "Proceedings of the International Conference on + Principles of Distributed Systems (OPODIS)", + year = 1997, + month = dec, + address = "Chantilly, France", + pages = "61--75" +} + +@inproceedings{KS06:podc, + author = {Idit Keidar and Alexander Shraer}, + title = {Timeliness, failure-detectors, and consensus + performance}, + booktitle = PODC06, + year = {2006}, + pages = {169--178}, + location = {Denver, Colorado, USA}, + publisher = {ACM Press}, + address = {New York, NY, USA}, +} + +@InProceedings{LFA99:disc, + author = {Mikel Larrea and Antonio Fern\'andez and Sergio + Ar\'evalo}, + title = {Efficient algorithms to implement unreliable failure + detectors in partially synchronous systems}, + year = 1999, + month = sep, + pages = {34-48}, + series = "LNCS 1693", + booktitle = DISC99, + publisher = SPR, + address = {Bratislava, Slovaquia}, +} + +@article{LL84:ic, + author = "Jennifer Lundelius and Nancy A. Lynch", + title = "An Upper and Lower Bound for Clock Synchronization", + journal = IC, + volume = 62, + number = {2/3}, + year = 1984, + pages = {190--204} +} + +@techreport{LLS03:tr, + title = {How to Implement a Timer-free Perfect Failure + Detector in Partially Synchronous Systems}, + author = {Le Lann, G\'erard and Schmid, Ulrich}, + institution = TUAuto, + number = "183/1-127", + month = jan, + year = 2003 +} + +@article{LSP82:tpls, + author = {Leslie Lamport and Robert Shostak and Marshall + Pease}, + title = {The {B}yzantine Generals Problem}, + journal = {ACM Trans. Program. Lang. Syst.}, + year = {1982}, +} + +@inproceedings{Lam01:podc, + author = {Butler Lampson}, + title = {The ABCD's of Paxos}, + booktitle = {PODC}, + year = {2001}, + +} + +@inproceedings{Lam03:fddc, + author = {Leslie Lamport}, + title = {Lower Bounds for Asynchronous Consensus}, + booktitle = {Future Directions in Distributed Computing}, + pages = {22--23}, + year = {2003}, + editor = {Andr{\'e} Schiper and Alex A. Shvartsman and Hakim + Weatherspoon and Ben Y. Zhao}, + number = {2584}, + series = {Lecture Notes in Computer Science}, + publisher = SPR +} + +@techreport{Lam04:tr, + author = {Leslie Lamport}, + title = {Lower Bounds for Asynchronous Consensus}, + institution = {Microsoft Research}, + year = {2004}, + number = {MSR-TR-2004-72} +} + +@techreport{Lam05:tr, + author = {Leslie Lamport}, + title = {Fast Paxos}, + institution = {Microsoft Research}, + year = {2005}, + number = {MSR-TR-2005-12} +} + +@techreport{Lam05:tr-33, + author = {Leslie Lamport}, + title = {Generalized Consensus and Paxos}, + institution = {Microsoft Research}, + year = {2005}, + number = {MSR-TR-2005-33} +} + +@Misc{Lam06:slides, + author = {Leslie Lamport}, + title = {Byzantine Paxos}, + howpublished = {Unpublished slides}, + year = {2006} +} + +@Article{Lam86:dc, + author = {Lesli Lamport}, + title = {On Interprocess Communication--Part I: Basic + Formalism, Part II: Algorithms}, + journal = DC, + year = 1986, + volume = 1, + number = 2, + pages = {77--101} +} + +@Article {Lam98:tcs, + author = {Leslie Lamport}, + title = {The part-time parliament}, + journal = ACMTCS, + year = 1998, + volume = 16, + number = 2, + month = may, + pages = {133-169}, +} + +@book{Lyn96:book, + author = {Nancy Lynch}, + title = {Distributed Algorithms}, + publisher = {Morgan Kaufman}, + year = {1996}, +} + +@inproceedings{MA05:dsn, + author = {Martin, J.-P. and Alvisi, L. 
}, + title = {Fast Byzantine consensus}, + booktitle = DSN05, + pages = {402--411}, + year = {2005}, + month = jun, + organization = {IEEE}, +} + +@article{MA06:tdsc, + author = {Martin, J.-P. and Alvisi, L. }, + title = {Fast {B}yzantine Consensus}, + journal = {TDSC}, + year = {2006}, +} + +@InProceedings{MOZ05:dsn, + author = {Dahlia Malkhi and Florin Oprea and Lidong Zhou}, + title = {{$\Omega$} Meets Paxos: Leader Election and + Stability without Eventual Timely Links}, + booktitle = DSN05, + year = {2005} +} + +@inproceedings{MR00:podc, + author = "Achour Most{\'e}faoui and Michel Raynal", + title = "k-set agreement with limited accuracy failure + detectors", + booktitle = PODC00, + year = 2000, + pages = {143--152}, + location = {Portland, Oregon, United States}, + publisher = ACM +} + +@article{MR01:ppl, + author = "Achour Most{\'e}faoui and Michel Raynal", + title = "Leader-Based Consensus", + journal = PPL, + volume = 11, + number = 1, + year = 2001, + pages = {95--107} +} + +@techreport{OGS97:tr, + author = "Rui Oliveira and Rachid Guerraoui and {Andr\'e} + Schiper", + title = "Consensus in the crash-recover model", + number = "TR-97/239", + year = "1997" +} + +@article{PSL80:jacm, + author = {M. Pease and R. Shostak and L. Lamport}, + title = {Reaching Agreement in the Presence of Faults}, + journal = JACM, + volume = {27}, + number = {2}, + year = {1980}, + pages = {228--234}, + publisher = ACM, + address = ACMADDR, +} + +@article{ST87:jacm, + author = "T. K. Srikanth and Sam Toueg", + title = "Optimal clock synchronization", + journal = JACM, + volume = 34, + number = 3, + year = 1987, + pages = "626--645" +} + +@article{ST87:dc, + author = {T. K. Srikanth and Sam Toueg,}, + title = {Simulating authenticated broadcasts to derive simple fault-tolerant algorithms}, + journal = DC, + volume = {2}, + number = {2}, + year = {1987}, + pages = {80-94} +} + + +@inproceedings{SW89:stacs, + author = {Santoro, Nicola and Widmayer, Peter}, + title = {Time is not a healer}, + booktitle = {Proc.\ 6th Annual Symposium on Theor.\ Aspects of + Computer Science (STACS'89)}, + publisher = "Springer-Verlag", + series = {LNCS}, + volume = "349", + address = "Paderborn, Germany", + pages = "304-313", + year = "1989", + month = feb, +} + +@inproceedings{SW90:sigal, + author = {Nicola Santoro and Peter Widmayer}, + title = {Distributed Function Evaluation in the Presence of + Transmission Faults.}, + booktitle = {SIGAL International Symposium on Algorithms}, + year = {1990}, + pages = {358-367} +} + +@inproceedings{SWR02:icdcs, + author = {Ulrich Schmid and Bettina Weiss and John Rushby}, + title = {Formally Verified Byzantine Agreement in Presence of + Link Faults}, + booktitle = "22nd International Conference on Distributed + Computing Systems (ICDCS'02)", + year = 2002, + month = jul # " 2-5, ", + pages = "608--616", + address = "Vienna, Austria", +} + +@incollection{Sch93a:mullender, + Author = {F. B. Schneider}, + Title = {What Good are Models and What Models are Good}, + BookTitle = {Distributed Systems}, + Year = {1993}, + Editor = {Sape Mullender}, + Publisher = {ACM Press}, + Pages = {169-197}, +} + +@article{VL96:ic, + author = {George Varghese and Nancy A. Lynch}, + title = {A Tradeoff Between Safety and Liveness for + Randomized Coordinated Attack.}, + journal = {Inf. 
Comput.}, + volume = {128}, + number = {1}, + year = 1996, + pages = {57--71} +} + +@inproceedings{WGWB07:dsn, + title = {Synchronous Consensus with Mortal Byzantines}, + author = {Josef Widder and Günther Gridling and Bettina Weiss + and Jean-Paul Blanquart}, + year = {2007}, + booktitle = DSN07, + publisher = IEEE +} + +@inproceedings{Wid03:disc, + author = {Josef Widder}, + title = {Booting clock Synchronization in Partially + Synchronous Systems}, + booktitle = DISC03, + year = {2003}, + pages = {121--135} +} + +@techreport{Zie04:tr, + author = {Piotr Zieli{\'n}ski}, + title = {Paxos at War}, + institution = {University of Cambridge}, + year = {2004}, + number = {UCAM-CL-TR-593}, +} + +@article{Lam78:cacm, + author = {Leslie Lamport}, + title = {Time, clocks, and the ordering of events in a + distributed system}, + journal = {Commun. ACM}, + year = {1978}, +} + +@Article{Gue06:cj, + author = {Guerraoui, R. and Raynal, M.}, + journal = {The {C}omputer {J}ournal}, + title = {The {A}lpha of {I}ndulgent {C}onsensus}, + year = {2006} +} + +@Article{Gue03:toc, + affiliation = {EPFL}, + author = {Guerraoui, Rachid and Raynal, Michel}, + journal = {{IEEE} {T}rans. on {C}omputers}, + title = {The {I}nformation {S}tructure of {I}ndulgent {C}onsensus}, + year = {2004}, +} + +@techreport{Cas00, + author = {Castro, Miguel}, + title = {Practical {B}yzantine Fault-Tolerance. {PhD} thesis}, + institution = {MIT}, + year = 2000, +} + +@inproceedings{SongRSD08:icdcn, + author = {Yee Jiun Song and + Robbert van Renesse and + Fred B. Schneider and + Danny Dolev}, + title = {The Building Blocks of Consensus}, + booktitle = {ICDCN}, + year = {2008}, +} + + +@inproceedings{BS09:icdcn, + author = {Borran, Fatemeh and Schiper, Andr{\'e}}, + + title = {A {L}eader-free {B}yzantine {C}onsensus {A}lgorithm}, + note = {To appear in ICDCN, 2010}, +} + + +@inproceedings{MHS09:opodis, + author = {Zarko Milosevic and Martin Hutle and Andr{\'e} + Schiper}, + title = {Unifying {B}yzantine Consensus Algorithms with {W}eak + {I}nteractive {C}onsistency}, + note = {To appear in OPODIS 2009}, +} + +@inproceedings{MRR:dsn02, + author = {Most\'{e}faoui, Achour and Rajsbaum, Sergio and Raynal, Michel}, + title = {A Versatile and Modular Consensus Protocol}, + booktitle = {DSN}, + year = {2002}, + } + +@article{MR98:dc, + author = {Dahlia Malkhi and + Michael K. Reiter}, + title = {Byzantine Quorum Systems}, + journal = {Distributed Computing}, + year = {1998}, +} + +@inproceedings{Rei:ccs94, + author = {Reiter, Michael K.}, + title = {Secure agreement protocols: reliable and atomic group multicast in rampart}, + booktitle = {CCS}, + year = {1994}, + pages = {68--80}, + numpages = {13} +} + + +@techreport{RMS09-tr, + author = {Olivier R\"utti and Zarko Milosevic and Andr\'e Schiper}, + title = {{G}eneric construction of consensus algorithm for benign and {B}yzantine faults}, + institution = {EPFL-IC}, + number = {LSR-REPORT-2009-005}, + year = 2009, +} + +@inproceedings{Li:srds07, + author = {Li, Harry C. and Clement, Allen and Aiyer, Amitanand S. and Alvisi, Lorenzo}, + title = {The Paxos Register}, + booktitle = {SRDS}, + year = {2007}, + } + + @article{Amir11:prime, + author = {Amir, Yair and Coan, Brian and Kirsch, Jonathan and Lane, John}, + title = {Prime: Byzantine Replication under Attack}, + journal = {IEEE Trans. Dependable Secur. 
Comput.}, + issue_date = {July 2011}, + volume = {8}, + number = {4}, + month = jul, + year = {2011}, + issn = {1545-5971}, + pages = {564--577}, + numpages = {14}, + publisher = {IEEE Computer Society Press}, + address = {Los Alamitos, CA, USA}, + keywords = {Performance under attack, Byzantine fault tolerance, replicated state machines, distributed systems.}, +} + +@inproceedings{Mao08:mencius, + author = {Mao, Yanhua and Junqueira, Flavio P. and Marzullo, Keith}, + title = {Mencius: building efficient replicated state machines for WANs}, + booktitle = {OSDI}, + year = {2008}, + pages = {369--384}, + numpages = {16} +} + +@article{Sch90:survey, + author = {Schneider, Fred B.}, + title = {Implementing fault-tolerant services using the state machine approach: a tutorial}, + journal = {ACM Comput. Surv.}, + volume = {22}, + number = {4}, + month = dec, + year = {1990} +} + + +@techreport{HT94:TR, + author = {Hadzilacos, Vassos and Toueg, Sam}, + title = {A Modular Approach to Fault-Tolerant Broadcasts and Related Problems}, + year = {1994}, + source = {http://www.ncstrl.org:8900/ncstrl/servlet/search?formname=detail\&id=oai%3Ancstrlh%3Acornellcs%3ACORNELLCS%3ATR94-1425}, + publisher = {Cornell University}, + address = {Ithaca, NY, USA}, +} + +@inproceedings{Ver09:spinning, + author = {Veronese, Giuliana Santos and Correia, Miguel and Bessani, Alysson Neves and Lung, Lau Cheuk}, + title = {Spin One's Wheels? Byzantine Fault Tolerance with a Spinning Primary}, + booktitle = {SRDS}, + year = {2009}, + numpages = {10} +} + +@inproceedings{Cle09:aardvark, + author = {Clement, Allen and Wong, Edmund and Alvisi, Lorenzo and Dahlin, Mike and Marchetti, Mirco}, + title = {Making Byzantine fault tolerant systems tolerate Byzantine faults}, + booktitle = {NSDI}, + year = {2009}, + pages = {153--168}, + numpages = {16} +} + +@inproceedings{Aiyer05:barB, + author = {Aiyer, Amitanand S. and Alvisi, Lorenzo and Clement, Allen and Dahlin, Mike and Martin, Jean-Philippe and Porth, Carl}, + title = {BAR fault tolerance for cooperative services}, + booktitle = {SOSP}, + year = {2005}, + pages = {45--58}, + numpages = {14} +} + +@inproceedings{Cach01:crypto, + author = {Cachin, Christian and Kursawe, Klaus and Petzold, Frank and Shoup, Victor}, + title = {Secure and Efficient Asynchronous Broadcast Protocols}, + booktitle = {CRYPTO}, + year = {2001}, + pages = {524--541}, + numpages = {18} +} + +@article{Moniz11:ritas, + author = {Moniz, Henrique and Neves, Nuno Ferreria and Correia, Miguel and Verissimo, Paulo}, + title = {RITAS: Services for Randomized Intrusion Tolerance}, + journal = {IEEE Trans. Dependable Secur. 
Comput.}, + volume = {8}, + number = {1}, + month = jan, + year = {2011}, + pages = {122--136}, + numpages = {15} +} + +@inproceedings{MHS11:jabc, + author = {Milosevic, Zarko and Hutle, Martin and Schiper, Andre}, + title = {On the Reduction of Atomic Broadcast to Consensus with Byzantine Faults}, + booktitle = {SRDS}, + year = {2011}, + pages = {235--244}, + numpages = {10} +} + +@incollection{DHSZ03, + author={Driscoll, Kevin and Hall, Brendan and Sivencrona, Håkan and Zumsteg, Phil}, + title={Byzantine Fault Tolerance, from Theory to Reality}, + year={2003}, + booktitle={Computer Safety, Reliability, and Security}, + volume={2788}, + pages={235--248} +} + +@inproceedings{RMES:dsn07, + author = {Olivier R{\"u}tti and + Sergio Mena and + Richard Ekwall and + Andr{\'e} Schiper}, + title = {On the Cost of Modularity in Atomic Broadcast}, + booktitle = {DSN}, + year = {2007}, + pages = {635-644} +} + +@article{Ben:jc92, + author = {Charles H. Bennett and + Fran\c{c}ois Bessette and + Gilles Brassard and + Louis Salvail and + John A. Smolin}, + title = {Experimental Quantum Cryptography}, + journal = {J. Cryptology}, + volume = {5}, + number = {1}, + year = {1992}, + pages = {3-28} +} + +@inproceedings{Aiyer:disc08, + author = {Aiyer, Amitanand S. and Alvisi, Lorenzo and Bazzi, Rida A. and Clement, Allen}, + title = {Matrix Signatures: From MACs to Digital Signatures in Distributed Systems}, + booktitle = {DISC}, + year = {2008}, + pages = {16--31}, + numpages = {16} +} + +@inproceedings{Biel13:dsn, + author = {Biely, Martin and Delgado, Pamela and Milosevic, Zarko and Schiper, Andr{\'e}}, + title = {Distal: A Framework for Implementing Fault-tolerant Distributed Algorithms}, + note = {To appear in DSN, 2013}, + year = 2013 +} + +@inproceedings{BS10:icdcn, + author = {Borran, Fatemeh and Schiper, Andr{\'e}}, + title = {A leader-free Byzantine consensus algorithm}, + booktitle = {ICDCN}, + year = {2010}, + pages = {67--78}, + numpages = {12} +} + +@article{Cor06:cj, + author = {Correia, Miguel and Neves, Nuno Ferreira and Ver\'{\i}ssimo, Paulo}, + title = {From Consensus to Atomic Broadcast: Time-Free Byzantine-Resistant Protocols without Signatures}, + journal = {Comput. J.}, + volume = {49}, + number = {1}, + year = {2006}, + pages = {82--96}, + numpages = {15} +} + +@inproceedings{RMS10:dsn, + author = {Olivier R{\"u}tti and + Zarko Milosevic and + Andr{\'e} Schiper}, + title = {Generic construction of consensus algorithms for benign + and Byzantine faults}, + booktitle = {DSN}, + year = {2010}, + pages = {343-352} +} + + + +@inproceedings{HKJR:usenix10, + author = {Hunt, Patrick and Konar, Mahadev and Junqueira, Flavio P. and Reed, Benjamin}, + title = {ZooKeeper: wait-free coordination for internet-scale systems}, + OPTbooktitle = {Proceedings of the 2010 USENIX conference on USENIX annual technical conference}, + booktitle = {USENIXATC}, + year = {2010}, + OPTlocation = {Boston, MA}, + pages = {11}, + numpages = {1}, + OPTurl = {http://dl.acm.org/citation.cfm?id=1855840.1855851}, + acmid = {1855851}, + OPTpublisher = {USENIX Association}, + OPTaddress = {Berkeley, CA, USA}, +} + +@inproceedings{Bur:osdi06, + author = {Burrows, Mike}, + title = {The Chubby lock service for loosely-coupled distributed systems}, + booktitle = {OSDI}, + year = {2006}, + pages = {335--350}, + numpages = {16}, +} + +@INPROCEEDINGS{Mao09:hotdep, + author = {Yanhua Mao and Flavio P. 
Junqueira and Keith Marzullo}, + title = {Towards low latency state machine replication for uncivil wide-area networks}, + booktitle = {HotDep}, + year = {2009} +} + +@inproceedings{Chun07:a2m, + author = {Chun, Byung-Gon and Maniatis, Petros and Shenker, Scott and Kubiatowicz, John}, + title = {Attested append-only memory: making adversaries stick to their word}, + booktitle = {SOSP}, + year = {2007}, + pages = {189--204}, + numpages = {16} +} + +@TECHREPORT{MBS:epfltr, + author = {Zarko Milosevic and Martin Biely and Andr\'e Schiper}, + title = {Bounded {D}elay in {B}yzantine {T}olerant {S}tate {M}achine {R}eplication}, + year = 2013, + month = april, + institution = {EPFL}, + number = {185962}, +} + +@book{BH09:datacenter, + author = {Barroso, Luiz Andre and Hoelzle, Urs}, + title = {The Datacenter as a Computer: An Introduction to the Design of Warehouse-Scale Machines}, + year = {2009}, + isbn = {159829556X, 9781598295566}, + edition = {1st}, + publisher = {Morgan and Claypool Publishers}, +} + +@inproceedings{Kir11:csiirw, + author = {Kirsch, Jonathan and Goose, Stuart and Amir, Yair and Skare, Paul}, + title = {Toward survivable SCADA}, + booktitle = {CSIIRW}, + year = {2011}, + pages = {21:1--21:1}, + articleno = {21}, + numpages = {1} +} + +@inproceedings{Ongaro14:raft, + author = {Ongaro, Diego and Ousterhout, John}, + title = {In Search of an Understandable Consensus Algorithm}, + booktitle = {Proceedings of the 2014 USENIX Conference on USENIX Annual Technical Conference}, + series = {USENIX ATC'14}, + year = {2014}, + isbn = {978-1-931971-10-2}, + location = {Philadelphia, PA}, + pages = {305--320}, + numpages = {16}, + url = {http://dl.acm.org/citation.cfm?id=2643634.2643666}, + acmid = {2643666}, + publisher = {USENIX Association}, + address = {Berkeley, CA, USA}, +} + +@article{GLR17:red-belly-bc, + author = {Tyler Crain and + Vincent Gramoli and + Mikel Larrea and + Michel Raynal}, + title = {Leader/Randomization/Signature-free Byzantine Consensus for Consortium + Blockchains}, + journal = {CoRR}, + volume = {abs/1702.03068}, + year = {2017}, + url = {http://arxiv.org/abs/1702.03068}, + archivePrefix = {arXiv}, + eprint = {1702.03068}, + timestamp = {Wed, 07 Jun 2017 14:41:08 +0200}, + biburl = {http://dblp.org/rec/bib/journals/corr/CrainGLR17}, + bibsource = {dblp computer science bibliography, http://dblp.org} +} + + +@misc{Nak2012:bitcoin, + added-at = {2014-04-17T08:33:06.000+0200}, + author = {Nakamoto, Satoshi}, + biburl = {https://www.bibsonomy.org/bibtex/23db66df0fc9fa2b5033f096a901f1c36/ngnn}, + interhash = {423c2cdff70ba0cd0bca55ebb164d770}, + intrahash = {3db66df0fc9fa2b5033f096a901f1c36}, + keywords = {imported}, + timestamp = {2014-04-17T08:33:06.000+0200}, + title = {Bitcoin: A peer-to-peer electronic cash system}, + url = {http://www.bitcoin.org/bitcoin.pdf}, + year = 2009 +} + +@misc{But2014:ethereum, + author = {Vitalik Buterin}, + title = {Ethereum: A next-generation smart contract and decentralized application platform}, + year = {2014}, + howpublished = {\url{https://github.com/ethereum/wiki/wiki/White-Paper}}, + note = {Accessed: 2018-07-11}, + url = {https://github.com/ethereum/wiki/wiki/White-Paper}, +} + +@inproceedings{Dem1987:gossip, + author = {Demers, Alan and Greene, Dan and Hauser, Carl and Irish, Wes and Larson, John and Shenker, Scott and Sturgis, Howard and Swinehart, Dan and Terry, Doug}, + title = {Epidemic Algorithms for Replicated Database Maintenance}, + booktitle = {Proceedings of the Sixth Annual ACM Symposium on Principles of 
Distributed Computing}, + series = {PODC '87}, + year = {1987}, + isbn = {0-89791-239-X}, + location = {Vancouver, British Columbia, Canada}, + pages = {1--12}, + numpages = {12}, + url = {http://doi.acm.org/10.1145/41840.41841}, + doi = {10.1145/41840.41841}, + acmid = {41841}, + publisher = {ACM}, + address = {New York, NY, USA}, +} + +@article{Gue2018:sbft, + author = {Guy Golan{-}Gueta and + Ittai Abraham and + Shelly Grossman and + Dahlia Malkhi and + Benny Pinkas and + Michael K. Reiter and + Dragos{-}Adrian Seredinschi and + Orr Tamir and + Alin Tomescu}, + title = {{SBFT:} a Scalable Decentralized Trust Infrastructure for Blockchains}, + journal = {CoRR}, + volume = {abs/1804.01626}, + year = {2018}, + url = {http://arxiv.org/abs/1804.01626}, + archivePrefix = {arXiv}, + eprint = {1804.01626}, + timestamp = {Tue, 01 May 2018 19:46:29 +0200}, + biburl = {https://dblp.org/rec/bib/journals/corr/abs-1804-01626}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} + +@inproceedings{BLS2001:crypto, + author = {Boneh, Dan and Lynn, Ben and Shacham, Hovav}, + title = {Short Signatures from the Weil Pairing}, + booktitle = {Proceedings of the 7th International Conference on the Theory and Application of Cryptology and Information Security: Advances in Cryptology}, + series = {ASIACRYPT '01}, + year = {2001}, + isbn = {3-540-42987-5}, + pages = {514--532}, + numpages = {19}, + url = {http://dl.acm.org/citation.cfm?id=647097.717005}, + acmid = {717005}, + publisher = {Springer-Verlag}, + address = {Berlin, Heidelberg}, +} + + diff --git a/spec/consensus/consensus-paper/paper.tex b/spec/consensus/consensus-paper/paper.tex new file mode 100644 index 0000000000..22f8b405fc --- /dev/null +++ b/spec/consensus/consensus-paper/paper.tex @@ -0,0 +1,153 @@ +%\documentclass[conference]{IEEEtran} +\documentclass[conference,onecolumn,draft,a4paper]{IEEEtran} +% Add the compsoc option for Computer Society conferences. 
+% +% If IEEEtran.cls has not been installed into the LaTeX system files, +% manually specify the path to it like: +% \documentclass[conference]{../sty/IEEEtran} + + + +% *** GRAPHICS RELATED PACKAGES *** +% +\ifCLASSINFOpdf +\else +\fi + +% correct bad hyphenation here +\hyphenation{op-tical net-works semi-conduc-tor} + +%\usepackage[caption=false,font=footnotesize]{subfig} +\usepackage{tikz} +\usetikzlibrary{decorations,shapes,backgrounds,calc} +\tikzstyle{msg}=[->,black,>=latex] +\tikzstyle{rubber}=[|<->|] +\tikzstyle{announce}=[draw=blue,fill=blue,shape=diamond,right,minimum + height=2mm,minimum width=1.6667mm,inner sep=0pt] +\tikzstyle{decide}=[draw=red,fill=red,shape=isosceles triangle,right,minimum + height=2mm,minimum width=1.6667mm,inner sep=0pt,shape border rotate=90] +\tikzstyle{cast}=[draw=green!50!black,fill=green!50!black,shape=circle,left,minimum + height=2mm,minimum width=1.6667mm,inner sep=0pt] + + +\usepackage{multirow} +\usepackage{graphicx} +\usepackage{epstopdf} +\usepackage{amssymb} +\usepackage{rounddiag} +\graphicspath{{../}} + +\usepackage{technote} +\usepackage{homodel} +\usepackage{enumerate} +%%\usepackage{ulem}\normalem + +% to center caption +\usepackage{caption} + +\newcommand{\textstretch}{1.4} +\newcommand{\algostretch}{1} +\newcommand{\eqnstretch}{0.5} + +\newconstruct{\FOREACH}{\textbf{for each}}{\textbf{do}}{\ENDFOREACH}{} + +%\newconstruct{\ON}{\textbf{on}}{\textbf{do}}{\ENDON}{\textbf{end on}} +\newcommand\With{\textbf{while}} +\newcommand\From{\textbf{from}} +\newcommand\Broadcast{\textbf{broadcast}} +\newcommand\PBroadcast{send} +\newcommand\UpCall{\textbf{UpCall}} +\newcommand\DownCall{\textbf{DownCall}} +\newcommand \Call{\textbf{Call}} +\newident{noop} +\newconstruct{\UPON}{\textbf{upon}}{\textbf{do}}{\ENDUPON}{} + + + +\newcommand{\abcast}{\mathsf{to\mbox{\sf-}broadcast}} +\newcommand{\adeliver}{\mathsf{to\mbox{\sf-}deliver}} + +\newcommand{\ABCAgreement}{\emph{TO-Agreement}} +\newcommand{\ABCIntegrity}{\emph{TO-Integrity}} +\newcommand{\ABCValidity}{\emph{TO-Validity}} +\newcommand{\ABCTotalOrder}{\emph{TO-Order}} +\newcommand{\ABCBoundedDelivery}{\emph{TO-Bounded Delivery}} + + +\newcommand{\tabc}{\mathit{atab\mbox{\sf-}cast}} +\newcommand{\anno}{\mathit{atab\mbox{\sf-}announce}} +\newcommand{\abort}{\mathit{atab\mbox{\sf-}abort}} +\newcommand{\tadel}{\mathit{atab\mbox{\sf-}deliver}} + +\newcommand{\ATABAgreement}{\emph{ATAB-Agreement}} +\newcommand{\ATABAbort}{\emph{ATAB-Abort}} +\newcommand{\ATABIntegrity}{\emph{ATAB-Integrity}} +\newcommand{\ATABValidity}{\emph{ATAB-Validity}} +\newcommand{\ATABAnnounce}{\emph{ATAB-Announcement}} +\newcommand{\ATABTermination}{\emph{ATAB-Termination}} +%\newcommand{\ATABFastAnnounce}{\emph{ATAB-Fast-Announcement}} + +%% Command for observations. 
+\newtheorem{observation}{Observation} + + +%% HO ALGORITHM DEFINITIONS +\newconstruct{\FUNCTION}{\textbf{Function}}{\textbf{:}}{\ENDFUNCTION}{} + +%% Uncomment the following four lines to remove remarks and visible traces of +%% modifications in the document +%%\renewcommand{\sout}[1]{\relaxx} +%%\renewcommand{\uline}[1]{#1} +%% \renewcommand{\uwave}[1]{#1} + \renewcommand{\note}[2][default]{\relax} + + +%% The following commands can be used to generate TR or Conference version of the paper +\newcommand{\tr}[1]{} +\renewcommand{\tr}[1]{#1} +\newcommand{\onlypaper}[1]{#1} +%\renewcommand{\onlypaper}[1]{} +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%\pagestyle{plain} +%\pagestyle{empty} + +%% IEEE tweaks +%\setlength{\IEEEilabelindent}{.5\parindent} +%\setlength{\IEEEiednormlabelsep}{.5\parindent} + +\begin{document} +% +% paper title +% can use linebreaks \\ within to get better formatting as desired +\title{The latest gossip on BFT consensus\vspace{-0.7\baselineskip}} + + + +\author{\IEEEauthorblockN{\large Ethan Buchman, Jae Kwon and Zarko Milosevic\\} + \IEEEauthorblockN{\large Tendermint}\\ + %\\\vspace{-0.5\baselineskip} + \IEEEauthorblockN{September 24, 2018} +} + +% make the title area +\maketitle +\vspace*{0.5em} + +\begin{abstract} +This paper presents Tendermint, a new protocol for ordering events in a distributed network under adversarial conditions. More commonly known as Byzantine Fault Tolerant (BFT) consensus or atomic broadcast, the problem has attracted significant attention in recent years due to the widespread success of blockchain-based digital currencies, such as Bitcoin and Ethereum, which successfully solved the problem in a public setting without a central authority. Tendermint modernizes classic academic work on the subject and simplifies the design of the BFT algorithm by relying on a peer-to-peer gossip protocol among nodes. +\end{abstract} + +%\noindent \textbf{Keywords:} Blockchain, Byzantine Fault Tolerance, State Machine %Replication + +\input{intro} +\input{definitions} +\input{consensus} +\input{proof} +\input{conclusion} + +\bibliographystyle{IEEEtran} +\bibliography{lit} + +%\appendix + +\end{document} diff --git a/spec/consensus/consensus-paper/proof.tex b/spec/consensus/consensus-paper/proof.tex new file mode 100644 index 0000000000..1c84d9b11e --- /dev/null +++ b/spec/consensus/consensus-paper/proof.tex @@ -0,0 +1,280 @@ +\section{Proof of Tendermint consensus algorithm} \label{sec:proof} + +\begin{lemma} \label{lemma:majority-intersection} For all $f\geq 0$, any two +sets of processes with voting power at least equal to $2f+1$ have at least one +correct process in common. \end{lemma} + +\begin{proof} As the total voting power is equal to $n=3f+1$, we have $2(2f+1) + = n+f+1$. This means that the intersection of two sets with the voting + power equal to $2f+1$ contains at least $f+1$ voting power in common, \ie, + at least one correct process (as the total voting power of faulty processes + is $f$). The result follows directly from this. \end{proof} + +\begin{lemma} \label{lemma:locked-decision_value-prevote-v} If $f+1$ correct +processes lock value $v$ in round $r_0$ ($lockedValue = v$ and $lockedRound = +r_0$), then in all rounds $r > r_0$, they send $\Prevote$ for $id(v)$ or +$\nil$. \end{lemma} + +\begin{proof} We prove the result by induction on $r$. + +\emph{Base step $r = r_0 + 1:$} Let's denote with $C$ the set of correct +processes with voting power equal to $f+1$. 
By the rules at +line~\ref{line:tab:recvProposal} and line~\ref{line:tab:acceptProposal}, the +processes from the set $C$ can't accept $\Proposal$ for any value different +from $v$ in round $r$, and therefore can't send a $\li{\Prevote,height_p, +r,id(v')}$ message, if $v' \neq v$. Therefore, the Lemma holds for the base +step. + +\emph{Induction step from $r_1$ to $r_1+1$:} We assume that no process from the +set $C$ has sent $\Prevote$ for values different than $id(v)$ or $\nil$ until +round $r_1 + 1$. We now prove that the Lemma also holds for round $r_1 + 1$. As +processes from the set $C$ send $\Prevote$ for $id(v)$ or $\nil$ in rounds $r_0 +\le r \le r_1$, by Lemma~\ref{lemma:majority-intersection} there is no value +$v' \neq v$ for which it is possible to receive $2f+1$ $\Prevote$ messages in +those rounds (i). Therefore, we have for all processes from the set $C$, +$lockedValue = v$ and $lockedRound \ge r_0$. Let's assume by a contradiction +that a process $q$ from the set $C$ sends $\Prevote$ in round $r_1 + 1$ for +value $id(v')$, where $v' \neq v$. This is possible only by +line~\ref{line:tab:prevote-higher-proposal}. Note that this implies that $q$ +received $2f+1$ $\li{\Prevote,h_q, r,id(v')}$ messages, where $r > r_0$ and $r +< r_1 +1$ (see line~\ref{line:tab:cond-prevote-higher-proposal}). A +contradiction with (i) and Lemma~\ref{lemma:majority-intersection}. +\end{proof} + +\begin{lemma} \label{lemma:agreement} Algorithm~\ref{alg:tendermint} satisfies +Agreement. \end{lemma} + +\begin{proof} Let round $r_0$ be the first round of height $h$ such that some + correct process $p$ decides $v$. We now prove that if some correct process + $q$ decides $v'$ in some round $r \ge r_0$, then $v = v'$. + +In case $r = r_0$, $q$ has received at least $2f+1$ +$\li{\Precommit,h_p,r_0,id(v')}$ messages at line~\ref{line:tab:onDecideRule}, +while $p$ has received at least $2f+1$ $\li{\Precommit,h_p,r_0,id(v)}$ +messages. By Lemma~\ref{lemma:majority-intersection} two sets of messages of +voting power $2f+1$ intersect in at least one correct process. As a correct +process sends a single $\Precommit$ message in a round, then $v=v'$. + +We prove the case $r > r_0$ by contradiction. By the +rule~\ref{line:tab:onDecideRule}, $p$ has received at least $2f+1$ voting-power +equivalent of $\li{\Precommit,h_p,r_0,id(v)}$ messages, i.e., at least $f+1$ +voting-power equivalent correct processes have locked value $v$ in round $r_0$ and have +sent those messages (i). Let denote this set of messages with $C$. On the +other side, $q$ has received at least $2f+1$ voting power equivalent of +$\li{\Precommit,h_q, r,id(v')}$ messages. As the voting power of all faulty +processes is at most $f$, some correct process $c$ has sent one of those +messages. By the rule at line~\ref{line:tab:recvPrevote}, $c$ has locked value +$v'$ in round $r$ before sending $\li{\Precommit,h_q, r,id(v')}$. Therefore $c$ +has received $2f+1$ $\Prevote$ messages for $id(v')$ in round $r > r_0$ (see +line~\ref{line:tab:recvPrevote}). By Lemma~\ref{lemma:majority-intersection}, a +process from the set $C$ has sent $\Prevote$ message for $id(v')$ in round $r$. +A contradiction with (i) and Lemma~\ref{lemma:locked-decision_value-prevote-v}. +\end{proof} + +\begin{lemma} \label{lemma:agreement} Algorithm~\ref{alg:tendermint} satisfies +Validity. \end{lemma} + +\begin{proof} Trivially follows from the rule at line +\ref{line:tab:validDecisionValue} which ensures that only valid values can be +decided. 
\end{proof} + +\begin{lemma} \label{lemma:round-synchronisation} If we assume that: +\begin{enumerate} + \item a correct process $p$ is the first correct process to + enter a round $r>0$ at time $t > GST$ (for every correct process + $c$, $round_c \le r$ at time $t$) + \item the proposer of round $r$ is + a correct process $q$ + \item for every correct process $c$, + $lockedRound_c \le validRound_q$ at time $t$ + \item $\timeoutPropose(r) + > 2\Delta + \timeoutPrecommit(r-1)$, $\timeoutPrevote(r) > 2\Delta$ and + $\timeoutPrecommit(r) > 2\Delta$, +\end{enumerate} +then all correct processes decide in round $r$ before $t + 4\Delta + + \timeoutPrecommit(r-1)$. +\end{lemma} + +\begin{proof} As $p$ is the first correct process to enter round $r$, it + executed the line~\ref{line:tab:nextRound} after $\timeoutPrecommit(r-1)$ + expired. Therefore, $p$ received $2f+1$ $\Precommit$ messages in the round + $r-1$ before time $t$. By the \emph{Gossip communication} property, all + correct processes will receive those messages the latest at time $t + + \Delta$. Correct processes that are in rounds $< r-1$ at time $t$ will + enter round $r-1$ (see the rule at line~\ref{line:tab:nextRound2}) and + trigger $\timeoutPrecommit(r-1)$ (see rule~\ref{line:tab:startTimeoutPrecommit}) + by time $t+\Delta$. Therefore, all correct processes will start round $r$ + by time $t+\Delta+\timeoutPrecommit(r-1)$ (i). + +In the worst case, the process $q$ is the last correct process to enter round +$r$, so $q$ starts round $r$ and sends $\Proposal$ message for some value $v$ +at time $t + \Delta + \timeoutPrecommit(r-1)$. Therefore, all correct processes +receive the $\Proposal$ message from $q$ the latest by time $t + 2\Delta + +\timeoutPrecommit(r-1)$. Therefore, if $\timeoutPropose(r) > 2\Delta + +\timeoutPrecommit(r-1)$, all correct processes will receive $\Proposal$ message +before $\timeoutPropose(r)$ expires. + +By (3) and the rules at line~\ref{line:tab:recvProposal} and +\ref{line:tab:acceptProposal}, all correct processes will accept the +$\Proposal$ message for value $v$ and will send a $\Prevote$ message for +$id(v)$ by time $t + 2\Delta + \timeoutPrecommit(r-1)$. Note that by the +\emph{Gossip communication} property, the $\Prevote$ messages needed to trigger +the rule at line~\ref{line:tab:acceptProposal} are received before time $t + +\Delta$. + +By time $t + 3\Delta + \timeoutPrecommit(r-1)$, all correct processes will receive +$\Proposal$ for $v$ and $2f+1$ corresponding $\Prevote$ messages for $id(v)$. +By the rule at line~\ref{line:tab:recvPrevote}, all correct processes will send +a $\Precommit$ message (see line~\ref{line:tab:precommit-v}) for $id(v)$ by +time $t + 3\Delta + \timeoutPrecommit(r-1)$. Therefore, by time $t + 4\Delta + +\timeoutPrecommit(r-1)$, all correct processes will have received the $\Proposal$ +for $v$ and $2f+1$ $\Precommit$ messages for $id(v)$, so they decide at +line~\ref{line:tab:decide} on $v$. + +This scenario holds if every correct process $q$ sends a $\Precommit$ message +before $\timeoutPrevote(r)$ expires, and if $\timeoutPrecommit(r)$ does not expire +before $t + 4\Delta + \timeoutPrecommit(r-1)$. Let's assume that a correct process +$c_1$ is the first correct process to trigger $\timeoutPrevote(r)$ (see the rule +at line~\ref{line:tab:recvAny2/3Prevote}) at time $t_1 > t$. This implies that +before time $t_1$, $c_1$ received a $\Proposal$ ($step_{c_1}$ must be +$\prevote$ by the rule at line~\ref{line:tab:recvAny2/3Prevote}) and a set of +$2f+1$ $\Prevote$ messages. 
By time $t_1 + \Delta$, all correct processes will +receive those messages. Note that even if some correct process was in the +smaller round before time $t_1$, at time $t_1 + \Delta$ it will start round $r$ +after receiving those messages (see the rule at +line~\ref{line:tab:skipRounds}). Therefore, all correct processes will send +their $\Prevote$ message for $id(v)$ by time $t_1 + \Delta$, and all correct +processes will receive those messages the by time $t_1 + 2\Delta$. Therefore, +as $\timeoutPrevote(r) > 2\Delta$, this ensures that all correct processes receive +$\Prevote$ messages from all correct processes before their respective local +$\timeoutPrevote(r)$ expire. + +On the other hand, $\timeoutPrecommit(r)$ is triggered in a correct process $c_2$ +after it receives any set of $2f+1$ $\Precommit$ messages for the first time. +Let's denote with $t_2 > t$ the earliest point in time $\timeoutPrecommit(r)$ is +triggered in some correct process $c_2$. This implies that $c_2$ has received +at least $f+1$ $\Precommit$ messages for $id(v)$ from correct processes, i.e., +those processes have received $\Proposal$ for $v$ and $2f+1$ $\Prevote$ +messages for $id(v)$ before time $t_2$. By the \emph{Gossip communication} +property, all correct processes will receive those messages by time $t_2 + +\Delta$, and will send $\Precommit$ messages for $id(v)$. Note that even if +some correct processes were at time $t_2$ in a round smaller than $r$, by the +rule at line~\ref{line:tab:skipRounds} they will enter round $r$ by time $t_2 + +\Delta$. Therefore, by time $t_2 + 2\Delta$, all correct processes will +receive $\Proposal$ for $v$ and $2f+1$ $\Precommit$ messages for $id(v)$. So if +$\timeoutPrecommit(r) > 2\Delta$, all correct processes will decide before the +timeout expires. \end{proof} + + +\begin{lemma} \label{lemma:validValue} If a correct process $p$ locks a value + $v$ at time $t_0 > GST$ in some round $r$ ($lockedValue = v$ and + $lockedRound = r$) and $\timeoutPrecommit(r) > 2\Delta$, then all correct + processes set $validValue$ to $v$ and $validRound$ to $r$ before starting + round $r+1$. \end{lemma} + +\begin{proof} In order to prove this Lemma, we need to prove that if the + process $p$ locks a value $v$ at time $t_0$, then no correct process will + leave round $r$ before time $t_0 + \Delta$ (unless it has already set + $validValue$ to $v$ and $validRound$ to $r$). It is sufficient to prove + this, since by the \emph{Gossip communication} property the messages that + $p$ received at time $t_0$ and that triggered rule at + line~\ref{line:tab:recvPrevote} will be received by time $t_0 + \Delta$ by + all correct processes, so all correct processes that are still in round $r$ + will set $validValue$ to $v$ and $validRound$ to $r$ (by the rule at + line~\ref{line:tab:recvPrevote}). To prove this, we need to compute the + earliest point in time a correct process could leave round $r$ without + updating $validValue$ to $v$ and $validRound$ to $r$ (we denote this time + with $t_1$). The Lemma is correct if $t_0 + \Delta < t_1$. + +If the process $p$ locks a value $v$ at time $t_0$, this implies that $p$ +received the valid $\Proposal$ message for $v$ and $2f+1$ +$\li{\Prevote,h,r,id(v)}$ at time $t_0$. At least $f+1$ of those messages are +sent by correct processes. Let's denote this set of correct processes as $C$. By +Lemma~\ref{lemma:majority-intersection} any set of $2f+1$ $\Prevote$ messages +in round $r$ contains at least a single message from the set $C$. 
+ +Let's denote as time $t$ the earliest point in time a correct process, $c_1$, triggered +$\timeoutPrevote(r)$. This implies that $c_1$ received $2f+1$ $\Prevote$ messages +(see the rule at line \ref{line:tab:recvAny2/3Prevote}), where at least one of +those messages was sent by a process $c_2$ from the set $C$. Therefore, process +$c_2$ had received $\Proposal$ message before time $t$. By the \emph{Gossip +communication} property, all correct processes will receive $\Proposal$ and +$2f+1$ $\Prevote$ messages for round $r$ by time $t+\Delta$. The latest point +in time $p$ will trigger $\timeoutPrevote(r)$ is $t+\Delta$\footnote{Note that +even if $p$ was in smaller round at time $t$ it will start round $r$ by time +$t+\Delta$.}. So the latest point in time $p$ can lock the value $v$ in +round $r$ is $t_0 = t+\Delta+\timeoutPrevote(r)$ (as at this point +$\timeoutPrevote(r)$ expires, so a process sends $\Precommit$ $\nil$ and updates +$step$ to $\precommit$, see line \ref{line:tab:onTimeoutPrevote}). + +Note that according to the Algorithm \ref{alg:tendermint}, a correct process +can not send a $\Precommit$ message before receiving $2f+1$ $\Prevote$ +messages. Therefore, no correct process can send a $\Precommit$ message in +round $r$ before time $t$. If a correct process sends a $\Precommit$ message +for $\nil$, it implies that it has waited for the full duration of +$\timeoutPrevote(r)$ (see line +\ref{line:tab:precommit-nil-onTimeout})\footnote{The other case in which a +correct process $\Precommit$ for $\nil$ is after receiving $2f+1$ $Prevote$ for +$\nil$ messages, see the line \ref{line:tab:precommit-v-1}. By +Lemma~\ref{lemma:majority-intersection}, this is not possible in round $r$.}. +Therefore, no correct process can send $\Precommit$ for $\nil$ before time $t + +\timeoutPrevote(r)$ (*). + +A correct process $q$ that enters round $r+1$ must wait (i) $\timeoutPrecommit(r)$ +(see line \ref{line:tab:nextRound}) or (ii) receiving $f+1$ messages from the +round $r+1$ (see the line \ref{line:tab:skipRounds}). In the former case, $q$ +receives $2f+1$ $\Precommit$ messages before starting $\timeoutPrecommit(r)$. If +at least a single $\Precommit$ message from a correct process (at least $f+1$ +voting power equivalent of those messages is sent by correct processes) is for +$\nil$, then $q$ cannot start round $r+1$ before time $t_1 = t + +\timeoutPrevote(r) + \timeoutPrecommit(r)$ (see (*)). Therefore in this case we have: +$t_0 + \Delta < t_1$, i.e., $t+2\Delta+\timeoutPrevote(r) < t + \timeoutPrevote(r) + +\timeoutPrecommit(r)$, and this is true whenever $\timeoutPrecommit(r) > 2\Delta$, so +Lemma holds in this case. + +If in the set of $2f+1$ $\Precommit$ messages $q$ receives, there is at least a +single $\Precommit$ for $id(v)$ message from a correct process $c$, then $q$ +can start the round $r+1$ the earliest at time $t_1 = t+\timeoutPrecommit(r)$. In +this case, by the \emph{Gossip communication} property, all correct processes +will receive $\Proposal$ and $2f+1$ $\Prevote$ messages (that $c$ received +before time $t$) the latest at time $t+\Delta$. Therefore, $q$ will set +$validValue$ to $v$ and $validRound$ to $r$ the latest at time $t+\Delta$. As +$t+\Delta < t+\timeoutPrecommit(r)$, whenever $\timeoutPrecommit(r) > \Delta$, the +Lemma holds also in this case. + +In case (ii), $q$ received at least a single message from a correct process $c$ +from the round $r+1$. 
The earliest point in time $c$ could have started round +$r+1$ is $t+\timeoutPrecommit(r)$ in case it received a $\Precommit$ message for +$v$ from some correct process in the set of $2f+1$ $\Precommit$ messages it +received. The same reasoning as above holds also in this case, so $q$ set +$validValue$ to $v$ and $validRound$ to $r$ the latest by time $t+\Delta$. As +$t+\Delta < t+\timeoutPrecommit(r)$, whenever $\timeoutPrecommit(r) > \Delta$, the +Lemma holds also in this case. \end{proof} + +\begin{lemma} \label{lemma:agreement} Algorithm~\ref{alg:tendermint} satisfies +Termination. \end{lemma} + +\begin{proof} Lemma~\ref{lemma:round-synchronisation} defines a scenario in + which all correct processes decide. We now prove that within a bounded + duration after GST such a scenario will unfold. Let's assume that at time + $GST$ the highest round started by a correct process is $r_0$, and that + there exists a correct process $p$ such that the following holds: for every + correct process $c$, $lockedRound_c \le validRound_p$. Furthermore, we + assume that $p$ will be the proposer in some round $r_1 > r$ (this is + ensured by the $\coord$ function). + +We have two cases to consider. In the first case, for all rounds $r \ge r_0$ +and $r < r_1$, no correct process locks a value (set $lockedRound$ to $r$). So +in round $r_1$ we have the scenario from the +Lemma~\ref{lemma:round-synchronisation}, so all correct processes decides in +round $r_1$. + +In the second case, a correct process locks a value $v$ in round $r_2$, where +$r_2 \ge r_0$ and $r_2 < r_1$. Let's assume that $r_2$ is the highest round +before $r_1$ in which some correct process $q$ locks a value. By Lemma +\ref{lemma:validValue} at the end of round $r_2$ the following holds for all +correct processes $c$: $validValue_c = lockedValue_q$ and $validRound_c = r_2$. +Then in round $r_1$, the conditions for the +Lemma~\ref{lemma:round-synchronisation} holds, so all correct processes decide. +\end{proof} + diff --git a/spec/consensus/consensus-paper/rounddiag.sty b/spec/consensus/consensus-paper/rounddiag.sty new file mode 100644 index 0000000000..a6ca5d8835 --- /dev/null +++ b/spec/consensus/consensus-paper/rounddiag.sty @@ -0,0 +1,62 @@ +% ROUNDDIAG STYLE +% for LaTeX version 2e +% by -- 2008 Martin Hutle +% +% This style file is free software; you can redistribute it and/or +% modify it under the terms of the GNU Lesser General Public +% License as published by the Free Software Foundation; either +% version 2 of the License, or (at your option) any later version. +% +% This style file is distributed in the hope that it will be useful, +% but WITHOUT ANY WARRANTY; without even the implied warranty of +% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +% Lesser General Public License for more details. +% +% You should have received a copy of the GNU Lesser General Public +% License along with this style file; if not, write to the +% Free Software Foundation, Inc., 59 Temple Place - Suite 330, +% Boston, MA 02111-1307, USA. 
+% +\NeedsTeXFormat{LaTeX2e} +\ProvidesPackage{rounddiag} +\typeout{Document Style `rounddiag' - provides simple round diagrams} +% +\RequirePackage{ifthen} +\RequirePackage{calc} +\RequirePackage{tikz} + +\def\rdstretch{3} + +\tikzstyle{msg}=[->,thick,>=latex] +\tikzstyle{rndline}=[dotted] +\tikzstyle{procline}=[dotted] + +\newenvironment{rounddiag}[2]{ +\begin{center} +\begin{tikzpicture} +\foreach \i in {1,...,#1}{ + \draw[procline] (0,#1-\i) node[xshift=-1em]{$p_{\i}$} -- (#2*\rdstretch+1,#1-\i); +} +\foreach \i in {0,...,#2}{ + \draw[rndline] (\i*\rdstretch+0.5,0) -- (\i*\rdstretch+0.5,#1-1); +} +\newcommand{\rdat}[2]{ + (##2*\rdstretch+0.5,#1-##1) +}% +\newcommand{\round}[2]{% + \def\rdround{##1} + \ifthenelse{\equal{##2}{}}{}{ + \node[yshift=-1em] at ({##1*\rdstretch+0.5-0.5*\rdstretch},0) {##2}; + } +}% +\newcommand{\rdmessage}[3]{\draw[msg] + (\rdround*\rdstretch-\rdstretch+0.5,#1-##1) -- node[yshift=1.2ex]{##3} + (\rdround*\rdstretch+0.5,#1-##2);}% +\newcommand{\rdalltoall}{% + \foreach \i in {1,...,#1}{ + \foreach \j in {1,...,#1}{ + { \rdmessage{\i}{\j}{}}}}}% +}{% +\end{tikzpicture} +\end{center} +} diff --git a/spec/consensus/consensus-paper/technote.sty b/spec/consensus/consensus-paper/technote.sty new file mode 100644 index 0000000000..5353f13cd3 --- /dev/null +++ b/spec/consensus/consensus-paper/technote.sty @@ -0,0 +1,118 @@ +\NeedsTeXFormat{LaTeX2e} +\ProvidesPackage{technote}[2007/11/09] +\typeout{Template for quick notes with some useful definitions} + +\RequirePackage{ifthen} +\RequirePackage{calc} +\RequirePackage{amsmath,amssymb,amsthm} +\RequirePackage{epsfig} +\RequirePackage{algorithm} +\RequirePackage[noend]{algorithmicplus} + +\newboolean{technote@noedit} +\setboolean{technote@noedit}{false} +\DeclareOption{noedit}{\setboolean{technote@noedit}{true}} + +\newcounter{technote@lang} +\setcounter{technote@lang}{0} +\DeclareOption{german}{\setcounter{technote@lang}{1}} +\DeclareOption{french}{\setcounter{technote@lang}{2}} + +\DeclareOption{fullpage}{ +\oddsidemargin -10mm % Margin on odd side pages (default=0mm) +\evensidemargin -10mm % Margin on even side pages (default=0mm) +\topmargin -10mm % Top margin space (default=16mm) +\headheight \baselineskip % Height of headers (default=0mm) +\headsep \baselineskip % Separation spc btw header and text (d=0mm) +\footskip 30pt % Separation spc btw text and footer (d=30pt) +\textheight 230mm % Total text height (default=200mm) +\textwidth 180mm % Total text width (default=160mm) +} + +\renewcommand{\algorithmiccomment}[1]{\hfill/* #1 */} +\renewcommand{\algorithmiclnosize}{\scriptsize} + +\newboolean{technote@truenumbers} +\setboolean{technote@truenumbers}{false} +\DeclareOption{truenumbers}{\setboolean{technote@truenumbers}{true}} + +\ProcessOptions + +\newcommand{\N}{\ifthenelse{\boolean{technote@truenumbers}}% + {\mbox{\rm I\hspace{-.5em}N}}% + {\mathbb{N}}} + +\newcommand{\R}{\ifthenelse{\boolean{technote@truenumbers}}% + {\mbox{\rm I\hspace{-.2em}R}}% + {\mathbb{R}}} + +\newcommand{\Z}{\mathbb{Z}} + +\newcommand{\set}[1]{\left\{#1\right\}} +\newcommand{\mathsc}[1]{\mbox{\sc #1}} +\newcommand{\li}[1]{\langle#1\rangle} +\newcommand{\st}{\;s.t.\;} +\newcommand{\Real}{\R} +\newcommand{\Natural}{\N} +\newcommand{\Integer}{\Z} + +% edit commands +\newcommand{\newedit}[2]{ + \newcommand{#1}[2][default]{% + \ifthenelse{\boolean{technote@noedit}}{}{ + \par\vspace{2mm} + \noindent + \begin{tabular}{|l|}\hline + \parbox{\linewidth-\tabcolsep*2}{{\bf #2:}\hfill\ifthenelse{\equal{##1}{default}}{}{##1}}\\\hline + 
\parbox{\linewidth-\tabcolsep*2}{\rule{0pt}{5mm}##2\rule[-2mm]{0pt}{2mm}}\\\hline + \end{tabular} + \par\vspace{2mm} + } + } +} + +\newedit{\note}{Note} +\newedit{\comment}{Comment} +\newedit{\question}{Question} +\newedit{\content}{Content} +\newedit{\problem}{Problem} + +\newcommand{\mnote}[1]{\marginpar{\scriptsize\it + \begin{minipage}[t]{0.8 in} + \raggedright #1 + \end{minipage}}} + +\newcommand{\Insert}[1]{\underline{#1}\marginpar{$|$}} + +\newcommand{\Delete}[1]{\marginpar{$|$} +} + +% lemma, theorem, etc. +\newtheorem{lemma}{Lemma} +\newtheorem{proposition}{Proposition} +\newtheorem{theorem}{Theorem} +\newtheorem{corollary}{Corollary} +\newtheorem{assumption}{Assumption} +\newtheorem{definition}{Definition} + +\gdef\op|{\,|\;} +\gdef\op:{\,:\;} +\newcommand{\assign}{\leftarrow} +\newcommand{\inc}[1]{#1 \assign #1 + 1} +\newcommand{\isdef}{:=} + +\newcommand{\ident}[1]{\mathit{#1}} +\def\newident#1{\expandafter\def\csname #1\endcsname{\ident{#1}}} + +\newcommand{\eg}{{\it e.g.}} +\newcommand{\ie}{{\it i.e.}} +\newcommand{\apriori}{{\it apriori}} +\newcommand{\etal}{{\it et al.}} + +\newcommand\ps@technote{% + \renewcommand\@oddhead{\theheader}% + \let\@evenhead\@oddhead + \renewcommand\@evenfoot + {\hfil\normalfont\textrm{\thepage}\hfil}% + \let\@oddfoot\@evenfoot +} diff --git a/spec/consensus/consensus.md b/spec/consensus/consensus.md new file mode 100644 index 0000000000..5d1526a178 --- /dev/null +++ b/spec/consensus/consensus.md @@ -0,0 +1,352 @@ +--- +order: 1 +--- +# Byzantine Consensus Algorithm + +## Terms + +- The network is composed of optionally connected _nodes_. Nodes + directly connected to a particular node are called _peers_. +- The consensus process in deciding the next block (at some _height_ + `H`) is composed of one or many _rounds_. +- `NewHeight`, `Propose`, `Prevote`, `Precommit`, and `Commit` + represent state machine states of a round. (aka `RoundStep` or + just "step"). +- A node is said to be _at_ a given height, round, and step, or at + `(H,R,S)`, or at `(H,R)` in short to omit the step. +- To _prevote_ or _precommit_ something means to broadcast a [prevote + vote](https://godoc.org/github.com/tendermint/tendermint/types#Vote) + or [first precommit + vote](https://godoc.org/github.com/tendermint/tendermint/types#FirstPrecommit) + for something. +- A vote _at_ `(H,R)` is a vote signed with the bytes for `H` and `R` + included in its [sign-bytes](../core/data_structures.md#vote). +- _+2/3_ is short for "more than 2/3" +- _1/3+_ is short for "1/3 or more" +- A set of +2/3 of prevotes for a particular block or `` at + `(H,R)` is called a _proof-of-lock-change_ or _PoLC_ for short. + +## State Machine Overview + +At each height of the blockchain a round-based protocol is run to +determine the next block. Each round is composed of three _steps_ +(`Propose`, `Prevote`, and `Precommit`), along with two special steps +`Commit` and `NewHeight`. + +In the optimal scenario, the order of steps is: + +```md +NewHeight -> (Propose -> Prevote -> Precommit)+ -> Commit -> NewHeight ->... +``` + +The sequence `(Propose -> Prevote -> Precommit)` is called a _round_. +There may be more than one round required to commit a block at a given +height. Examples for why more rounds may be required include: + +- The designated proposer was not online. +- The block proposed by the designated proposer was not valid. +- The block proposed by the designated proposer did not propagate + in time. 
+- The block proposed was valid, but +2/3 of prevotes for the proposed + block were not received in time for enough validator nodes by the + time they reached the `Precommit` step. Even though +2/3 of prevotes + are necessary to progress to the next step, at least one validator + may have voted `` or maliciously voted for something else. +- The block proposed was valid, and +2/3 of prevotes were received for + enough nodes, but +2/3 of precommits for the proposed block were not + received for enough validator nodes. + +Some of these problems are resolved by moving onto the next round & +proposer. Others are resolved by increasing certain round timeout +parameters over each successive round. + +## State Machine Diagram + +```md + +-------------------------------------+ + v |(Wait til `CommmitTime+timeoutCommit`) + +-----------+ +-----+-----+ + +----------> | Propose +--------------+ | NewHeight | + | +-----------+ | +-----------+ + | | ^ + |(Else, after timeoutPrecommit) v | ++-----+-----+ +-----------+ | +| Precommit | <------------------------+ Prevote | | ++-----+-----+ +-----------+ | + |(When +2/3 Precommits for block found) | + v | ++--------------------------------------------------------------------+ +| Commit | +| | +| * Set CommitTime = now; | +| * Wait for block, then stage/save/commit block; | ++--------------------------------------------------------------------+ +``` + +# Background Gossip + +A node may not have a corresponding validator private key, but it +nevertheless plays an active role in the consensus process by relaying +relevant meta-data, proposals, blocks, and votes to its peers. A node +that has the private keys of an active validator and is engaged in +signing votes is called a _validator-node_. All nodes (not just +validator-nodes) have an associated state (the current height, round, +and step) and work to make progress. + +Between two nodes there exists a `Connection`, and multiplexed on top of +this connection are fairly throttled `Channel`s of information. An +epidemic gossip protocol is implemented among some of these channels to +bring peers up to speed on the most recent state of consensus. For +example, + +- Nodes gossip `PartSet` parts of the current round's proposer's + proposed block. A LibSwift inspired algorithm is used to quickly + broadcast blocks across the gossip network. +- Nodes gossip prevote/precommit votes. A node `NODE_A` that is ahead + of `NODE_B` can send `NODE_B` prevotes or precommits for `NODE_B`'s + current (or future) round to enable it to progress forward. +- Nodes gossip prevotes for the proposed PoLC (proof-of-lock-change) + round if one is proposed. +- Nodes gossip to nodes lagging in blockchain height with block + [commits](https://godoc.org/github.com/tendermint/tendermint/types#Commit) + for older blocks. +- Nodes opportunistically gossip `ReceivedVote` messages to hint peers what + votes it already has. +- Nodes broadcast their current state to all neighboring peers. (but + is not gossiped further) + +There's more, but let's not get ahead of ourselves here. + +## Proposals + +A proposal is signed and published by the designated proposer at each +round. The proposer is chosen by a deterministic and non-choking round +robin selection algorithm that selects proposers in proportion to their +voting power (see +[implementation](https://github.com/tendermint/tendermint/blob/master/types/validator_set.go)). 
+ +A proposal at `(H,R)` is composed of a block and an optional latest +`PoLC-Round < R` which is included iff the proposer knows of one. This +hints the network to allow nodes to unlock (when safe) to ensure the +liveness property. + +## State Machine Spec + +### Propose Step (height:H,round:R) + +Upon entering `Propose`: + +- The designated proposer proposes a block at `(H,R)`. + +The `Propose` step ends: + +- After `timeoutProposeR` after entering `Propose`. --> goto + `Prevote(H,R)` +- After receiving proposal block and all prevotes at `PoLC-Round`. --> + goto `Prevote(H,R)` +- After [common exit conditions](#common-exit-conditions) + +### Prevote Step (height:H,round:R) + +Upon entering `Prevote`, each validator broadcasts its prevote vote. + +- First, if the validator is locked on a block since `LastLockRound` + but now has a PoLC for something else at round `PoLC-Round` where + `LastLockRound < PoLC-Round < R`, then it unlocks. +- If the validator is still locked on a block, it prevotes that. +- Else, if the proposed block from `Propose(H,R)` is good, it + prevotes that. +- Else, if the proposal is invalid or wasn't received on time, it + prevotes ``. + +The `Prevote` step ends: + +- After +2/3 prevotes for a particular block or ``. -->; goto + `Precommit(H,R)` +- After `timeoutPrevote` after receiving any +2/3 prevotes. --> goto + `Precommit(H,R)` +- After [common exit conditions](#common-exit-conditions) + +### Precommit Step (height:H,round:R) + +Upon entering `Precommit`, each validator broadcasts its precommit vote. + +- If the validator has a PoLC at `(H,R)` for a particular block `B`, it + (re)locks (or changes lock to) and precommits `B` and sets + `LastLockRound = R`. +- Else, if the validator has a PoLC at `(H,R)` for ``, it unlocks + and precommits ``. +- Else, it keeps the lock unchanged and precommits ``. + +A precommit for `` means "I didn’t see a PoLC for this round, but I +did get +2/3 prevotes and waited a bit". + +The Precommit step ends: + +- After +2/3 precommits for ``. --> goto `Propose(H,R+1)` +- After `timeoutPrecommit` after receiving any +2/3 precommits. --> goto + `Propose(H,R+1)` +- After [common exit conditions](#common-exit-conditions) + +### Common exit conditions + +- After +2/3 precommits for a particular block. --> goto + `Commit(H)` +- After any +2/3 prevotes received at `(H,R+x)`. --> goto + `Prevote(H,R+x)` +- After any +2/3 precommits received at `(H,R+x)`. --> goto + `Precommit(H,R+x)` + +### Commit Step (height:H) + +- Set `CommitTime = now()` +- Wait until block is received. --> goto `NewHeight(H+1)` + +### NewHeight Step (height:H) + +- Move `Precommits` to `LastCommit` and increment height. +- Set `StartTime = CommitTime+timeoutCommit` +- Wait until `StartTime` to receive straggler commits. --> goto + `Propose(H,0)` + +## Proofs + +### Proof of Safety + +Assume that at most -1/3 of the voting power of validators is byzantine. +If a validator commits block `B` at round `R`, it's because it saw +2/3 +of precommits at round `R`. This implies that 1/3+ of honest nodes are +still locked at round `R' > R`. These locked validators will remain +locked until they see a PoLC at `R' > R`, but this won't happen because +1/3+ are locked and honest, so at most -2/3 are available to vote for +anything other than `B`. + +### Proof of Liveness + +If 1/3+ honest validators are locked on two different blocks from +different rounds, a proposers' `PoLC-Round` will eventually cause nodes +locked from the earlier round to unlock. 
Eventually, the designated +proposer will be one that is aware of a PoLC at the later round. Also, +`timeoutProposalR` increments with round `R`, while the size of a +proposal are capped, so eventually the network is able to "fully gossip" +the whole proposal (e.g. the block & PoLC). + +### Proof of Fork Accountability + +Define the JSet (justification-vote-set) at height `H` of a validator +`V1` to be all the votes signed by the validator at `H` along with +justification PoLC prevotes for each lock change. For example, if `V1` +signed the following precommits: `Precommit(B1 @ round 0)`, +`Precommit( @ round 1)`, `Precommit(B2 @ round 4)` (note that no +precommits were signed for rounds 2 and 3, and that's ok), +`Precommit(B1 @ round 0)` must be justified by a PoLC at round 0, and +`Precommit(B2 @ round 4)` must be justified by a PoLC at round 4; but +the precommit for `` at round 1 is not a lock-change by definition +so the JSet for `V1` need not include any prevotes at round 1, 2, or 3 +(unless `V1` happened to have prevoted for those rounds). + +Further, define the JSet at height `H` of a set of validators `VSet` to +be the union of the JSets for each validator in `VSet`. For a given +commit by honest validators at round `R` for block `B` we can construct +a JSet to justify the commit for `B` at `R`. We say that a JSet +_justifies_ a commit at `(H,R)` if all the committers (validators in the +commit-set) are each justified in the JSet with no duplicitous vote +signatures (by the committers). + +- **Lemma**: When a fork is detected by the existence of two + conflicting [commits](../core/data_structures.md#commit), the + union of the JSets for both commits (if they can be compiled) must + include double-signing by at least 1/3+ of the validator set. + **Proof**: The commit cannot be at the same round, because that + would immediately imply double-signing by 1/3+. Take the union of + the JSets of both commits. If there is no double-signing by at least + 1/3+ of the validator set in the union, then no honest validator + could have precommitted any different block after the first commit. + Yet, +2/3 did. Reductio ad absurdum. + +As a corollary, when there is a fork, an external process can determine +the blame by requiring each validator to justify all of its round votes. +Either we will find 1/3+ who cannot justify at least one of their votes, +and/or, we will find 1/3+ who had double-signed. + +### Alternative algorithm + +Alternatively, we can take the JSet of a commit to be the "full commit". +That is, if light clients and validators do not consider a block to be +committed unless the JSet of the commit is also known, then we get the +desirable property that if there ever is a fork (e.g. there are two +conflicting "full commits"), then 1/3+ of the validators are immediately +punishable for double-signing. + +There are many ways to ensure that the gossip network efficiently share +the JSet of a commit. One solution is to add a new message type that +tells peers that this node has (or does not have) a +2/3 majority for B +(or) at (H,R), and a bitarray of which votes contributed towards that +majority. Peers can react by responding with appropriate votes. + +We will implement such an algorithm for the next iteration of the +Tendermint consensus protocol. + +Other potential improvements include adding more data in votes such as +the last known PoLC round that caused a lock change, and the last voted +round/step (or, we may require that validators not skip any votes). 
This +may make JSet verification/gossip logic easier to implement. + +### Censorship Attacks + +Due to the definition of a block +[commit](https://github.com/tendermint/tendermint/blob/master/docs/tendermint-core/validators.md), any 1/3+ coalition of +validators can halt the blockchain by not broadcasting their votes. Such +a coalition can also censor particular transactions by rejecting blocks +that include these transactions, though this would result in a +significant proportion of block proposals to be rejected, which would +slow down the rate of block commits of the blockchain, reducing its +utility and value. The malicious coalition might also broadcast votes in +a trickle so as to grind blockchain block commits to a near halt, or +engage in any combination of these attacks. + +If a global active adversary were also involved, it can partition the +network in such a way that it may appear that the wrong subset of +validators were responsible for the slowdown. This is not just a +limitation of Tendermint, but rather a limitation of all consensus +protocols whose network is potentially controlled by an active +adversary. + +### Overcoming Forks and Censorship Attacks + +For these types of attacks, a subset of the validators through external +means should coordinate to sign a reorg-proposal that chooses a fork +(and any evidence thereof) and the initial subset of validators with +their signatures. Validators who sign such a reorg-proposal forego its +collateral on all other forks. Clients should verify the signatures on +the reorg-proposal, verify any evidence, and make a judgement or prompt +the end-user for a decision. For example, a phone wallet app may prompt +the user with a security warning, while a refrigerator may accept any +reorg-proposal signed by +1/2 of the original validators. + +No non-synchronous Byzantine fault-tolerant algorithm can come to +consensus when 1/3+ of validators are dishonest, yet a fork assumes that +1/3+ of validators have already been dishonest by double-signing or +lock-changing without justification. So, signing the reorg-proposal is a +coordination problem that cannot be solved by any non-synchronous +protocol (i.e. automatically, and without making assumptions about the +reliability of the underlying network). It must be provided by means +external to the weakly-synchronous Tendermint consensus algorithm. For +now, we leave the problem of reorg-proposal coordination to human +coordination via internet media. Validators must take care to ensure +that there are no significant network partitions, to avoid situations +where two conflicting reorg-proposals are signed. + +Assuming that the external coordination medium and protocol is robust, +it follows that forks are less of a concern than [censorship +attacks](#censorship-attacks). + +### Canonical vs subjective commit + +We distinguish between "canonical" and "subjective" commits. A subjective commit is what +each validator sees locally when they decide to commit a block. The canonical commit is +what is included by the proposer of the next block in the `LastCommit` field of +the block. This is what makes it canonical and ensures every validator agrees on the canonical commit, +even if it is different from the +2/3 votes a validator has seen, which caused the validator to +commit the respective block. Each block contains a canonical +2/3 commit for the previous +block. 
diff --git a/spec/consensus/creating-proposal.md b/spec/consensus/creating-proposal.md new file mode 100644 index 0000000000..cb43c8ebb4 --- /dev/null +++ b/spec/consensus/creating-proposal.md @@ -0,0 +1,43 @@ +--- +order: 2 +--- +# Creating a proposal + +A block consists of a header, transactions, votes (the commit), +and a list of evidence of malfeasance (ie. signing conflicting votes). + +We include no more than 1/10th of the maximum block size +(`ConsensusParams.Block.MaxBytes`) of evidence with each block. + +## Reaping transactions from the mempool + +When we reap transactions from the mempool, we calculate maximum data +size by subtracting maximum header size (`MaxHeaderBytes`), the maximum +amino overhead for a block (`MaxAminoOverheadForBlock`), the size of +the last commit (if present) and evidence (if present). While reaping +we account for amino overhead for each transaction. + +```go +func MaxDataBytes(maxBytes int64, valsCount, evidenceCount int) int64 { + return maxBytes - + MaxOverheadForBlock - + MaxHeaderBytes - + int64(valsCount)*MaxVoteBytes - + int64(evidenceCount)*MaxEvidenceBytes +} +``` + +## Validating transactions in the mempool + +Before we accept a transaction in the mempool, we check if it's size is no more +than {MaxDataSize}. {MaxDataSize} is calculated using the same formula as +above, except we subtract the max number of evidence, {MaxNum} by the maximum size of evidence + +```go +func MaxDataBytesUnknownEvidence(maxBytes int64, valsCount int) int64 { + return maxBytes - + MaxOverheadForBlock - + MaxHeaderBytes - + (maxNumEvidence * MaxEvidenceBytes) +} +``` diff --git a/spec/consensus/evidence.md b/spec/consensus/evidence.md new file mode 100644 index 0000000000..edf9e53ffa --- /dev/null +++ b/spec/consensus/evidence.md @@ -0,0 +1,199 @@ +# Evidence + +Evidence is an important component of Tendermint's security model. Whilst the core +consensus protocol provides correctness guarantees for state machine replication +that can tolerate less than 1/3 failures, the evidence system looks to detect and +gossip byzantine faults whose combined power is greater than or equal to 1/3. It is worth noting that +the evidence system is designed purely to detect possible attacks, gossip them, +commit them on chain and inform the application running on top of Tendermint. +Evidence in itself does not punish "bad actors", this is left to the discretion +of the application. A common form of punishment is slashing where the validators +that were caught violating the protocol have all or a portion of their voting +power removed. Evidence, given the assumption that 1/3+ of the network is still +byzantine, is susceptible to censorship and should therefore be considered added +security on a "best effort" basis. + +This document walks through the various forms of evidence, how they are detected, +gossiped, verified and committed. + +> NOTE: Evidence here is internal to tendermint and should not be confused with +> application evidence + +## Detection + +### Equivocation + +Equivocation is the most fundamental of byzantine faults. Simply put, to prevent +replication of state across all nodes, a validator tries to convince some subset +of nodes to commit one block whilst convincing another subset to commit a +different block. This is achieved by double voting (hence +`DuplicateVoteEvidence`). A successful duplicate vote attack requires greater +than 1/3 voting power and a (temporary) network partition between the aforementioned +subsets. 
This is because in consensus, votes are gossiped around. When a node +observes two conflicting votes from the same peer, it will use the two votes of +evidence and begin gossiping this evidence to other nodes. [Verification](#duplicatevoteevidence) is addressed further down. + +```go +type DuplicateVoteEvidence struct { + VoteA Vote + VoteB Vote + + // and abci specific fields +} +``` + +### Light Client Attacks + +Light clients also comply with the 1/3+ security model, however, by using a +different, more lightweight verification method they are subject to a +different kind of 1/3+ attack whereby the byzantine validators could sign an +alternative light block that the light client will think is valid. Detection, +explained in greater detail +[here](../light-client/detection/detection_003_reviewed.md), involves comparison +with multiple other nodes in the hope that at least one is "honest". An "honest" +node will return a challenging light block for the light client to validate. If +this challenging light block also meets the +[validation criteria](../light-client/verification/verification_001_published.md) +then the light client sends the "forged" light block to the node. +[Verification](#lightclientattackevidence) is addressed further down. + +```go +type LightClientAttackEvidence struct { + ConflictingBlock LightBlock + CommonHeight int64 + + // and abci specific fields +} +``` + +## Verification + +If a node receives evidence, it will first try to verify it, then persist it. +Evidence of byzantine behavior should only be committed once (uniqueness) and +should be committed within a certain period from the point that it occurred +(timely). Timelines is defined by the `EvidenceParams`: `MaxAgeNumBlocks` and +`MaxAgeDuration`. In Proof of Stake chains where validators are bonded, evidence +age should be less than the unbonding period so validators still can be +punished. Given these two propoerties the following initial checks are made. + +1. Has the evidence expired? This is done by taking the height of the `Vote` + within `DuplicateVoteEvidence` or `CommonHeight` within + `LightClientAttakEvidence`. The evidence height is then used to retrieve the + header and thus the time of the block that corresponds to the evidence. If + `CurrentHeight - MaxAgeNumBlocks > EvidenceHeight` && `CurrentTime - + MaxAgeDuration > EvidenceTime`, the evidence is considered expired and + ignored. + +2. Has the evidence already been committed? The evidence pool tracks the hash of + all committed evidence and uses this to determine uniqueness. If a new + evidence has the same hash as a committed one, the new evidence will be + ignored. + +### DuplicateVoteEvidence + +Valid `DuplicateVoteEvidence` must adhere to the following rules: + +- Validator Address, Height, Round and Type must be the same for both votes + +- BlockID must be different for both votes (BlockID can be for a nil block) + +- Validator must have been in the validator set at that height + +- Vote signature must be correctly signed. This also uses `ChainID` so we know + that the fault occurred on this chain + +### LightClientAttackEvidence + +Valid Light Client Attack Evidence must adhere to the following rules: + +- If the header of the light block is invalid, thus indicating a lunatic attack, + the node must check that they can use `verifySkipping` from their header at + the common height to the conflicting header + +- If the header is valid, then the validator sets are the same and this is + either a form of equivocation or amnesia. 
We therefore check that 2/3 of the + validator set also signed the conflicting header. + +- The nodes own header at the same height as the conflicting header must have a + different hash to the conflicting header. + +- If the nodes latest header is less in height to the conflicting header, then + the node must check that the conflicting block has a time that is less than + this latest header (This is a forward lunatic attack). + +## Gossiping + +If a node verifies evidence it then broadcasts it to all peers, continously sending +the same evidence once every 10 seconds until the evidence is seen on chain or +expires. + +## Commiting on Chain + +Evidence takes strict priority over regular transactions, thus a block is filled +with evidence first and transactions take up the remainder of the space. To +mitigate the threat of an already punished node from spamming the network with +more evidence, the size of the evidence in a block can be capped by +`EvidenceParams.MaxBytes`. Nodes receiving blocks with evidence will validate +the evidence before sending `Prevote` and `Precommit` votes. The evidence pool +will usually cache verifications so that this process is much quicker. + +## Sending Evidence to the Application + +After evidence is committed, the block is then processed by the block executor +which delivers the evidence to the application via `EndBlock`. Evidence is +stripped of the actual proof, split up per faulty validator and only the +validator, height, time and evidence type is sent. + +```proto +enum EvidenceType { + UNKNOWN = 0; + DUPLICATE_VOTE = 1; + LIGHT_CLIENT_ATTACK = 2; +} + +message Evidence { + EvidenceType type = 1; + // The offending validator + Validator validator = 2 [(gogoproto.nullable) = false]; + // The height when the offense occurred + int64 height = 3; + // The corresponding time where the offense occurred + google.protobuf.Timestamp time = 4 [ + (gogoproto.nullable) = false, (gogoproto.stdtime) = true]; + // Total voting power of the validator set in case the ABCI application does + // not store historical validators. + // https://github.com/tendermint/tendermint/issues/4581 + int64 total_voting_power = 5; +} +``` + +`DuplicateVoteEvidence` and `LightClientAttackEvidence` are self-contained in +the sense that the evidence can be used to derive the `abci.Evidence` that is +sent to the application. Because of this, extra fields are necessary: + +```go +type DuplicateVoteEvidence struct { + VoteA *Vote + VoteB *Vote + + // abci specific information + TotalVotingPower int64 + ValidatorPower int64 + Timestamp time.Time +} + +type LightClientAttackEvidence struct { + ConflictingBlock *LightBlock + CommonHeight int64 + + // abci specific information + ByzantineValidators []*Validator + TotalVotingPower int64 + Timestamp time.Time +} +``` + +These ABCI specific fields don't affect validity of the evidence itself but must +be consistent amongst nodes and agreed upon on chain. If evidence with the +incorrect abci information is sent, a node will create new evidence from it and +replace the ABCI fields with the correct information. diff --git a/spec/consensus/light-client/README.md b/spec/consensus/light-client/README.md new file mode 100644 index 0000000000..44b9e0c762 --- /dev/null +++ b/spec/consensus/light-client/README.md @@ -0,0 +1,9 @@ +--- +order: 1 +parent: + title: Light Client + order: false +--- +# Tendermint Light Client Protocol + +Deprecated, please see [light-client](../../light-client/README.md). 
diff --git a/spec/consensus/light-client/accountability.md b/spec/consensus/light-client/accountability.md new file mode 100644 index 0000000000..6684021d63 --- /dev/null +++ b/spec/consensus/light-client/accountability.md @@ -0,0 +1,3 @@ +# Fork accountability + +Deprecated, please see [light-client/accountability](../../light-client/accountability/README.md). diff --git a/spec/consensus/light-client/assets/light-node-image.png b/spec/consensus/light-client/assets/light-node-image.png new file mode 100644 index 0000000000..f0b93c6e41 Binary files /dev/null and b/spec/consensus/light-client/assets/light-node-image.png differ diff --git a/spec/consensus/light-client/detection.md b/spec/consensus/light-client/detection.md new file mode 100644 index 0000000000..484f6094b6 --- /dev/null +++ b/spec/consensus/light-client/detection.md @@ -0,0 +1,3 @@ +# Detection + +Deprecated, please see [light-client/detection](../../light-client/detection/README.md). diff --git a/spec/consensus/light-client/verification.md b/spec/consensus/light-client/verification.md new file mode 100644 index 0000000000..7f3ab47184 --- /dev/null +++ b/spec/consensus/light-client/verification.md @@ -0,0 +1,3 @@ +# Core Verification + +Deprecated, please see [light-client/accountability](../../light-client/verification/README.md). diff --git a/spec/consensus/proposer-based-timestamp/README.md b/spec/consensus/proposer-based-timestamp/README.md new file mode 100644 index 0000000000..8e3acf9d6e --- /dev/null +++ b/spec/consensus/proposer-based-timestamp/README.md @@ -0,0 +1,157 @@ +# Proposer-Based Timestamps (PBTS) + +This section describes a version of the Tendermint consensus protocol +that uses proposer-based timestamps. + +## Context + +Tendermint provides a deterministic, Byzantine fault-tolerant, source of time, +defined by the `Time` field present in the headers of committed blocks. + +In the current consensus implementation, the timestamp of a block is +computed by the [`BFTTime`][bfttime] algorithm: + +- Validators include a timestamp in the `Precommit` messages they broadcast. +Timestamps are retrieved from the validators' local clocks, +with the only restriction that they must be **monotonic**: + + - The timestamp of a `Precommit` message voting for a block + cannot be earlier than the `Time` field of that block; + +- The timestamp of a block is deterministically computed from the timestamps of +a set of `Precommit` messages that certify the commit of the previous block. +This certificate, a set of `Precommit` messages from a round of the previous height, +is selected by the block's proposer and stored in the `Commit` field of the block: + + - The block timestamp is the *median* of the timestamps of the `Precommit` messages + included in the `Commit` field, weighted by their voting power. + Block timestamps are **monotonic** because + timestamps of valid `Precommit` messages are monotonic; + +Assuming that the voting power controlled by Byzantine validators is bounded by `f`, +the cumulative voting power of any valid `Commit` set must be at least `2f+1`. +As a result, the timestamp computed by `BFTTime` is not influenced by Byzantine validators, +as the weighted median of `Commit` timestamps comes from the clock of a non-faulty validator. + +Tendermint does not make any assumptions regarding the clocks of (correct) validators, +as block timestamps have no impact in the consensus protocol. 
+However, the `Time` field of committed blocks is used by other components of Tendermint, +such as IBC, the evidence, staking, and slashing modules. +And it is used based on the common belief that block timestamps +should bear some resemblance to real time, which is **not guaranteed**. + +A more comprehensive discussion of the limitations of `BFTTime` +can be found in the [first draft][main_v1] of this proposal. +Of particular interest is to possibility of having validators equipped with "faulty" clocks, +not fairly accurate with real time, that control more than `f` voting power, +plus the proposer's flexibility when selecting a `Commit` set, +and thus determining the timestamp for a block. + +## Proposal + +In the proposed solution, the timestamp of a block is assigned by its +proposer, according with its local clock. +In other words, the proposer of a block also *proposes* a timestamp for the block. +Validators can accept or reject a proposed block. +A block is only accepted if its timestamp is acceptable. +A proposed timestamp is acceptable if it is *received* within a certain time window, +determined by synchronous parameters. + +PBTS therefore augments the system model considered by Tendermint with *synchronous assumptions*: + +- **Synchronized clocks**: simultaneous clock reads at any two correct validators +differ by at most `PRECISION`; + +- **Bounded message delays**: the end-to-end delay for delivering a message to all correct validators +is bounded by `MSGDELAY`. +This assumption is restricted to `Proposal` messages, broadcast by proposers. + +`PRECISION` and `MSGDELAY` are consensus parameters, shared by all validators, +that define whether the timestamp of a block is acceptable. +Let `t` be the time, read from its local clock, at which a validator +receives, for the first time, a proposal with timestamp `ts`: + +- **[Time-Validity]** The proposed timestamp `ts` received at local time `t` +is accepted if it satisfies the **timely** predicate: + > `ts - PRECISION <= t <= ts + MSGDELAY + PRECISION` + +The left inequality of the *timely* predicate establishes that proposed timestamps +should be in the past, when adjusted by the clocks `PRECISION`. +The right inequality of the *timely* predicate establishes that proposed timestamps +should not be too much in the past, more precisely, not more than `MSGDELAY` in the past, +when adjusted by the clocks `PRECISION`. + +A more detailed and formalized description is available in the +[System Model and Properties][sysmodel] document + +## Implementation + +The implementation of PBTS requires some changes in Tendermint consensus algorithm, +summarized below: + +- A proposer timestamps a block with the current time, read from its local clock. +The block's timestamp represents the time at which it was assembled +(after the `getValue()` call in line 18 of the [arXiv][arXiv] algorithm): + + - Block timestamps are definitive, meaning that the original timestamp + is retained when a block is re-proposed (line 16); + + - To preserve monotonicity, a proposer might need to wait until its clock + reads a time greater than the timestamp of the previous block; + +- A validator only prevotes for *timely* blocks, +that is, blocks whose timestamps are considered *timely* (compared to the original Tendermint consensus, a check is added to line 23). 
+If the block proposed in a round is considered *untimely*, +the validator prevotes `nil` (line 26): + + - Validators register the time at which they received `Proposal` messages, + in order to evaluate the *timely* predicate; + + - Blocks that are re-proposed because they received `2f+1 Prevotes` + in a previous round (line 28) are not subject to the *timely* predicate, + as they have already been evaluated as *timely* at a previous round. + +The more complex change proposed regards blocks that can be re-proposed in multiple rounds. +The current solution improves the [first version of the specification][algorithm_v1] (that never had been implemented) +by simplifying the way this situation is handled, +from a recursive reasoning regarding valid blocks that are re-proposed. + +The full solution is detailed and formalized in the [Protocol Specification][algorithm] document. + +## Further details + +- [System Model and Properties][sysmodel] +- [Protocol Specification][algorithm] +- [TLA+ Specification][proposertla] (first draft, not updated) + +### Open issues + +- [PBTS: evidence #355][issue355]: not really clear the context, probably not going to be solved. +- [PBTS: should synchrony parameters be adaptive? #371][issue371] +- [PBTS: Treat proposal and block parts explicitly in the spec #372][issue372] +- [PBTS: margins for proposal times assigned by Byzantine proposers #377][issue377] + +### Closed issues + +- [Proposer time - fix message filter condition #353][issue353] +- [PBTS: association between timely predicate and timeout_commit #370][issue370] + +[main_v1]: ./v1/pbts_001_draft.md + +[algorithm]: ./pbts-algorithm_002_draft.md +[algorithm_v1]: ./v1/pbts-algorithm_001_draft.md + +[sysmodel]: ./pbts-sysmodel_002_draft.md +[sysmodel_v1]: ./v1/pbts-sysmodel_001_draft.md + +[proposertla]: ./tla/TendermintPBT_001_draft.tla + +[bfttime]: https://github.com/tendermint/tendermint/blob/master/spec/consensus/bft-time.md +[arXiv]: https://arxiv.org/pdf/1807.04938.pdf + +[issue353]: https://github.com/tendermint/spec/issues/353 +[issue355]: https://github.com/tendermint/spec/issues/355 +[issue370]: https://github.com/tendermint/spec/issues/370 +[issue371]: https://github.com/tendermint/spec/issues/371 +[issue372]: https://github.com/tendermint/spec/issues/372 +[issue377]: https://github.com/tendermint/spec/issues/377 diff --git a/spec/consensus/proposer-based-timestamp/pbts-algorithm_002_draft.md b/spec/consensus/proposer-based-timestamp/pbts-algorithm_002_draft.md new file mode 100644 index 0000000000..f2ec140363 --- /dev/null +++ b/spec/consensus/proposer-based-timestamp/pbts-algorithm_002_draft.md @@ -0,0 +1,148 @@ +# PBTS: Protocol Specification + +## Proposal Time + +PBTS computes for a proposed value `v` the proposal time `v.time`, with bounded difference to the actual real-time the proposed value was generated. +The proposal time is read from the clock of the process that proposes a value for the first time, its original proposer. + +With PBTS, therefore, we assume that processes have access to **synchronized clocks**. +The proper definition of what it means can be found in the [system model][sysmodel], +but essentially we assume that two correct processes do not simultaneous read from their clocks +time values that differ more than `PRECISION`, which is a system parameter. + +### Proposal times are definitive + +When a value `v` is produced by a process, it also assigns the associated proposal time `v.time`. 
+If the same value `v` is then re-proposed in a subsequent round of consensus, +it retains its original time, assigned by its original proposer. + +A value `v` should re-proposed when it becomes locked by the network, i.e., when it receives `2f + 1 PREVOTES` in a round `r` of consensus. +This means that processes with `2f + 1`-equivalent voting power accepted, in round `r`, both `v` and its associated time `v.time`. +Since the originally proposed value and its associated time were considered valid, there is no reason for reassigning `v.time`. + +In the [first version][algorithm_v1] of this specification, proposals were defined as pairs `(v, time)`. +In addition, the same value `v` could be proposed, in different rounds, but would be associated to distinct times each time it was reproposed. +Since this possibility does not exist in this second specification, the proposal time became part of the proposed value. +With this simplification, several small changes to the [arXiv][arXiv] algorithm are no longer required. + +## Time Monotonicity + +Values decided in successive heights of consensus must have increasing times, so: + +- Monotonicity: for any process `p` and any two decided heights `h` and `h'`, if `h > h'` then `decision_p[h].time > decision_p[h'].time`. + +For ensuring time monotonicity, it is enough to ensure that a value `v` proposed by process `p` at height `h_p` has `v.time > decision_p[h_p-1].time`. +So, if process `p` is the proposer of a round of height `h_p` and reads from its clock a time `now_p <= decision_p[h_p-1]`, +it should postpone the generation of its proposal until `now_p > decision_p[h_p-1]`. + +> Although it should be considered, this scenario is unlikely during regular operation, +as from `decision_p[h_p-1].time` and the start of height `h_p`, a complete consensus instance need to terminate. + +Notice that monotonicity is not introduced by this proposal, being already ensured by [`BFTTime`][bfttime]. +In `BFTTime`, the `Timestamp` field of every `Precommit` message of height `h_p` sent by a correct process is required to be larger than `decision_p[h_p-1].time`, as one of such `Timestamp` fields becomes the time assigned to a value proposed at height `h_p`. + +The time monotonicity of values proposed in heights of consensus is verified by the `valid()` predicate, to which every proposed value is submitted. +A value rejected by the `valid()` implementation is not accepted by any correct process. + +## Timely Proposals + +PBTS introduces a new requirement for a process to accept a proposal: the proposal must be `timely`. +It is a temporal requirement, associated with the following synchrony (that is, timing) +[assumptions][sysmodel] regarding the behavior of processes and the network: + +- Synchronized clocks: the values simultaneously read from clocks of any two correct processes differ by at most `PRECISION`; +- Bounded transmission delays: the real time interval between the sending of a proposal at a correct process, and the reception of the proposal at any correct process is upper bounded by `MSGDELAY`. + +#### **[PBTS-RECEPTION-STEP.1]** + +Let `now_p` be the time, read from the clock of process `p`, at which `p` receives the proposed value `v`. +The proposal is considered `timely` by `p` when: + +1. `now_p >= v.time - PRECISION` +1. `now_p <= v.time + MSGDELAY + PRECISION` + +The first condition derives from the fact that the generation and sending of `v` precedes its reception. 
+The minimum receiving time `now_p` for `v` be considered `timely` by `p` is derived from the extreme scenario when +the clock of `p` is `PRECISION` *behind* of the clock of the proposer of `v`, and the proposal's transmission delay is `0` (minimum). + +The second condition derives from the assumption of an upper bound for the transmission delay of a proposal. +The maximum receiving time `now_p` for `v` be considered `timely` by `p` is derived from the extreme scenario when +the clock of `p` is `PRECISION` *ahead* of the clock of the proposer of `v`, and the proposal's transmission delay is `MSGDELAY` (maximum). + +## Updated Consensus Algorithm + +The following changes are proposed for the algorithm in the [arXiv paper][arXiv]. + +#### New `StartRound` + +There are two additions to the `propose` round step when executed by the `proposer` of a round: + +1. to ensure time monotonicity, the proposer does not propose a value until its current local time becomes greater than the previously decided value's time +1. when the proposer produce a new proposal it sets the proposal's time to its current local time + - no changes are made to the logic when a proposer has a non-nil `validValue`, which retains its original proposal time. + +#### **[PBTS-ALG-STARTROUND.1]** + +```go +function StartRound(round) { + round_p ← round + step_p ← propose + if proposer(h_p, round_p) = p { + wait until now_p > decision_p[h_p-1].time // time monotonicity + if validValue_p != nil { + proposal ← validValue_p + } else { + proposal ← getValue() + proposal.time ← now_p // proposal time + } + broadcast ⟨PROPOSAL, h_p, round_p, proposal, validRound_p⟩ + } else { + schedule OnTimeoutPropose(h_p,round_p) to be executed after timeoutPropose(round_p) + } +} +``` + +#### New Rule Replacing Lines 22 - 27 + +The rule on line 22 applies to values `v` proposed for the first time, i.e., for proposals not backed by `2f + 1 PREVOTE`s for `v` in a previous round. +The `PROPOSAL` message, in this case, carry `-1` in its `validRound` field. + +The new rule for issuing a `PREVOTE` for a proposed value `v` requires the value to be `timely`. +As the `timely` predicate is evaluated in the moment that the value is received, +as part of a `PROPOSAL` message, we require the `PROPOSAL` message to be `timely`. + +#### **[PBTS-ALG-UPON-PROP.1]** + +```go +upon timely(⟨PROPOSAL, h_p, round_p, v, −1⟩) from proposer(h_p, round_p) while step_p = propose do { + if valid(v) ∧ (lockedRound_p = −1 ∨ lockedValue_p = v) { + broadcast ⟨PREVOTE, h_p, round_p, id(v)⟩ + } + else { + broadcast ⟨PREVOTE, h_p, round_p, nil⟩ + } + step_p ← prevote +} +``` + +#### Rules at Lines 28 - 33 remain unchanged + +The rule on line 28 applies to values `v` proposed again in the current round because its proposer received `2f + 1 PREVOTE`s for `v` in a previous round `vr`. +This means that there was a round `r <= vr` in which `2f + 1` processes accepted `v` for the first time, and so sent `PREVOTE`s for `v`. +Which, in turn, means that these processes executed the line 22 of the algorithm, and therefore judged `v` as a `timely` proposal. + +In other words, we don't need to verify whether `v` is a timely proposal because at least `f + 1` processes judged `v` as `timely` in a previous round, +and because, since `v` was re-proposed as a `validValue` (line 16), `v.time` has not being updated from its original proposal. + +**All other rules remains unchanged.** + +Back to [main document][main]. 
+ +[main]: ./README.md + +[algorithm_v1]: ./v1/pbts-algorithm_001_draft.md + +[sysmodel]: ./pbts-sysmodel_002_draft.md + +[bfttime]: https://github.com/tendermint/tendermint/blob/master/spec/consensus/bft-time.md +[arXiv]: https://arxiv.org/pdf/1807.04938.pdf diff --git a/spec/consensus/proposer-based-timestamp/pbts-sysmodel_002_draft.md b/spec/consensus/proposer-based-timestamp/pbts-sysmodel_002_draft.md new file mode 100644 index 0000000000..d6fcb54b6e --- /dev/null +++ b/spec/consensus/proposer-based-timestamp/pbts-sysmodel_002_draft.md @@ -0,0 +1,357 @@ +# PBTS: System Model and Properties + +## Outline + + - [System model](#system-model) + - [Synchronized clocks](#synchronized-clocks) + - [Message delays](#message-delays) + - [Problem Statement](#problem-statement) + - [Protocol Analysis - Timely Proposals](#protocol-analysis---timely-proposals) + - [Timely Proof-of-Locks](#timely-proof-of-locks) + - [Derived Proof-of-Locks](#derived-proof-of-locks) + - [Temporal Analysis](#temporal-analysis) + - [Safety](#safety) + - [Liveness](#liveness) + +## System Model + +#### **[PBTS-CLOCK-NEWTON.0]** + +There is a reference Newtonian real-time `t`. + +No process has direct access to this reference time, used only for specification purposes. +The reference real-time is assumed to be aligned with the Coordinated Universal Time (UTC). + +### Synchronized clocks + +Processes are assumed to be equipped with synchronized clocks, +aligned with the Coordinated Universal Time (UTC). + +This requires processes to periodically synchronize their local clocks with an +external and trusted source of the time (e.g. NTP servers). +Each synchronization cycle aligns the process local clock with the external +source of time, making it a *fairly accurate* source of real time. +The periodic (re)synchronization aims to correct the *drift* of local clocks, +which tend to pace slightly faster or slower than the real time. + +To avoid an excessive level detail in the parameters and guarantees of +synchronized clocks, we adopt a single system parameter `PRECISION` to +encapsulate the potential inaccuracy of the synchronization mechanisms, +and drifts of local clocks from real time. + +#### **[PBTS-CLOCK-PRECISION.0]** + +There exists a system parameter `PRECISION`, such that +for any two processes `p` and `q`, with local clocks `C_p` and `C_q`: + +- If `p` and `q` are equipped with synchronized clocks, + then for any real-time `t` we have `|C_p(t) - C_q(t)| <= PRECISION`. + +`PRECISION` thus bounds the difference on the times simultaneously read by processes +from their local clocks, so that their clocks can be considered synchronized. + +#### Accuracy + +A second relevant clock parameter is accuracy, which binds the values read by +processes from their clocks to real time. + +##### **[PBTS-CLOCK-ACCURACY.0]** + +For the sake of completeness, we define a parameter `ACCURACY` such that: + +- At real time `t` there is at least one correct process `p` which clock marks + `C_p(t)` with `|C_p(t) - t| <= ACCURACY`. + +As a consequence, applying the definition of `PRECISION`, we have: + +- At real time `t` the synchronized clock of any correct process `p` marks + `C_p(t)` with `|C_p(t) - t| <= ACCURACY + PRECISION`. + +The reason for not adopting `ACCURACY` as a system parameter is the assumption +that `PRECISION >> ACCURACY`. +This allows us to consider, for practical purposes, that the `PRECISION` system +parameter embodies the `ACCURACY` model parameter. 
+ +### Message Delays + +The assumption that processes have access to synchronized clocks ensures that proposal times +assigned by *correct processes* have a bounded relation with the real time. +It is not enough, however, to identify (and reject) proposal times proposed by Byzantine processes. + +To properly evaluate whether the time assigned to a proposal is consistent with the real time, +we need some information regarding the time it takes for a message carrying a proposal +to reach all its (correct) destinations. +More precisely, the *maximum delay* for delivering a proposal to its destinations allows +defining a lower bound, a *minimum time* that a correct process assigns to proposal. +While *minimum delay* for delivering a proposal to a destination allows defining +an upper bound, the *maximum time* assigned to a proposal. + +#### **[PBTS-MSG-DELAY.0]** + +There exists a system parameter `MSGDELAY` for end-to-end delays of proposal messages, +such for any two correct processes `p` and `q`: + +- If `p` sends a proposal message `m` at real time `t` and `q` receives `m` at + real time `t'`, then `t <= t' <= t + MSGDELAY`. + +Notice that, as a system parameter, `MSGDELAY` should be observed for any +proposal message broadcast by correct processes: it is a *worst-case* parameter. +As message delays depends on the message size, the above requirement implicitly +indicates that the size of proposal messages is either fixed or upper bounded. + +## Problem Statement + +In this section we define the properties of Tendermint consensus +(cf. the [arXiv paper][arXiv]) in this system model. + +### **[PBTS-PROPOSE.0]** + +A proposer proposes a consensus value `v` that includes a proposal time +`v.time`. + +> We then restrict the allowed decisions along the following lines: + +#### **[PBTS-INV-AGREEMENT.0]** + +- [Agreement] No two correct processes decide on different values `v`. + +This implies that no two correct processes decide on different proposal times +`v.time`. + +#### **[PBTS-INV-VALID.0]** + +- [Validity] If a correct process decides on value `v`, then `v` satisfies a + predefined `valid` predicate. + +With respect to PBTS, the `valid` predicate requires proposal times to be +[monotonic](./pbts-algorithm_002_draft.md#time-monotonicity) over heights of +consensus: + +##### **[PBTS-INV-MONOTONICITY.0]** + +- If a correct process decides on value `v` at the height `h` of consensus, + thus setting `decision[h] = v`, then `v.time > decision[h'].time` for all + previous heights `h' < h`. + +The monotonicity of proposal times, and external validity in general, +implicitly assumes that heights of consensus are executed in order. + +#### **[PBTS-INV-TIMELY.0]** + +- [Time-Validity] If a correct process decides on value `v`, then the proposal + time `v.time` was considered `timely` by at least one correct process. + +PBTS introduces a `timely` predicate that restricts the allowed decisions based +on the proposal time `v.time` associated with a proposed value `v`. +As a synchronous predicate, the time at which it is evaluated impacts on +whether a process accepts or reject a proposal time. +For this reason, the Time-Validity property refers to the previous evaluation +of the `timely` predicate, detailed in the following section. + +## Protocol Analysis - Timely proposals + +For PBTS, a `proposal` is a tuple `(v, v.time, v.round)`, where: + +- `v` is the proposed value; +- `v.time` is the associated proposal time; +- `v.round` is the round at which `v` was first proposed. 
+ +We include the proposal round `v.round` in the proposal definition because a +value `v` and its associated proposal time `v.time` can be proposed in multiple +rounds, but the evaluation of the `timely` predicate is only relevant at round +`v.round`. + +> Considering the algorithm in the [arXiv paper][arXiv], a new proposal is +> produced by the `getValue()` method, invoked by the proposer `p` of round +> `round_p` when starting its proposing round with a nil `validValue_p`. +> The first round at which a value `v` is proposed is then the round at which +> the proposal for `v` was produced, and broadcast in a `PROPOSAL` message with +> `vr = -1`. + +#### **[PBTS-PROPOSAL-RECEPTION.0]** + +The `timely` predicate is evaluated when a process receives a proposal. +More precisely, let `p` be a correct process: + +- `proposalReceptionTime(p,r)` is the time `p` reads from its local clock when + `p` is at round `r` and receives the proposal of round `r`. + +#### **[PBTS-TIMELY.0]** + +The proposal `(v, v.time, v.round)` is considered `timely` by a correct process +`p` if: + +1. `proposalReceptionTime(p,v.round)` is set, and +1. `proposalReceptionTime(p,v.round) >= v.time - PRECISION`, and +1. `proposalReceptionTime(p,v.round) <= v.time + MSGDELAY + PRECISION`. + +A correct process at round `v.round` only sends a `PREVOTE` for `v` if the +associated proposal time `v.time` is considered `timely`. + +> Considering the algorithm in the [arXiv paper][arXiv], the `timely` predicate +> is evaluated by a process `p` when it receives a valid `PROPOSAL` message +> from the proposer of the current round `round_p` with `vr = -1`. + +### Timely Proof-of-Locks + +A *Proof-of-Lock* is a set of `PREVOTE` messages of round of consensus for the +same value from processes whose cumulative voting power is at least `2f + 1`. +We denote as `POL(v,r)` a proof-of-lock of value `v` at round `r`. + +For PBTS, we are particularly interested in the `POL(v,v.round)` produced in +the round `v.round` at which a value `v` was first proposed. +We call it a *timely* proof-of-lock for `v` because it can only be observed +if at least one correct process considered it `timely`: + +#### **[PBTS-TIMELY-POL.0]** + +If + +- there is a valid `POL(v,r)` with `r = v.round`, and +- `POL(v,v.round)` contains a `PREVOTE` message from at least one correct process, + +Then, let `p` is a such correct process: + +- `p` received a `PROPOSAL` message of round `v.round`, and +- the `PROPOSAL` message contained a proposal `(v, v.time, v.round)`, and +- `p` was in round `v.round` and evaluated the proposal time `v.time` as `timely`. + +The existence of a such correct process `p` is guaranteed provided that the +voting power of Byzantine processes is bounded by `2f`. + +### Derived Proof-of-Locks + +The existence of `POL(v,r)` is a requirement for the decision of `v` at round +`r` of consensus. + +At the same time, the Time-Validity property establishes that if `v` is decided +then a timely proof-of-lock `POL(v,v.round)` must have been produced. + +So, we need to demonstrate here that any valid `POL(v,r)` is either a timely +proof-of-lock or it is derived from a timely proof-of-lock: + +#### **[PBTS-DERIVED-POL.0]** + +If + +- there is a valid `POL(v,r)`, and +- `POL(v,r)` contains a `PREVOTE` message from at least one correct process, + +Then + +- there is a valid `POL(v,v.round)` with `v.round <= r` which is a timely proof-of-lock. + +The above relation is trivially observed when `r = v.round`, as `POL(v,r)` must +be a timely proof-of-lock. 
+Notice that we cannot have `r < v.round`, as `v.round` is defined as the first +round at which `v` was proposed. + +For `r > v.round` we need to demonstrate that if there is a valid `POL(v,r)`, +then a timely `POL(v,v.round)` was previously obtained. +We observe that a condition for observing a `POL(v,r)` is that the proposer of +round `r` has broadcast a `PROPOSAL` message for `v`. +As `r > v.round`, we can affirm that `v` was not produced in round `r`. +Instead, by the protocol operation, `v` was a *valid value* for the proposer of +round `r`, which means that if the proposer has observed a `POL(v,vr)` with `vr +< r`. +The above operation considers a *correct* proposer, but since a `POL(v,r)` was +produced (by hypothesis) we can affirm that at least one correct process (also) +observed a `POL(v,vr)`. + +> Considering the algorithm in the [arXiv paper][arXiv], `v` was proposed by +> the proposer `p` of round `round_p` because its `validValue_p` variable was +> set to `v`. +> The `PROPOSAL` message broadcast by the proposer, in this case, had `vr > -1`, +> and it could only be accepted by processes that also observed a `POL(v,vr)`. + +Thus, if there is a `POL(v,r)` with `r > v.round`, then there is a valid +`POL(v,vr)` with `v.round <= vr < r`. +If `vr = v.round` then `POL(vr,v)` is a timely proof-of-lock and we are done. +Otherwise, there is another valid `POL(v,vr')` with `v.round <= vr' < vr`, +and the above reasoning can be recursively applied until we get `vr' = v.round` +and observe a timely proof-of-lock. + +## Temporal analysis + +In this section we present invariants that need be observed for ensuring that +PBTS is both safe and live. + +In addition to the variables and system parameters already defined, we use +`beginRound(p,r)` as the value of process `p`'s local clock +when it starts round `r` of consensus. + +### Safety + +The safety of PBTS requires that if a value `v` is decided, then at least one +correct process `p` considered the associated proposal time `v.time` timely. +Following the definition of [timely proposals](#pbts-timely0) and +proof-of-locks, we require this condition to be asserted at a specific round of +consensus, defined as `v.round`: + +#### **[PBTS-SAFETY.0]** + +If + +- there is a valid commit `C` for a value `v` +- `C` contains a `PRECOMMIT` message from at least one correct process + +then there is a correct process `p` (not necessarily the same above considered) such that: + +- `beginRound(p,v.round) <= proposalReceptionTime(p,v.round) <= beginRound(p,v.round+1)` and +- `proposalReceptionTime (p,v.round) - MSGDELAY - PRECISION <= v.time <= proposalReceptionTime(p,v.round) + PRECISION` + +That is, a correct process `p` started round `v.round` and, while still at +round `v.round`, received a `PROPOSAL` message from round `v.round` proposing +`v`. +Moreover, the reception time of the original proposal for `v`, according with +`p`'s local clock, enabled `p` to consider the proposal time `v.time` as +`timely`. +This is the requirement established by PBTS for issuing a `PREVOTE` for the +proposal `(v, v.time, v.round)`, so for the eventual decision of `v`. + +### Liveness + +The liveness of PBTS relies on correct processes accepting proposal times +assigned by correct proposers. +We thus present a set of conditions for assigning a proposal time `v.time` so +that every correct process should be able to issue a `PREVOTE` for `v`. 
+
+### Liveness
+
+The liveness of PBTS relies on correct processes accepting proposal times
+assigned by correct proposers.
+We thus present a set of conditions for assigning a proposal time `v.time` so
+that every correct process should be able to issue a `PREVOTE` for `v`.
+
+#### **[PBTS-LIVENESS.0]**
+
+If
+
+- the proposer of a round `r` of consensus is correct
+- and it proposes a value `v` for the first time, with associated proposal time `v.time`
+
+then the proposal `(v, v.time, r)` is accepted by every correct process provided that:
+
+- `min{p is correct : beginRound(p,r)} <= v.time <= max{p is correct : beginRound(p,r)}` and
+- `max{p is correct : beginRound(p,r)} <= v.time + MSGDELAY + PRECISION <= min{p is correct : beginRound(p,r+1)}`
+
+The first condition establishes a range of safe proposal times `v.time` for round `r`.
+This condition is trivially observed if a correct proposer `p` sets `v.time` to the time it
+reads from its clock when starting round `r` and proposing `v`.
+A `PROPOSAL` message sent by `p` at local time `v.time` should not be received
+by any correct process before its local clock reads `v.time - PRECISION`, so
+that condition 2 of [PBTS-TIMELY.0] is observed.
+
+The second condition establishes that every correct process should start round
+`v.round` at a local time that allows `v.time` to still be considered timely,
+according to condition 3 of [PBTS-TIMELY.0].
+In addition, it requires correct processes to stay long enough in round
+`v.round` so that they can receive the `PROPOSAL` message of round `v.round`.
+It is assumed here that the proposer of `v` broadcasts a `PROPOSAL` message at
+time `v.time`, according to its local clock, so that every correct process
+should receive this message by time `v.time + MSGDELAY + PRECISION`, according
+to their local clocks.
+
+Back to [main document][main].
+
+[main]: ./README.md
+
+[algorithm]: ./pbts-algorithm_002_draft.md
+
+[sysmodel]: ./pbts-sysmodel_002_draft.md
+[sysmodel_v1]: ./v1/pbts-sysmodel_001_draft.md
+
+[arXiv]: https://arxiv.org/pdf/1807.04938.pdf
diff --git a/spec/consensus/proposer-based-timestamp/tla/Apalache.tla b/spec/consensus/proposer-based-timestamp/tla/Apalache.tla
new file mode 100644
index 0000000000..044bff666f
--- /dev/null
+++ b/spec/consensus/proposer-based-timestamp/tla/Apalache.tla
@@ -0,0 +1,109 @@
+--------------------------- MODULE Apalache -----------------------------------
+(*
+ * This is a standard module for use with the Apalache model checker.
+ * The meaning of the operators is explained in the comments.
+ * Many of the operators serve as additional annotations of their arguments.
+ * As we like to preserve compatibility with TLC and TLAPS, we define the
+ * operator bodies by erasure. The actual interpretation of the operators is
+ * encoded inside Apalache. For the moment, these operators are mirrored in
+ * the class at.forsyte.apalache.tla.lir.oper.ApalacheOper.
+ *
+ * Igor Konnov, Jure Kukovec, Informal Systems 2020-2021
+ *)
+
+(**
+ * An assignment of an expression e to a state variable x. Typically, one
+ * uses the non-primed version of x in the initializing predicate Init and
+ * the primed version of x (that is, x') in the transition predicate Next.
+ * Although TLA+ does not have a concept of a variable assignment, we find
+ * this concept extremely useful for symbolic model checking. In pure TLA+,
+ * one would simply write x = e, or x \in {e}.
+ *
+ * Apalache automatically converts some expressions of the form
+ * x = e or x \in {e} into assignments. However, if you like to annotate
+ * assignments by hand, you can use this operator.
+ *
+ * For a further discussion on that matter, see:
+ * https://github.com/informalsystems/apalache/blob/ik/idiomatic-tla/docs/idiomatic/assignments.md
+ *)
+x := e == x = e
+
+(**
+ * A generator of a data structure. Given a positive integer `bound`, and
+ * assuming that the type of the operator application is known, we
+ * recursively generate a TLA+ data structure as a tree, whose width is
+ * bound by the number `bound`.
+ *
+ * The body of this operator is redefined by Apalache.
+ *)
+Gen(size) == {}
+
+(**
+ * Convert a set of pairs S to a function F. Note that if S contains at least
+ * two pairs <<x, y>> and <<u, v>> such that x = u and y /= v,
+ * then F is not uniquely defined. We use CHOOSE to resolve this ambiguity.
+ * Apalache implements a more efficient encoding of this operator
+ * than the default one.
+ *
+ * @type: Set(<<a, b>>) => (a -> b);
+ *)
+SetAsFun(S) ==
+    LET Dom == { x: <<x, y>> \in S }
+        Rng == { y: <<x, y>> \in S }
+    IN
+    [ x \in Dom |-> CHOOSE y \in Rng: <<x, y>> \in S ]
+
+(**
+ * As TLA+ is untyped, one can use function- and sequence-specific operators
+ * interchangeably. However, to maintain correctness w.r.t. our type-system,
+ * an explicit cast is needed when using functions as sequences.
+ *)
+LOCAL INSTANCE Sequences
+FunAsSeq(fn, maxSeqLen) == SubSeq(fn, 1, maxSeqLen)
+
+(**
+ * Annotating an expression \E x \in S: P as Skolemizable. That is, it can
+ * be replaced with an expression c \in S /\ P(c) for a fresh constant c.
+ * Not every existential can be replaced with a constant; this should be done
+ * with care. Apalache detects Skolemizable expressions by static analysis.
+ *)
+Skolem(e) == e
+
+(**
+ * A hint to the model checker to expand a set S, instead of dealing
+ * with it symbolically. Apalache finds out which sets have to be expanded
+ * by static analysis.
+ *)
+Expand(S) == S
+
+(**
+ * A hint to the model checker to replace its argument Cardinality(S) >= k
+ * with a series of existential quantifiers for a constant k.
+ * Similar to Skolem, this has to be done carefully. Apalache automatically
+ * places this hint by static analysis.
+ *)
+ConstCardinality(cardExpr) == cardExpr
+
+(**
+ * The folding operator, used to implement computation over a set.
+ * Apalache implements a more efficient encoding than the one below.
+ * (from the community modules).
+ *)
+RECURSIVE FoldSet(_,_,_)
+FoldSet( Op(_,_), v, S ) == IF S = {}
+                            THEN v
+                            ELSE LET w == CHOOSE x \in S: TRUE
+                                 IN LET T == S \ {w}
+                                 IN FoldSet( Op, Op(v,w), T )
+
+(**
+ * The folding operator, used to implement computation over a sequence.
+ * Apalache implements a more efficient encoding than the one below.
+ * (from the community modules).
+ *) +RECURSIVE FoldSeq(_,_,_) +FoldSeq( Op(_,_), v, seq ) == IF seq = <<>> + THEN v + ELSE FoldSeq( Op, Op(v,Head(seq)), Tail(seq) ) + +=============================================================================== diff --git a/spec/consensus/proposer-based-timestamp/tla/MC_PBT.tla b/spec/consensus/proposer-based-timestamp/tla/MC_PBT.tla new file mode 100644 index 0000000000..53f7336fbf --- /dev/null +++ b/spec/consensus/proposer-based-timestamp/tla/MC_PBT.tla @@ -0,0 +1,77 @@ +----------------------------- MODULE MC_PBT ------------------------------- +CONSTANT + \* @type: ROUND -> PROCESS; + Proposer + +VARIABLES + \* @type: PROCESS -> ROUND; + round, \* a process round number + \* @type: PROCESS -> STEP; + step, \* a process step + \* @type: PROCESS -> DECISION; + decision, \* process decision + \* @type: PROCESS -> VALUE; + lockedValue, \* a locked value + \* @type: PROCESS -> ROUND; + lockedRound, \* a locked round + \* @type: PROCESS -> PROPOSAL; + validValue, \* a valid value + \* @type: PROCESS -> ROUND; + validRound \* a valid round + +\* time-related variables +VARIABLES + \* @type: PROCESS -> TIME; + localClock, \* a process local clock: Corr -> Ticks + \* @type: TIME; + realTime \* a reference Newtonian real time + +\* book-keeping variables +VARIABLES + \* @type: ROUND -> Set(PROPMESSAGE); + msgsPropose, \* PROPOSE messages broadcast in the system, Rounds -> Messages + \* @type: ROUND -> Set(PREMESSAGE); + msgsPrevote, \* PREVOTE messages broadcast in the system, Rounds -> Messages + \* @type: ROUND -> Set(PREMESSAGE); + msgsPrecommit, \* PRECOMMIT messages broadcast in the system, Rounds -> Messages + \* @type: Set(MESSAGE); + evidence, \* the messages that were used by the correct processes to make transitions + \* @type: ACTION; + action, \* we use this variable to see which action was taken + \* @type: PROCESS -> Set(PROPMESSAGE); + receivedTimelyProposal, \* used to keep track when a process receives a timely VALUE message + \* @type: <> -> TIME; + inspectedProposal \* used to keep track when a process tries to receive a message + +\* Invariant support +VARIABLES + \* @type: ROUND -> TIME; + beginRound, \* the minimum of the local clocks at the time any process entered a new round + \* @type: PROCESS -> TIME; + endConsensus, \* the local time when a decision is made + \* @type: ROUND -> TIME; + lastBeginRound, \* the maximum of the local clocks in each round + \* @type: ROUND -> TIME; + proposalTime, \* the real time when a proposer proposes in a round + \* @type: ROUND -> TIME; + proposalReceivedTime \* the real time when a correct process first receives a proposal message in a round + + +INSTANCE TendermintPBT_002_draft WITH + Corr <- {"c1", "c2"}, + Faulty <- {"f3", "f4"}, + N <- 4, + T <- 1, + ValidValues <- { "v0", "v1" }, + InvalidValues <- {"v2"}, + MaxRound <- 5, + MaxTimestamp <- 10, + MinTimestamp <- 2, + Delay <- 2, + Precision <- 2 + +\* run Apalache with --cinit=CInit +CInit == \* the proposer is arbitrary -- works for safety + Proposer \in [Rounds -> AllProcs] + +============================================================================= diff --git a/spec/consensus/proposer-based-timestamp/tla/TendermintPBT_001_draft.tla b/spec/consensus/proposer-based-timestamp/tla/TendermintPBT_001_draft.tla new file mode 100644 index 0000000000..2bcdecb27b --- /dev/null +++ b/spec/consensus/proposer-based-timestamp/tla/TendermintPBT_001_draft.tla @@ -0,0 +1,597 @@ +-------------------- MODULE TendermintPBT_001_draft --------------------------- +(* + A TLA+ 
specification of a simplified Tendermint consensus, with added clocks + and proposer-based timestamps. This TLA+ specification extends and modifies + the Tendermint TLA+ specification for fork accountability: + https://github.com/tendermint/spec/blob/master/spec/light-client/accountability/TendermintAcc_004_draft.tla + + * Version 1. A preliminary specification. + + Zarko Milosevic, Igor Konnov, Informal Systems, 2019-2020. + Ilina Stoilkovska, Josef Widder, Informal Systems, 2021. + *) + +EXTENDS Integers, FiniteSets + +(********************* PROTOCOL PARAMETERS **********************************) +CONSTANTS + Corr, \* the set of correct processes + Faulty, \* the set of Byzantine processes, may be empty + N, \* the total number of processes: correct, defective, and Byzantine + T, \* an upper bound on the number of Byzantine processes + ValidValues, \* the set of valid values, proposed both by correct and faulty + InvalidValues, \* the set of invalid values, never proposed by the correct ones + MaxRound, \* the maximal round number + MaxTimestamp, \* the maximal value of the clock tick + Delay, \* message delay + Precision, \* clock precision: the maximal difference between two local clocks + Accuracy, \* clock accuracy: the maximal difference between a local clock and the real time + Proposer, \* the proposer function from 0..NRounds to 1..N + ClockDrift \* is there clock drift between the local clocks and the global clock + +ASSUME(N = Cardinality(Corr \union Faulty)) + +(*************************** DEFINITIONS ************************************) +AllProcs == Corr \union Faulty \* the set of all processes +Rounds == 0..MaxRound \* the set of potential rounds +Timestamps == 0..MaxTimestamp \* the set of clock ticks +NilRound == -1 \* a special value to denote a nil round, outside of Rounds +NilTimestamp == -1 \* a special value to denote a nil timestamp, outside of Ticks +RoundsOrNil == Rounds \union {NilRound} +Values == ValidValues \union InvalidValues \* the set of all values +NilValue == "None" \* a special value for a nil round, outside of Values +Proposals == Values \X Timestamps +NilProposal == <> +ValuesOrNil == Values \union {NilValue} +Decisions == Values \X Timestamps \X Rounds +NilDecision == <> + + +\* a value hash is modeled as identity +Id(v) == v + +\* The validity predicate +IsValid(v) == v \in ValidValues + +\* the two thresholds that are used in the algorithm +THRESHOLD1 == T + 1 \* at least one process is not faulty +THRESHOLD2 == 2 * T + 1 \* a quorum when having N > 3 * T + +Min(S) == CHOOSE x \in S : \A y \in S : x <= y + +Max(S) == CHOOSE x \in S : \A y \in S : y <= x + +(********************* TYPE ANNOTATIONS FOR APALACHE **************************) +\* the operator for type annotations +a <: b == a + +\* the type of message records +MT == [type |-> STRING, src |-> STRING, round |-> Int, + proposal |-> <>, validRound |-> Int, id |-> <>] + +RP == <> + +\* a type annotation for a message +AsMsg(m) == m <: MT +\* a type annotation for a set of messages +SetOfMsgs(S) == S <: {MT} +\* a type annotation for an empty set of messages +EmptyMsgSet == SetOfMsgs({}) + +SetOfRcvProp(S) == S <: {RP} +EmptyRcvProp == SetOfRcvProp({}) + +SetOfProc(S) == S <: {STRING} +EmptyProcSet == SetOfProc({}) + +(********************* PROTOCOL STATE VARIABLES ******************************) +VARIABLES + round, \* a process round number: Corr -> Rounds + localClock, \* a process local clock: Corr -> Ticks + realTime, \* a reference Newtonian real time + step, \* a process step: Corr -> { 
"PROPOSE", "PREVOTE", "PRECOMMIT", "DECIDED" } + decision, \* process decision: Corr -> ValuesOrNil + lockedValue, \* a locked value: Corr -> ValuesOrNil + lockedRound, \* a locked round: Corr -> RoundsOrNil + validValue, \* a valid value: Corr -> ValuesOrNil + validRound \* a valid round: Corr -> RoundsOrNil + +\* book-keeping variables +VARIABLES + msgsPropose, \* PROPOSE messages broadcast in the system, Rounds -> Messages + msgsPrevote, \* PREVOTE messages broadcast in the system, Rounds -> Messages + msgsPrecommit, \* PRECOMMIT messages broadcast in the system, Rounds -> Messages + receivedTimelyProposal, \* used to keep track when a process receives a timely PROPOSAL message, {<>} + inspectedProposal, \* used to keep track when a process tries to receive a message, [Rounds -> <>] + evidence, \* the messages that were used by the correct processes to make transitions + action, \* we use this variable to see which action was taken + beginConsensus, \* the minimum of the local clocks in the initial state, Int + endConsensus, \* the local time when a decision is made, [Corr -> Int] + lastBeginConsensus, \* the maximum of the local clocks in the initial state, Int + proposalTime, \* the real time when a proposer proposes in a round, [Rounds -> Int] + proposalReceivedTime \* the real time when a correct process first receives a proposal message in a round, [Rounds -> Int] + +(* to see a type invariant, check TendermintAccInv3 *) + +\* a handy definition used in UNCHANGED +vars == <> + +(********************* PROTOCOL INITIALIZATION ******************************) +FaultyProposals(r) == + SetOfMsgs([type: {"PROPOSAL"}, src: Faulty, + round: {r}, proposal: Proposals, validRound: RoundsOrNil]) + +AllFaultyProposals == + SetOfMsgs([type: {"PROPOSAL"}, src: Faulty, + round: Rounds, proposal: Proposals, validRound: RoundsOrNil]) + +FaultyPrevotes(r) == + SetOfMsgs([type: {"PREVOTE"}, src: Faulty, round: {r}, id: Proposals]) + +AllFaultyPrevotes == + SetOfMsgs([type: {"PREVOTE"}, src: Faulty, round: Rounds, id: Proposals]) + +FaultyPrecommits(r) == + SetOfMsgs([type: {"PRECOMMIT"}, src: Faulty, round: {r}, id: Proposals]) + +AllFaultyPrecommits == + SetOfMsgs([type: {"PRECOMMIT"}, src: Faulty, round: Rounds, id: Proposals]) + +AllProposals == + SetOfMsgs([type: {"PROPOSAL"}, src: AllProcs, + round: Rounds, proposal: Proposals, validRound: RoundsOrNil]) + +RoundProposals(r) == + SetOfMsgs([type: {"PROPOSAL"}, src: AllProcs, + round: {r}, proposal: Proposals, validRound: RoundsOrNil]) + +BenignRoundsInMessages(msgfun) == + \* the message function never contains a message for a wrong round + \A r \in Rounds: + \A m \in msgfun[r]: + r = m.round + +\* The initial states of the protocol. Some faults can be in the system already. 
+Init == + /\ round = [p \in Corr |-> 0] + /\ \/ /\ ~ClockDrift + /\ localClock \in [Corr -> 0..Accuracy] + \/ /\ ClockDrift + /\ localClock = [p \in Corr |-> 0] + /\ realTime = 0 + /\ step = [p \in Corr |-> "PROPOSE"] + /\ decision = [p \in Corr |-> NilDecision] + /\ lockedValue = [p \in Corr |-> NilValue] + /\ lockedRound = [p \in Corr |-> NilRound] + /\ validValue = [p \in Corr |-> NilValue] + /\ validRound = [p \in Corr |-> NilRound] + /\ msgsPropose \in [Rounds -> SUBSET AllFaultyProposals] + /\ msgsPrevote \in [Rounds -> SUBSET AllFaultyPrevotes] + /\ msgsPrecommit \in [Rounds -> SUBSET AllFaultyPrecommits] + /\ receivedTimelyProposal = EmptyRcvProp + /\ inspectedProposal = [r \in Rounds |-> EmptyProcSet] + /\ BenignRoundsInMessages(msgsPropose) + /\ BenignRoundsInMessages(msgsPrevote) + /\ BenignRoundsInMessages(msgsPrecommit) + /\ evidence = EmptyMsgSet + /\ action' = "Init" + /\ beginConsensus = Min({localClock[p] : p \in Corr}) + /\ endConsensus = [p \in Corr |-> NilTimestamp] + /\ lastBeginConsensus = Max({localClock[p] : p \in Corr}) + /\ proposalTime = [r \in Rounds |-> NilTimestamp] + /\ proposalReceivedTime = [r \in Rounds |-> NilTimestamp] + +(************************ MESSAGE PASSING ********************************) +BroadcastProposal(pSrc, pRound, pProposal, pValidRound) == + LET newMsg == + AsMsg([type |-> "PROPOSAL", src |-> pSrc, round |-> pRound, + proposal |-> pProposal, validRound |-> pValidRound]) + IN + msgsPropose' = [msgsPropose EXCEPT ![pRound] = msgsPropose[pRound] \union {newMsg}] + +BroadcastPrevote(pSrc, pRound, pId) == + LET newMsg == AsMsg([type |-> "PREVOTE", + src |-> pSrc, round |-> pRound, id |-> pId]) + IN + msgsPrevote' = [msgsPrevote EXCEPT ![pRound] = msgsPrevote[pRound] \union {newMsg}] + +BroadcastPrecommit(pSrc, pRound, pId) == + LET newMsg == AsMsg([type |-> "PRECOMMIT", + src |-> pSrc, round |-> pRound, id |-> pId]) + IN + msgsPrecommit' = [msgsPrecommit EXCEPT ![pRound] = msgsPrecommit[pRound] \union {newMsg}] + + +(***************************** TIME **************************************) + +\* [PBTS-CLOCK-PRECISION.0] +SynchronizedLocalClocks == + \A p \in Corr : \A q \in Corr : + p /= q => + \/ /\ localClock[p] >= localClock[q] + /\ localClock[p] - localClock[q] < Precision + \/ /\ localClock[p] < localClock[q] + /\ localClock[q] - localClock[p] < Precision + +\* [PBTS-PROPOSE.0] +Proposal(v, t) == + <> + +\* [PBTS-DECISION-ROUND.0] +Decision(v, t, r) == + <> + +(**************** MESSAGE PROCESSING TRANSITIONS *************************) +\* lines 12-13 +StartRound(p, r) == + /\ step[p] /= "DECIDED" \* a decided process does not participate in consensus + /\ round' = [round EXCEPT ![p] = r] + /\ step' = [step EXCEPT ![p] = "PROPOSE"] + +\* lines 14-19, a proposal may be sent later +InsertProposal(p) == + LET r == round[p] IN + /\ p = Proposer[r] + /\ step[p] = "PROPOSE" + \* if the proposer is sending a proposal, then there are no other proposals + \* by the correct processes for the same round + /\ \A m \in msgsPropose[r]: m.src /= p + /\ \E v \in ValidValues: + LET proposal == IF validValue[p] /= NilValue + THEN Proposal(validValue[p], localClock[p]) + ELSE Proposal(v, localClock[p]) IN + + /\ BroadcastProposal(p, round[p], proposal, validRound[p]) + /\ proposalTime' = [proposalTime EXCEPT ![r] = realTime] + /\ UNCHANGED <> + /\ action' = "InsertProposal" + +\* a new action used to filter messages that are not on time +\* [PBTS-RECEPTION-STEP.0] +ReceiveProposal(p) == + \E v \in Values, t \in Timestamps: + /\ LET r == round[p] IN + LET 
msg == + AsMsg([type |-> "PROPOSAL", src |-> Proposer[round[p]], + round |-> round[p], proposal |-> Proposal(v, t), validRound |-> NilRound]) IN + /\ msg \in msgsPropose[round[p]] + /\ p \notin inspectedProposal[r] + /\ <> \notin receivedTimelyProposal + /\ inspectedProposal' = [inspectedProposal EXCEPT ![r] = @ \union {p}] + /\ \/ /\ localClock[p] - Precision < t + /\ t < localClock[p] + Precision + Delay + /\ receivedTimelyProposal' = receivedTimelyProposal \union {<>} + /\ \/ /\ proposalReceivedTime[r] = NilTimestamp + /\ proposalReceivedTime' = [proposalReceivedTime EXCEPT ![r] = realTime] + \/ /\ proposalReceivedTime[r] /= NilTimestamp + /\ UNCHANGED proposalReceivedTime + \/ /\ \/ localClock[p] - Precision >= t + \/ t >= localClock[p] + Precision + Delay + /\ UNCHANGED <> + /\ UNCHANGED <> + /\ action' = "ReceiveProposal" + +\* lines 22-27 +UponProposalInPropose(p) == + \E v \in Values, t \in Timestamps: + /\ step[p] = "PROPOSE" (* line 22 *) + /\ LET msg == + AsMsg([type |-> "PROPOSAL", src |-> Proposer[round[p]], + round |-> round[p], proposal |-> Proposal(v, t), validRound |-> NilRound]) IN + /\ <> \in receivedTimelyProposal \* updated line 22 + /\ evidence' = {msg} \union evidence + /\ LET mid == (* line 23 *) + IF IsValid(v) /\ (lockedRound[p] = NilRound \/ lockedValue[p] = v) + THEN Id(Proposal(v, t)) + ELSE NilProposal + IN + BroadcastPrevote(p, round[p], mid) \* lines 24-26 + /\ step' = [step EXCEPT ![p] = "PREVOTE"] + /\ UNCHANGED <> + /\ action' = "UponProposalInPropose" + +\* lines 28-33 +\* [PBTS-ALG-OLD-PREVOTE.0] +UponProposalInProposeAndPrevote(p) == + \E v \in Values, t1 \in Timestamps, t2 \in Timestamps, vr \in Rounds: + /\ step[p] = "PROPOSE" /\ 0 <= vr /\ vr < round[p] \* line 28, the while part + /\ LET msg == + AsMsg([type |-> "PROPOSAL", src |-> Proposer[round[p]], + round |-> round[p], proposal |-> Proposal(v, t1), validRound |-> vr]) + IN + /\ <> \in receivedTimelyProposal \* updated line 28 + /\ LET PV == { m \in msgsPrevote[vr]: m.id = Id(Proposal(v, t2)) } IN + /\ Cardinality(PV) >= THRESHOLD2 \* line 28 + /\ evidence' = PV \union {msg} \union evidence + /\ LET mid == (* line 29 *) + IF IsValid(v) /\ (lockedRound[p] <= vr \/ lockedValue[p] = v) + THEN Id(Proposal(v, t1)) + ELSE NilProposal + IN + BroadcastPrevote(p, round[p], mid) \* lines 24-26 + /\ step' = [step EXCEPT ![p] = "PREVOTE"] + /\ UNCHANGED <> + /\ action' = "UponProposalInProposeAndPrevote" + + \* lines 34-35 + lines 61-64 (onTimeoutPrevote) +UponQuorumOfPrevotesAny(p) == + /\ step[p] = "PREVOTE" \* line 34 and 61 + /\ \E MyEvidence \in SUBSET msgsPrevote[round[p]]: + \* find the unique voters in the evidence + LET Voters == { m.src: m \in MyEvidence } IN + \* compare the number of the unique voters against the threshold + /\ Cardinality(Voters) >= THRESHOLD2 \* line 34 + /\ evidence' = MyEvidence \union evidence + /\ BroadcastPrecommit(p, round[p], NilProposal) + /\ step' = [step EXCEPT ![p] = "PRECOMMIT"] + /\ UNCHANGED <> + /\ action' = "UponQuorumOfPrevotesAny" + +\* lines 36-46 +\* [PBTS-ALG-NEW-PREVOTE.0] +UponProposalInPrevoteOrCommitAndPrevote(p) == + \E v \in ValidValues, t \in Timestamps, vr \in RoundsOrNil: + /\ step[p] \in {"PREVOTE", "PRECOMMIT"} \* line 36 + /\ LET msg == + AsMsg([type |-> "PROPOSAL", src |-> Proposer[round[p]], + round |-> round[p], proposal |-> Proposal(v, t), validRound |-> vr]) IN + /\ <> \in receivedTimelyProposal \* updated line 36 + /\ LET PV == { m \in msgsPrevote[round[p]]: m.id = Id(Proposal(v, t)) } IN + /\ Cardinality(PV) >= THRESHOLD2 \* line 36 + /\ 
evidence' = PV \union {msg} \union evidence + /\ IF step[p] = "PREVOTE" + THEN \* lines 38-41: + /\ lockedValue' = [lockedValue EXCEPT ![p] = v] + /\ lockedRound' = [lockedRound EXCEPT ![p] = round[p]] + /\ BroadcastPrecommit(p, round[p], Id(Proposal(v, t))) + /\ step' = [step EXCEPT ![p] = "PRECOMMIT"] + ELSE + UNCHANGED <> + \* lines 42-43 + /\ validValue' = [validValue EXCEPT ![p] = v] + /\ validRound' = [validRound EXCEPT ![p] = round[p]] + /\ UNCHANGED <> + /\ action' = "UponProposalInPrevoteOrCommitAndPrevote" + +\* lines 47-48 + 65-67 (onTimeoutPrecommit) +UponQuorumOfPrecommitsAny(p) == + /\ \E MyEvidence \in SUBSET msgsPrecommit[round[p]]: + \* find the unique committers in the evidence + LET Committers == { m.src: m \in MyEvidence } IN + \* compare the number of the unique committers against the threshold + /\ Cardinality(Committers) >= THRESHOLD2 \* line 47 + /\ evidence' = MyEvidence \union evidence + /\ round[p] + 1 \in Rounds + /\ StartRound(p, round[p] + 1) + /\ UNCHANGED <> + /\ action' = "UponQuorumOfPrecommitsAny" + +\* lines 49-54 +\* [PBTS-ALG-DECIDE.0] +UponProposalInPrecommitNoDecision(p) == + /\ decision[p] = NilDecision \* line 49 + /\ \E v \in ValidValues, t \in Timestamps (* line 50*) , r \in Rounds, vr \in RoundsOrNil: + /\ LET msg == AsMsg([type |-> "PROPOSAL", src |-> Proposer[r], + round |-> r, proposal |-> Proposal(v, t), validRound |-> vr]) IN + /\ msg \in msgsPropose[r] \* line 49 + /\ p \in inspectedProposal[r] + /\ LET PV == { m \in msgsPrecommit[r]: m.id = Id(Proposal(v, t)) } IN + /\ Cardinality(PV) >= THRESHOLD2 \* line 49 + /\ evidence' = PV \union {msg} \union evidence + /\ decision' = [decision EXCEPT ![p] = Decision(v, t, round[p])] \* update the decision, line 51 + \* The original algorithm does not have 'DECIDED', but it increments the height. + \* We introduced 'DECIDED' here to prevent the process from changing its decision. 
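+          \* endConsensus records, on p's local clock, the time at which p decided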
+ /\ endConsensus' = [endConsensus EXCEPT ![p] = localClock[p]] + /\ step' = [step EXCEPT ![p] = "DECIDED"] + /\ UNCHANGED <> + /\ action' = "UponProposalInPrecommitNoDecision" + +\* the actions below are not essential for safety, but added for completeness + +\* lines 20-21 + 57-60 +OnTimeoutPropose(p) == + /\ step[p] = "PROPOSE" + /\ p /= Proposer[round[p]] + /\ BroadcastPrevote(p, round[p], NilProposal) + /\ step' = [step EXCEPT ![p] = "PREVOTE"] + /\ UNCHANGED <> + /\ action' = "OnTimeoutPropose" + +\* lines 44-46 +OnQuorumOfNilPrevotes(p) == + /\ step[p] = "PREVOTE" + /\ LET PV == { m \in msgsPrevote[round[p]]: m.id = Id(NilProposal) } IN + /\ Cardinality(PV) >= THRESHOLD2 \* line 36 + /\ evidence' = PV \union evidence + /\ BroadcastPrecommit(p, round[p], Id(NilProposal)) + /\ step' = [step EXCEPT ![p] = "PRECOMMIT"] + /\ UNCHANGED <> + /\ action' = "OnQuorumOfNilPrevotes" + +\* lines 55-56 +OnRoundCatchup(p) == + \E r \in {rr \in Rounds: rr > round[p]}: + LET RoundMsgs == msgsPropose[r] \union msgsPrevote[r] \union msgsPrecommit[r] IN + \E MyEvidence \in SUBSET RoundMsgs: + LET Faster == { m.src: m \in MyEvidence } IN + /\ Cardinality(Faster) >= THRESHOLD1 + /\ evidence' = MyEvidence \union evidence + /\ StartRound(p, r) + /\ UNCHANGED <> + /\ action' = "OnRoundCatchup" + + +(********************* PROTOCOL TRANSITIONS ******************************) +\* advance the global clock +AdvanceRealTime == + /\ realTime < MaxTimestamp + /\ realTime' = realTime + 1 + /\ \/ /\ ~ClockDrift + /\ localClock' = [p \in Corr |-> localClock[p] + 1] + \/ /\ ClockDrift + /\ UNCHANGED localClock + /\ UNCHANGED <> + /\ action' = "AdvanceRealTime" + +\* advance the local clock of node p +AdvanceLocalClock(p) == + /\ localClock[p] < MaxTimestamp + /\ localClock' = [localClock EXCEPT ![p] = @ + 1] + /\ UNCHANGED <> + /\ action' = "AdvanceLocalClock" + +\* process timely messages +MessageProcessing(p) == + \* start round + \/ InsertProposal(p) + \* reception step + \/ ReceiveProposal(p) + \* processing step + \/ UponProposalInPropose(p) + \/ UponProposalInProposeAndPrevote(p) + \/ UponQuorumOfPrevotesAny(p) + \/ UponProposalInPrevoteOrCommitAndPrevote(p) + \/ UponQuorumOfPrecommitsAny(p) + \/ UponProposalInPrecommitNoDecision(p) + \* the actions below are not essential for safety, but added for completeness + \/ OnTimeoutPropose(p) + \/ OnQuorumOfNilPrevotes(p) + \/ OnRoundCatchup(p) + +(* + * A system transition. In this specificatiom, the system may eventually deadlock, + * e.g., when all processes decide. This is expected behavior, as we focus on safety. 
+ *) +Next == + \/ AdvanceRealTime + \/ /\ ClockDrift + /\ \E p \in Corr: AdvanceLocalClock(p) + \/ /\ SynchronizedLocalClocks + /\ \E p \in Corr: MessageProcessing(p) + +----------------------------------------------------------------------------- + +(*************************** INVARIANTS *************************************) + +\* [PBTS-INV-AGREEMENT.0] +AgreementOnValue == + \A p, q \in Corr: + /\ decision[p] /= NilDecision + /\ decision[q] /= NilDecision + => \E v \in ValidValues, t1 \in Timestamps, t2 \in Timestamps, r1 \in Rounds, r2 \in Rounds : + /\ decision[p] = Decision(v, t1, r1) + /\ decision[q] = Decision(v, t2, r2) + +\* [PBTS-INV-TIME-AGR.0] +AgreementOnTime == + \A p, q \in Corr: + \A v1 \in ValidValues, v2 \in ValidValues, t1 \in Timestamps, t2 \in Timestamps, r \in Rounds : + /\ decision[p] = Decision(v1, t1, r) + /\ decision[q] = Decision(v2, t2, r) + => t1 = t2 + +\* [PBTS-CONSENSUS-TIME-VALID.0] +ConsensusTimeValid == + \A p \in Corr, t \in Timestamps : + \* if a process decides on v and t + (\E v \in ValidValues, r \in Rounds : decision[p] = Decision(v, t, r)) + \* then + => /\ beginConsensus - Precision <= t + /\ t < endConsensus[p] + Precision + Delay + +\* [PBTS-CONSENSUS-SAFE-VALID-CORR-PROP.0] +ConsensusSafeValidCorrProp == + \A v \in ValidValues, t \in Timestamps : + \* if the proposer in the first round is correct + (/\ Proposer[0] \in Corr + \* and there exists a process that decided on v, t + /\ \E p \in Corr, r \in Rounds : decision[p] = Decision(v, t, r)) + \* then t is between the minimal and maximal initial local time + => /\ beginConsensus <= t + /\ t <= lastBeginConsensus + +\* [PBTS-CONSENSUS-REALTIME-VALID-CORR.0] +ConsensusRealTimeValidCorr == + \A t \in Timestamps, r \in Rounds : + (/\ \E p \in Corr, v \in ValidValues : decision[p] = Decision(v, t, r) + /\ proposalTime[r] /= NilTimestamp) + => /\ proposalTime[r] - Accuracy < t + /\ t < proposalTime[r] + Accuracy + +\* [PBTS-CONSENSUS-REALTIME-VALID.0] +ConsensusRealTimeValid == + \A t \in Timestamps, r \in Rounds : + (\E p \in Corr, v \in ValidValues : decision[p] = Decision(v, t, r)) + => /\ proposalReceivedTime[r] - Accuracy - Precision < t + /\ t < proposalReceivedTime[r] + Accuracy + Precision + Delay + +\* [PBTS-MSG-FAIR.0] +BoundedDelay == + \A r \in Rounds : + (/\ proposalTime[r] /= NilTimestamp + /\ proposalTime[r] + Delay < realTime) + => inspectedProposal[r] = Corr + +\* [PBTS-CONSENSUS-TIME-LIVE.0] +ConsensusTimeLive == + \A r \in Rounds, p \in Corr : + (/\ proposalTime[r] /= NilTimestamp + /\ proposalTime[r] + Delay < realTime + /\ Proposer[r] \in Corr + /\ round[p] <= r) + => \E msg \in RoundProposals(r) : <> \in receivedTimelyProposal + +\* a conjunction of all invariants +Inv == + /\ AgreementOnValue + /\ AgreementOnTime + /\ ConsensusTimeValid + /\ ConsensusSafeValidCorrProp + /\ ConsensusRealTimeValid + /\ ConsensusRealTimeValidCorr + /\ BoundedDelay + +Liveness == + ConsensusTimeLive + +============================================================================= diff --git a/spec/consensus/proposer-based-timestamp/tla/TendermintPBT_002_draft.tla b/spec/consensus/proposer-based-timestamp/tla/TendermintPBT_002_draft.tla new file mode 100644 index 0000000000..983c7351b7 --- /dev/null +++ b/spec/consensus/proposer-based-timestamp/tla/TendermintPBT_002_draft.tla @@ -0,0 +1,885 @@ +-------------------- MODULE TendermintPBT_002_draft --------------------------- +(* + A TLA+ specification of a simplified Tendermint consensus, with added clocks + and proposer-based timestamps. 
This TLA+ specification extends and modifies + the Tendermint TLA+ specification for fork accountability: + https://github.com/tendermint/spec/blob/master/spec/light-client/accountability/TendermintAcc_004_draft.tla + + * Version 2. A preliminary specification. + + Zarko Milosevic, Igor Konnov, Informal Systems, 2019-2020. + Ilina Stoilkovska, Josef Widder, Informal Systems, 2021. + Jure Kukovec, Informal Systems, 2022. + *) + +EXTENDS Integers, FiniteSets, Apalache, typedefs + +(********************* PROTOCOL PARAMETERS **********************************) +\* General protocol parameters +CONSTANTS + \* @type: Set(PROCESS); + Corr, \* the set of correct processes + \* @type: Set(PROCESS); + Faulty, \* the set of Byzantine processes, may be empty + \* @type: Int; + N, \* the total number of processes: correct, defective, and Byzantine + \* @type: Int; + T, \* an upper bound on the number of Byzantine processes + \* @type: Set(VALUE); + ValidValues, \* the set of valid values, proposed both by correct and faulty + \* @type: Set(VALUE); + InvalidValues, \* the set of invalid values, never proposed by the correct ones + \* @type: ROUND; + MaxRound, \* the maximal round number + \* @type: ROUND -> PROCESS; + Proposer \* the proposer function from Rounds to AllProcs + +\* Time-related parameters +CONSTANTS + \* @type: TIME; + MaxTimestamp, \* the maximal value of the clock tick + \* @type: TIME; + MinTimestamp, \* the minimal value of the clock tick + \* @type: TIME; + Delay, \* message delay + \* @type: TIME; + Precision \* clock precision: the maximal difference between two local clocks + +ASSUME(N = Cardinality(Corr \union Faulty)) + +(*************************** DEFINITIONS ************************************) +\* @type: Set(PROCESS); +AllProcs == Corr \union Faulty \* the set of all processes +\* @type: Set(ROUND); +Rounds == 0..MaxRound \* the set of potential rounds +\* @type: Set(TIME); +Timestamps == 0..MaxTimestamp \* the set of clock ticks +\* @type: ROUND; +NilRound == -1 \* a special value to denote a nil round, outside of Rounds +\* @type: TIME; +NilTimestamp == -1 \* a special value to denote a nil timestamp, outside of Ticks +\* @type: Set(ROUND); +RoundsOrNil == Rounds \union {NilRound} +\* @type: Set(VALUE); +Values == ValidValues \union InvalidValues \* the set of all values +\* @type: VALUE; +NilValue == "None" \* a special value for a nil round, outside of Values +\* @type: Set(PROPOSAL); +Proposals == Values \X Timestamps \X Rounds +\* @type: PROPOSAL; +NilProposal == <> +\* @type: Set(VALUE); +ValuesOrNil == Values \union {NilValue} +\* @type: Set(DECISION); +Decisions == Proposals \X Rounds +\* @type: DECISION; +NilDecision == <> + +ValidProposals == ValidValues \X (MinTimestamp..MaxTimestamp) \X Rounds +\* a value hash is modeled as identity +\* @type: (t) => t; +Id(v) == v + +\* The validity predicate +\* @type: (PROPOSAL) => Bool; +IsValid(p) == p \in ValidProposals + +\* Time validity check. 
If we want MaxTimestamp = \infty, set ValidTime(t) == TRUE +ValidTime(t) == t < MaxTimestamp + +\* @type: (PROPMESSAGE) => VALUE; +MessageValue(msg) == msg.proposal[1] +\* @type: (PROPMESSAGE) => TIME; +MessageTime(msg) == msg.proposal[2] +\* @type: (PROPMESSAGE) => ROUND; +MessageRound(msg) == msg.proposal[3] + +\* @type: (TIME, TIME) => Bool; +IsTimely(processTime, messageTime) == + /\ processTime >= messageTime - Precision + /\ processTime <= messageTime + Precision + Delay + +\* the two thresholds that are used in the algorithm +\* @type: Int; +THRESHOLD1 == T + 1 \* at least one process is not faulty +\* @type: Int; +THRESHOLD2 == 2 * T + 1 \* a quorum when having N > 3 * T + +\* @type: (TIME, TIME) => TIME; +Min2(a,b) == IF a <= b THEN a ELSE b +\* @type: (Set(TIME)) => TIME; +Min(S) == FoldSet( Min2, MaxTimestamp, S ) +\* Min(S) == CHOOSE x \in S : \A y \in S : x <= y + +\* @type: (TIME, TIME) => TIME; +Max2(a,b) == IF a >= b THEN a ELSE b +\* @type: (Set(TIME)) => TIME; +Max(S) == FoldSet( Max2, NilTimestamp, S ) +\* Max(S) == CHOOSE x \in S : \A y \in S : y <= x + +\* @type: (Set(MESSAGE)) => Int; +Card(S) == + LET + \* @type: (Int, MESSAGE) => Int; + PlusOne(i, m) == i + 1 + IN FoldSet( PlusOne, 0, S ) + +(********************* PROTOCOL STATE VARIABLES ******************************) +VARIABLES + \* @type: PROCESS -> ROUND; + round, \* a process round number + \* @type: PROCESS -> STEP; + step, \* a process step + \* @type: PROCESS -> DECISION; + decision, \* process decision + \* @type: PROCESS -> VALUE; + lockedValue, \* a locked value + \* @type: PROCESS -> ROUND; + lockedRound, \* a locked round + \* @type: PROCESS -> PROPOSAL; + validValue, \* a valid value + \* @type: PROCESS -> ROUND; + validRound \* a valid round + +coreVars == + <> + +\* time-related variables +VARIABLES + \* @type: PROCESS -> TIME; + localClock, \* a process local clock: Corr -> Ticks + \* @type: TIME; + realTime \* a reference Newtonian real time + +temporalVars == <> + +\* book-keeping variables +VARIABLES + \* @type: ROUND -> Set(PROPMESSAGE); + msgsPropose, \* PROPOSE messages broadcast in the system, Rounds -> Messages + \* @type: ROUND -> Set(PREMESSAGE); + msgsPrevote, \* PREVOTE messages broadcast in the system, Rounds -> Messages + \* @type: ROUND -> Set(PREMESSAGE); + msgsPrecommit, \* PRECOMMIT messages broadcast in the system, Rounds -> Messages + \* @type: Set(MESSAGE); + evidence, \* the messages that were used by the correct processes to make transitions + \* @type: ACTION; + action, \* we use this variable to see which action was taken + \* @type: PROCESS -> Set(PROPMESSAGE); + receivedTimelyProposal, \* used to keep track when a process receives a timely PROPOSAL message + \* @type: <> -> TIME; + inspectedProposal \* used to keep track when a process tries to receive a message + +\* Action is excluded from the tuple, because it always changes +bookkeepingVars == + <> + +\* Invariant support +VARIABLES + \* @type: ROUND -> TIME; + beginRound, \* the minimum of the local clocks at the time any process entered a new round + \* @type: PROCESS -> TIME; + endConsensus, \* the local time when a decision is made + \* @type: ROUND -> TIME; + lastBeginRound, \* the maximum of the local clocks in each round + \* @type: ROUND -> TIME; + proposalTime, \* the real time when a proposer proposes in a round + \* @type: ROUND -> TIME; + proposalReceivedTime \* the real time when a correct process first receives a proposal message in a round + +invariantVars == + <> + +(* to see a type invariant, check 
TendermintAccInv3 *) + +(********************* PROTOCOL INITIALIZATION ******************************) +\* @type: (ROUND) => Set(PROPMESSAGE); +FaultyProposals(r) == + [ + type : {"PROPOSAL"}, + src : Faulty, + round : {r}, + proposal : Proposals, + validRound: RoundsOrNil + ] + +\* @type: Set(PROPMESSAGE); +AllFaultyProposals == + [ + type : {"PROPOSAL"}, + src : Faulty, + round : Rounds, + proposal : Proposals, + validRound: RoundsOrNil + ] + +\* @type: (ROUND) => Set(PREMESSAGE); +FaultyPrevotes(r) == + [ + type : {"PREVOTE"}, + src : Faulty, + round: {r}, + id : Proposals + ] + +\* @type: Set(PREMESSAGE); +AllFaultyPrevotes == + [ + type : {"PREVOTE"}, + src : Faulty, + round: Rounds, + id : Proposals + ] + +\* @type: (ROUND) => Set(PREMESSAGE); +FaultyPrecommits(r) == + [ + type : {"PRECOMMIT"}, + src : Faulty, + round: {r}, + id : Proposals + ] + +\* @type: Set(PREMESSAGE); +AllFaultyPrecommits == + [ + type : {"PRECOMMIT"}, + src : Faulty, + round: Rounds, + id : Proposals + ] + +\* @type: Set(PROPMESSAGE); +AllProposals == + [ + type : {"PROPOSAL"}, + src : AllProcs, + round : Rounds, + proposal : Proposals, + validRound: RoundsOrNil + ] + +\* @type: (ROUND) => Set(PROPMESSAGE); +RoundProposals(r) == + [ + type : {"PROPOSAL"}, + src : AllProcs, + round : {r}, + proposal : Proposals, + validRound: RoundsOrNil + ] + +\* @type: (ROUND -> Set(MESSAGE)) => Bool; +BenignRoundsInMessages(msgfun) == + \* the message function never contains a message for a wrong round + \A r \in Rounds: + \A m \in msgfun[r]: + r = m.round + +\* The initial states of the protocol. Some faults can be in the system already. +Init == + /\ round = [p \in Corr |-> 0] + /\ localClock \in [Corr -> MinTimestamp..(MinTimestamp + Precision)] + /\ realTime = 0 + /\ step = [p \in Corr |-> "PROPOSE"] + /\ decision = [p \in Corr |-> NilDecision] + /\ lockedValue = [p \in Corr |-> NilValue] + /\ lockedRound = [p \in Corr |-> NilRound] + /\ validValue = [p \in Corr |-> NilProposal] + /\ validRound = [p \in Corr |-> NilRound] + /\ msgsPropose \in [Rounds -> SUBSET AllFaultyProposals] + /\ msgsPrevote \in [Rounds -> SUBSET AllFaultyPrevotes] + /\ msgsPrecommit \in [Rounds -> SUBSET AllFaultyPrecommits] + /\ receivedTimelyProposal = [p \in Corr |-> {}] + /\ inspectedProposal = [r \in Rounds, p \in Corr |-> NilTimestamp] + /\ BenignRoundsInMessages(msgsPropose) + /\ BenignRoundsInMessages(msgsPrevote) + /\ BenignRoundsInMessages(msgsPrecommit) + /\ evidence = {} + /\ action' = "Init" + /\ beginRound = + [r \in Rounds |-> + IF r = 0 + THEN Min({localClock[p] : p \in Corr}) + ELSE MaxTimestamp + ] + /\ endConsensus = [p \in Corr |-> NilTimestamp] + /\ lastBeginRound = + [r \in Rounds |-> + IF r = 0 + THEN Max({localClock[p] : p \in Corr}) + ELSE NilTimestamp + ] + /\ proposalTime = [r \in Rounds |-> NilTimestamp] + /\ proposalReceivedTime = [r \in Rounds |-> NilTimestamp] + +(************************ MESSAGE PASSING ********************************) +\* @type: (PROCESS, ROUND, PROPOSAL, ROUND) => Bool; +BroadcastProposal(pSrc, pRound, pProposal, pValidRound) == + LET + \* @type: PROPMESSAGE; + newMsg == + [ + type |-> "PROPOSAL", + src |-> pSrc, + round |-> pRound, + proposal |-> pProposal, + validRound |-> pValidRound + ] + IN + /\ msgsPropose' = [msgsPropose EXCEPT ![pRound] = msgsPropose[pRound] \union {newMsg}] + +\* @type: (PROCESS, ROUND, PROPOSAL) => Bool; +BroadcastPrevote(pSrc, pRound, pId) == + LET + \* @type: PREMESSAGE; + newMsg == + [ + type |-> "PREVOTE", + src |-> pSrc, + round |-> pRound, + id |-> pId + ] + IN + 
/\ msgsPrevote' = [msgsPrevote EXCEPT ![pRound] = msgsPrevote[pRound] \union {newMsg}] + +\* @type: (PROCESS, ROUND, PROPOSAL) => Bool; +BroadcastPrecommit(pSrc, pRound, pId) == + LET + \* @type: PREMESSAGE; + newMsg == + [ + type |-> "PRECOMMIT", + src |-> pSrc, + round |-> pRound, + id |-> pId + ] + IN + /\ msgsPrecommit' = [msgsPrecommit EXCEPT ![pRound] = msgsPrecommit[pRound] \union {newMsg}] + +(***************************** TIME **************************************) + +\* [PBTS-CLOCK-PRECISION.0] +\* @type: Bool; +SynchronizedLocalClocks == + \A p \in Corr : \A q \in Corr : + p /= q => + \/ /\ localClock[p] >= localClock[q] + /\ localClock[p] - localClock[q] < Precision + \/ /\ localClock[p] < localClock[q] + /\ localClock[q] - localClock[p] < Precision + +\* [PBTS-PROPOSE.0] +\* @type: (VALUE, TIME, ROUND) => PROPOSAL; +Proposal(v, t, r) == + <> + +\* [PBTS-DECISION-ROUND.0] +\* @type: (PROPOSAL, ROUND) => DECISION; +Decision(p, r) == + <> + +(**************** MESSAGE PROCESSING TRANSITIONS *************************) +\* lines 12-13 +\* @type: (PROCESS, ROUND) => Bool; +StartRound(p, r) == + /\ step[p] /= "DECIDED" \* a decided process does not participate in consensus + /\ round' = [round EXCEPT ![p] = r] + /\ step' = [step EXCEPT ![p] = "PROPOSE"] + \* We only need to update (last)beginRound[r] once a process enters round `r` + /\ beginRound' = [beginRound EXCEPT ![r] = Min2(@, localClock[p])] + /\ lastBeginRound' = [lastBeginRound EXCEPT ![r] = Max2(@, localClock[p])] + +\* lines 14-19, a proposal may be sent later +\* @type: (PROCESS) => Bool; +InsertProposal(p) == + LET r == round[p] IN + /\ p = Proposer[r] + /\ step[p] = "PROPOSE" + \* if the proposer is sending a proposal, then there are no other proposals + \* by the correct processes for the same round + /\ \A m \in msgsPropose[r]: m.src /= p + \* /\ localClock[p] > + /\ \E v \in ValidValues: + LET proposal == + IF validValue[p] /= NilProposal + THEN validValue[p] + ELSE Proposal(v, localClock[p], r) + IN + /\ BroadcastProposal(p, r, proposal, validRound[p]) + /\ proposalTime' = [proposalTime EXCEPT ![r] = realTime] + /\ UNCHANGED <> + /\ UNCHANGED + <<(*msgsPropose,*) msgsPrevote, msgsPrecommit, + evidence, receivedTimelyProposal, inspectedProposal>> + /\ UNCHANGED + <> + /\ action' = "InsertProposal" + +\* a new action used to filter messages that are not on time +\* [PBTS-RECEPTION-STEP.0] +\* @type: (PROCESS) => Bool; +ReceiveProposal(p) == + \E v \in Values, t \in Timestamps: + /\ LET r == round[p] IN + LET + \* @type: PROPMESSAGE; + msg == + [ + type |-> "PROPOSAL", + src |-> Proposer[round[p]], + round |-> round[p], + proposal |-> Proposal(v, t, r), + validRound |-> NilRound + ] + IN + /\ msg \in msgsPropose[round[p]] + /\ inspectedProposal[r,p] = NilTimestamp + /\ msg \notin receivedTimelyProposal[p] + /\ inspectedProposal' = [inspectedProposal EXCEPT ![r,p] = localClock[p]] + /\ LET + isTimely == IsTimely(localClock[p], t) + IN + \/ /\ isTimely + /\ receivedTimelyProposal' = [receivedTimelyProposal EXCEPT ![p] = @ \union {msg}] + /\ LET + isNilTimestamp == proposalReceivedTime[r] = NilTimestamp + IN + \/ /\ isNilTimestamp + /\ proposalReceivedTime' = [proposalReceivedTime EXCEPT ![r] = realTime] + \/ /\ ~isNilTimestamp + /\ UNCHANGED proposalReceivedTime + \/ /\ ~isTimely + /\ UNCHANGED <> + /\ UNCHANGED <> + /\ UNCHANGED + <> + /\ UNCHANGED + <> + /\ action' = "ReceiveProposal" + +\* lines 22-27 +\* @type: (PROCESS) => Bool; +UponProposalInPropose(p) == + \E v \in Values, t \in Timestamps: + LET + r == round[p] 
+ IN LET + \* @type: PROPOSAL; + prop == Proposal(v,t,r) + IN + /\ step[p] = "PROPOSE" (* line 22 *) + /\ LET + \* @type: PROPMESSAGE; + msg == + [ + type |-> "PROPOSAL", + src |-> Proposer[r], + round |-> r, + proposal |-> prop, + validRound |-> NilRound + ] + IN + /\ msg \in receivedTimelyProposal[p] \* updated line 22 + /\ evidence' = {msg} \union evidence + /\ LET mid == (* line 23 *) + IF IsValid(prop) /\ (lockedRound[p] = NilRound \/ lockedValue[p] = v) + THEN Id(prop) + ELSE NilProposal + IN + BroadcastPrevote(p, r, mid) \* lines 24-26 + /\ step' = [step EXCEPT ![p] = "PREVOTE"] + /\ UNCHANGED <> + /\ UNCHANGED + <> + /\ UNCHANGED + <> + /\ action' = "UponProposalInPropose" + +\* lines 28-33 +\* [PBTS-ALG-OLD-PREVOTE.0] +\* @type: (PROCESS) => Bool; +UponProposalInProposeAndPrevote(p) == + \E v \in Values, t \in Timestamps, vr \in Rounds, pr \in Rounds: + LET + r == round[p] + IN LET + \* @type: PROPOSAL; + prop == Proposal(v,t,pr) + IN + /\ step[p] = "PROPOSE" /\ 0 <= vr /\ vr < r \* line 28, the while part + /\ pr <= vr + /\ LET + \* @type: PROPMESSAGE; + msg == + [ + type |-> "PROPOSAL", + src |-> Proposer[r], + round |-> r, + proposal |-> prop, + validRound |-> vr + ] + IN + \* Changed from 001: no need to re-check timeliness + /\ msg \in msgsPropose[r] \* line 28 + /\ LET PV == { m \in msgsPrevote[vr]: m.id = Id(prop) } IN + /\ Cardinality(PV) >= THRESHOLD2 \* line 28 + /\ evidence' = PV \union {msg} \union evidence + /\ LET mid == (* line 29 *) + IF IsValid(prop) /\ (lockedRound[p] <= vr \/ lockedValue[p] = v) + THEN Id(prop) + ELSE NilProposal + IN + BroadcastPrevote(p, r, mid) \* lines 24-26 + /\ step' = [step EXCEPT ![p] = "PREVOTE"] + /\ UNCHANGED <> + /\ UNCHANGED + <> + /\ UNCHANGED + <> + /\ action' = "UponProposalInProposeAndPrevote" + +\* lines 34-35 + lines 61-64 (onTimeoutPrevote) +\* @type: (PROCESS) => Bool; +UponQuorumOfPrevotesAny(p) == + /\ step[p] = "PREVOTE" \* line 34 and 61 + /\ \E MyEvidence \in SUBSET msgsPrevote[round[p]]: + \* find the unique voters in the evidence + LET Voters == { m.src: m \in MyEvidence } IN + \* compare the number of the unique voters against the threshold + /\ Cardinality(Voters) >= THRESHOLD2 \* line 34 + /\ evidence' = MyEvidence \union evidence + /\ BroadcastPrecommit(p, round[p], NilProposal) + /\ step' = [step EXCEPT ![p] = "PRECOMMIT"] + /\ UNCHANGED <> + /\ UNCHANGED + <> + /\ UNCHANGED + <> + /\ action' = "UponQuorumOfPrevotesAny" + +\* lines 36-46 +\* [PBTS-ALG-NEW-PREVOTE.0] +\* @type: (PROCESS) => Bool; +UponProposalInPrevoteOrCommitAndPrevote(p) == + \E v \in ValidValues, t \in Timestamps, vr \in RoundsOrNil: + LET + r == round[p] + IN LET + \* @type: PROPOSAL; + prop == Proposal(v,t,r) + IN + /\ step[p] \in {"PREVOTE", "PRECOMMIT"} \* line 36 + /\ LET + \* @type: PROPMESSAGE; + msg == + [ + type |-> "PROPOSAL", + src |-> Proposer[r], + round |-> r, + proposal |-> prop, + validRound |-> vr + ] + IN + \* Changed from 001: no need to re-check timeliness + /\ msg \in msgsPropose[r] \* line 36 + /\ LET PV == { m \in msgsPrevote[r]: m.id = Id(prop) } IN + /\ Cardinality(PV) >= THRESHOLD2 \* line 36 + /\ evidence' = PV \union {msg} \union evidence + /\ IF step[p] = "PREVOTE" + THEN \* lines 38-41: + /\ lockedValue' = [lockedValue EXCEPT ![p] = v] + /\ lockedRound' = [lockedRound EXCEPT ![p] = r] + /\ BroadcastPrecommit(p, r, Id(prop)) + /\ step' = [step EXCEPT ![p] = "PRECOMMIT"] + ELSE + UNCHANGED <> + \* lines 42-43 + /\ validValue' = [validValue EXCEPT ![p] = prop] + /\ validRound' = [validRound EXCEPT ![p] = r] + /\ 
UNCHANGED <> + /\ UNCHANGED + <> + /\ UNCHANGED + <> + /\ action' = "UponProposalInPrevoteOrCommitAndPrevote" + +\* lines 47-48 + 65-67 (onTimeoutPrecommit) +\* @type: (PROCESS) => Bool; +UponQuorumOfPrecommitsAny(p) == + /\ \E MyEvidence \in SUBSET msgsPrecommit[round[p]]: + \* find the unique committers in the evidence + LET Committers == { m.src: m \in MyEvidence } IN + \* compare the number of the unique committers against the threshold + /\ Cardinality(Committers) >= THRESHOLD2 \* line 47 + /\ evidence' = MyEvidence \union evidence + /\ round[p] + 1 \in Rounds + /\ StartRound(p, round[p] + 1) + /\ UNCHANGED temporalVars + /\ UNCHANGED + <<(*beginRound,*) endConsensus, (*lastBeginRound,*) + proposalTime, proposalReceivedTime>> + /\ UNCHANGED + <<(*round, step,*) decision, lockedValue, + lockedRound, validValue, validRound>> + /\ UNCHANGED + <> + /\ action' = "UponQuorumOfPrecommitsAny" + +\* lines 49-54 +\* [PBTS-ALG-DECIDE.0] +\* @type: (PROCESS) => Bool; +UponProposalInPrecommitNoDecision(p) == + /\ decision[p] = NilDecision \* line 49 + /\ \E v \in ValidValues, t \in Timestamps (* line 50*) , r \in Rounds, pr \in Rounds, vr \in RoundsOrNil: + LET + \* @type: PROPOSAL; + prop == Proposal(v,t,pr) + IN + /\ LET + \* @type: PROPMESSAGE; + msg == + [ + type |-> "PROPOSAL", + src |-> Proposer[r], + round |-> r, + proposal |-> prop, + validRound |-> vr + ] + IN + /\ msg \in msgsPropose[r] \* line 49 + /\ inspectedProposal[r,p] /= NilTimestamp \* Keep? + /\ LET PV == { m \in msgsPrecommit[r]: m.id = Id(prop) } IN + /\ Cardinality(PV) >= THRESHOLD2 \* line 49 + /\ evidence' = PV \union {msg} \union evidence + /\ decision' = [decision EXCEPT ![p] = Decision(prop, r)] \* update the decision, line 51 + \* The original algorithm does not have 'DECIDED', but it increments the height. + \* We introduced 'DECIDED' here to prevent the process from changing its decision. 
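+          \* endConsensus records, on p's local clock, the time at which p decided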
+ /\ endConsensus' = [endConsensus EXCEPT ![p] = localClock[p]] + /\ step' = [step EXCEPT ![p] = "DECIDED"] + /\ UNCHANGED temporalVars + /\ UNCHANGED + <> + /\ UNCHANGED + <> + /\ UNCHANGED + <> + /\ action' = "UponProposalInPrecommitNoDecision" + +\* the actions below are not essential for safety, but added for completeness + +\* lines 20-21 + 57-60 +\* @type: (PROCESS) => Bool; +OnTimeoutPropose(p) == + /\ step[p] = "PROPOSE" + /\ p /= Proposer[round[p]] + /\ BroadcastPrevote(p, round[p], NilProposal) + /\ step' = [step EXCEPT ![p] = "PREVOTE"] + /\ UNCHANGED <> + /\ UNCHANGED + <> + /\ UNCHANGED + <> + /\ action' = "OnTimeoutPropose" + +\* lines 44-46 +\* @type: (PROCESS) => Bool; +OnQuorumOfNilPrevotes(p) == + /\ step[p] = "PREVOTE" + /\ LET PV == { m \in msgsPrevote[round[p]]: m.id = Id(NilProposal) } IN + /\ Cardinality(PV) >= THRESHOLD2 \* line 36 + /\ evidence' = PV \union evidence + /\ BroadcastPrecommit(p, round[p], Id(NilProposal)) + /\ step' = [step EXCEPT ![p] = "PRECOMMIT"] + /\ UNCHANGED <> + /\ UNCHANGED + <> + /\ UNCHANGED + <> + /\ action' = "OnQuorumOfNilPrevotes" + +\* lines 55-56 +\* @type: (PROCESS) => Bool; +OnRoundCatchup(p) == + \E r \in {rr \in Rounds: rr > round[p]}: + LET RoundMsgs == msgsPropose[r] \union msgsPrevote[r] \union msgsPrecommit[r] IN + \E MyEvidence \in SUBSET RoundMsgs: + LET Faster == { m.src: m \in MyEvidence } IN + /\ Cardinality(Faster) >= THRESHOLD1 + /\ evidence' = MyEvidence \union evidence + /\ StartRound(p, r) + /\ UNCHANGED temporalVars + /\ UNCHANGED + <<(*beginRound,*) endConsensus, (*lastBeginRound,*) + proposalTime, proposalReceivedTime>> + /\ UNCHANGED + <<(*round, step,*) decision, lockedValue, + lockedRound, validValue, validRound>> + /\ UNCHANGED + <> + /\ action' = "OnRoundCatchup" + + +(********************* PROTOCOL TRANSITIONS ******************************) +\* advance the global clock +\* @type: Bool; +AdvanceRealTime == + /\ ValidTime(realTime) + /\ \E t \in Timestamps: + /\ t > realTime + /\ realTime' = t + /\ localClock' = [p \in Corr |-> localClock[p] + (t - realTime)] + /\ UNCHANGED <> + /\ action' = "AdvanceRealTime" + +\* advance the local clock of node p to some larger time t, not necessarily by 1 +\* #type: (PROCESS) => Bool; +\* AdvanceLocalClock(p) == +\* /\ ValidTime(localClock[p]) +\* /\ \E t \in Timestamps: +\* /\ t > localClock[p] +\* /\ localClock' = [localClock EXCEPT ![p] = t] +\* /\ UNCHANGED <> +\* /\ UNCHANGED realTime +\* /\ action' = "AdvanceLocalClock" + +\* process timely messages +\* @type: (PROCESS) => Bool; +MessageProcessing(p) == + \* start round + \/ InsertProposal(p) + \* reception step + \/ ReceiveProposal(p) + \* processing step + \/ UponProposalInPropose(p) + \/ UponProposalInProposeAndPrevote(p) + \/ UponQuorumOfPrevotesAny(p) + \/ UponProposalInPrevoteOrCommitAndPrevote(p) + \/ UponQuorumOfPrecommitsAny(p) + \/ UponProposalInPrecommitNoDecision(p) + \* the actions below are not essential for safety, but added for completeness + \/ OnTimeoutPropose(p) + \/ OnQuorumOfNilPrevotes(p) + \/ OnRoundCatchup(p) + +(* + * A system transition. In this specificatiom, the system may eventually deadlock, + * e.g., when all processes decide. This is expected behavior, as we focus on safety. 
+ *) +Next == + \/ AdvanceRealTime + \/ /\ SynchronizedLocalClocks + /\ \E p \in Corr: MessageProcessing(p) + +----------------------------------------------------------------------------- + +(*************************** INVARIANTS *************************************) + +\* [PBTS-INV-AGREEMENT.0] +AgreementOnValue == + \A p, q \in Corr: + /\ decision[p] /= NilDecision + /\ decision[q] /= NilDecision + => \E v \in ValidValues, t \in Timestamps, pr \in Rounds, r1 \in Rounds, r2 \in Rounds : + LET prop == Proposal(v,t,pr) + IN + /\ decision[p] = Decision(prop, r1) + /\ decision[q] = Decision(prop, r2) + +\* [PBTS-CONSENSUS-TIME-VALID.0] +ConsensusTimeValid == + \A p \in Corr: + \* if a process decides on v and t + \E v \in ValidValues, t \in Timestamps, pr \in Rounds, dr \in Rounds : + decision[p] = Decision(Proposal(v,t,pr), dr) + \* then + \* TODO: consider tighter bound where beginRound[pr] is replaced + \* w/ MedianOfRound[pr] + => (/\ beginRound[pr] - Precision - Delay <= t + /\ t <= endConsensus[p] + Precision) + +\* [PBTS-CONSENSUS-SAFE-VALID-CORR-PROP.0] +ConsensusSafeValidCorrProp == + \A v \in ValidValues: + \* and there exists a process that decided on v, t + /\ \E p \in Corr, t \in Timestamps, pr \in Rounds, dr \in Rounds : + \* if the proposer in the round is correct + (/\ Proposer[pr] \in Corr + /\ decision[p] = Decision(Proposal(v,t,pr), dr)) + \* then t is between the minimal and maximal initial local time + => /\ beginRound[pr] <= t + /\ t <= lastBeginRound[pr] + +\* [PBTS-CONSENSUS-REALTIME-VALID-CORR.0] +ConsensusRealTimeValidCorr == + \A r \in Rounds : + \E p \in Corr, v \in ValidValues, t \in Timestamps, pr \in Rounds: + (/\ decision[p] = Decision(Proposal(v,t,pr), r) + /\ proposalTime[r] /= NilTimestamp) + => (/\ proposalTime[r] - Precision <= t + /\ t <= proposalTime[r] + Precision) + +\* [PBTS-CONSENSUS-REALTIME-VALID.0] +ConsensusRealTimeValid == + \A t \in Timestamps, r \in Rounds : + (\E p \in Corr, v \in ValidValues, pr \in Rounds : + decision[p] = Decision(Proposal(v,t,pr), r)) + => /\ proposalReceivedTime[r] - Precision < t + /\ t < proposalReceivedTime[r] + Precision + Delay + +DecideAfterMin == TRUE + \* if decide => time > min + +\* [PBTS-MSG-FAIR.0] +BoundedDelay == + \A r \in Rounds : + (/\ proposalTime[r] /= NilTimestamp + /\ proposalTime[r] + Delay < realTime) + => \A p \in Corr: inspectedProposal[r,p] /= NilTimestamp + +\* [PBTS-CONSENSUS-TIME-LIVE.0] +ConsensusTimeLive == + \A r \in Rounds, p \in Corr : + (/\ proposalTime[r] /= NilTimestamp + /\ proposalTime[r] + Delay < realTime + /\ Proposer[r] \in Corr + /\ round[p] <= r) + => \E msg \in RoundProposals(r) : msg \in receivedTimelyProposal[p] + +\* a conjunction of all invariants +Inv == + /\ AgreementOnValue + /\ ConsensusTimeValid + /\ ConsensusSafeValidCorrProp + \* /\ ConsensusRealTimeValid + \* /\ ConsensusRealTimeValidCorr + \* /\ BoundedDelay + +\* Liveness == +\* ConsensusTimeLive + +============================================================================= diff --git a/spec/consensus/proposer-based-timestamp/tla/typedefs.tla b/spec/consensus/proposer-based-timestamp/tla/typedefs.tla new file mode 100644 index 0000000000..72e76df54b --- /dev/null +++ b/spec/consensus/proposer-based-timestamp/tla/typedefs.tla @@ -0,0 +1,39 @@ +-------------------- MODULE typedefs --------------------------- +(* + @typeAlias: PROCESS = Str; + @typeAlias: VALUE = Str; + @typeAlias: STEP = Str; + @typeAlias: ROUND = Int; + @typeAlias: ACTION = Str; + @typeAlias: TRACE = Seq(Str); + @typeAlias: TIME = Int; + 
@typeAlias: PROPOSAL = <<VALUE, TIME, ROUND>>;
+ @typeAlias: DECISION = <<PROPOSAL, ROUND>>;
+ @typeAlias: PROPMESSAGE =
+ [
+ type: STEP,
+ src: PROCESS,
+ round: ROUND,
+ proposal: PROPOSAL,
+ validRound: ROUND
+ ];
+ @typeAlias: PREMESSAGE =
+ [
+ type: STEP,
+ src: PROCESS,
+ round: ROUND,
+ id: PROPOSAL
+ ];
+ @typeAlias: MESSAGE =
+ [
+ type: STEP,
+ src: PROCESS,
+ round: ROUND,
+ proposal: PROPOSAL,
+ validRound: ROUND,
+ id: PROPOSAL
+ ];
+*)
+TypeAliases == TRUE
+
+=============================================================================
\ No newline at end of file
diff --git a/spec/consensus/proposer-based-timestamp/v1/pbts-algorithm_001_draft.md b/spec/consensus/proposer-based-timestamp/v1/pbts-algorithm_001_draft.md
new file mode 100644
index 0000000000..c8fd08ef49
--- /dev/null
+++ b/spec/consensus/proposer-based-timestamp/v1/pbts-algorithm_001_draft.md
@@ -0,0 +1,162 @@
+# PBTS: Protocol Specification (first draft)
+
+This specification is **OUTDATED**. Please refer to the [new version][algorithm].
+
+## Updated Consensus Algorithm
+
+### Outline
+
+The algorithm in the [arXiv paper][arXiv] evaluates the rules over received messages without making explicit how these messages are received. In our solution, we will make some message filtering explicit. We will assume that there are message reception steps (where messages are received and possibly stored locally for later evaluation of rules) and processing steps (the latter roughly following the pseudo code of the arXiv paper).
+
+In contrast to the original algorithm, the field `proposal` in the `PROPOSE` message is a pair `(v, time)` of the proposed consensus value `v` and the proposed time `time`.
+
+#### **[PBTS-RECEPTION-STEP.0]**
+
+In the reception step at process `p` at local time `now_p`, upon receiving a message `m`:
+
+- if the message `m` is of type `PROPOSE` and satisfies `now_p - PRECISION < m.time < now_p + PRECISION + MSGDELAY`, then mark the message as `timely`
+
+> if `m` does not satisfy the constraint, consider it `untimely`
+
+
+#### **[PBTS-PROCESSING-STEP.0]**
+
+In the processing step, based on the messages stored, the rules of the algorithm are
+executed. Note that the processing step only operates on messages
+for the current height. The consensus algorithm rules are defined by the following updates to the arXiv paper.
+
+#### New `StartRound`
+
+There are two additions:
+
+- in case the proposer's local time is smaller than the time of the previous block, the proposer waits until this is not the case anymore (to ensure that the block time is monotonically increasing)
+- the proposer sends its time `now_p` as part of its proposal
+
+We update the timeout for the `PROPOSE` step according to the following reasoning:
+
+- If a correct proposer needs to wait to make sure its proposed time is larger than the `blockTime` of the previous block, then it sends its `PROPOSE` message by real time `blockTime + ACCURACY` (by this time, its local clock must exceed `blockTime`)
+- the receiver will receive a `PROPOSE` message by `blockTime + ACCURACY + MSGDELAY`
+- the receiver's local clock will be `<= blockTime + 2 * ACCURACY + MSGDELAY`
+- thus when the receiver `p` enters this round it can set its timeout to a value `waitingTime >= blockTime + 2 * ACCURACY + MSGDELAY - now_p`
+
+So we should set the timeout to `max(timeoutPropose(round_p), waitingTime)`.
+
+> If, in the future, a block delay parameter `BLOCKDELAY` is introduced, this means
+that the proposer should wait for `now_p > blockTime + BLOCKDELAY` before sending a `PROPOSE` message.
+
+Also, `BLOCKDELAY` needs to be added to `waitingTime`.
+
+#### **[PBTS-ALG-STARTROUND.0]**
+
+```go
+function StartRound(round) {
+ blockTime ← block time of block h_p - 1
+ waitingTime ← blockTime + 2 * ACCURACY + MSGDELAY - now_p
+ round_p ← round
+ step_p ← propose
+ if proposer(h_p, round_p) = p {
+ wait until now_p > blockTime // new wait condition
+ if validValue_p != nil {
+ proposal ← (validValue_p, now_p) // added "now_p"
+ }
+ else {
+ proposal ← (getValue(), now_p) // added "now_p"
+ }
+ broadcast ⟨PROPOSAL, h_p, round_p, proposal, validRound_p⟩
+ }
+ else {
+ schedule OnTimeoutPropose(h_p,round_p) to be executed after max(timeoutPropose(round_p), waitingTime)
+ }
+}
+```
+
+#### New Rule Replacing Lines 22 - 27
+
+- a validator prevotes for the consensus value `v` **and** the time `t`
+- the code changes as the `PROPOSAL` message carries time (while `lockedValue` does not)
+
+#### **[PBTS-ALG-UPON-PROP.0]**
+
+```go
+upon timely(⟨PROPOSAL, h_p, round_p, (v,t), −1⟩) from proposer(h_p, round_p) while step_p = propose do {
+ if valid(v) ∧ (lockedRound_p = −1 ∨ lockedValue_p = v) {
+ broadcast ⟨PREVOTE, h_p, round_p, id(v,t)⟩
+ }
+ else {
+ broadcast ⟨PREVOTE, h_p, round_p, nil⟩
+ }
+ step_p ← prevote
+}
+```
+
+#### New Rule Replacing Lines 28 - 33
+
+In case consensus is not reached in round 1, in `StartRound` the proposer of future rounds may propose the same value but with a different time.
+Thus, the time `tprop` in the `PROPOSAL` message need not match the time `tvote` in the (old) `PREVOTE` messages.
+A validator may send `PREVOTE` for the current round as long as the value `v` matches.
+This gives the following rule:
+
+#### **[PBTS-ALG-OLD-PREVOTE.0]**
+
+```go
+upon timely(⟨PROPOSAL, h_p, round_p, (v, tprop), vr⟩) from proposer(h_p, round_p) AND 2f + 1 ⟨PREVOTE, h_p, vr, id(v, tvote)⟩
+while step_p = propose ∧ (vr ≥ 0 ∧ vr < round_p) do {
+ if valid(v) ∧ (lockedRound_p ≤ vr ∨ lockedValue_p = v) {
+ broadcast ⟨PREVOTE, h_p, round_p, id(v, tprop)⟩
+ }
+ else {
+ broadcast ⟨PREVOTE, h_p, round_p, nil⟩
+ }
+ step_p ← prevote
+}
+```
+
+#### New Rule Replacing Lines 36 - 43
+
+- As above, in the following `(v,t)` is part of the message rather than `v`
+- the stored values (i.e., `lockedValue`, `validValue`) do not contain the time
+
+#### **[PBTS-ALG-NEW-PREVOTE.0]**
+
+```go
+upon timely(⟨PROPOSAL, h_p, round_p, (v,t), ∗⟩) from proposer(h_p, round_p) AND 2f + 1 ⟨PREVOTE, h_p, round_p, id(v,t)⟩ while valid(v) ∧ step_p ≥ prevote for the first time do {
+ if step_p = prevote {
+ lockedValue_p ← v
+ lockedRound_p ← round_p
+ broadcast ⟨PRECOMMIT, h_p, round_p, id(v,t)⟩
+ step_p ← precommit
+ }
+ validValue_p ← v
+ validRound_p ← round_p
+}
+```
+
+#### New Rule Replacing Lines 49 - 54
+
+- we decide on `v` as well as on the time from the proposal message
+- here we do not care whether the proposal was received in a timely manner.
+
+> In particular, we need to take care of the case where the proposal is untimely for one correct validator only. We need to ensure that this validator decides if all others decide.
+
+#### **[PBTS-ALG-DECIDE.0]**
+
+```go
+upon ⟨PROPOSAL, h_p, r, (v,t), ∗⟩ from proposer(h_p, r) AND 2f + 1 ⟨PRECOMMIT, h_p, r, id(v,t)⟩ while decision_p[h_p] = nil do {
+ if valid(v) {
+ decision_p[h_p] = (v,t) // decide on time too
+ h_p ← h_p + 1
+ reset lockedRound_p, lockedValue_p, validRound_p and validValue_p to initial values and empty message log
+ StartRound(0)
+ }
+}
+```
+
+**All other rules remain unchanged.**
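+
+To make the timing rules above concrete, here is a minimal Go sketch of the reception-step timeliness check and of the updated `PROPOSE`-step timeout. It is an illustration only: the names (`Precision`, `MsgDelay`, `Accuracy`) and the use of `time.Duration` are assumptions of this sketch, not identifiers from the specification or any implementation.
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+)
+
+// Illustrative values for the system parameters PRECISION, MSGDELAY and ACCURACY.
+const (
+	Precision = 500 * time.Millisecond
+	MsgDelay  = 2 * time.Second
+	Accuracy  = 100 * time.Millisecond
+)
+
+// isTimely checks [PBTS-RECEPTION-STEP.0]:
+// now_p - PRECISION < m.time < now_p + PRECISION + MSGDELAY.
+func isTimely(proposalTime, now time.Time) bool {
+	return proposalTime.After(now.Add(-Precision)) &&
+		proposalTime.Before(now.Add(Precision+MsgDelay))
+}
+
+// proposeTimeout computes max(timeoutPropose(round_p), waitingTime), where
+// waitingTime = blockTime + 2*ACCURACY + MSGDELAY - now_p.
+func proposeTimeout(timeoutPropose time.Duration, blockTime, now time.Time) time.Duration {
+	waitingTime := blockTime.Add(2*Accuracy + MsgDelay).Sub(now)
+	if waitingTime > timeoutPropose {
+		return waitingTime
+	}
+	return timeoutPropose
+}
+
+func main() {
+	now := time.Now()
+	fmt.Println(isTimely(now.Add(time.Second), now))     // true: within (now-PRECISION, now+PRECISION+MSGDELAY)
+	fmt.Println(proposeTimeout(3*time.Second, now, now)) // waitingTime is 2.2s here, so the regular 3s timeout wins
+}
+```
+
+Back to [main document][main_v1].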
+ +[main_v1]: ./pbts_001_draft.md + +[algorithm]: ../pbts-algorithm_002_draft.md +[algorithm_v1]: ./pbts-algorithm_001_draft.md + +[arXiv]: https://arxiv.org/abs/1807.04938 diff --git a/spec/consensus/proposer-based-timestamp/v1/pbts-sysmodel_001_draft.md b/spec/consensus/proposer-based-timestamp/v1/pbts-sysmodel_001_draft.md new file mode 100644 index 0000000000..e721fe07ed --- /dev/null +++ b/spec/consensus/proposer-based-timestamp/v1/pbts-sysmodel_001_draft.md @@ -0,0 +1,194 @@ +# PBTS: System Model and Properties (first draft) + +This specification is **OUTDATED**. Please refer to the [new version][sysmodel]. + +## System Model + +### Time and Clocks + +#### **[PBTS-CLOCK-NEWTON.0]** + +There is a reference Newtonian real-time `t` (UTC). + +Every correct validator `V` maintains a synchronized clock `C_V` that ensures: + +#### **[PBTS-CLOCK-PRECISION.0]** + +There exists a system parameter `PRECISION` such that for any two correct validators `V` and `W`, and at any real-time `t`, +`|C_V(t) - C_W(t)| < PRECISION` + + +### Message Delays + +We do not want to interfere with the Tendermint timing assumptions. We will postulate a timing restriction, which, if satisfied, ensures that liveness is preserved. + +In general the local clock may drift from the global time. (It may progress faster, e.g., one second of clock time might take 1.005 seconds of real-time). As a result the local clock and the global clock may be measured in different time units. Usually, the message delay is measured in global clock time units. To estimate the correct local timeout precisely, we would need to estimate the clock time duration of a message delay taking into account the clock drift. For simplicity we ignore this, and directly postulate the message delay assumption in terms of local time. + + +#### **[PBTS-MSG-D.0]** + +There exists a system parameter `MSGDELAY` for message end-to-end delays **counted in clock-time**. + +> Observe that [PBTS-MSG-D.0] imposes constraints on message delays as well as on the clock. + +#### **[PBTS-MSG-FAIR.0]** + +The message end-to-end delay between a correct proposer and a correct validator (for `PROPOSE` messages) is less than `MSGDELAY`. + + +## Problem Statement + +In this section we define the properties of Tendermint consensus (cf. the [arXiv paper][arXiv]) in this new system model. + +#### **[PBTS-PROPOSE.0]** + +A proposer proposes a pair `(v,t)` of consensus value `v` and time `t`. + +> We then restrict the allowed decisions along the following lines: + +#### **[PBTS-INV-AGREEMENT.0]** + +[Agreement] No two correct validators decide on different values `v`. + +#### **[PBTS-INV-TIME-VAL.0]** + +[Time-Validity] If a correct validator decides on `t` then `t` is "OK" (we will formalize this below), even if up to `2f` validators are faulty. + +However, the properties of Tendermint consensus are of more interest with respect to the blocks, that is, what is written into a block and when. We therefore, in the following, will give the safety and liveness properties from this block-centric viewpoint. +For this, observe that the time `t` decided at consensus height `k` will be written in the block of height `k+1`, and will be supported by `2f + 1` `PRECOMMIT` messages of the same consensus round `r`. The time written in the block, we will denote by `b.time` (to distinguish it from the term `bfttime` used for median-based time). 
For this, it is important to have the following consensus algorithm property:
+
+#### **[PBTS-INV-TIME-AGR.0]**
+
+[Time-Agreement] If two correct validators decide in the same round, then they decide on the same `t`.
+
+#### **[PBTS-DECISION-ROUND.0]**
+
+Note that the relation between consensus decisions, on the one hand, and blocks, on the other hand, is not immediate, in particular if we consider time: in the proposed solution,
+as validators may decide in different rounds, they may decide on different times.
+The proposer of the next block may pick a commit (at least `2f + 1` `PRECOMMIT` messages from one round), and thus it picks a decision round that is going to become "canonical".
+As a result, the proposer implicitly has a choice of one of the times that belong to rounds in which validators decided. Observe that this choice was implicitly the case already in the median-based `bfttime`.
+However, as most consensus instances terminate within one round on the Cosmos hub, this is hardly ever observed in practice.
+
+
+
+Finally, observe that the agreement ([Agreement] and [Time-Agreement]) properties are based on the Tendermint security model [TMBC-FM-2THIRDS.0] of more than 2/3 correct validators, while [Time-Validity] is based on more than 1/3 correct validators.
+
+### SAFETY
+
+Here we will provide specifications that relate local time to block time. However, since we do not assume (by now) that local time is linked to real-time, these specifications also do not provide a relation between block time and real-time. Such properties are given [later](#REAL-TIME-SAFETY).
+
+For a correct validator `V`, let `beginConsensus(V,k)` be the local time when it sets its height to `k`, and let `endConsensus(V,k)` be the time when it sets its height to `k + 1`.
+
+Let
+
+- `beginConsensus(k)` be the minimum over `beginConsensus(V,k)`, and
+- `last-beginConsensus(k)` be the maximum over `beginConsensus(V,k)`, and
+- `endConsensus(k)` the maximum over `endConsensus(V,k)`
+
+for all correct validators `V`.
+
+> Observe that `beginConsensus(k) <= last-beginConsensus(k)` and if local clocks are monotonic, then `last-beginConsensus(k) <= endConsensus(k)`.
+
+#### **[PBTS-CLOCK-GROW.0]**
+
+We assume that during one consensus instance, local clocks are not set back; in particular, for each correct validator `V` and each height `k`, we have `beginConsensus(V,k) < endConsensus(V,k)`.
+
+
+#### **[PBTS-CONSENSUS-TIME-VALID.0]**
+
+If
+
+- there is a valid commit `c` for height `k`, and
+- `c` contains a `PRECOMMIT` message by at least one correct validator,
+
+then the time `b.time` in the block `b` that is signed by `c` satisfies
+
+- `beginConsensus(k) - PRECISION <= b.time < endConsensus(k) + PRECISION + MSGDELAY`.
+
+
+> [PBTS-CONSENSUS-TIME-VALID.0] is based on an analysis where the proposer is faulty (and does not count towards `beginConsensus(k)` and `endConsensus(k)`), and we estimate the times at which correct validators receive and `accept` the `propose` message. If the proposer is correct, we obtain
+
+#### **[PBTS-CONSENSUS-LIVE-VALID-CORR-PROP.0]**
+
+If the proposer of round 1 is correct, and
+
+- [TMBC-FM-2THIRDS.0] holds for a block of height `k - 1`, and
+- [PBTS-MSG-FAIR.0], and
+- [PBTS-CLOCK-PRECISION.0], and
+- [PBTS-CLOCK-GROW.0] (**TODO:** is that enough?)
+
+then eventually (within bounded time) every correct validator decides in round 1.
+
+#### **[PBTS-CONSENSUS-SAFE-VALID-CORR-PROP.0]**
+
+If the proposer of round 1 is correct, and
+
+- [TMBC-FM-2THIRDS.0] holds for a block of height `k - 1`, and
+- [PBTS-MSG-FAIR.0], and
+- [PBTS-CLOCK-PRECISION.0], and
+- [PBTS-CLOCK-GROW.0] (**TODO:** is that enough?)
+
+then `beginConsensus(k) <= b.time <= last-beginConsensus(k)`.
+
+
+> For the above two properties we will assume that a correct proposer `v` sends its `PROPOSAL` at its local time `beginConsensus(v,k)`.
+
+### LIVENESS
+
+If
+
+- [TMBC-FM-2THIRDS.0] holds for a block of height `k - 1`, and
+- [PBTS-MSG-FAIR.0],
+- [PBTS-CLOCK.0], and
+- [PBTS-CLOCK-GROW.0] (**TODO:** is that enough?)
+
+then eventually there is a valid commit `c` for height `k`.
+
+
+### REAL-TIME SAFETY
+
+> We want to give a property that can be exploited from the outside, that is, given a block with some time stored in it, what is the estimate of the real time at which the block was generated. To do so, we need to link clock-time to real-time, which is not the case with [PBTS-CLOCK.0]. For this, we introduce the following assumption on the clocks:
+
+#### **[PBTS-CLOCKSYNC-EXTERNAL.0]**
+
+There is a system parameter `ACCURACY`, such that for all real-times `t` and all correct validators `V`,
+
+- `| C_V(t) - t | < ACCURACY`.
+
+> `ACCURACY` is not necessarily visible at the code level. The properties below just show that the smaller
+its value, the closer the block time will be to real-time.
+
+#### **[PBTS-CONSENSUS-PTIME.0]**
+
+Let `m` be a `propose` message. We consider the following two real-times, `proposalTime(m)` and `propRecvTime(m)`:
+
+- if the proposer is correct and sends `m` at time `t`, we write `proposalTime(m)` for real-time `t`.
+- if the first correct validator receives `m` at time `t`, we write `propRecvTime(m)` for real-time `t`.
+
+
+#### **[PBTS-CONSENSUS-REALTIME-VALID.0]**
+
+Let `b` be a block with a valid commit that contains at least one `precommit` message by a correct validator (and `proposalTime` is the time for the height/round `propose` message `m` that triggered the `precommit`). Then:
+
+`propRecvTime(m) - ACCURACY - PRECISION < b.time < propRecvTime(m) + ACCURACY + PRECISION + MSGDELAY`
+
+
+#### **[PBTS-CONSENSUS-REALTIME-VALID-CORR.0]**
+
+Let `b` be a block with a valid commit that contains at least one `precommit` message by a correct validator (and `proposalTime` is the time for the height/round `propose` message `m` that triggered the `precommit`). Then, if the proposer is correct:
+
+`proposalTime(m) - ACCURACY < b.time < proposalTime(m) + ACCURACY`
+
+> by the algorithm, at time `proposalTime(m)` the proposer fixes `m.time <- now_p(proposalTime(m))`
+
+> "triggered the `PRECOMMIT`" implies that the data in `m` and `b` are "matching", that is, `m` proposed the values that are actually stored in `b`.
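+
+As an illustration of [PBTS-CONSENSUS-REALTIME-VALID.0], the following Go sketch computes the real-time window that `b.time` must fall into, given the real time at which the first correct validator received the `propose` message. The parameter names and values are assumptions of this sketch, not part of the specification.
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+)
+
+// Illustrative values for ACCURACY, PRECISION and MSGDELAY.
+const (
+	accuracy  = 100 * time.Millisecond
+	precision = 500 * time.Millisecond
+	msgDelay  = 2 * time.Second
+)
+
+// blockTimeWindow returns the open interval (lo, hi) that must contain b.time
+// according to [PBTS-CONSENSUS-REALTIME-VALID.0].
+func blockTimeWindow(propRecvTime time.Time) (lo, hi time.Time) {
+	lo = propRecvTime.Add(-(accuracy + precision))
+	hi = propRecvTime.Add(accuracy + precision + msgDelay)
+	return lo, hi
+}
+
+func main() {
+	recv := time.Date(2022, 3, 1, 12, 0, 0, 0, time.UTC)
+	lo, hi := blockTimeWindow(recv)
+	fmt.Println(lo, hi) // 11:59:59.4 and 12:00:02.6 on the same day
+}
+```
+
+Back to [main document][main_v1].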
+
+[main_v1]: ./pbts_001_draft.md
+
+[algorithm_v1]: ./pbts-algorithm_001_draft.md
+
+[sysmodel]: ../pbts-sysmodel_002_draft.md
+
+[arXiv]: https://arxiv.org/abs/1807.04938
diff --git a/spec/consensus/proposer-based-timestamp/v1/pbts_001_draft.md b/spec/consensus/proposer-based-timestamp/v1/pbts_001_draft.md
new file mode 100644
index 0000000000..21d7d6a2ae
--- /dev/null
+++ b/spec/consensus/proposer-based-timestamp/v1/pbts_001_draft.md
@@ -0,0 +1,267 @@
+
+
+# Proposer-Based Time (first draft)
+
+## Current BFTTime
+
+### Description
+
+In Tendermint consensus, the first version of how time is computed and stored in a block works as follows:
+
+- validators send their current local time as part of `precommit` messages
+- upon collecting the `precommit` messages that the proposer uses to build a commit to be put in the next block, the proposer computes the `time` of the next block as the median (weighted over voting power) of the times in the `precommit` messages.
+
+### Analysis
+
+1. **Fault tolerance.** The computed median time is called [`bfttime`][bfttime] as it is indeed fault-tolerant: if **less than a third** of the validators are faulty (counted in voting power), it is guaranteed that the computed time lies between the minimum and the maximum times sent by correct validators.
+1. **Effect of faulty validators.** If more than `1/2` of the voting power (which is in fact more than one third and less than two thirds of the voting power) is held by faulty validators, then the time is under total control of the faulty validators. (This is particularly challenging in the context of [lightclient][lcspec] security.)
+1. **Proposer influence on block time.** The proposer of the next block has a degree of freedom in choosing the `bfttime`, since it computes the median time based on the timestamps from `precommit` messages sent by
+ `2f + 1` correct validators.
+ 1. If there are `n` different timestamps in the `precommit` messages, the proposer can use any subset of timestamps that add up to `2f + 1`
+ of the voting power in order to compute the median.
+ 1. If the validators decide in different rounds, the proposer can decide on which round the median computation is based.
+1. **Liveness.** The liveness of the protocol:
+ 1. does not depend on clock synchronization,
+ 1. depends on bounded message delays.
+1. **Relation to real time.** There is no clock synchronization, which implies that there is **no relation** between the computed block `time` and real time.
+1. **Aggregate signatures.** As the `precommit` messages contain the local times, all these `precommit` messages typically differ in the time field, which **prevents** the use of aggregate signatures.
+
+## Suggested Proposer-Based Time
+
+### Outline
+
+An alternative approach to time has been discussed: Rather than having the validators send the time in the `precommit` messages, the proposer in the consensus algorithm sends its time in the `propose` message, and the validators locally check whether the time is OK (by comparing to their local clock).
+
+This proposed solution adds the requirement of having synchronized clocks, and other implicit assumptions.
+
+### Comparison of the Suggested Method to the Old One
+
+1. **Fault tolerance.** Maintained in the suggested protocol.
+1. **Effect of faulty validators.** Eliminated in the suggested protocol,
+ that is, the block `time` can be corrupted only in the extreme case when
+ `>2/3` of the validators are faulty.
+1. 
**Proposer influence on block time.** The proposer of the next block
+ has less freedom when choosing the block time.
+ 1. The first scenario (the choice among timestamp subsets) is eliminated in the suggested protocol, provided that there are `<1/3` faulty validators.
+ 1. The second scenario (the choice of the decision round) is still there.
+1. **Liveness.** The liveness of the suggested protocol:
+ 1. depends on the introduced assumptions on synchronized clocks (see below),
+ 1. still depends on the message delays (unavoidable).
+1. **Relation to real time.** We formalize clock synchronization, and obtain a **well-defined relation** between the block `time` and real time.
+1. **Aggregate signatures.** The `precommit` messages are free of time, which **allows** the use of aggregate signatures.
+
+### Protocol Overview
+
+#### Proposed Time
+
+We assume that the field `proposal` in the `PROPOSE` message is a pair `(v, time)` of the proposed consensus value `v` and the proposed time `time`.
+
+#### Reception Step
+
+In the reception step at node `p` at local time `now_p`, upon receiving a message `m`:
+
+- **if** the message `m` is of type `PROPOSE` and satisfies `now_p - PRECISION < m.time < now_p + PRECISION + MSGDELAY`, then mark the message as `timely`.
+(`PRECISION` and `MSGDELAY` being system parameters, see [below](#safety-and-liveness))
+
+> after the presentation in the dev session, we realized that a different semantics for the reception step is more closely aligned with the implementation. Instead of dropping propose messages, we keep all of them, and mark the timely ones.
+
+#### Processing Step
+
+- Start round
+
+**arXiv paper:**
+
+```go
+function StartRound(round) {
+ round_p ← round
+ step_p ← propose
+ if proposer(h_p, round_p) = p {
+ if validValue_p != nil {
+ proposal ← validValue_p
+ } else {
+ proposal ← getValue()
+ }
+ broadcast ⟨PROPOSAL, h_p, round_p, proposal, validRound_p⟩
+ } else {
+ schedule OnTimeoutPropose(h_p,round_p) to
+ be executed after timeoutPropose(round_p)
+ }
+}
+```
+
+**Proposer-based time:**
+
+```go
+function StartRound(round) {
+ round_p ← round
+ step_p ← propose
+ if proposer(h_p, round_p) = p {
+ // new wait condition
+ wait until now_p > block time of block h_p - 1
+ if validValue_p != nil {
+ // add "now_p"
+ proposal ← (validValue_p, now_p)
+ } else {
+ // add "now_p"
+ proposal ← (getValue(), now_p)
+ }
+ broadcast ⟨PROPOSAL, h_p, round_p, proposal, validRound_p⟩
+ } else {
+ schedule OnTimeoutPropose(h_p,round_p) to
+ be executed after timeoutPropose(round_p)
+ }
+}
+```
+
+
+- Rule on lines 28-35
+
+**arXiv paper:**
+
+```go
+upon ⟨PROPOSAL, h_p, round_p, v, vr⟩
+ from proposer(h_p, round_p)
+ AND 2f + 1 ⟨PREVOTE, h_p, vr, id(v)⟩
+while step_p = propose ∧ (vr ≥ 0 ∧ vr < round_p) do {
+ if valid(v) ∧ (lockedRound_p ≤ vr ∨ lockedValue_p = v) {
+ broadcast ⟨PREVOTE, h_p, round_p, id(v)⟩
+ } else {
+ broadcast ⟨PREVOTE, h_p, round_p, nil⟩
+ }
+ step_p ← prevote
+}
```

+**Proposer-based time:**
+
+```go
+upon timely(⟨PROPOSAL, h_p, round_p, (v, tprop), vr⟩)
+ from proposer(h_p, round_p)
+ AND 2f + 1 ⟨PREVOTE, h_p, vr, id(v, tvote)⟩
+ while step_p = propose ∧ (vr ≥ 0 ∧ vr < round_p) do {
+ if valid(v) ∧ (lockedRound_p ≤ vr ∨ lockedValue_p = v) {
+ // send hash of v and tprop in PREVOTE message
+ broadcast ⟨PREVOTE, h_p, round_p, id(v, tprop)⟩
+ } else {
+ broadcast ⟨PREVOTE, h_p, round_p, nil⟩
+ }
+ step_p ← prevote
+ }
+```
+
+
+- Rule on lines 49-54
+
+**arXiv paper:**
+
+```go
+upon ⟨PROPOSAL, h_p, r, v, ∗⟩ from proposer(h_p, r)
+ AND 2f + 1 ⟨PRECOMMIT, h_p, r, id(v)⟩
+ while decision_p[h_p] = nil do {
+ if valid(v) {
+ decision_p[h_p] = v
+ h_p ← h_p + 1
+ reset lockedRound_p, lockedValue_p, validRound_p and
+ validValue_p to initial values and empty message log
+ StartRound(0)
+ }
+ }
+```
+
+**Proposer-based time:**
+
+```go
+upon ⟨PROPOSAL, h_p, r, (v,t), ∗⟩ from proposer(h_p, r)
+ AND 2f + 1 ⟨PRECOMMIT, h_p, r, id(v,t)⟩
+ while decision_p[h_p] = nil do {
+ if valid(v) {
+ // decide on time too
+ decision_p[h_p] = (v,t)
+ h_p ← h_p + 1
+ reset lockedRound_p, lockedValue_p, validRound_p and
+ validValue_p to initial values and empty message log
+ StartRound(0)
+ }
+ }
+```
+
+
+- Other rules are extended in a similar way, or remain unchanged
+
+### Property Overview
+
+#### Safety and Liveness
+
+For safety (Point 1, Point 2, Point 3i) and liveness (Point 4) we need
+the following assumptions:
+
+- There exists a system parameter `PRECISION` such that for any two correct validators `V` and `W`, and at any real-time `t`, their local times `C_V(t)` and `C_W(t)` differ by less than `PRECISION` time units,
+i.e., `|C_V(t) - C_W(t)| < PRECISION`
+- The message end-to-end delay between a correct proposer and a correct validator (for `PROPOSE` messages) is less than `MSGDELAY`.
+
+#### Relation to Real-Time
+
+For analyzing real-time safety (Point 5), we use a system parameter `ACCURACY`, such that for all real-times `t` and all correct validators `V`, we have `| C_V(t) - t | < ACCURACY`.
+
+> `ACCURACY` is not necessarily visible at the code level. We might even view `ACCURACY` as variable over time. The smaller it is during a consensus instance, the closer the block time will be to real-time.
+>
+> Note that `PRECISION` and `MSGDELAY` show up in the code.
+
+### Detailed Specification
+
+This specification describes the changes that need to be made to the Tendermint consensus algorithm as described in the [arXiv paper][arXiv] and the simplified specification in [TLA+][tlatender], and makes precise the underlying assumptions and the required properties.
+
+- [Part I - System Model and Properties][sysmodel_v1]
+- [Part II - Protocol specification][algorithm_v1]
+- [TLA+ Specification][proposertla]
+
+[algorithm_v1]: ./pbts-algorithm_001_draft.md
+
+[sysmodel_v1]: ./pbts-sysmodel_001_draft.md
+
+[proposertla]: ../tla/TendermintPBT_001_draft.tla
+
+[bfttime]: ../../bft-time.md
+[tlatender]: https://github.com/tendermint/spec/blob/master/rust-spec/tendermint-accountability/README.md
+[lcspec]: ../../light-client/
+[arXiv]: https://arxiv.org/abs/1807.04938
diff --git a/spec/consensus/proposer-selection.md b/spec/consensus/proposer-selection.md
new file mode 100644
index 0000000000..3cea3d5cde
--- /dev/null
+++ b/spec/consensus/proposer-selection.md
@@ -0,0 +1,323 @@
+---
+order: 3
+---
+
+# Proposer Selection Procedure
+
+This document specifies the Proposer Selection Procedure that is used in Tendermint to choose a round proposer.
+As Tendermint is a “leader-based” protocol, the proposer selection is critical for its correct functioning.
+
+At a given block height, the proposer selection algorithm runs with the same validator set at each round.
+Between heights, an updated validator set may be specified by the application as part of the ABCIResponses' EndBlock.
+
+## Requirements for Proposer Selection
+
+This section covers the requirements, with Rx denoting mandatory requirements and Ox optional ones.
+The following requirements must be met by the Proposer Selection procedure:
+
+### R1: Determinism
+
+Given a validator set `V`, and two honest validators `p` and `q`, for each height `h` and each round `r` the following must hold:
+
+ `proposer_p(h,r) = proposer_q(h,r)`
+
+where `proposer_p(h,r)` is the proposer returned by the Proposer Selection Procedure at process `p`, at height `h` and round `r`.
+
+### R2: Fairness
+
+Given a validator set with total voting power P and a sequence S of elections, in any sub-sequence of S with length C*P a validator v must be elected as proposer once every P/VP(v) elections on average, i.e.
with frequency:
+
+ f(v) ~ VP(v) / P
+
+where C is a tolerance factor for validator set changes with the following values:
+
+- C == 1 if there are no validator set changes
+- C ~ k when there are validator changes
+
+*[this needs more work]*
+
+## Basic Algorithm
+
+At its core, the proposer selection procedure uses a weighted round-robin algorithm.
+
+A model that gives a good intuition on how and why the selection algorithm works, and why it is fair, is that of a priority queue. The validators move ahead in this queue according to their voting power (the higher the voting power, the faster a validator moves towards the head of the queue). When the algorithm runs, the following happens:
+
+- all validators move "ahead" according to their powers: for each validator, increase the priority by the voting power
+- first in the queue becomes the proposer: select the validator with highest priority
+- move the proposer back in the queue: decrease the proposer's priority by the total voting power
+
+Notation:
+
+- vset - the validator set
+- n - the number of validators
+- VP(i) - voting power of validator i
+- A(i) - accumulated priority for validator i
+- P - total voting power of set
+- avg - average of all validator priorities
+- prop - proposer
+
+A simple view of the Selection Algorithm:
+
+```md
+ def ProposerSelection (vset):
+
+ // compute priorities and elect proposer
+ for each validator i in vset:
+ A(i) += VP(i)
+ prop = max(A)
+ A(prop) -= P
+```
+
+## Stable Set
+
+Consider the validator set:
+
+Validator | p1 | p2
+----------|----|---
+VP | 1 | 3
+
+Assuming no validator changes, the following table shows the proposer priority computation over a few runs. Four runs of the selection procedure are shown; starting with the 5th run, the same values are computed.
+Each row shows the priority queue and each process's place in it. The proposer is the closest to the head, the rightmost validator. As priorities are updated, the validators move right in the queue. The proposer moves left as its priority is reduced after election.
+
+| Priority Run | -2 | -1 | 0 | 1 | 2 | 3 | 4 | 5 | Alg step |
+|----------------|----|----|-------|----|-------|----|----|----|------------------|
+| | | | p1,p2 | | | | | | Initialized to 0 |
+| run 1 | | | | p1 | | p2 | | | A(i)+=VP(i) |
+| | | p2 | | p1 | | | | | A(p2)-= P |
+| run 2 | | | | | p1,p2 | | | | A(i)+=VP(i) |
+| | p1 | | | | p2 | | | | A(p1)-= P |
+| run 3 | | p1 | | | | | | p2 | A(i)+=VP(i) |
+| | | p1 | | p2 | | | | | A(p2)-= P |
+| run 4 | | | p1 | | | | p2 | | A(i)+=VP(i) |
+| | | | p1,p2 | | | | | | A(p2)-= P |
+
+It can be shown that:
+
+- At the end of each run k+1 the sum of the priorities is the same as at the end of run k. If a new set's priorities are initialized to 0, then the sum of priorities will be 0 at each run while there are no changes.
+- The max distance between priorities is (n-1) * P. *[formal proof not finished]*
+
+## Validator Set Changes
+
+Between proposer selection runs the validator set may change. Some changes have implications on the proposer election.
+
+### Voting Power Change
+
+Consider again the earlier example and assume that the voting power of p1 is changed to 4:
+
+Validator | p1 | p2
+----------|----|---
+VP | 4 | 3
+
+Let's also assume that before this change the proposer priorities were as shown in the first row (last run). As can be seen, the selection could run again, without changes, as before.
+
+| Priority Run | -2 | -1 | 0 | 1 | 2 | Comment |
+|----------------|----|----|---|----|----|-------------------|
+| last run | | p2 | | p1 | | __update VP(p1)__ |
+| next run | | | | | p2 | A(i)+=VP(i) |
+| | p1 | | | | p2 | A(p1)-= P |
+
+However, when a validator changes power from a high to a low value, some other validators remain far back in the queue for a long time. This scenario is considered again in the Proposer Priority Range section.
+
+As before:
+
+- At the end of each run k+1 the sum of the priorities is the same as at run k.
+- The max distance between priorities is (n-1) * P.
+
+### Validator Removal
+
+Consider a new example with set:
+
+Validator | p1 | p2 | p3
+----------|----|----|---
+VP | 1 | 2 | 3
+
+Let's assume that after the last run the proposer priorities were as shown in the first row with their sum being 0. After p2 is removed, at the end of the next proposer selection run (penultimate row) the sum of priorities is -2 (minus the priority of the removed process).
+
+The procedure could continue without modifications. However, after a sufficiently large number of modifications in the validator set, the priority values would migrate towards maximum or minimum allowed values, causing truncations due to overflow detection.
+For this reason, the selection procedure adds another __new step__ that centers the current priority values such that the priority sum remains close to 0.
+
+| Priority Run | -3 | -2 | -1 | 0 | 1 | 2 | 4 | Comment |
+|----------------|----|----|----|---|----|----|---|-----------------------|
+| last run | p3 | | | | p1 | p2 | | __remove p2__ |
+| next run | | | | | | | | |
+| __new step__ | | p3 | | | | p1 | | A(i) -= avg, avg = -1 |
+| | | | | | p3 | p1 | | A(i)+=VP(i) |
+| | | | p1 | | p3 | | | A(p1)-= P |
+
+The modified selection algorithm is:
+
+```md
+ def ProposerSelection (vset):
+
+ // center priorities around zero
+ avg = sum(A(i) for i in vset)/len(vset)
+ for each validator i in vset:
+ A(i) -= avg
+
+ // compute priorities and elect proposer
+ for each validator i in vset:
+ A(i) += VP(i)
+ prop = max(A)
+ A(prop) -= P
+```
+
+Observations:
+
+- The sum of priorities is now close to 0. Due to integer division the sum is an integer in (-n, n), where n is the number of validators.
+
+### New Validator
+
+When a new validator is added, the same problem as the one described for removal appears: the sum of priorities in the new set is not zero. This is fixed with the centering step introduced above.
+
+One other issue that needs to be addressed is the following. A validator V that has just been elected is moved to the end of the queue. If the validator set is large and/or other validators have significantly higher power, V will have to wait many runs to be elected. If V removes and re-adds itself to the set, it would make a significant (albeit unfair) "jump" ahead in the queue.
+
+In order to prevent this, when a new validator is added, its initial priority is set to:
+
+```md
+ A(V) = -1.125 * P
+```
+
+where P is the total voting power of the set including V.
+
+The current implementation uses the penalty factor of 1.125 because it provides a small punishment that is efficient to calculate. See [here](https://github.com/tendermint/tendermint/pull/2785#discussion_r235038971) for more details.
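+
+As a sketch of how this penalty can be computed in integer arithmetic (assuming `1.125 * P` is implemented as `P + P/8`; the function name is illustrative):
+
+```go
+// newValidatorPriority computes -1.125*P as -(P + P/8) in integer arithmetic.
+// For P < 8 the P/8 term truncates to zero, which is the penalty loss
+// noted below.
+func newValidatorPriority(totalPower int64) int64 {
+	return -(totalPower + totalPower/8)
+}
+```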
+
+If we consider the validator set where p3 has just been added:
+
+Validator | p1 | p2 | p3
+----------|----|----|---
+VP | 1 | 3 | 8
+
+then p3 will start with proposer priority:
+
+```md
+ A(p3) = -1.125 * (1 + 3 + 8) ~ -13
+```
+
+Note that since the current computation uses integer division, there is a penalty loss when the sum of the voting power is less than 8.
+
+In the next run, p3 will move ahead in the queue, while p1 is elected as proposer and moved back in the queue.
+
+| Priority Run | -13 | -9 | -5 | -2 | -1 | 0 | 1 | 2 | 5 | 6 | 7 | Alg step |
+|----------------|-----|----|----|----|----|---|---|----|----|----|----|-----------------------|
+| last run | | | | p2 | | | | p1 | | | | __add p3__ |
+| | p3 | | | p2 | | | | p1 | | | | A(p3) = -13 |
+| next run | | p3 | | | | | | p2 | | p1 | | A(i) -= avg, avg = -4 |
+| | | | | | p3 | | | | p2 | | p1 | A(i)+=VP(i) |
+| | | | p1 | | p3 | | | | p2 | | | A(p1)-=P |
+
+## Proposer Priority Range
+
+With the introduction of centering, some interesting cases occur. Low power validators that join early in a set that includes high power validator(s) benefit from subsequent additions to the set. This is because these early validators run through more right shift operations during centering, operations that increase their priority.
+
+As an example, consider the set where p2 is added after p1, with priority -1.125 * 80k = -90k. After the selection procedure runs once:
+
+Validator | p1 | p2 | Comment
+----------|------|------|------------------
+VP | 80k | 10 |
+A | 0 | -90k | __added p2__
+A | 45k | -45k | __run selection__
+
+Then execute the following steps:
+
+1. Add a new validator p3:
+
+ Validator | p1 | p2 | p3
+ ----------|-----|----|---
+ VP | 80k | 10 | 10
+
+2. Run selection once. The notation '..p'/'p..' means very small deviations compared to column priority.
+
+ | Priority Run | -90k.. | -60k | -45k | -15k | 0 | 45k | 75k | 155k | Comment |
+ |------------------------|--------|------|------|------|---|-----|------|------|-----------------------|
+ | last run | p3 | | p2 | | | p1 | | | __added p3__ |
+ | next run *right_shift* | | p3 | | p2 | | | p1 | | A(i) -= avg, avg=-30k |
+ | | | ..p3 | | ..p2 | | | | p1 | A(i)+=VP(i) |
+ | | | ..p3 | | ..p2 | | | p1.. | | A(p1)-=P, P=80k+20 |
+
+3. Remove p1 and run selection once:
+
+ Validator | p3 | p2 | Comment
+ ----------|--------|-------|------------------
+ VP | 10 | 10 |
+ A | -60k | -15k |
+ A | -22.5k | 22.5k | __run selection__
+
+At this point, while the total voting power is 20, the distance between priorities is 45k. It will take 4500 runs for p3 to catch up with p2.
+
+In order to prevent these types of scenarios, the selection algorithm performs scaling of priorities such that the difference between min and max values is smaller than two times the total voting power.
+
+The modified selection algorithm is:
+
+```md
+ def ProposerSelection (vset):
+
+ // scale the priority values
+ diff = max(A)-min(A)
+ threshold = 2 * P
+ if diff > threshold:
+ scale = diff/threshold
+ for each validator i in vset:
+ A(i) = A(i)/scale
+
+ // center priorities around zero
+ avg = sum(A(i) for i in vset)/len(vset)
+ for each validator i in vset:
+ A(i) -= avg
+
+ // compute priorities and elect proposer
+ for each validator i in vset:
+ A(i) += VP(i)
+ prop = max(A)
+ A(prop) -= P
+```
+
+Observations:
+
+- With this modification, the maximum distance between priorities becomes 2 * P.
+
+Note also that even during steady state the priority range may temporarily increase beyond 2 * P. The scaling introduced here helps to keep the range bounded.
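+
+For concreteness, the following is a minimal, runnable Go sketch of the modified selection procedure above (scaling, centering, then election). It mirrors the pseudocode only; the type and function names are this sketch's own, and ties are broken by slice order, which matches the tables above (the run-2 tie goes to p1).
+
+```go
+package main
+
+import "fmt"
+
+type validator struct {
+	name     string
+	power    int64 // VP(i)
+	priority int64 // A(i)
+}
+
+// proposerSelection mutates the priorities in vset according to the modified
+// algorithm and returns the index of the elected proposer.
+func proposerSelection(vset []validator) int {
+	var total int64
+	for _, v := range vset {
+		total += v.power
+	}
+
+	// Scale the priority values so that max(A)-min(A) <= 2*P.
+	lo, hi := vset[0].priority, vset[0].priority
+	for _, v := range vset {
+		if v.priority < lo {
+			lo = v.priority
+		}
+		if v.priority > hi {
+			hi = v.priority
+		}
+	}
+	if diff, threshold := hi-lo, 2*total; diff > threshold {
+		scale := diff / threshold
+		for i := range vset {
+			vset[i].priority /= scale
+		}
+	}
+
+	// Center priorities around zero.
+	var sum int64
+	for _, v := range vset {
+		sum += v.priority
+	}
+	avg := sum / int64(len(vset))
+	for i := range vset {
+		vset[i].priority -= avg
+	}
+
+	// Add each validator's voting power, then elect the maximum
+	// and decrease its priority by the total voting power.
+	prop := 0
+	for i := range vset {
+		vset[i].priority += vset[i].power
+		if vset[i].priority > vset[prop].priority {
+			prop = i
+		}
+	}
+	vset[prop].priority -= total
+	return prop
+}
+
+func main() {
+	vset := []validator{{"p1", 1, 0}, {"p2", 3, 0}}
+	for i := 0; i < 8; i++ {
+		fmt.Print(vset[proposerSelection(vset)].name, " ")
+	}
+	fmt.Println() // Output: p2 p1 p2 p2 p2 p1 p2 p2
+}
+```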
+
+## Wrinkles
+
+### Validator Power Overflow Conditions
+
+The validator voting power is a positive number stored as an int64. When a validator is added, the `1.125 * P` computation must not overflow. As a consequence, the code handling validator updates (add and update) checks for overflow conditions, making sure the total voting power is never larger than a bound `MAX` with the property that `1.125 * MAX` is still within the bounds of int64. A fatal error is returned when an overflow condition is detected.
+
+### Proposer Priority Overflow/Underflow Handling
+
+The proposer priority is stored as an int64. The selection algorithm performs additions and subtractions to these values and, in the case of overflows and underflows, it limits the values to:
+
+```go
+ MaxInt64 = 1 << 63 - 1
+ MinInt64 = -1 << 63
+```
+
+## Requirement Fulfillment Claims
+
+__[R1]__
+
+The proposer algorithm is deterministic, giving consistent results across executions with the same transactions and validator set modifications.
+[WIP - needs more detail]
+
+__[R2]__
+
+Given a set of processes with the total voting power P, during a sequence of elections of length P, the number of times any process is selected as proposer is equal to its voting power. The sequence of the P proposers then repeats. If we consider the validator set:
+
+Validator | p1 | p2
+----------|----|---
+VP | 1 | 3
+
+With no other changes to the validator set, the current implementation of proposer selection generates the sequence:
+`p2, p1, p2, p2, p2, p1, p2, p2,...` or [`p2, p1, p2, p2`]*
+A sequence that starts with any circular permutation of the [`p2, p1, p2, p2`] sub-sequence would also provide the same degree of fairness. In fact, these circular permutations appear in the sliding window (over the generated sequence) of size equal to the length of the sub-sequence.
+
+Assigning priorities to each validator based on the voting power and updating them at each run ensures the fairness of the proposer selection. In addition, every time a validator is elected as proposer, its priority is decreased by the total voting power.
+
+Intuitively, a process v jumps ahead in the queue at most (max(A) - min(A))/VP(v) times until it reaches the head and is elected. The frequency is then:
+
+```md
+ f(v) ~ VP(v)/(max(A)-min(A)) = 1/k * VP(v)/P
+```
+
+For the current implementation, this means v should be proposer at least VP(v) times out of k * P runs, with scaling factor k=2.
diff --git a/spec/consensus/readme.md b/spec/consensus/readme.md
new file mode 100644
index 0000000000..4e13b7a9b5
--- /dev/null
+++ b/spec/consensus/readme.md
@@ -0,0 +1,32 @@
+---
+order: 1
+parent:
+ title: Consensus
+ order: 4
+---
+
+# Consensus
+
+Specification of the Tendermint consensus protocol.
+
+## Contents
+
+- [Consensus Paper](./consensus-paper) - Latex paper on
+ [arxiv](https://arxiv.org/abs/1807.04938) describing the
+ core Tendermint consensus state machine with proofs of safety and termination.
+- [BFT Time](./bft-time.md) - How the timestamp in a Tendermint
+ block header is computed in a Byzantine Fault Tolerant manner
+- [Creating Proposal](./creating-proposal.md) - How a proposer
+ creates a block proposal for consensus
+- [Light Client Protocol](./light-client) - A protocol for lightweight consensus
+ verification and syncing to the latest state
+- [Signing](./signing.md) - Rules for cryptographic signatures
+ produced by validators.
+- [Write Ahead Log](./wal.md) - Write ahead log used by the
+ consensus state machine to recover from crashes.
+
+The protocol used to gossip consensus messages between peers, which is critical
+for liveness, is described in the [reactors section](./consensus.md).
+
+There is also a [stale markdown description](consensus.md) of the consensus state machine
+(TODO update this).
diff --git a/spec/consensus/signing.md b/spec/consensus/signing.md
new file mode 100644
index 0000000000..907a5a01af
--- /dev/null
+++ b/spec/consensus/signing.md
@@ -0,0 +1,229 @@
+# Validator Signing
+
+Here we specify the rules for validating a proposal and a vote before signing.
+First we include some general notes on validating data structures common to both types.
+We then provide specific validation rules for each. Finally, we include validation rules to prevent double-signing.
+
+## SignedMsgType
+
+The `SignedMsgType` is a single byte that refers to the type of the message
+being signed. It is defined in Go as follows:
+
+```go
+// SignedMsgType is a type of signed message in the consensus.
+type SignedMsgType byte
+
+const (
+ // Votes
+ PrevoteType SignedMsgType = 0x01
+ PrecommitType SignedMsgType = 0x02
+
+ // Proposals
+ ProposalType SignedMsgType = 0x20
+)
+```
+
+All signed messages must correspond to one of these types.
+
+## Timestamp
+
+Timestamp validation is subtle and there are currently no bounds placed on the
+timestamp included in a proposal or vote. It is expected that validators will honestly
+report their local clock time. The median of all timestamps
+included in a commit is used as the timestamp for the next block height.
+
+Timestamps are expected to be strictly monotonic for a given validator, though
+this is not currently enforced.
+
+## ChainID
+
+ChainID is an unstructured string with a max length of 50 bytes.
+In the future, the ChainID may become structured, and may take on longer lengths.
+For now, it is recommended that signers be configured for a particular ChainID,
+and to only sign votes and proposals corresponding to that ChainID.
+
+## BlockID
+
+BlockID is the structure used to represent the block:
+
+```go
+type BlockID struct {
+ Hash []byte
+ PartsHeader PartSetHeader
+}
+
+type PartSetHeader struct {
+ Hash []byte
+ Total int
+}
+```
+
+To be included in a valid vote or proposal, BlockID must either represent a `nil` block, or a complete one.
+We introduce two methods, `BlockID.IsZero()` and `BlockID.IsComplete()` for these cases, respectively.
+
+`BlockID.IsZero()` returns true for BlockID `b` if each of the following
+are true:
+
+```go
+b.Hash == nil
+b.PartsHeader.Total == 0
+b.PartsHeader.Hash == nil
+```
+
+`BlockID.IsComplete()` returns true for BlockID `b` if each of the following
+are true:
+
+```go
+len(b.Hash) == 32
+b.PartsHeader.Total > 0
+len(b.PartsHeader.Hash) == 32
+```
+
+## Proposals
+
+The structure of a proposal for signing looks like:
+
+```go
+type CanonicalProposal struct {
+ Type SignedMsgType // type alias for byte
+ Height int64 `binary:"fixed64"`
+ Round int64 `binary:"fixed64"`
+ POLRound int64 `binary:"fixed64"`
+ BlockID BlockID
+ Timestamp time.Time
+ ChainID string
+}
+```
+
+A proposal is valid if each of the following lines evaluates to true for proposal `p`:
+
+```go
+p.Type == 0x20
+p.Height > 0
+p.Round >= 0
+p.POLRound >= -1
+p.BlockID.IsComplete()
+```
+
+In other words, a proposal is valid for signing if it contains the type of a Proposal
+(0x20), has a positive, non-zero height, a
+non-negative round, a POLRound not less than -1, and a complete BlockID.
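+
+As a sketch, these rules transcribe directly into a Go predicate (using the
+`CanonicalProposal` and `BlockID` definitions above; the function name is
+illustrative):
+
+```go
+// validProposal checks the proposal signing rules listed above.
+func validProposal(p CanonicalProposal) bool {
+	return p.Type == 0x20 &&
+		p.Height > 0 &&
+		p.Round >= 0 &&
+		p.POLRound >= -1 &&
+		p.BlockID.IsComplete()
+}
+```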
+
+## Votes
+
+The structure of a vote for signing looks like:
+
+```go
+type CanonicalVote struct {
+ Type SignedMsgType // type alias for byte
+ Height int64 `binary:"fixed64"`
+ Round int64 `binary:"fixed64"`
+ BlockID BlockID
+ Timestamp time.Time
+ ChainID string
+}
+```
+
+A vote is valid if each of the following lines evaluates to true for vote `v`:
+
+```go
+v.Type == 0x1 || v.Type == 0x2
+v.Height > 0
+v.Round >= 0
+v.BlockID.IsZero() || v.BlockID.IsComplete()
+```
+
+In other words, a vote is valid for signing if it contains the type of a Prevote
+or Precommit (0x1 or 0x2, respectively), has a positive, non-zero height, a
+non-negative round, and an empty or valid BlockID.
+
+## Invalid Votes and Proposals
+
+Votes and proposals which do not satisfy the above rules are considered invalid.
+Peers gossiping invalid votes and proposals may be disconnected from other peers on the network.
+Note, however, that there is not currently any explicit mechanism to punish validators signing votes or proposals that fail
+these basic validation rules.
+
+## Double Signing
+
+Signers must be careful not to sign conflicting messages, also known as "double signing" or "equivocating".
+Tendermint has mechanisms to publish evidence of validators that signed conflicting votes, so they can be punished
+by the application. Note Tendermint does not currently handle evidence of conflicting proposals, though it may in the future.
+
+### State
+
+To prevent such double signing, signers must track the height, round, and type of the last message signed.
+Assume the signer keeps the following state, `s`:
+
+```go
+type LastSigned struct {
+ Height int64
+ Round int64
+ Type SignedMsgType // byte
+}
+```
+
+After signing a vote or proposal `m`, the signer sets:
+
+```go
+s.Height = m.Height
+s.Round = m.Round
+s.Type = m.Type
+```
+
+### Proposals
+
+A signer should only sign a proposal `p` if any of the following lines are true:
+
+```go
+p.Height > s.Height
+p.Height == s.Height && p.Round > s.Round
+```
+
+In other words, a proposal should only be signed if it's at a higher height, or a higher round for the same height.
+Once a proposal or vote has been signed for a given height and round, a proposal should never be signed for the same height and round.
+
+### Votes
+
+A signer should only sign a vote `v` if any of the following lines are true:
+
+```go
+v.Height > s.Height
+v.Height == s.Height && v.Round > s.Round
+v.Height == s.Height && v.Round == s.Round && v.Type == 0x1 && s.Type == 0x20
+v.Height == s.Height && v.Round == s.Round && v.Type == 0x2 && s.Type != 0x2
+```
+
+In other words, a vote should only be signed if it's:
+
+- at a higher height
+- at a higher round for the same height
+- a prevote for the same height and round where we haven't signed a prevote or precommit (but have signed a proposal)
+- a precommit for the same height and round where we haven't signed a precommit (but have signed a proposal and/or a prevote)
+
+This means that once a validator signs a prevote for a given height and round, the only other message it can sign for that height and round is a precommit.
+And once a validator signs a precommit for a given height and round, it must not sign any other message for that same height and round.
+
+Note this includes votes for `nil`, ie. where `BlockID.IsZero()` is true. If a
+signer has already signed a vote where `BlockID.IsZero()` is true, it cannot
+sign another vote with the same type for the same height and round where
+`BlockID.IsComplete()` is true.
+ Thus only a single vote of a particular type
+(ie. 0x01 or 0x02) can be signed for the same height and round.
+
+### Other Rules
+
+According to the rules of Tendermint consensus, once a validator precommits for
+a block, they become "locked" on that block, which means they can't prevote for
+another block unless they see sufficient justification (ie. a polka from a
+higher round). For more details, see the [consensus
+spec](https://arxiv.org/abs/1807.04938).
+
+Violating this rule is known as "amnesia". In contrast to equivocation,
+which is easy to detect, amnesia is difficult to detect without access to votes
+from all the validators, as this is what constitutes the justification for
+"unlocking". Hence, amnesia is not punished within the protocol, and cannot
+easily be prevented by a signer. If enough validators simultaneously commit an
+amnesia attack, they may cause a fork of the blockchain, at which point an
+off-chain protocol must be engaged to collect votes from all the validators and
+determine who misbehaved. For more details, see [fork
+detection](https://github.com/tendermint/tendermint/pull/3978).
diff --git a/spec/consensus/wal.md b/spec/consensus/wal.md
new file mode 100644
index 0000000000..95d1bad126
--- /dev/null
+++ b/spec/consensus/wal.md
@@ -0,0 +1,32 @@
+# WAL
+
+The consensus module writes every message to the WAL (write-ahead log).
+
+It also issues an fsync syscall through
+[File#Sync](https://golang.org/pkg/os/#File.Sync) for messages signed by this
+node (to prevent double signing).
+
+Under the hood, it uses
+[autofile.Group](https://godoc.org/github.com/tendermint/tmlibs/autofile#Group),
+which rotates files when they get too big (> 10MB).
+
+The total maximum size is 1GB. We only need the latest block and the block before it,
+but if the former is dragging on across many rounds, we want all those rounds.
+
+## Replay
+
+The consensus module will replay all the messages of the last height written to the WAL
+before a crash (if one occurs).
+
+The private validator may try to sign messages during replay because it runs
+somewhat autonomously and does not know about the replay process.
+
+For example, if we got all the way to precommit in the WAL and then crash,
+after we replay the proposal message, the private validator will try to sign a
+prevote. But it will fail. That's ok because we'll see the prevote later in the
+WAL. Then it will go to precommit, and this time it will work because the
+private validator contains the `LastSignBytes` and then we'll replay the
+precommit from the WAL.
+
+Make sure to read about [WAL corruption](https://github.com/tendermint/tendermint/blob/master/docs/tendermint-core/running-in-production.md#wal-corruption)
+and recovery strategies.
diff --git a/spec/core/data_structures.md b/spec/core/data_structures.md
new file mode 100644
index 0000000000..dde3ec3542
--- /dev/null
+++ b/spec/core/data_structures.md
@@ -0,0 +1,478 @@
+# Data Structures
+
+Here we describe the data structures in the Tendermint blockchain and the rules for validating them.
+
+The Tendermint blockchain consists of a short list of data types:
+
+- [Data Structures](#data-structures)
+ - [Block](#block)
+ - [Execution](#execution)
+ - [Header](#header)
+ - [Version](#version)
+ - [BlockID](#blockid)
+ - [PartSetHeader](#partsetheader)
+ - [Part](#part)
+ - [Time](#time)
+ - [Data](#data)
+ - [Commit](#commit)
+ - [CommitSig](#commitsig)
+ - [BlockIDFlag](#blockidflag)
+ - [Vote](#vote)
+ - [CanonicalVote](#canonicalvote)
+ - [Proposal](#proposal)
+ - [SignedMsgType](#signedmsgtype)
+ - [Signature](#signature)
+ - [EvidenceList](#evidencelist)
+ - [Evidence](#evidence)
+ - [DuplicateVoteEvidence](#duplicatevoteevidence)
+ - [LightClientAttackEvidence](#lightclientattackevidence)
+ - [LightBlock](#lightblock)
+ - [SignedHeader](#signedheader)
+ - [ValidatorSet](#validatorset)
+ - [Validator](#validator)
+ - [Address](#address)
+ - [ConsensusParams](#consensusparams)
+ - [BlockParams](#blockparams)
+ - [EvidenceParams](#evidenceparams)
+ - [ValidatorParams](#validatorparams)
+ - [VersionParams](#versionparams)
+ - [SynchronyParams](#synchronyparams)
+ - [TimeoutParams](#timeoutparams)
+ - [Proof](#proof)
+
+## Block
+
+A block consists of a header, transactions, votes (the commit),
+and a list of evidence of malfeasance (ie. signing conflicting votes).
+
+| Name | Type | Description | Validation |
+|--------|-------------------|--------------------------------------------------|----------------------------------------------------------|
+| Header | [Header](#header) | Header corresponding to the block. This field contains information used throughout consensus and other areas of the protocol. To find out what it contains, visit [header](#header) | Must adhere to the validation rules of [header](#header) |
+| Data | [Data](#data) | Data contains a list of transactions. The contents of the transactions are unknown to Tendermint. | This field can be empty or populated, but no validation is performed. Applications can perform validation on individual transactions prior to block creation using [checkTx](../abci/abci.md#checktx). |
+| Evidence | [EvidenceList](#evidence_list) | Evidence contains a list of infractions committed by validators. | Can be empty, but when populated the validation rules from [evidenceList](#evidence_list) apply |
+| LastCommit | [Commit](#commit) | `LastCommit` includes one vote for every validator. All votes must either be for the previous block, nil or absent. If a vote is for the previous block it must have a valid signature from the corresponding validator. The sum of the voting power of the validators that voted must be greater than 2/3 of the total voting power of the complete validator set. The number of votes in a commit is limited to 10000 (see `types.MaxVotesCount`). | Must be empty for the initial height and must adhere to the validation rules of [commit](#commit). |
+
+## Execution
+
+Once a block is validated, it can be executed against the state.
+
+The state follows this recursive equation:
+
+```go
+state(initialHeight) = InitialState
+state(h+1) <- Execute(state(h), ABCIApp, block(h))
+```
+
+where `InitialState` includes the initial consensus parameters and validator set,
+and `ABCIApp` is an ABCI application that can return results and changes to the validator
+set (TODO).
Execute is defined as:
+
+```go
+func Execute(state State, app ABCIApp, block Block) State {
+ // Function ApplyBlock executes the block of transactions against the app and returns the new root hash of the app state,
+ // modifications to the validator set and the changes of the consensus parameters.
+ AppHash, ValidatorChanges, ConsensusParamChanges := app.ApplyBlock(block)
+
+ nextConsensusParams := UpdateConsensusParams(state.ConsensusParams, ConsensusParamChanges)
+ return State{
+ ChainID: state.ChainID,
+ InitialHeight: state.InitialHeight,
+ LastResults: abciResponses.DeliverTxResults,
+ AppHash: AppHash,
+ LastValidators: state.Validators,
+ Validators: state.NextValidators,
+ NextValidators: UpdateValidators(state.NextValidators, ValidatorChanges),
+ ConsensusParams: nextConsensusParams,
+ Version: {
+ Consensus: {
+ AppVersion: nextConsensusParams.Version.AppVersion,
+ },
+ },
+ }
+}
+```
+
+A new block is first validated prior to the `prevote`, `precommit` & `finalizeCommit` stages.
+
+The steps to validate a new block are:
+
+- Check the validity rules of the block and its fields.
+- Check the versions (Block & App) are the same as in the local state.
+- Check that the chainIDs match.
+- Check the height is correct.
+- Check the `LastBlockID` corresponds to the BlockID currently in state.
+- Check the hashes in the header match those in state.
+- Verify the LastCommit against state; this step is skipped for the initial height.
+ - This is where it is checked that the signatures correspond to the correct block.
+- Make sure the proposer is part of the validator set.
+- Validate block time.
+ - Make sure the new block's time is after the previous block's time.
+ - Calculate the medianTime and check it against the block's time.
+ - If the block's height is the initial height, then check if it matches the genesis time.
+- Validate the evidence in the block. Note: Evidence can be empty
+
+## Header
+
+A block header contains metadata about the block and about the consensus, as well as commitments to
+the data in the current block, the previous block, and the results returned by the application:
+
+| Name | Type | Description | Validation |
+|-------------------|---------------------------|--------------------------------------------------|-------------------------------------------------|
+| Version | [Version](#version) | Version defines the application and protocol version being used. | Must adhere to the validation rules of [Version](#version) |
+| ChainID | String | ChainID is the ID of the chain. This must be unique to your chain. | ChainID must be less than 50 bytes. |
+| Height | uint64 | Height is the height for this header. | Must be > 0, >= initialHeight, and == previous Height+1 |
+| Time | [Time](#time) | The timestamp is equal to the weighted median of the timestamps of validators present in the last commit. Read more on time in the [BFT-time section](../consensus/bft-time.md). Note: the timestamp of a vote must be greater by at least one millisecond than that of the block being voted on. 
| Time must be >= previous header timestamp + consensus parameters TimeIotaMs. The timestamp of the first block must be equal to the genesis time (since there's no votes to compute the median). | +| LastBlockID | [BlockID](#blockid) | BlockID of the previous block. | Must adhere to the validation rules of [blockID](#blockid). The first block has `block.Header.LastBlockID == BlockID{}`. | +| LastCommitHash | slice of bytes (`[]byte`) | MerkleRoot of the lastCommit's signatures. The signatures represent the validators that committed to the last block. The first block has an empty slices of bytes for the hash. | Must be of length 32 | +| DataHash | slice of bytes (`[]byte`) | MerkleRoot of the hash of transactions. **Note**: The transactions are hashed before being included in the merkle tree, the leaves of the Merkle tree are the hashes, not the transactions themselves. | Must be of length 32 | +| ValidatorHash | slice of bytes (`[]byte`) | MerkleRoot of the current validator set. The validators are first sorted by voting power (descending), then by address (ascending) prior to computing the MerkleRoot. | Must be of length 32 | +| NextValidatorHash | slice of bytes (`[]byte`) | MerkleRoot of the next validator set. The validators are first sorted by voting power (descending), then by address (ascending) prior to computing the MerkleRoot. | Must be of length 32 | +| ConsensusHash | slice of bytes (`[]byte`) | Hash of the protobuf encoded consensus parameters. | Must be of length 32 | +| AppHash | slice of bytes (`[]byte`) | Arbitrary byte array returned by the application after executing and commiting the previous block. It serves as the basis for validating any merkle proofs that comes from the ABCI application and represents the state of the actual application rather than the state of the blockchain itself. The first block's `block.Header.AppHash` is given by `ResponseInitChain.app_hash`. | This hash is determined by the application, Tendermint can not perform validation on it. | +| LastResultHash | slice of bytes (`[]byte`) | `LastResultsHash` is the root hash of a Merkle tree built from `ResponseDeliverTx` responses (`Log`,`Info`, `Codespace` and `Events` fields are ignored). | Must be of length 32. The first block has `block.Header.ResultsHash == MerkleRoot(nil)`, i.e. the hash of an empty input, for RFC-6962 conformance. | +| EvidenceHash | slice of bytes (`[]byte`) | MerkleRoot of the evidence of Byzantine behaviour included in this block. | Must be of length 32 | +| ProposerAddress | slice of bytes (`[]byte`) | Address of the original proposer of the block. Validator must be in the current validatorSet. | Must be of length 20 | + +## Version + +NOTE: that this is more specifically the consensus version and doesn't include information like the +P2P Version. (TODO: we should write a comprehensive document about +versioning that this can refer to) + +| Name | type | Description | Validation | +|-------|--------|-----------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------| +| Block | uint64 | This number represents the version of the block protocol and must be the same throughout an operational network | Must be equal to protocol version being used in a network (`block.Version.Block == state.Version.Consensus.Block`) | +| App | uint64 | App version is decided on by the application. 
Read [here](../abci/abci.md#info) | `block.Version.App == state.Version.Consensus.App` | + +## BlockID + +The `BlockID` contains two distinct Merkle roots of the block. The `BlockID` includes these two hashes, as well as the number of parts (ie. `len(MakeParts(block))`) + +| Name | Type | Description | Validation | +|---------------|---------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------| +| Hash | slice of bytes (`[]byte`) | MerkleRoot of all the fields in the header (ie. `MerkleRoot(header)`. | hash must be of length 32 | +| PartSetHeader | [PartSetHeader](#PartSetHeader) | Used for secure gossiping of the block during consensus, is the MerkleRoot of the complete serialized block cut into parts (ie. `MerkleRoot(MakeParts(block))`). | Must adhere to the validation rules of [PartSetHeader](#PartSetHeader) | + +See [MerkleRoot](./encoding.md#MerkleRoot) for details. + +## PartSetHeader + +| Name | Type | Description | Validation | +|-------|---------------------------|-----------------------------------|----------------------| +| Total | int32 | Total amount of parts for a block | Must be > 0 | +| Hash | slice of bytes (`[]byte`) | MerkleRoot of a serialized block | Must be of length 32 | + +## Part + +Part defines a part of a block. In Tendermint blocks are broken into `parts` for gossip. + +| Name | Type | Description | Validation | +|-------|-----------------|-----------------------------------|----------------------| +| index | int32 | Total amount of parts for a block | Must be > 0 | +| bytes | bytes | MerkleRoot of a serialized block | Must be of length 32 | +| proof | [Proof](#proof) | MerkleRoot of a serialized block | Must be of length 32 | + +## Time + +Tendermint uses the [Google.Protobuf.Timestamp](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Timestamp) +format, which uses two integers, one 64 bit integer for Seconds and a 32 bit integer for Nanoseconds. + +## Data + +Data is just a wrapper for a list of transactions, where transactions are arbitrary byte arrays: + +| Name | Type | Description | Validation | +|------|----------------------------|------------------------|-----------------------------------------------------------------------------| +| Txs | Matrix of bytes ([][]byte) | Slice of transactions. | Validation does not occur on this field, this data is unknown to Tendermint | + +## Commit + +Commit is a simple wrapper for a list of signatures, with one for each validator. It also contains the relevant BlockID, height and round: + +| Name | Type | Description | Validation | +|------------|----------------------------------|----------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------| +| Height | uint64 | Height at which this commit was created. | Must be > 0 | +| Round | int32 | Round that the commit corresponds to. | Must be > 0 | +| BlockID | [BlockID](#blockid) | The blockID of the corresponding block. | Must adhere to the validation rules of [BlockID](#blockid). | +| Signatures | Array of [CommitSig](#commitsig) | Array of commit signatures that correspond to current validator set. 
| Length of signatures must be > 0 and adhere to the validation of each individual [Commitsig](#commitsig) | + +## CommitSig + +`CommitSig` represents a signature of a validator, who has voted either for nil, +a particular `BlockID` or was absent. It's a part of the `Commit` and can be used +to reconstruct the vote set given the validator set. + +| Name | Type | Description | Validation | +|------------------|-----------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------| +| BlockIDFlag | [BlockIDFlag](#blockidflag) | Represents the validators participation in consensus: Either voted for the block that received the majority, voted for another block, voted nil or did not vote | Must be one of the fields in the [BlockIDFlag](#blockidflag) enum | +| ValidatorAddress | [Address](#address) | Address of the validator | Must be of length 20 | +| Timestamp | [Time](#time) | This field will vary from `CommitSig` to `CommitSig`. It represents the timestamp of the validator. | [Time](#time) | +| Signature | [Signature](#signature) | Signature corresponding to the validators participation in consensus. | The length of the signature must be > 0 and < than 64 | + +NOTE: `ValidatorAddress` and `Timestamp` fields may be removed in the future +(see [ADR-25](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-025-commit.md)). + +## BlockIDFlag + +BlockIDFlag represents which BlockID the [signature](#commitsig) is for. + +```go +enum BlockIDFlag { + BLOCK_ID_FLAG_UNKNOWN = 0; + BLOCK_ID_FLAG_ABSENT = 1; // signatures for other blocks are also considered absent + BLOCK_ID_FLAG_COMMIT = 2; + BLOCK_ID_FLAG_NIL = 3; +} +``` + +## Vote + +A vote is a signed message from a validator for a particular block. +The vote includes information about the validator signing it. When stored in the blockchain or propagated over the network, votes are encoded in Protobuf. +The vote extension is not part of the [`CanonicalVote`](#canonicalvote). + +| Name | Type | Description | Validation | +|--------------------|---------------------------------|---------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------| +| Type | [SignedMsgType](#signedmsgtype) | Either prevote or precommit. [SignedMsgType](#signedmsgtype) | A Vote is valid if its corresponding fields are included in the enum [signedMsgType](#signedmsgtype) | +| Height | uint64 | Height for which this vote was created. | Must be > 0 | +| Round | int32 | Round that the commit corresponds to. | Must be > 0 | +| BlockID | [BlockID](#blockid) | The blockID of the corresponding block. | [BlockID](#blockid) | +| Timestamp | [Time](#Time) | The time at which a validator signed. | [Time](#time) | +| ValidatorAddress | slice of bytes (`[]byte`) | Address of the validator | Length must be equal to 20 | +| ValidatorIndex | int32 | Index at a specific block height that corresponds to the Index of the validator in the set. | must be > 0 | +| Signature | slice of bytes (`[]byte`) | Signature by the validator if they participated in consensus for the associated bock. | Length of signature must be > 0 and < 64 | +| Extension | slice of bytes (`[]byte`) | The vote extension provided by the Application. Only valid for precommit messages. 
| Length must be 0 if Type != `SIGNED_MSG_TYPE_PRECOMMIT` | +| ExtensionSignature | slice of bytes (`[]byte`) | Signature by the validator if they participated in consensus for the associated bock. | Length must be 0 if Type != `SIGNED_MSG_TYPE_PRECOMMIT`; else length must be > 0 and < 64 | + +## CanonicalVote + +CanonicalVote is for validator signing. This type will not be present in a block. Votes are represented via `CanonicalVote` and also encoded using protobuf via `type.SignBytes` which includes the `ChainID`, and uses a different ordering of +the fields. + +```proto +message CanonicalVote { + SignedMsgType type = 1; + fixed64 height = 2; + sfixed64 round = 3; + CanonicalBlockID block_id = 4; + google.protobuf.Timestamp timestamp = 5; + string chain_id = 6; +} +``` + +For signing, votes are represented via [`CanonicalVote`](#canonicalvote) and also encoded using protobuf via +`type.SignBytes` which includes the `ChainID`, and uses a different ordering of +the fields. + +We define a method `Verify` that returns `true` if the signature verifies against the pubkey for the `SignBytes` +using the given ChainID: + +```go +func (vote *Vote) Verify(chainID string, pubKey crypto.PubKey) error { + if !bytes.Equal(pubKey.Address(), vote.ValidatorAddress) { + return ErrVoteInvalidValidatorAddress + } + + if !pubKey.VerifyBytes(types.VoteSignBytes(chainID), vote.Signature) { + return ErrVoteInvalidSignature + } + return nil +} +``` + +## Proposal + +Proposal contains height and round for which this proposal is made, BlockID as a unique identifier +of proposed block, timestamp, and POLRound (a so-called Proof-of-Lock (POL) round) that is needed for +termination of the consensus. If POLRound >= 0, then BlockID corresponds to the block that +is locked in POLRound. The message is signed by the validator private key. + +| Name | Type | Description | Validation | +|-----------|---------------------------------|---------------------------------------------------------------------------------------|---------------------------------------------------------| +| Type | [SignedMsgType](#signedmsgtype) | Represents a Proposal [SignedMsgType](#signedmsgtype) | Must be `ProposalType` [signedMsgType](#signedmsgtype) | +| Height | uint64 | Height for which this vote was created for | Must be > 0 | +| Round | int32 | Round that the commit corresponds to. | Must be > 0 | +| POLRound | int64 | Proof of lock | Must be > 0 | +| BlockID | [BlockID](#blockid) | The blockID of the corresponding block. | [BlockID](#blockid) | +| Timestamp | [Time](#Time) | Timestamp represents the time at which a validator signed. | [Time](#time) | +| Signature | slice of bytes (`[]byte`) | Signature by the validator if they participated in consensus for the associated bock. | Length of signature must be > 0 and < 64 | + +## SignedMsgType + +Signed message type represents a signed messages in consensus. + +```proto +enum SignedMsgType { + + SIGNED_MSG_TYPE_UNKNOWN = 0; + // Votes + SIGNED_MSG_TYPE_PREVOTE = 1; + SIGNED_MSG_TYPE_PRECOMMIT = 2; + + // Proposal + SIGNED_MSG_TYPE_PROPOSAL = 32; +} +``` + +## Signature + +Signatures in Tendermint are raw bytes representing the underlying signature. + +See the [signature spec](./encoding.md#key-types) for more. 
+ +## EvidenceList + +EvidenceList is a simple wrapper for a list of evidence: + +| Name | Type | Description | Validation | +|----------|--------------------------------|----------------------------------------|-----------------------------------------------------------------| +| Evidence | Array of [Evidence](#evidence) | List of verified [evidence](#evidence) | Validation adheres to individual types of [Evidence](#evidence) | + +## Evidence + +Evidence in Tendermint is used to indicate breaches in the consensus by a validator. + +More information on how evidence works in Tendermint can be found [here](../consensus/evidence.md) + +### DuplicateVoteEvidence + +`DuplicateVoteEvidence` represents a validator that has voted for two different blocks +in the same round of the same height. Votes are lexicographically sorted on `BlockID`. + +| Name | Type | Description | Validation | +|------------------|---------------|--------------------------------------------------------------------|-----------------------------------------------------| +| VoteA | [Vote](#vote) | One of the votes submitted by a validator when they equivocated | VoteA must adhere to [Vote](#vote) validation rules | +| VoteB | [Vote](#vote) | The second vote submitted by a validator when they equivocated | VoteB must adhere to [Vote](#vote) validation rules | +| TotalVotingPower | int64 | The total power of the validator set at the height of equivocation | Must be equal to nodes own copy of the data | +| ValidatorPower | int64 | Power of the equivocating validator at the height | Must be equal to the nodes own copy of the data | +| Timestamp | [Time](#Time) | Time of the block where the equivocation occurred | Must be equal to the nodes own copy of the data | + +### LightClientAttackEvidence + +`LightClientAttackEvidence` is a generalized evidence that captures all forms of known attacks on +a light client such that a full node can verify, propose and commit the evidence on-chain for +punishment of the malicious validators. There are three forms of attacks: Lunatic, Equivocation +and Amnesia. These attacks are exhaustive. You can find a more detailed overview of this [here](../light-client/accountability#the_misbehavior_of_faulty_validators) + +| Name | Type | Description | Validation | +|----------------------|------------------------------------|----------------------------------------------------------------------|------------------------------------------------------------------| +| ConflictingBlock | [LightBlock](#LightBlock) | Read Below | Must adhere to the validation rules of [lightBlock](#lightblock) | +| CommonHeight | int64 | Read Below | must be > 0 | +| Byzantine Validators | Array of [Validators](#Validators) | validators that acted maliciously | Read Below | +| TotalVotingPower | int64 | The total power of the validator set at the height of the infraction | Must be equal to the nodes own copy of the data | +| Timestamp | [Time](#Time) | Time of the block where the infraction occurred | Must be equal to the nodes own copy of the data | + +## LightBlock + +LightBlock is the core data structure of the [light client](../light-client/README.md). It combines two data structures needed for verification ([signedHeader](#signedheader) & [validatorSet](#validatorset)). 
+ +| Name | Type | Description | Validation | +|--------------|-------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------| +| SignedHeader | [SignedHeader](#signedheader) | The header and commit, these are used for verification purposes. To find out more visit [light client docs](../light-client/README.md) | Must not be nil and adhere to the validation rules of [signedHeader](#signedheader) | +| ValidatorSet | [ValidatorSet](#validatorset) | The validatorSet is used to help with verify that the validators in that committed the infraction were truly in the validator set. | Must not be nil and adhere to the validation rules of [validatorSet](#validatorset) | + +The `SignedHeader` and `ValidatorSet` are linked by the hash of the validator set(`SignedHeader.ValidatorsHash == ValidatorSet.Hash()`. + +## SignedHeader + +The SignedhHeader is the [header](#header) accompanied by the commit to prove it. + +| Name | Type | Description | Validation | +|--------|-------------------|-------------------|-----------------------------------------------------------------------------------| +| Header | [Header](#Header) | [Header](#header) | Header cannot be nil and must adhere to the [Header](#Header) validation criteria | +| Commit | [Commit](#commit) | [Commit](#commit) | Commit cannot be nil and must adhere to the [Commit](#commit) criteria | + +## ValidatorSet + +| Name | Type | Description | Validation | +|------------|----------------------------------|----------------------------------------------------|-------------------------------------------------------------------------------------------------------------------| +| Validators | Array of [validator](#validator) | List of the active validators at a specific height | The list of validators can not be empty or nil and must adhere to the validation rules of [validator](#validator) | +| Proposer | [validator](#validator) | The block proposer for the corresponding block | The proposer cannot be nil and must adhere to the validation rules of [validator](#validator) | + +## Validator + +| Name | Type | Description | Validation | +|------------------|---------------------------|---------------------------------------------------------------------------------------------------|---------------------------------------------------| +| Address | [Address](#address) | Validators Address | Length must be of size 20 | +| Pubkey | slice of bytes (`[]byte`) | Validators Public Key | must be a length greater than 0 | +| VotingPower | int64 | Validators voting power | cannot be < 0 | +| ProposerPriority | int64 | Validators proposer priority. This is used to gauge when a validator is up next to propose blocks | No validation, value can be negative and positive | + +## Address + +Address is a type alias of a slice of bytes. The address is calculated by hashing the public key using sha256 and truncating it to only use the first 20 bytes of the slice. 
+ +```go +const ( + TruncatedSize = 20 +) + +func SumTruncated(bz []byte) []byte { + hash := sha256.Sum256(bz) + return hash[:TruncatedSize] +} +``` + +## ConsensusParams + +| Name | Type | Description | Field Number | +|-----------|-------------------------------------|------------------------------------------------------------------------------|--------------| +| block | [BlockParams](#blockparams) | Parameters limiting the size of a block and time between consecutive blocks. | 1 | +| evidence | [EvidenceParams](#evidenceparams) | Parameters limiting the validity of evidence of byzantine behaviour. | 2 | +| validator | [ValidatorParams](#validatorparams) | Parameters limiting the types of public keys validators can use. | 3 | +| version | [BlockParams](#blockparams) | The ABCI application version. | 4 | + +### BlockParams + +| Name | Type | Description | Field Number | +|--------------|-------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------| +| max_bytes | int64 | Max size of a block, in bytes. | 1 | +| max_gas | int64 | Max sum of `GasWanted` in a proposed block. NOTE: blocks that violate this may be committed if there are Byzantine proposers. It's the application's responsibility to handle this when processing a block! | 2 | + +### EvidenceParams + +| Name | Type | Description | Field Number | +|--------------------|------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------| +| max_age_num_blocks | int64 | Max age of evidence, in blocks. | 1 | +| max_age_duration | [google.protobuf.Duration](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Duration) | Max age of evidence, in time. It should correspond with an app's "unbonding period" or other similar mechanism for handling [Nothing-At-Stake attacks](https://github.com/ethereum/wiki/wiki/Proof-of-Stake-FAQ#what-is-the-nothing-at-stake-problem-and-how-can-it-be-fixed). | 2 | +| max_bytes | int64 | maximum size in bytes of total evidence allowed to be entered into a block | 3 | + +### ValidatorParams + +| Name | Type | Description | Field Number | +|---------------|-----------------|-----------------------------------------------------------------------|--------------| +| pub_key_types | repeated string | List of accepted public key types. Uses same naming as `PubKey.Type`. | 1 | + +### VersionParams + +| Name | Type | Description | Field Number | +|-------------|--------|-------------------------------|--------------| +| app_version | uint64 | The ABCI application version. | 1 | + +### SynchronyParams + +| Name | Type | Description | Field Number | +|---------------|--------|-------------------------------|--------------| +| message_delay | [google.protobuf.Duration](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Duration) | Bound for how long a proposal message may take to reach all validators on a newtork and still be considered valid. 
| 1 | +| precision | [google.protobuf.Duration](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Duration) | Bound for how skewed a proposer's clock may be from any validator on the network while still producing valid proposals. | 2 | + +### TimeoutParams + +| Name | Type | Description | Field Number | +|---------------|--------|-------------------------------|--------------| +| propose | [google.protobuf.Duration](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Duration) | Parameter that, along with propose_delta, configures the timeout for the propose step of the consensus algorithm. | 1 | +| propose_delta | [google.protobuf.Duration](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Duration) | Parameter that, along with propose, configures the timeout for the propose step of the consensus algorithm. | 2 | +| vote | [google.protobuf.Duration](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Duration)| Parameter that, along with vote_delta, configures the timeout for the prevote and precommit step of the consensus algorithm. | 3 | +| vote_delta | [google.protobuf.Duration](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Duration)| Parameter that, along with vote, configures the timeout for the prevote and precommit step of the consensus algorithm. | 4 | +| commit | [google.protobuf.Duration](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Duration) | Parameter that configures how long Tendermint will wait after receiving a quorum of precommits before beginning consensus for the next height.| 5 | +| bypass_commit_timeout | bool | Parameter that, if enabled, configures the node to proceed immediately to the next height once the node has received all precommits for a block, forgoing the commit timeout. | 6 | + +## Proof + +| Name | Type | Description | Field Number | +|-----------|----------------|-----------------------------------------------|--------------| +| total | int64 | Total number of items. | 1 | +| index | int64 | Index item to prove. | 2 | +| leaf_hash | bytes | Hash of item value. | 3 | +| aunts | repeated bytes | Hashes from leaf's sibling to a root's child. | 4 | diff --git a/spec/core/encoding.md b/spec/core/encoding.md new file mode 100644 index 0000000000..c137575d78 --- /dev/null +++ b/spec/core/encoding.md @@ -0,0 +1,300 @@ +# Encoding + +## Protocol Buffers + +Tendermint uses [Protocol Buffers](https://developers.google.com/protocol-buffers), specifically proto3, for all data structures. + +Please see the [Proto3 language guide](https://developers.google.com/protocol-buffers/docs/proto3) for more details. + +## Byte Arrays + +The encoding of a byte array is simply the raw-bytes prefixed with the length of +the array as a `UVarint` (what proto calls a `Varint`). + +For details on varints, see the [protobuf +spec](https://developers.google.com/protocol-buffers/docs/encoding#varints). + +For example, the byte-array `[0xA, 0xB]` would be encoded as `0x020A0B`, +while a byte-array containing 300 entires beginning with `[0xA, 0xB, ...]` would +be encoded as `0xAC020A0B...` where `0xAC02` is the UVarint encoding of 300. + +## Hashing + +Tendermint uses `SHA256` as its hash function. +Objects are always serialized before being hashed. +So `SHA256(obj)` is short for `SHA256(ProtoEncoding(obj))`. 
+ +## Public Key Cryptography + +Tendermint uses Protobuf [Oneof](https://developers.google.com/protocol-buffers/docs/proto3#oneof) +to distinguish between different types public keys, and signatures. +Additionally, for each public key, Tendermint +defines an Address function that can be used as a more compact identifier in +place of the public key. Here we list the concrete types, their names, +and prefix bytes for public keys and signatures, as well as the address schemes +for each PubKey. Note for brevity we don't +include details of the private keys beyond their type and name. + +### Key Types + +Each type specifies it's own pubkey, address, and signature format. + +#### Ed25519 + +The address is the first 20-bytes of the SHA256 hash of the raw 32-byte public key: + +```go +address = SHA256(pubkey)[:20] +``` + +The signature is the raw 64-byte ED25519 signature. + +Tendermint adopted [zip215](https://zips.z.cash/zip-0215) for verification of ed25519 signatures. + +> Note: This change will be released in the next major release of Tendermint-Go (0.35). + +#### Secp256k1 + +The address is the first 20-bytes of the SHA256 hash of the raw 32-byte public key: + +```go +address = SHA256(pubkey)[:20] +``` + +## Other Common Types + +### BitArray + +The BitArray is used in some consensus messages to represent votes received from +validators, or parts received in a block. It is represented +with a struct containing the number of bits (`Bits`) and the bit-array itself +encoded in base64 (`Elems`). + +| Name | Type | +|-------|----------------------------| +| bits | int64 | +| elems | slice of int64 (`[]int64`) | + +Note BitArray receives a special JSON encoding in the form of `x` and `_` +representing `1` and `0`. Ie. the BitArray `10110` would be JSON encoded as +`"x_xx_"` + +### Part + +Part is used to break up blocks into pieces that can be gossiped in parallel +and securely verified using a Merkle tree of the parts. + +Part contains the index of the part (`Index`), the actual +underlying data of the part (`Bytes`), and a Merkle proof that the part is contained in +the set (`Proof`). + +| Name | Type | +|-------|---------------------------| +| index | uint32 | +| bytes | slice of bytes (`[]byte`) | +| proof | [proof](#merkle-proof) | + +See details of SimpleProof, below. + +### MakeParts + +Encode an object using Protobuf and slice it into parts. +Tendermint uses a part size of 65536 bytes, and allows a maximum of 1601 parts +(see `types.MaxBlockPartsCount`). This corresponds to the hard-coded block size +limit of 100MB. + +```go +func MakeParts(block Block) []Part +``` + +## Merkle Trees + +For an overview of Merkle trees, see +[wikipedia](https://en.wikipedia.org/wiki/Merkle_tree) + +We use the RFC 6962 specification of a merkle tree, with sha256 as the hash function. +Merkle trees are used throughout Tendermint to compute a cryptographic digest of a data structure. +The differences between RFC 6962 and the simplest form a merkle tree are that: + +1. leaf nodes and inner nodes have different hashes. + This is for "second pre-image resistance", to prevent the proof to an inner node being valid as the proof of a leaf. + The leaf nodes are `SHA256(0x00 || leaf_data)`, and inner nodes are `SHA256(0x01 || left_hash || right_hash)`. + +2. When the number of items isn't a power of two, the left half of the tree is as big as it could be. + (The largest power of two less than the number of items) This allows new leaves to be added with less + recomputation. 
For example: + +```md + Simple Tree with 6 items Simple Tree with 7 items + + * * + / \ / \ + / \ / \ + / \ / \ + / \ / \ + * * * * + / \ / \ / \ / \ + / \ / \ / \ / \ + / \ / \ / \ / \ + * * h4 h5 * * * h6 + / \ / \ / \ / \ / \ +h0 h1 h2 h3 h0 h1 h2 h3 h4 h5 +``` + +### MerkleRoot + +The function `MerkleRoot` is a simple recursive function defined as follows: + +```go +// SHA256([]byte{}) +func emptyHash() []byte { + return tmhash.Sum([]byte{}) +} + +// SHA256(0x00 || leaf) +func leafHash(leaf []byte) []byte { + return tmhash.Sum(append(0x00, leaf...)) +} + +// SHA256(0x01 || left || right) +func innerHash(left []byte, right []byte) []byte { + return tmhash.Sum(append(0x01, append(left, right...)...)) +} + +// largest power of 2 less than k +func getSplitPoint(k int) { ... } + +func MerkleRoot(items [][]byte) []byte{ + switch len(items) { + case 0: + return empthHash() + case 1: + return leafHash(items[0]) + default: + k := getSplitPoint(len(items)) + left := MerkleRoot(items[:k]) + right := MerkleRoot(items[k:]) + return innerHash(left, right) + } +} +``` + +Note: `MerkleRoot` operates on items which are arbitrary byte arrays, not +necessarily hashes. For items which need to be hashed first, we introduce the +`Hashes` function: + +```go +func Hashes(items [][]byte) [][]byte { + return SHA256 of each item +} +``` + +Note: we will abuse notion and invoke `MerkleRoot` with arguments of type `struct` or type `[]struct`. +For `struct` arguments, we compute a `[][]byte` containing the protobuf encoding of each +field in the struct, in the same order the fields appear in the struct. +For `[]struct` arguments, we compute a `[][]byte` by protobuf encoding the individual `struct` elements. + +### Merkle Proof + +Proof that a leaf is in a Merkle tree is composed as follows: + +| Name | Type | +|----------|----------------------------| +| total | int64 | +| index | int64 | +| leafHash | slice of bytes (`[]byte`) | +| aunts | Matrix of bytes ([][]byte) | + +Which is verified as follows: + +```golang +func (proof Proof) Verify(rootHash []byte, leaf []byte) bool { + assert(proof.LeafHash, leafHash(leaf) + + computedHash := computeHashFromAunts(proof.Index, proof.Total, proof.LeafHash, proof.Aunts) + return computedHash == rootHash +} + +func computeHashFromAunts(index, total int, leafHash []byte, innerHashes [][]byte) []byte{ + assert(index < total && index >= 0 && total > 0) + + if total == 1{ + assert(len(proof.Aunts) == 0) + return leafHash + } + + assert(len(innerHashes) > 0) + + numLeft := getSplitPoint(total) // largest power of 2 less than total + if index < numLeft { + leftHash := computeHashFromAunts(index, numLeft, leafHash, innerHashes[:len(innerHashes)-1]) + assert(leftHash != nil) + return innerHash(leftHash, innerHashes[len(innerHashes)-1]) + } + rightHash := computeHashFromAunts(index-numLeft, total-numLeft, leafHash, innerHashes[:len(innerHashes)-1]) + assert(rightHash != nil) + return innerHash(innerHashes[len(innerHashes)-1], rightHash) +} +``` + +The number of aunts is limited to 100 (`MaxAunts`) to protect the node against DOS attacks. +This limits the tree size to 2^100 leaves, which should be sufficient for any +conceivable purpose. + +### IAVL+ Tree + +Because Tendermint only uses a Simple Merkle Tree, application developers are expect to use their own Merkle tree in their applications. 
For example, the IAVL+ Tree - an immutable self-balancing binary tree for persisting application state is used by the [Cosmos SDK](https://github.com/cosmos/cosmos-sdk/blob/ae77f0080a724b159233bd9b289b2e91c0de21b5/docs/interfaces/lite/specification.md) + +## JSON + +Tendermint has its own JSON encoding in order to keep backwards compatibility with the previous RPC layer. + +Registered types are encoded as: + +```json +{ + "type": "", + "value": +} +``` + +For instance, an ED25519 PubKey would look like: + +```json +{ + "type": "tendermint/PubKeyEd25519", + "value": "uZ4h63OFWuQ36ZZ4Bd6NF+/w9fWUwrOncrQsackrsTk=" +} +``` + +Where the `"value"` is the base64 encoding of the raw pubkey bytes, and the +`"type"` is the type name for Ed25519 pubkeys. + +### Signed Messages + +Signed messages (eg. votes, proposals) in the consensus are encoded using protobuf. + +When signing, the elements of a message are re-ordered so the fixed-length fields +are first, making it easy to quickly check the type, height, and round. +The `ChainID` is also appended to the end. +We call this encoding the SignBytes. For instance, SignBytes for a vote is the protobuf encoding of the following struct: + +```protobuf +message CanonicalVote { + SignedMsgType type = 1; + sfixed64 height = 2; // canonicalization requires fixed size encoding here + sfixed64 round = 3; // canonicalization requires fixed size encoding here + CanonicalBlockID block_id = 4; + google.protobuf.Timestamp timestamp = 5; + string chain_id = 6; +} +``` + +The field ordering and the fixed sized encoding for the first three fields is optimized to ease parsing of SignBytes +in HSMs. It creates fixed offsets for relevant fields that need to be read in this context. + +> Note: All canonical messages are length prefixed. + +For more details, see the [signing spec](../consensus/signing.md). +Also, see the motivating discussion in +[#1622](https://github.com/tendermint/tendermint/issues/1622). diff --git a/spec/core/genesis.md b/spec/core/genesis.md new file mode 100644 index 0000000000..5bf6156fd8 --- /dev/null +++ b/spec/core/genesis.md @@ -0,0 +1,35 @@ +# Genesis + +The genesis file is the starting point of a chain. An application will populate the `app_state` field in the genesis with their required fields. Tendermint is not able to validate this section because it is unaware what application state consists of. + +## Genesis Fields + +- `genesis_time`: The time the blockchain started or will start. If nodes are started before this time they will sit idle until the time specified. +- `chain_id`: The chain identifier. Every chain should have a unique identifier. When conducting a fork based upgrade, we recommend changing the chainid to avoid network or consensus errors. +- `initial_height`: The starting height of the blockchain. When conducting a chain restart to avoid restarting at height 1, the network is able to start at a specified height. +- `consensus_params` + - `block` + - `max_bytes`: The max amount of bytes a block can be. + - `max_gas`: The maximum amount of gas that a block can have. + - `evidence` + - `max_age_num_blocks`: After this preset amount of blocks has passed a single piece of evidence is considered invalid. + - `max_age_duration`: After this preset amount of time has passed a single piece of evidence is considered invalid. + - `max_bytes`: The max amount of bytes of all evidence included in a block. + - `validator` + - `pub_key_types`: Defines which curves are to be accepted as a valid validator consensus key. 
Tendermint supports ed25519, sr25519 and secp256k1. + - `version` + - `app_version`: The version of the application. This is set by the application and is used to identify which version of the app a user should be using in order to operate a node. + - `synchrony` + - `message_delay`: A bound on how long a proposal message may take to reach all validators on a network and still be considered valid. + - `precision`: A bound on how skewed the proposer's clock may be from any validator on the network while still producing valid proposals. + - `timeout` + - `propose`: How long the Tendermint consensus engine will wait for a proposal block before prevoting nil. + - `propose_delta`: How much the propose timeout increase with each round. + - `vote`: How long the consensus engine will wait after receiving +2/3 votes in a round. + - `vote_delta`: How much the vote timeout increases with each round. + - `commit`: How long the consensus engine will wait after receiving +2/3 precommits before beginning the next height. + - `bypass_commit_timeout`: Configures if the consensus engine will wait for the full commit timeout before proceeding to the next height. If this field is set to true, the conesnsus engine will proceed to the next height as soon as the node has gathered votes from all of the validators on the network. +- `validators` + - This is an array of validators. This validator set is used as the starting validator set of the chain. This field can be empty, if the application sets the validator set in `InitChain`. +- `app_hash`: The applications state root hash. This field does not need to be populated at the start of the chain, the application may provide the needed information via `Initchain`. +- `app_state`: This section is filled in by the application and is unknown to Tendermint. diff --git a/spec/core/readme.md b/spec/core/readme.md new file mode 100644 index 0000000000..46f95f1b76 --- /dev/null +++ b/spec/core/readme.md @@ -0,0 +1,13 @@ +--- +order: 1 +parent: + title: Core + order: 3 +--- + +This section describes the core types and functionality of the Tendermint protocol implementation. + +- [Core Data Structures](./data_structures.md) +- [Encoding](./encoding.md) +- [Genesis](./genesis.md) +- [State](./state.md) diff --git a/spec/core/state.md b/spec/core/state.md new file mode 100644 index 0000000000..5138c09506 --- /dev/null +++ b/spec/core/state.md @@ -0,0 +1,121 @@ +# State + +The state contains information whose cryptographic digest is included in block headers, and thus is +necessary for validating new blocks. For instance, the validators set and the results of +transactions are never included in blocks, but their Merkle roots are: +the state keeps track of them. + +The `State` object itself is an implementation detail, since it is never +included in a block or gossiped over the network, and we never compute +its hash. The persistence or query interface of the `State` object +is an implementation detail and not included in the specification. +However, the types in the `State` object are part of the specification, since +the Merkle roots of the `State` objects are included in blocks and values are used during +validation. 
+ +```go +type State struct { + ChainID string + InitialHeight int64 + + LastBlockHeight int64 + LastBlockID types.BlockID + LastBlockTime time.Time + + Version Version + LastResults []Result + AppHash []byte + + LastValidators ValidatorSet + Validators ValidatorSet + NextValidators ValidatorSet + + ConsensusParams ConsensusParams +} +``` + +The chain ID and initial height are taken from the genesis file, and not changed again. The +initial height will be `1` in the typical case, `0` is an invalid value. + +Note there is a hard-coded limit of 10000 validators. This is inherited from the +limit on the number of votes in a commit. + +Further information on [`Validator`'s](./data_structures.md#validator), +[`ValidatorSet`'s](./data_structures.md#validatorset) and +[`ConsensusParams`'s](./data_structures.md#consensusparams) can +be found in [data structures](./data_structures.md) + +## Execution + +State gets updated at the end of executing a block. Of specific interest is `ResponseEndBlock` and +`ResponseCommit` + +```go +type ResponseEndBlock struct { + ValidatorUpdates []ValidatorUpdate `protobuf:"bytes,1,rep,name=validator_updates,json=validatorUpdates,proto3" json:"validator_updates"` + ConsensusParamUpdates *types1.ConsensusParams `protobuf:"bytes,2,opt,name=consensus_param_updates,json=consensusParamUpdates,proto3" json:"consensus_param_updates,omitempty"` + Events []Event `protobuf:"bytes,3,rep,name=events,proto3" json:"events,omitempty"` +} +``` + +where + +```go +type ValidatorUpdate struct { + PubKey crypto.PublicKey `protobuf:"bytes,1,opt,name=pub_key,json=pubKey,proto3" json:"pub_key"` + Power int64 `protobuf:"varint,2,opt,name=power,proto3" json:"power,omitempty"` +} +``` + +and + +```go +type ResponseCommit struct { + // reserve 1 + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + RetainHeight int64 `protobuf:"varint,3,opt,name=retain_height,json=retainHeight,proto3" json:"retain_height,omitempty"` +} +``` + +`ValidatorUpdates` are used to add and remove validators to the current set as well as update +validator power. Setting validator power to 0 in `ValidatorUpdate` will cause the validator to be +removed. `ConsensusParams` are safely copied across (i.e. if a field is nil it gets ignored) and the +`Data` from the `ResponseCommit` is used as the `AppHash` + +## Version + +```go +type Version struct { + consensus Consensus + software string +} +``` + +[`Consensus`](./data_structures.md#version) contains the protocol version for the blockchain and the +application. + +## Block + +The total size of a block is limited in bytes by the `ConsensusParams.Block.MaxBytes`. +Proposed blocks must be less than this size, and will be considered invalid +otherwise. + +Blocks should additionally be limited by the amount of "gas" consumed by the +transactions in the block, though this is not yet implemented. + +## Evidence + +For evidence in a block to be valid, it must satisfy: + +```go +block.Header.Time-evidence.Time < ConsensusParams.Evidence.MaxAgeDuration && + block.Header.Height-evidence.Height < ConsensusParams.Evidence.MaxAgeNumBlocks +``` + +A block must not contain more than `ConsensusParams.Evidence.MaxBytes` of evidence. This is +implemented to mitigate spam attacks. + +## Validator + +Validators from genesis file and `ResponseEndBlock` must have pubkeys of type ∈ +`ConsensusParams.Validator.PubKeyTypes`. 
diff --git a/spec/ivy-proofs/Dockerfile b/spec/ivy-proofs/Dockerfile new file mode 100644 index 0000000000..be60151fd2 --- /dev/null +++ b/spec/ivy-proofs/Dockerfile @@ -0,0 +1,37 @@ +# we need python2 support, which was dropped after buster: +FROM debian:buster + +RUN echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections +RUN apt-get update +RUN apt-get install -y apt-utils + +# Install and configure locale `en_US.UTF-8` +RUN apt-get install -y locales && \ + sed -i -e "s/# $en_US.*/en_US.UTF-8 UTF-8/" /etc/locale.gen && \ + dpkg-reconfigure --frontend=noninteractive locales && \ + update-locale LANG=en_US.UTF-8 +ENV LANG=en_US.UTF-8 + +RUN apt-get update +RUN apt-get install -y git python2 python-pip g++ cmake python-ply python-tk tix pkg-config libssl-dev python-setuptools + +# create a user: +RUN useradd -ms /bin/bash user +USER user +WORKDIR /home/user + +RUN git clone --recurse-submodules https://github.com/kenmcmil/ivy.git +WORKDIR /home/user/ivy/ +RUN git checkout 271ee38980699115508eb90a0dd01deeb750a94b + +RUN python2.7 build_submodules.py +RUN mkdir -p "/home/user/python/lib/python2.7/site-packages" +ENV PYTHONPATH="/home/user/python/lib/python2.7/site-packages" +# need to install pyparsing manually because otherwise wrong version found +RUN pip install pyparsing +RUN python2.7 setup.py install --prefix="/home/user/python/" +ENV PATH=$PATH:"/home/user/python/bin/" +WORKDIR /home/user/tendermint-proof/ + +ENTRYPOINT ["/home/user/tendermint-proof/check_proofs.sh"] + diff --git a/spec/ivy-proofs/README.md b/spec/ivy-proofs/README.md new file mode 100644 index 0000000000..00a4bed259 --- /dev/null +++ b/spec/ivy-proofs/README.md @@ -0,0 +1,33 @@ +# Ivy Proofs + +```copyright +Copyright (c) 2020 Galois, Inc. +SPDX-License-Identifier: Apache-2.0 +``` + +## Contents + +This folder contains: + +* `tendermint.ivy`, a specification of Tendermint algorithm as described in *The latest gossip on BFT consensus* by E. Buchman, J. Kwon, Z. Milosevic. +* `abstract_tendermint.ivy`, a more abstract specification of Tendermint that is more verification-friendly. +* `classic_safety.ivy`, a proof that Tendermint satisfies the classic safety property of BFT consensus: if every two quorums have a well-behaved node in common, then no two well-behaved nodes ever disagree. +* `accountable_safety_1.ivy`, a proof that, assuming every quorum contains at least one well-behaved node, if two well-behaved nodes disagree, then there is evidence demonstrating at least f+1 nodes misbehaved. +* `accountable_safety_2.ivy`, a proof that, regardless of any assumption about quorums, well-behaved nodes cannot be framed by malicious nodes. In other words, malicious nodes can never construct evidence that incriminates a well-behaved node. +* `network_shim.ivy`, the network model and a convenience `shim` object to interface with the Tendermint specification. +* `domain_model.ivy`, a specification of the domain model underlying the Tendermint specification, i.e. rounds, value, quorums, etc. + +All specifications and proofs are written in [Ivy](https://github.com/kenmcmil/ivy). + +The license above applies to all files in this folder. + + +## Building and running + +The easiest way to check the proofs is to use [Docker](https://www.docker.com/). + +1. Install [Docker](https://docs.docker.com/get-docker/) and [Docker Compose](https://docs.docker.com/compose/install/). +2. Build a Docker image: `docker-compose build` +3. Run the proofs inside the Docker container: `docker-compose run +tendermint-proof`. 
This will check all the proofs with the `ivy_check` +command and write the output of `ivy_check` to a subdirectory of `./output/' diff --git a/spec/ivy-proofs/abstract_tendermint.ivy b/spec/ivy-proofs/abstract_tendermint.ivy new file mode 100644 index 0000000000..4a160be2a7 --- /dev/null +++ b/spec/ivy-proofs/abstract_tendermint.ivy @@ -0,0 +1,178 @@ +#lang ivy1.7 +# --- +# layout: page +# title: Abstract specification of Tendermint in Ivy +# --- + +# Here we define an abstract version of the Tendermint specification. We use +# two main forms of abstraction: a) We abstract over how information is +# transmitted (there is no network). b) We abstract functions using relations. +# For example, we abstract over a node's current round, instead only tracking +# with a relation which rounds the node has left. We do something similar for +# the `lockedRound` variable. This is in order to avoid using a function from +# node to round, and it allows us to emit verification conditions that are +# efficiently solvable by Z3. + +# This specification also defines the observations that are used to adjudicate +# misbehavior. Well-behaved nodes faithfully observe every message that they +# use to take a step, while Byzantine nodes can fake observations about +# themselves (including withholding observations). Misbehavior is defined using +# the collection of all observations made (in reality, those observations must +# be collected first, but we do not model this process). + +include domain_model + +module abstract_tendermint = { + +# Protocol state +# ############## + + relation left_round(N:node, R:round) + relation prevoted(N:node, R:round, V:value) + relation precommitted(N:node, R:round, V:value) + relation decided(N:node, R:round, V:value) + relation locked(N:node, R:round, V:value) + +# Accountability relations +# ######################## + + relation observed_prevoted(N:node, R:round, V:value) + relation observed_precommitted(N:node, R:round, V:value) + +# relations that are defined in terms of the previous two: + relation observed_equivocation(N:node) + relation observed_unlawful_prevote(N:node) + relation agreement + relation accountability_violation + + object defs = { # we hide those definitions and use them only when needed + private { + definition [observed_equivocation_def] observed_equivocation(N) = exists V1,V2,R . + V1 ~= V2 & (observed_precommitted(N,R,V1) & observed_precommitted(N,R,V2) | observed_prevoted(N,R,V1) & observed_prevoted(N,R,V2)) + + definition [observed_unlawful_prevote_def] observed_unlawful_prevote(N) = exists V1,V2,R1,R2 . + V1 ~= value.nil & V2 ~= value.nil & V1 ~= V2 & R1 < R2 & observed_precommitted(N,R1,V1) & observed_prevoted(N,R2,V2) + & forall Q,R . R1 <= R & R < R2 & nset.is_quorum(Q) -> exists N2 . nset.member(N2,Q) & ~observed_prevoted(N2,R,V2) + + definition [agreement_def] agreement = forall N1,N2,R1,R2,V1,V2 . well_behaved(N1) & well_behaved(N2) & decided(N1,R1,V1) & decided(N2,R2,V2) -> V1 = V2 + + definition [accountability_violation_def] accountability_violation = exists Q1,Q2 . nset.is_quorum(Q1) & nset.is_quorum(Q2) & (forall N . 
nset.member(N,Q1) & nset.member(N,Q2) -> observed_equivocation(N) | observed_unlawful_prevote(N)) + } + } + +# Protocol transitions +# #################### + + after init { + left_round(N,R) := R < 0; + prevoted(N,R,V) := false; + precommitted(N,R,V) := false; + decided(N,R,V) := false; + locked(N,R,V) := false; + + observed_prevoted(N,R,V) := false; + observed_precommitted(N,R,V) := false; + } + +# Actions are named after the corresponding line numbers in the Tendermint +# arXiv paper. + + action l_11(n:node, r:round) = { # start round r + require ~left_round(n,r); + left_round(n,R) := R < r; + } + + action l_22(n:node, rp:round, v:value) = { + require ~left_round(n,rp); + require ~prevoted(n,rp,V) & ~precommitted(n,rp,V); + require (forall R,V . locked(n,R,V) -> V = v) | v = value.nil; + prevoted(n, rp, v) := true; + left_round(n, R) := R < rp; # leave all lower rounds. + + observed_prevoted(n, rp, v) := observed_prevoted(n, rp, v) | well_behaved(n); # the node observes itself + } + + action l_28(n:node, rp:round, v:value, vr:round, q:nset) = { + require ~left_round(n,rp) & ~prevoted(n,rp,V); + require ~prevoted(n,rp,V) & ~precommitted(n,rp,V); + require vr < rp; + require nset.is_quorum(q) & (forall N . nset.member(N,q) -> (prevoted(N,vr,v) | ~well_behaved(N))); + var proposal:value; + if value.valid(v) & ((forall R0,V0 . locked(n,R0,V0) -> R0 <= vr) | (forall R,V . locked(n,R,V) -> V = v)) { + proposal := v; + } + else { + proposal := value.nil; + }; + prevoted(n, rp, proposal) := true; + left_round(n, R) := R < rp; # leave all lower rounds + + observed_prevoted(N, vr, v) := observed_prevoted(N, vr, v) | (well_behaved(n) & nset.member(N,q)); # the node observes the prevotes of quorum q + observed_prevoted(n, rp, proposal) := observed_prevoted(n, rp, proposal) | well_behaved(n); # the node observes itself + } + + action l_36(n:node, rp:round, v:value, q:nset) = { + require v ~= value.nil; + require ~left_round(n,rp); + require exists V . prevoted(n,rp,V); + require ~precommitted(n,rp,V); + require nset.is_quorum(q) & (forall N . 
nset.member(N,q) -> (prevoted(N,rp,v) | ~well_behaved(N))); + precommitted(n, rp, v) := true; + left_round(n, R) := R < rp; # leave all lower rounds + locked(n,R,V) := R <= rp & V = v; + + observed_prevoted(N, rp, v) := observed_prevoted(N, rp, v) | (well_behaved(n) & nset.member(N,q)); # the node observes the prevotes of quorum q + observed_precommitted(n, rp, v) := observed_precommitted(n, rp, v) | well_behaved(n); # the node observes itself + } + + action l_44(n:node, rp:round, q:nset) = { + require ~left_round(n,rp); + require ~precommitted(n,rp,V); + require nset.is_quorum(q) & (forall N .nset.member(N,q) -> (prevoted(N,rp,value.nil) | ~well_behaved(N))); + precommitted(n, rp, value.nil) := true; + left_round(n, R) := R < rp; # leave all lower rounds + + observed_prevoted(N, rp, value.nil) := observed_prevoted(N, rp, value.nil) | (well_behaved(n) & nset.member(N,q)); # the node observes the prevotes of quorum q + observed_precommitted(n, rp, value.nil) := observed_precommitted(n, rp, value.nil) | well_behaved(n); # the node observes itself + } + + action l_57(n:node, rp:round) = { + require ~left_round(n,rp); + require ~prevoted(n,rp,V); + prevoted(n, rp, value.nil) := true; + left_round(n, R) := R < rp; # leave all lower rounds + + observed_prevoted(n, rp, value.nil) := observed_prevoted(n, rp, value.nil) | well_behaved(n); # the node observes itself + } + + action l_61(n:node, rp:round) = { + require ~left_round(n,rp); + require ~precommitted(n,rp,V); + precommitted(n, rp, value.nil) := true; + left_round(n, R) := R < rp; # leave all lower rounds + + observed_precommitted(n, rp, value.nil) := observed_precommitted(n, rp, value.nil) | well_behaved(n); # the node observes itself + } + + action decide(n:node, r:round, v:value, q:nset) = { + require v ~= value.nil; + require nset.is_quorum(q) & (forall N . nset.member(N, q) -> (precommitted(N, r, v) | ~well_behaved(N))); + decided(n, r, v) := true; + + observed_precommitted(N, r, v) := observed_precommitted(N, r, v) | (well_behaved(n) & nset.member(N,q)); # the node observes the precommits of quorum q + + } + + action misbehave = { +# Byzantine nodes can claim they observed whatever they want about themselves, +# but they cannot remove observations. Note that we use assume because we don't +# want those to be checked; we just want them to be true (that's the model of +# Byzantine behavior). + observed_prevoted(N,R,V) := *; + assume (old observed_prevoted(N,R,V)) -> observed_prevoted(N,R,V); + assume well_behaved(N) -> old observed_prevoted(N,R,V) = observed_prevoted(N,R,V); + observed_precommitted(N,R,V) := *; + assume (old observed_precommitted(N,R,V)) -> observed_precommitted(N,R,V); + assume well_behaved(N) -> old observed_precommitted(N,R,V) = observed_precommitted(N,R,V); + } +} diff --git a/spec/ivy-proofs/accountable_safety_1.ivy b/spec/ivy-proofs/accountable_safety_1.ivy new file mode 100644 index 0000000000..02bdf1add8 --- /dev/null +++ b/spec/ivy-proofs/accountable_safety_1.ivy @@ -0,0 +1,143 @@ +#lang ivy1.7 +# --- +# layout: page +# title: Proof of Classic Safety +# --- + +include tendermint +include abstract_tendermint + +# Here we prove the first accountability property: if two well-behaved nodes +# disagree, then there are two quorums Q1 and Q2 such that all members of the +# intersection of Q1 and Q2 have violated the accountability properties. 
+ +# The proof is done in two steps: first we prove the abstract specification +# satisfies the property, and then we show by refinement that this property +# also holds in the concrete specification. + +# To see what is checked in the refinement proof, use `ivy_show isolate=accountable_safety_1 accountable_safety_1.ivy` +# To see what is checked in the abstract correctness proof, use `ivy_show isolate=abstract_accountable_safety_1 accountable_safety_1.ivy` +# To check the whole proof, use `ivy_check accountable_safety_1.ivy`. + + +# Proof of the accountability property in the abstract specification +# ================================================================== + +# We prove with tactics (see `lemma_1` and `lemma_2`) that, if some basic +# invariants hold (see `invs` below), then the accountability property holds. + +isolate abstract_accountable_safety = { + + instantiate abstract_tendermint + +# The main property +# ----------------- + +# If there is disagreement, then there is evidence that a third of the nodes +# have violated the protocol: + invariant [accountability] agreement | accountability_violation + proof { + apply lemma_1.thm # this reduces to goal to three subgoals: p1, p2, and p3 (see their definition below) + proof [p1] { + assume invs.inv1 + } + proof [p2] { + assume invs.inv2 + } + proof [p3] { + assume invs.inv3 + } + } + +# The invariants +# -------------- + + isolate invs = { + + # well-behaved nodes observe their own actions faithfully: + invariant [inv1] well_behaved(N) -> (observed_precommitted(N,R,V) = precommitted(N,R,V)) + # if a value is precommitted by a well-behaved node, then a quorum is observed to prevote it: + invariant [inv2] (exists N . well_behaved(N) & precommitted(N,R,V)) & V ~= value.nil -> exists Q . nset.is_quorum(Q) & forall N2 . nset.member(N2,Q) -> observed_prevoted(N2,R,V) + # if a value is decided by a well-behaved node, then a quorum is observed to precommit it: + invariant [inv3] (exists N . well_behaved(N) & decided(N,R,V)) -> 0 <= R & V ~= value.nil & exists Q . nset.is_quorum(Q) & forall N2 . nset.member(N2,Q) -> observed_precommitted(N2,R,V) + private { + invariant (precommitted(N,R,V) | prevoted(N,R,V)) -> 0 <= R + invariant R < 0 -> left_round(N,R) + } + + } with this, nset, round, accountable_bft.max_2f_byzantine + +# The theorems proved with tactics +# -------------------------------- + +# Using complete induction on rounds, we prove that, assuming that the +# invariants inv1, inv2, and inv3 hold, the accountability property holds. + +# For technical reasons, we separate the proof in two steps + isolate lemma_1 = { + + specification { + theorem [thm] { + property [p1] forall N,R,V . well_behaved(N) -> (observed_precommitted(N,R,V) = precommitted(N,R,V)) + property [p2] forall R,V . (exists N . well_behaved(N) & precommitted(N,R,V)) & V ~= value.nil -> exists Q . nset.is_quorum(Q) & forall N2 . nset.member(N2,Q) -> observed_prevoted(N2,R,V) + property [p3] forall R,V. (exists N . well_behaved(N) & decided(N,R,V)) -> 0 <= R & V ~= value.nil & exists Q . nset.is_quorum(Q) & forall N2 . nset.member(N2,Q) -> observed_precommitted(N2,R,V) + #------------------------------------------------------------------------------------------------------------------------------------------- + property agreement | accountability_violation + } + proof { + assume inductive_property # the theorem follows from what we prove by induction below + } + } + + implementation { + # complete induction is not built-in, so we introduce it with an axiom. 
Note that this only holds for a type where 0 is the smallest element + axiom [complete_induction] { + relation p(X:round) + { # base case + property p(0) + } + { # inductive step: show that if the property is true for all X lower or equal to x and y=x+1, then the property is true of y + individual a:round + individual b:round + property (forall X. 0 <= X & X <= a -> p(X)) & round.succ(a,b) -> p(b) + } + #-------------------------- + property forall X . 0 <= X -> p(X) + } + + # The main lemma: if inv1 and inv2 below hold and a quorum is observed to + # precommit V1 at R1 and another quorum is observed to precommit V2~=V1 at + # R2>=R1, then the intersection of two quorums (i.e. f+1 nodes) is observed to + # violate the protocol. We prove this by complete induction on R2. + theorem [inductive_property] { + property [p1] forall N,R,V . well_behaved(N) -> (observed_precommitted(N,R,V) = precommitted(N,R,V)) + property [p2] forall R,V . (exists N . well_behaved(N) & precommitted(N,R,V)) -> V = value.nil | exists Q . nset.is_quorum(Q) & forall N2 . nset.member(N2,Q) -> observed_prevoted(N2,R,V) + #----------------------------------------------------------------------------------------------------------------------- + property forall R2. 0 <= R2 -> ((exists V2,Q1,R1,V1,Q1 . V1 ~= value.nil & V2 ~= value.nil & V1 ~= V2 & 0 <= R1 & R1 <= R2 & nset.is_quorum(Q1) & (forall N . nset.member(N,Q1) -> observed_precommitted(N,R1,V1)) & (exists Q2 . nset.is_quorum(Q2) & forall N . nset.member(N,Q2) -> observed_prevoted(N,R2,V2))) -> accountability_violation) + } + proof { + apply complete_induction # the two subgoals (base case and inductive case) are then discharged automatically + # NOTE: this can take a long time depending on the SMT random seed (to try a different seed, use `ivy_check seed=$RANDOM` + } + } + } with this, round, nset, accountable_bft.max_2f_byzantine, defs.observed_equivocation_def, defs.observed_unlawful_prevote_def, defs.accountability_violation_def, defs.agreement_def + +} with round + +# The final proof +# =============== + +isolate accountable_safety_1 = { + +# First we instantiate the concrete protocol: + instantiate tendermint(abstract_accountable_safety) + +# We then define what we mean by agreement + relation agreement + definition [agreement_def] agreement = forall N1,N2. well_behaved(N1) & well_behaved(N2) & server.decision(N1) ~= value.nil & server.decision(N2) ~= value.nil -> server.decision(N1) = server.decision(N2) + + invariant abstract_accountable_safety.agreement -> agreement + + invariant [accountability] agreement | abstract_accountable_safety.accountability_violation + +} with value, round, proposers, shim, abstract_accountable_safety, abstract_accountable_safety.defs.agreement_def, accountable_safety_1.agreement_def diff --git a/spec/ivy-proofs/accountable_safety_2.ivy b/spec/ivy-proofs/accountable_safety_2.ivy new file mode 100644 index 0000000000..7fb928909a --- /dev/null +++ b/spec/ivy-proofs/accountable_safety_2.ivy @@ -0,0 +1,52 @@ +#lang ivy1.7 + +include tendermint +include abstract_tendermint + +# Here we prove the second accountability property: no well-behaved node is +# ever observed to violate the accountability properties. + +# The proof is done in two steps: first we prove the the abstract specification +# satisfies the property, and then we show by refinement that this property +# also holds in the concrete specification. 
+ +# To see what is checked in the refinement proof, use `ivy_show isolate=accountable_safety_2 accountable_safety_2.ivy` +# To see what is checked in the abstract correctness proof, use `ivy_show isolate=abstract_accountable_safety_2 accountable_safety_2.ivy` +# To check the whole proof, use `ivy_check complete=fo accountable_safety_2.ivy`. + +# Proof that the property holds in the abstract specification +# ============================================================ + +isolate abstract_accountable_safety_2 = { + + instantiate abstract_tendermint + +# the main property: + invariant [wb_never_punished] well_behaved(N) -> ~(observed_equivocation(N) | observed_unlawful_prevote(N)) + +# the main invariant for proving wb_not_punished: + invariant well_behaved(N) & precommitted(N,R,V) & ~locked(N,R,V) & V ~= value.nil -> exists R2,V2 . V2 ~= value.nil & R < R2 & precommitted(N,R2,V2) & locked(N,R2,V2) + + invariant (exists N . well_behaved(N) & precommitted(N,R,V) & V ~= value.nil) -> exists Q . nset.is_quorum(Q) & forall N . nset.member(N,Q) -> observed_prevoted(N,R,V) + + invariant well_behaved(N) -> (observed_prevoted(N,R,V) <-> prevoted(N,R,V)) + invariant well_behaved(N) -> (observed_precommitted(N,R,V) <-> precommitted(N,R,V)) + +# nodes stop prevoting or precommitting in lower rounds when doing so in a higher round: + invariant well_behaved(N) & prevoted(N,R2,V2) & R1 < R2 -> left_round(N,R1) + invariant well_behaved(N) & locked(N,R2,V2) & R1 < R2 -> left_round(N,R1) + + invariant [precommit_unique_per_round] well_behaved(N) & precommitted(N,R,V1) & precommitted(N,R,V2) -> V1 = V2 + +} with nset, round, abstract_accountable_safety_2.defs.observed_equivocation_def, abstract_accountable_safety_2.defs.observed_unlawful_prevote_def + +# Proof that the property holds in the concrete specification +# =========================================================== + +isolate accountable_safety_2 = { + + instantiate tendermint(abstract_accountable_safety_2) + + invariant well_behaved(N) -> ~(abstract_accountable_safety_2.observed_equivocation(N) | abstract_accountable_safety_2.observed_unlawful_prevote(N)) + +} with round, value, shim, abstract_accountable_safety_2, abstract_accountable_safety_2.defs.observed_equivocation_def, abstract_accountable_safety_2.defs.observed_unlawful_prevote_def diff --git a/spec/ivy-proofs/check_proofs.sh b/spec/ivy-proofs/check_proofs.sh new file mode 100755 index 0000000000..6afd1a962d --- /dev/null +++ b/spec/ivy-proofs/check_proofs.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +# returns non-zero error code if any proof fails + +success=0 +log_dir=$(cat /dev/urandom | tr -cd 'a-f0-9' | head -c 6) +cmd="ivy_check seed=$RANDOM" +mkdir -p output/$log_dir + +echo "Checking classic safety:" +res=$($cmd classic_safety.ivy | tee "output/$log_dir/classic_safety.txt" | tail -n 1) +if [ "$res" = "OK" ]; then + echo "OK" +else + echo "FAILED" + success=1 +fi + +echo "Checking accountable safety 1:" +res=$($cmd accountable_safety_1.ivy | tee "output/$log_dir/accountable_safety_1.txt" | tail -n 1) +if [ "$res" = "OK" ]; then + echo "OK" +else + echo "FAILED" + success=1 +fi + +echo "Checking accountable safety 2:" +res=$($cmd complete=fo accountable_safety_2.ivy | tee "output/$log_dir/accountable_safety_2.txt" | tail -n 1) +if [ "$res" = "OK" ]; then + echo "OK" +else + echo "FAILED" + success=1 +fi + +echo +echo "See ivy_check output in the output/ folder" +exit $success diff --git a/spec/ivy-proofs/classic_safety.ivy b/spec/ivy-proofs/classic_safety.ivy new file mode 100644 index 
0000000000..b422a2c175 --- /dev/null +++ b/spec/ivy-proofs/classic_safety.ivy @@ -0,0 +1,85 @@ +#lang ivy1.7 +# --- +# layout: page +# title: Proof of Classic Safety +# --- + +include tendermint +include abstract_tendermint + +# Here we prove the classic safety property: assuming that every two quorums +# have a well-behaved node in common, no two well-behaved nodes ever disagree. + +# The proof is done in two steps: first we prove the the abstract specification +# satisfies the property, and then we show by refinement that this property +# also holds in the concrete specification. + +# To see what is checked in the refinement proof, use `ivy_show isolate=classic_safety classic_safety.ivy` +# To see what is checked in the abstract correctness proof, use `ivy_show isolate=abstract_classic_safety classic_safety.ivy` + +# To check the whole proof, use `ivy_check classic_safety.ivy`. + +# Note that all the verification conditions sent to Z3 for this proof are in +# EPR. + +# Classic safety in the abstract model +# ==================================== + +# We start by proving that classic safety holds in the abstract model. + +isolate abstract_classic_safety = { + + instantiate abstract_tendermint + + invariant [classic_safety] classic_bft.quorum_intersection & decided(N1,R1,V1) & decided(N2,R2,V2) -> V1 = V2 + +# The notion of choosable value +# ----------------------------- + + relation choosable(R:round, V:value) + definition choosable(R,V) = exists Q . nset.is_quorum(Q) & forall N . well_behaved(N) & nset.member(N,Q) -> ~left_round(N,R) | precommitted(N,R,V) + +# Main invariants +# --------------- + +# `classic_safety` is inductive relative to those invariants + + invariant [decision_is_quorum_precommit] (exists N1 . decided(N1,R,V)) -> exists Q. nset.is_quorum(Q) & forall N2. well_behaved(N2) & nset.member(N2, Q) -> precommitted(N2,R,V) + + invariant [precommitted_is_quorum_prevote] V ~= value.nil & (exists N1 . precommitted(N1,R,V)) -> exists Q. nset.is_quorum(Q) & forall N2. well_behaved(N2) & nset.member(N2, Q) -> prevoted(N2,R,V) + + invariant [prevote_unique_per_round] prevoted(N,R,V1) & prevoted(N,R,V2) -> V1 = V2 + +# This is the core invariant: as long as a precommitted value is still choosable, it remains protected by a lock and prevents any new value from being prevoted: + invariant [locks] classic_bft.quorum_intersection & V ~= value.nil & precommitted(N,R,V) & choosable(R,V) -> locked(N,R,V) & forall R2,V2 . R < R2 & prevoted(N,R2,V2) -> V2 = V | V2 = value.nil + +# Supporting invariants +# --------------------- + +# The main invariants are inductive relative to those + + invariant decided(N,R,V) -> V ~= value.nil + + invariant left_round(N,R2) & R1 < R2 -> left_round(N,R1) # if a node left round R2>R1, then it also left R1: + + invariant prevoted(N,R2,V2) & R1 < R2 -> left_round(N,R1) + invariant precommitted(N,R2,V2) & R1 < R2 -> left_round(N,R1) + +} with round, nset, classic_bft.quorum_intersection_def + +# The refinement proof +# ==================== + +# Now, thanks to the refinement relation that we establish in +# `concrete_tendermint.ivy`, we prove that classic safety transfers to the +# concrete specification: +isolate classic_safety = { + + # We instantiate the `tendermint` module providing `abstract_classic_safety` as abstract model. 
+ instantiate tendermint(abstract_classic_safety) + + # We prove that if every two quorums have a well-behaved node in common, + # then well-behaved nodes never disagree: + invariant [classic_safety] classic_bft.quorum_intersection & server.decision(N1) ~= value.nil & server.decision(N2) ~= value.nil -> server.decision(N1) = server.decision(N2) + +} with value, round, proposers, shim, abstract_classic_safety # here we list all the specifications that we rely on for this proof diff --git a/spec/ivy-proofs/count_lines.sh b/spec/ivy-proofs/count_lines.sh new file mode 100755 index 0000000000..b2c457e21a --- /dev/null +++ b/spec/ivy-proofs/count_lines.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +r='^\s*$\|^\s*\#\|^\s*\}\s*$\|^\s*{\s*$' # removes comments and blank lines and lines that contain only { or } +N1=`cat tendermint.ivy domain_model.ivy network_shim.ivy | grep -v $r'\|.*invariant.*' | wc -l` +N2=`cat abstract_tendermint.ivy | grep "observed_" | wc -l` # the observed_* variables specify the observations of the nodes +SPEC_LINES=`expr $N1 + $N2` +echo "spec lines: $SPEC_LINES" +N3=`cat abstract_tendermint.ivy | grep -v $r'\|.*observed_.*' | wc -l` +N4=`cat accountable_safety_1.ivy | grep -v $r | wc -l` +PROOF_LINES=`expr $N3 + $N4` +echo "proof lines: $PROOF_LINES" +RATIO=`bc <<< "scale=2;$PROOF_LINES / $SPEC_LINES"` +echo "proof-to-code ratio for the accountable-safety property: $RATIO" diff --git a/spec/ivy-proofs/docker-compose.yml b/spec/ivy-proofs/docker-compose.yml new file mode 100644 index 0000000000..e0612d4b1d --- /dev/null +++ b/spec/ivy-proofs/docker-compose.yml @@ -0,0 +1,7 @@ +version: '3' +services: + tendermint-proof: + build: . + volumes: + - ./:/home/user/tendermint-proof:ro + - ./output:/home/user/tendermint-proof/output:rw diff --git a/spec/ivy-proofs/domain_model.ivy b/spec/ivy-proofs/domain_model.ivy new file mode 100644 index 0000000000..0f12f7288a --- /dev/null +++ b/spec/ivy-proofs/domain_model.ivy @@ -0,0 +1,143 @@ +#lang ivy1.7 + +include order # this is a file from the standard library (`ivy/ivy/include/1.7/order.ivy`) + +isolate round = { + type this + individual minus_one:this + relation succ(R1:round, R2:round) + action incr(i:this) returns (j:this) + specification { +# to simplify verification, we treat rounds as an abstract totally ordered set with a successor relation. + instantiate totally_ordered(this) + property minus_one < 0 + property succ(X,Z) -> (X < Z & ~(X < Y & Y < Z)) + after incr { + ensure succ(i,j) + } + } + implementation { +# here we prove that the abstraction is sound. + interpret this -> int # rounds are integers in the Tendermint specification. + definition minus_one = 0-1 + definition succ(R1,R2) = R2 = R1 + 1 + implement incr { + j := i+1; + } + } +} + +instance node : iterable # nodes are a set with an order, that can be iterated over (see order.ivy in the standard library) + +relation well_behaved(N:node) # whether a node is well-behaved or not. NOTE: Used only in the proof and the Byzantine model; Nodes do know know who is well-behaved and who is not. + +isolate proposers = { + # each round has a unique proposer in Tendermint. In order to avoid a + # function from round to node (which makes verification more difficult), we + # abstract over this function using a relation. 
+ relation is_proposer(N:node, R:round) + export action get_proposer(r:round) returns (n:node) + specification { + property is_proposer(N1,R) & is_proposer(N2,R) -> N1 = N2 + after get_proposer { + ensure is_proposer(n,r); + } + } + implementation { + function f(R:round):node + definition f(r:round) = <<>> + definition is_proposer(N,R) = N = f(R) + implement get_proposer { + n := f(r); + } + } +} + +isolate value = { # the type of values + type this + relation valid(V:value) + individual nil:value + specification { + property ~valid(nil) + } + implementation { + interpret value -> bv[2] + definition nil = <<< -1 >>> # let's say nil is -1 + definition valid(V) = V ~= nil + } +} + +object nset = { # the type of node sets + type this # a set of N=3f+i nodes for 0 + #include + namespace hash_space { + template + class hash > { + public: + size_t operator()(const std::set &s) const { + hash h; + size_t res = 0; + for (const T &e : s) + res += h(e); + return res; + } + }; + } + >>> + interpret nset -> <<< std::set<`node`> >>> + definition member(n:node, s:nset) = <<< `s`.find(`n`) != `s`.end() >>> + definition is_quorum(s:nset) = <<< 3*`s`.size() > 2*`node.size` >>> + definition is_blocking(s:nset) = <<< 3*`s`.size() > `node.size` >>> + implement empty { + <<< + >>> + } + implement insert { + <<< + `t` = `s`; + `t`.insert(`n`); + >>> + } + <<< encode `nset` + + std::ostream &operator <<(std::ostream &s, const `nset` &a) { + s << "{"; + for (auto iter = a.begin(); iter != a.end(); iter++) { + if (iter != a.begin()) s << ", "; + s << *iter; + } + s << "}"; + return s; + } + + template <> + `nset` _arg<`nset`>(std::vector &args, unsigned idx, long long bound) { + throw std::invalid_argument("Not implemented"); // no syntax for nset values in the REPL + } + + >>> + } +} + +object classic_bft = { + relation quorum_intersection + private { + definition [quorum_intersection_def] quorum_intersection = forall Q1,Q2. exists N. well_behaved(N) & nset.member(N, Q1) & nset.member(N, Q2) # every two quorums have a well-behaved node in common + } +} + +trusted isolate accountable_bft = { + # this is our baseline assumption about quorums: + private { + property [max_2f_byzantine] exists N . well_behaved(N) & nset.member(N,Q) # every quorum has a well-behaved member + } +} diff --git a/spec/ivy-proofs/network_shim.ivy b/spec/ivy-proofs/network_shim.ivy new file mode 100644 index 0000000000..ebc3a04fce --- /dev/null +++ b/spec/ivy-proofs/network_shim.ivy @@ -0,0 +1,133 @@ +#lang ivy1.7 +# --- +# layout: page +# title: Network model and network shim +# --- + +# Here we define a network module, which is our model of the network, and a +# shim module that sits on top of the network and which, upon receiving a +# message, calls the appropriate protocol handler. + +include domain_model + +# Here we define an enumeration type for identifying the 3 different types of +# messages that nodes send. +object msg_kind = { # TODO: merge with step_t + type this = {proposal, prevote, precommit} +} + +# Here we define the type of messages `msg`. Its members are structs with the fields described below. +object msg = { + type this = struct { + m_kind : msg_kind, + m_src : node, + m_round : round, + m_value : value, + m_vround : round + } +} + +# This is our model of the network: +isolate net = { + + export action recv(dst:node,v:msg) + action send(src:node,dst:node,v:msg) + # Note that the `recv` action is exported, meaning that it can be called + # non-deterministically by the environment any time it is enabled. 
In other + # words, a packet that is in flight can be received at any time. In this + # sense, the network is fully asynchronous. Moreover, there is no + # requirement that a given message will be received at all. + + # The state of the network consists of all the packets that have been + # sent so far, along with their destination. + relation sent(V:msg, N:node) + + after init { + sent(V, N) := false + } + + before send { + sent(v,dst) := true + } + + before recv { + require sent(v,dst) # only sent messages can be received. + } +} + +# The network shim sits on top of the network and, upon receiving a message, +# calls the appropriate protocol handler. It also exposes a `broadcast` action +# that sends to all nodes. + +isolate shim = { + + # In order not repeat the same code for each handler, we use a handler + # module parameterized by the type of message it will handle. Below we + # instantiate this module for the 3 types of messages of Tendermint + module handler(p_kind) = { + action handle(dst:node,m:msg) + object spec = { + before handle { + assert sent(m,dst) & m.m_kind = p_kind + } + } + } + + instance proposal_handler : handler(msg_kind.proposal) + instance prevote_handler : handler(msg_kind.prevote) + instance precommit_handler : handler(msg_kind.precommit) + + relation sent(M:msg,N:node) + + action broadcast(src:node,m:msg) + action send(src:node,dst:node,m:msg) + + specification { + after init { + sent(M,D) := false; + } + before broadcast { + sent(m,D) := true + } + before send { + sent(m,dst) := true + } + } + + # Here we give an implementation of it that satisfies its specification: + implementation { + + implement net.recv(dst:node,m:msg) { + + if m.m_kind = msg_kind.proposal { + call proposal_handler.handle(dst,m) + } + else if m.m_kind = msg_kind.prevote { + call prevote_handler.handle(dst,m) + } + else if m.m_kind = msg_kind.precommit { + call precommit_handler.handle(dst,m) + } + } + + implement broadcast { # broadcast sends to all nodes, including the sender. + var iter := node.iter.create(0); + while ~iter.is_end + invariant net.sent(M,D) -> sent(M,D) + { + var n := iter.val; + call net.send(src,n,m); + iter := iter.next; + } + } + + implement send { + call net.send(src,dst,m) + } + + private { + invariant net.sent(M,D) -> sent(M,D) + } + } + +} with net, node # to prove that the shim implementation satisfies the shim specification, we rely on the specification of net and node. diff --git a/spec/ivy-proofs/output/.gitignore b/spec/ivy-proofs/output/.gitignore new file mode 100644 index 0000000000..5e7d2734cf --- /dev/null +++ b/spec/ivy-proofs/output/.gitignore @@ -0,0 +1,4 @@ +# Ignore everything in this directory +* +# Except this file +!.gitignore diff --git a/spec/ivy-proofs/tendermint.ivy b/spec/ivy-proofs/tendermint.ivy new file mode 100644 index 0000000000..b7678bef98 --- /dev/null +++ b/spec/ivy-proofs/tendermint.ivy @@ -0,0 +1,420 @@ +#lang ivy1.7 +# --- +# layout: page +# title: Specification of Tendermint in Ivy +# --- + +# This specification closely follows the pseudo-code given in "The latest +# gossip on BFT consensus" by E. Buchman, J. Kwon, Z. Milosevic +# + +include domain_model +include network_shim + +# We model the Tendermint protocol as an Ivy object. Like in Object-Oriented +# Programming, the basic structuring unit in Ivy is the object. Objects have +# internal state and actions (i.e. methods in OO parlance) that modify their +# state. We model Tendermint as an object whose actions represent steps taken +# by individual nodes in the protocol. 
Actions in Ivy can have preconditions, +# and a valid execution is a sequence of actions whose preconditions are all +# satisfied in the state in which they are called. + +# For technical reasons, we define below a `tendermint` module instead of an +# object. Ivy modules are a little bit like classes in OO programs, and like +# classes they can be instantiated to obtain objects. To instantiate the +# `tendermint` module, we must provide an abstract-protocol object. This allows +# us to use different abstract-protocol objects for different parts of the +# proof, and to do so without too much notational burden (we could have used +# Ivy monitors, but then we would need to prefix every variable name by the +# name of the object containing it, which clutters things a bit compared to the +# approach we took). + +# The abstract-protocol object is called by the resulting tendermint object so +# as to run the abstract protocol alongside the concrete protocol. This allows +# us to transfer properties proved of the abstract protocol to the concrete +# protocol, as follows. First, we prove that running the abstract protocol in +# this way results in a valid execution of the abstract protocol. This is done +# by checking that all preconditions of the abstract actions are satisfied at +# their call sites. Second, we establish a relation between abstract state and +# concrete state (in the form of invariants of the resulting, two-object +# transition system) that allow us to transfer properties proved in the +# abstract protocol to the concrete protocol (for example, we prove that any +# decision made in the Tendermint protocol is also made in the abstract +# protocol; if the abstract protocol satisfies the agreement property, this +# allows us to conclude that the Tendermint protocol also does). + +# The abstract protocol object that we will use is always the same, and only +# the abstract properties that we prove about it change in the different +# instantiations of the `tendermint` module. Thus we provide common invariants +# that a) allow to prove that the abstract preconditions are met, and b) +# provide a refinement relation (see end of the module) relating the state of +# Tendermint to the state of the abstract protocol. + +# In the model, Byzantine nodes can send whatever messages they want, except +# that they cannot forge sender identities. This reflects the fact that, in +# practice, nodes use public key cryptography to sign their messages. + +# Finally, note that the observations that serve to adjudicate misbehavior are +# defined only in the abstract protocol (they happen in the abstract actions). + +module tendermint(abstract_protocol) = { + + # the initial value of a node: + function init_val(N:node): value + + # the three type of steps + object step_t = { + type this = {propose, prevote, precommit} + } # refer to those e.g. 
as step_t.propose + + object server(n:node) = { + + # the current round of a node + individual round_p: round + + individual step: step_t + + individual decision: value + + individual lockedValue: value + individual lockedRound: round + + individual validValue: value + individual validRound: round + + + relation done_l34(R:round) + relation done_l36(R:round, V:value) + relation done_l47(R:round) + + # variables for scheduling request + relation propose_timer_scheduled(R:round) + relation prevote_timer_scheduled(R:round) + relation precommit_timer_scheduled(R:round) + + relation _recved_proposal(Sender:node, R:round, V:value, VR:round) + relation _recved_prevote(Sender:node, R:round, V:value) + relation _recved_precommit(Sender:node, R:round, V:value) + + relation _has_started + + after init { + round_p := 0; + step := step_t.propose; + decision := value.nil; + + lockedValue := value.nil; + lockedRound := round.minus_one; + + validValue := value.nil; + validRound := round.minus_one; + + done_l34(R) := false; + done_l36(R, V) := false; + done_l47(R) := false; + + propose_timer_scheduled(R) := false; + prevote_timer_scheduled(R) := false; + precommit_timer_scheduled(R) := false; + + _recved_proposal(Sender, R, V, VR) := false; + _recved_prevote(Sender, R, V) := false; + _recved_precommit(Sender, R, V) := false; + + _has_started := false; + } + + action getValue returns (v:value) = { + v := init_val(n) + } + + export action start = { + require ~_has_started; + _has_started := true; + # line 10 + call startRound(0); + } + + # line 11-21 + action startRound(r:round) = { + # line 12 + round_p := r; + + # line 13 + step := step_t.propose; + + var proposal : value; + + # line 14 + if (proposers.get_proposer(r) = n) { + if validValue ~= value.nil { # line 15 + proposal := validValue; # line 16 + } else { + proposal := getValue(); # line 18 + }; + call broadcast_proposal(r, proposal, validRound); # line 19 + } else { + propose_timer_scheduled(r) := true; # line 21 + }; + + call abstract_protocol.l_11(n, r); + } + + # This action, as not exported, can only be called at specific call sites. 
+ action broadcast_proposal(r:round, v:value, vr:round) = { + var m: msg; + m.m_kind := msg_kind.proposal; + m.m_src := n; + m.m_round := r; + m.m_value := v; + m.m_vround := vr; + call shim.broadcast(n,m); + } + + implement shim.proposal_handler.handle(msg:msg) { + _recved_proposal(msg.m_src, msg.m_round, msg.m_value, msg.m_vround) := true; + } + + # line 22-27 + export action l_22(v:value) = { + require _has_started; + require _recved_proposal(proposers.get_proposer(round_p), round_p, v, round.minus_one); + require step = step_t.propose; + + if (value.valid(v) & (lockedRound = round.minus_one | lockedValue = v)) { + call broadcast_prevote(round_p, v); # line 24 + call abstract_protocol.l_22(n, round_p, v); + } else { + call broadcast_prevote(round_p, value.nil); # line 26 + call abstract_protocol.l_22(n, round_p, value.nil); + }; + + # line 27 + step := step_t.prevote; + } + + # line 28-33 + export action l_28(r:round, v:value, vr:round, q:nset) = { + require _has_started; + require r = round_p; + require _recved_proposal(proposers.get_proposer(r), r, v, vr); + require nset.is_quorum(q); + require nset.member(N,q) -> _recved_prevote(N,vr,v); + require step = step_t.propose; + require vr >= 0 & vr < r; + + # line 29 + if (value.valid(v) & (lockedRound <= vr | lockedValue = v)) { + call broadcast_prevote(r, v); + } else { + call broadcast_prevote(r, value.nil); + }; + + call abstract_protocol.l_28(n,r,v,vr,q); + step := step_t.prevote; + } + + action broadcast_prevote(r:round, v:value) = { + var m: msg; + m.m_kind := msg_kind.prevote; + m.m_src := n; + m.m_round := r; + m.m_value := v; + call shim.broadcast(n,m); + } + + implement shim.prevote_handler.handle(msg:msg) { + _recved_prevote(msg.m_src, msg.m_round, msg.m_value) := true; + } + + # line 34-35 + export action l_34(r:round, q:nset) = { + require _has_started; + require round_p = r; + require nset.is_quorum(q); + require exists V . nset.member(N,q) -> _recved_prevote(N,r,V); + require step = step_t.prevote; + require ~done_l34(r); + done_l34(r) := true; + + prevote_timer_scheduled(r) := true; + } + + + # line 36-43 + export action l_36(r:round, v:value, q:nset) = { + require _has_started; + require r = round_p; + require exists VR . 
round.minus_one <= VR & VR < r & _recved_proposal(proposers.get_proposer(r), r, v, VR); + require nset.is_quorum(q); + require nset.member(N,q) -> _recved_prevote(N,r,v); + require value.valid(v); + require step = step_t.prevote | step = step_t.precommit; + + require ~done_l36(r,v); + done_l36(r, v) := true; + + if step = step_t.prevote { + lockedValue := v; # line 38 + lockedRound := r; # line 39 + call broadcast_precommit(r, v); # line 40 + step := step_t.precommit; # line 41 + call abstract_protocol.l_36(n, r, v, q); + }; + + validValue := v; # line 42 + validRound := r; # line 43 + } + + # line 44-46 + export action l_44(r:round, q:nset) = { + require _has_started; + require r = round_p; + require nset.is_quorum(q); + require nset.member(N,q) -> _recved_prevote(N,r,value.nil); + require step = step_t.prevote; + + call broadcast_precommit(r, value.nil); # line 45 + step := step_t.precommit; # line 46 + + call abstract_protocol.l_44(n, r, q); + } + + action broadcast_precommit(r:round, v:value) = { + var m: msg; + m.m_kind := msg_kind.precommit; + m.m_src := n; + m.m_round := r; + m.m_value := v; + call shim.broadcast(n,m); + } + + implement shim.precommit_handler.handle(msg:msg) { + _recved_precommit(msg.m_src, msg.m_round, msg.m_value) := true; + } + + + # line 47-48 + export action l_47(r:round, q:nset) = { + require _has_started; + require round_p = r; + require nset.is_quorum(q); + require nset.member(N,q) -> exists V . _recved_precommit(N,r,V); + require ~done_l47(r); + done_l47(r) := true; + + precommit_timer_scheduled(r) := true; + } + + + # line 49-54 + export action l_49_decide(r:round, v:value, q:nset) = { + require _has_started; + require exists VR . round.minus_one <= VR & VR < r & _recved_proposal(proposers.get_proposer(r), r, v, VR); + require nset.is_quorum(q); + require nset.member(N,q) -> _recved_precommit(N,r,v); + require decision = value.nil; + + if value.valid(v) { + decision := v; + # MORE for next height + call abstract_protocol.decide(n, r, v, q); + } + } + + # line 55-56 + export action l_55(r:round, b:nset) = { + require _has_started; + require nset.is_blocking(b); + require nset.member(N,b) -> exists VR . round.minus_one <= VR & VR < r & exists V . _recved_proposal(N,r,V,VR) | _recved_prevote(N,r,V) | _recved_precommit(N,r,V); + require r > round_p; + call startRound(r); # line 56 + } + + # line 57-60 + export action onTimeoutPropose(r:round) = { + require _has_started; + require propose_timer_scheduled(r); + require r = round_p; + require step = step_t.propose; + call broadcast_prevote(r,value.nil); + step := step_t.prevote; + + call abstract_protocol.l_57(n,r); + + propose_timer_scheduled(r) := false; + } + + # line 61-64 + export action onTimeoutPrevote(r:round) = { + require _has_started; + require prevote_timer_scheduled(r); + require r = round_p; + require step = step_t.prevote; + call broadcast_precommit(r,value.nil); + step := step_t.precommit; + + call abstract_protocol.l_61(n,r); + + prevote_timer_scheduled(r) := false; + } + + # line 65-67 + export action onTimeoutPrecommit(r:round) = { + require _has_started; + require precommit_timer_scheduled(r); + require r = round_p; + call startRound(round.incr(r)); + + precommit_timer_scheduled(r) := false; + } + +# The Byzantine actions +# --------------------- + +# Byzantine nodes can send whatever they want, but they cannot send +# messages on behalf of well-behaved nodes. In practice this is implemented +# using cryptography (e.g. public-key cryptography). 
+ + export action byzantine_send(m:msg, dst:node) = { + require ~well_behaved(n); + require ~well_behaved(m.m_src); # cannot forge the identity of well-behaved nodes + call shim.send(n,dst,m); + } + +# Byzantine nodes can also report fake observations, as defined in the abstract protocol. + export action fake_observations = { + call abstract_protocol.misbehave + } + +# Invariants +# ---------- + +# We provide common invariants that a) allow to prove that the abstract +# preconditions are met, and b) provide a refinement relation. + + + specification { + + invariant 0 <= round_p + invariant abstract_protocol.left_round(n,R) <-> R < round_p + + invariant lockedRound ~= round.minus_one -> forall R,V . abstract_protocol.locked(n,R,V) <-> R <= lockedRound & lockedValue = V + invariant lockedRound = round.minus_one -> forall R,V . ~abstract_protocol.locked(n,R,V) + + invariant forall M:msg . well_behaved(M.m_src) & M.m_kind = msg_kind.prevote & shim.sent(M,N) -> abstract_protocol.prevoted(M.m_src,M.m_round,M.m_value) + invariant well_behaved(N) & _recved_prevote(N,R,V) -> abstract_protocol.prevoted(N,R,V) + invariant forall M:msg . well_behaved(M.m_src) & M.m_kind = msg_kind.precommit & shim.sent(M,N) -> abstract_protocol.precommitted(M.m_src,M.m_round,M.m_value) + invariant well_behaved(N) & _recved_precommit(N,R,V) -> abstract_protocol.precommitted(N,R,V) + + invariant (step = step_t.prevote | step = step_t.propose) -> ~abstract_protocol.precommitted(n,round_p,V) + invariant step = step_t.propose -> ~abstract_protocol.prevoted(n,round_p,V) + invariant step = step_t.prevote -> exists V . abstract_protocol.prevoted(n,round_p,V) + + invariant round_p < R -> ~(abstract_protocol.prevoted(n,R,V) | abstract_protocol.precommitted(n,R,V)) + invariant ~_has_started -> step = step_t.propose & ~(abstract_protocol.prevoted(n,R,V) | abstract_protocol.precommitted(n,R,V)) & round_p = 0 + + invariant decision ~= value.nil -> exists R . abstract_protocol.decided(n,R,decision) + } + } +} diff --git a/spec/ivy-proofs/tendermint_test.ivy b/spec/ivy-proofs/tendermint_test.ivy new file mode 100644 index 0000000000..1299fc086d --- /dev/null +++ b/spec/ivy-proofs/tendermint_test.ivy @@ -0,0 +1,127 @@ +#lang ivy1.7 + +include tendermint +include abstract_tendermint + +isolate ghost_ = { + instantiate abstract_tendermint +} + +isolate protocol = { + instantiate tendermint(ghost_) # here we instantiate the parameter of the tendermint module with `ghost_`; however note that we don't extract any code for `ghost_` (it's not in the list of object in the extract, and it's thus sliced away). + implementation { + definition init_val(n:node) = <<< `n`%2 >>> + } + # attribute test = impl +} with ghost_, shim, value, round, proposers + +# Here we run a simple scenario that exhibits an execution in which nodes make +# a decision. We do this to rule out trivial modeling errors. + +# One option to check that this scenario is valid is to run it in Ivy's REPL. +# For this, first compile the scenario: +#```ivyc target=repl isolate=code trace=true tendermint_test.ivy +# Then, run the produced binary (e.g. for 4 nodes): +#``` ./tendermint_test 4 +# Finally, call the action: +#``` scenarios.scenario_1 +# Note that Ivy will check at runtime that all action preconditions are +# satisfied. For example, runing the scenario twice will cause a violation of +# the precondition of the `start` action, because a node cannot start twice +# (see `require ~_has_started` in action `start`). 
+ +# Another possibility would be to run `ivy_check` on the scenario, but that +# does not seem to work at the moment. + +isolate scenarios = { + individual all:nset # will be used as parameter to actions requiring a quorum + + after init { + var iter := node.iter.create(0); + while ~iter.is_end + { + all := all.insert(iter.val); + iter := iter.next; + }; + assert nset.is_quorum(all); # we can also use asserts to make sure we are getting what we expect + } + + export action scenario_1 = { + # all nodes start: + var iter := node.iter.create(0); + while ~iter.is_end + { + call protocol.server.start(iter.val); + iter := iter.next; + }; + # all nodes receive the leader's proposal: + var m:msg; + m.m_kind := msg_kind.proposal; + m.m_src := 0; + m.m_round := 0; + m.m_value := 0; + m.m_vround := round.minus_one; + iter := node.iter.create(0); + while ~iter.is_end + { + call net.recv(iter.val,m); + iter := iter.next; + }; + # all nodes prevote: + iter := node.iter.create(0); + while ~iter.is_end + { + call protocol.server.l_22(iter.val,0); + iter := iter.next; + }; + # all nodes receive each other's prevote messages; + m.m_kind := msg_kind.prevote; + m.m_vround := 0; + iter := node.iter.create(0); + while ~iter.is_end + { + var iter2 := node.iter.create(0); # the sender + while ~iter2.is_end + { + m.m_src := iter2.val; + call net.recv(iter.val,m); + iter2 := iter2.next; + }; + iter := iter.next; + }; + # all nodes precommit: + iter := node.iter.create(0); + while ~iter.is_end + { + call protocol.server.l_36(iter.val,0,0,all); + iter := iter.next; + }; + # all nodes receive each other's pre-commits + m.m_kind := msg_kind.precommit; + iter := node.iter.create(0); + while ~iter.is_end + { + var iter2 := node.iter.create(0); # the sender + while ~iter2.is_end + { + m.m_src := iter2.val; + call net.recv(iter.val,m); + iter2 := iter2.next; + }; + iter := iter.next; + }; + # now all nodes can decide: + iter := node.iter.create(0); + while ~iter.is_end + { + call protocol.server.l_49_decide(iter.val,0,0,all); + iter := iter.next; + }; + } + + # TODO: add more scenarios + +} with round, node, proposers, value, nset, protocol, shim, net + +# extract code = protocol, shim, round, node +extract code = round, node, proposers, value, nset, protocol, shim, net, scenarios diff --git a/spec/light-client/README.md b/spec/light-client/README.md new file mode 100644 index 0000000000..2cf888a9de --- /dev/null +++ b/spec/light-client/README.md @@ -0,0 +1,206 @@ +--- +order: 1 +parent: + title: Light Client + order: 5 +--- + + +# Light Client Specification + +This directory contains work-in-progress English and TLA+ specifications for the Light Client +protocol. Implementations of the light client can be found in +[Rust](https://github.com/informalsystems/tendermint-rs/tree/master/light-client) and +[Go](https://github.com/tendermint/tendermint/tree/master/light). + +Light clients are assumed to be initialized once from a trusted source +with a trusted header and validator set. The light client +protocol allows a client to then securely update its trusted state by requesting and +verifying a minimal set of data from a network of full nodes (at least one of which is correct). + +The light client is decomposed into two main components: + +- [Commit Verification](#Commit-Verification) - verify signed headers and associated validator + set changes from a single full node, called primary +- [Attack Detection](#Attack-Detection) - verify commits across multiple full nodes (called secondaries) and detect conflicts (ie. 
the existence of a lightclient attack) + +In case a lightclient attack is detected, the lightclient submits evidence to a full node which is responsible for "accountability", that is, punishing attackers: + +- [Accountability](#Accountability) - given evidence for an attack, compute a set of validators that are responsible for it. + +## Commit Verification + +The [English specification](verification/verification_001_published.md) describes the light client +commit verification problem in terms of the temporal properties +[LCV-DIST-SAFE.1](https://github.com/informalsystems/tendermint-rs/blob/master/docs/spec/lightclient/verification/verification_001_published.md#lcv-dist-safe1) and +[LCV-DIST-LIVE.1](https://github.com/informalsystems/tendermint-rs/blob/master/docs/spec/lightclient/verification/verification_001_published.md#lcv-dist-live1). +Commit verification is assumed to operate within the Tendermint Failure Model, where +2/3 of validators are correct for some time period and +validator sets can change arbitrarily at each height. + +A light client protocol is also provided, including all checks that +need to be performed on headers, commits, and validator sets +to satisfy the temporal properties - so a light client can continuously +synchronize with a blockchain. Clients can skip possibly +many intermediate headers by exploiting overlap in trusted and untrusted validator sets. +When there is not enough overlap, a bisection routine can be used to find a +minimal set of headers that do provide the required overlap. + +The [TLA+ specification ver. 001](verification/Lightclient_A_1.tla) +is a formal description of the +commit verification protocol executed by a client, including the safety and +termination, which can be model checked with Apalache. + +A more detailed TLA+ specification of +[Light client verification ver. 003](verification/Lightclient_003_draft.tla) +is currently under peer review. + +The `MC*.tla` files contain concrete parameters for the +[TLA+ specification](verification/Lightclient_A_1.tla), in order to do model checking. +For instance, [MC4_3_faulty.tla](verification/MC4_3_faulty.tla) contains the following parameters +for the nodes, heights, the trusting period, the clock drifts, +correctness of the primary node, and the ratio of the faulty processes: + +```tla +AllNodes == {"n1", "n2", "n3", "n4"} +TRUSTED_HEIGHT == 1 +TARGET_HEIGHT == 3 +TRUSTING_PERIOD == 1400 \* the trusting period in some time units +CLOCK_DRIFT = 10 \* how much we assume the local clock is drifting +REAL_CLOCK_DRIFT = 3 \* how much the local clock is actually drifting +IS_PRIMARY_CORRECT == FALSE +FAULTY_RATIO == <<1, 3>> \* < 1 / 3 faulty validators +``` + +To run a complete set of experiments, clone [apalache](https://github.com/informalsystems/apalache) and [apalache-tests](https://github.com/informalsystems/apalache-tests) into a directory `$DIR` and run the following commands: + +```sh +$DIR/apalache-tests/scripts/mk-run.py --memlimit 28 002bmc-apalache-ok.csv $DIR/apalache . out +./out/run-all.sh +``` + +After the experiments have finished, you can collect the logs by executing the following command: + +```sh +cd ./out +$DIR/apalache-tests/scripts/parse-logs.py --human . +``` + +All lines in `results.csv` should report `Deadlock`, which means that the algorithm +has terminated and no invariant violation was found. 
+ +Similar to [002bmc-apalache-ok.csv](verification/002bmc-apalache-ok.csv), +file [003bmc-apalache-error.csv](verification/003bmc-apalache-error.csv) specifies +the set of experiments that should result in counterexamples: + +```sh +$DIR/apalache-tests/scripts/mk-run.py --memlimit 28 003bmc-apalache-error.csv $DIR/apalache . out +./out/run-all.sh +``` + +All lines in `results.csv` should report `Error`. + +The following table summarizes the experimental results for Light client verification +version 001. The TLA+ properties can be found in the +[TLA+ specification](verification/Lightclient_A_1.tla). + The experiments were run in an AWS instance equipped with 32GB +RAM and a 4-core Intel® Xeon® CPU E5-2686 v4 @ 2.30GHz CPU. +We write “✗=k” when a bug is reported at depth k, and “✓<=k” when +no bug is reported up to depth k. + +![Experimental results](experiments.png) + +The experimental results for version 003 are to be added. + +## Attack Detection + +The [English specification](detection/detection_003_reviewed.md) +defines light client attacks (and how they differ from blockchain +forks), and describes the problem of a light client detecting +these attacks by communicating with a network of full nodes, +where at least one is correct. + +The specification also contains a detection protocol that checks +whether the header obtained from the primary via the verification +protocol matches corresponding headers provided by the secondaries. +If this is not the case, the protocol analyses the verification traces +of the involved full nodes +and generates +[evidence](detection/detection_003_reviewed.md#tmbc-lc-evidence-data1) +of misbehavior that can be submitted to a full node so that +the faulty validators can be punished. + +The [TLA+ specification](detection/LCDetector_003_draft.tla) +is a formal description of the +detection protocol for two peers, including the safety and +termination, which can be model checked with Apalache. + +The `LCD_MC*.tla` files contain concrete parameters for the +[TLA+ specification](detection/LCDetector_003_draft.tla), +in order to run the model checker. +For instance, [LCD_MC4_4_faulty.tla](detection/MC4_4_faulty.tla) +contains the following parameters +for the nodes, heights, the trusting period, the clock drifts, +correctness of the nodes, and the ratio of the faulty processes: + +```tla +AllNodes == {"n1", "n2", "n3", "n4"} +TRUSTED_HEIGHT == 1 +TARGET_HEIGHT == 3 +TRUSTING_PERIOD == 1400 \* the trusting period in some time units +CLOCK_DRIFT = 10 \* how much we assume the local clock is drifting +REAL_CLOCK_DRIFT = 3 \* how much the local clock is actually drifting +IS_PRIMARY_CORRECT == FALSE +IS_SECONDARY_CORRECT == FALSE +FAULTY_RATIO == <<1, 3>> \* < 1 / 3 faulty validators +``` + +To run a complete set of experiments, clone [apalache](https://github.com/informalsystems/apalache) and [apalache-tests](https://github.com/informalsystems/apalache-tests) into a directory `$DIR` and run the following commands: + +```sh +$DIR/apalache-tests/scripts/mk-run.py --memlimit 28 004bmc-apalache-ok.csv $DIR/apalache . out +./out/run-all.sh +``` + +After the experiments have finished, you can collect the logs by executing the following command: + +```sh +cd ./out +$DIR/apalache-tests/scripts/parse-logs.py --human . +``` + +All lines in `results.csv` should report `Deadlock`, which means that the algorithm +has terminated and no invariant violation was found. 
+ +Similar to [004bmc-apalache-ok.csv](verification/004bmc-apalache-ok.csv), +file [005bmc-apalache-error.csv](verification/005bmc-apalache-error.csv) specifies +the set of experiments that should result in counterexamples: + +```sh +$DIR/apalache-tests/scripts/mk-run.py --memlimit 28 005bmc-apalache-error.csv $DIR/apalache . out +./out/run-all.sh +``` + +All lines in `results.csv` should report `Error`. + +The detailed experimental results are to be added soon. + +## Accountability + +The [English specification](attacks/isolate-attackers_002_reviewed.md) +defines the protocol that is executed on a full node upon receiving attack [evidence](detection/detection_003_reviewed.md#tmbc-lc-evidence-data1) from a lightclient. In particular, the protocol handles three types of attacks + +- lunatic +- equivocation +- amnesia + +We discussed in the [last part](attacks/isolate-attackers_002_reviewed.md#Part-III---Completeness) of the English specification +that the non-lunatic cases are defined by having the same validator set in the conflicting blocks. For these cases, +computer-aided analysis of [Tendermint Consensus in TLA+](./accountability/README.md) shows that equivocation and amnesia capture all non-lunatic attacks. + +The [TLA+ specification](attacks/Isolation_001_draft.tla) +is a formal description of the +protocol, including the safety property, which can be model checked with Apalache. + +Similar to the other specifications, [MC_5_3.tla](attacks/MC_5_3.tla) contains concrete parameters to run the model checker. The specification can be checked within seconds. + +[tendermint-accountability](./accountability/README.md) diff --git a/spec/light-client/accountability/001indinv-apalache.csv b/spec/light-client/accountability/001indinv-apalache.csv new file mode 100644 index 0000000000..37c6aeda25 --- /dev/null +++ b/spec/light-client/accountability/001indinv-apalache.csv @@ -0,0 +1,13 @@ +no,filename,tool,timeout,init,inv,next,args +1,MC_n4_f1.tla,apalache,10h,TypedInv,TypedInv,,--length=1 --cinit=ConstInit +2,MC_n4_f2.tla,apalache,10h,TypedInv,TypedInv,,--length=1 --cinit=ConstInit +3,MC_n5_f1.tla,apalache,10h,TypedInv,TypedInv,,--length=1 --cinit=ConstInit +4,MC_n5_f2.tla,apalache,10h,TypedInv,TypedInv,,--length=1 --cinit=ConstInit +5,MC_n4_f1.tla,apalache,20h,Init,TypedInv,,--length=0 --cinit=ConstInit +6,MC_n4_f2.tla,apalache,20h,Init,TypedInv,,--length=0 --cinit=ConstInit +7,MC_n5_f1.tla,apalache,20h,Init,TypedInv,,--length=0 --cinit=ConstInit +8,MC_n5_f2.tla,apalache,20h,Init,TypedInv,,--length=0 --cinit=ConstInit +9,MC_n4_f1.tla,apalache,20h,TypedInv,Agreement,,--length=0 --cinit=ConstInit +10,MC_n4_f2.tla,apalache,20h,TypedInv,Accountability,,--length=0 --cinit=ConstInit +11,MC_n5_f1.tla,apalache,20h,TypedInv,Agreement,,--length=0 --cinit=ConstInit +12,MC_n5_f2.tla,apalache,20h,TypedInv,Accountability,,--length=0 --cinit=ConstInit diff --git a/spec/light-client/accountability/MC_n4_f1.tla b/spec/light-client/accountability/MC_n4_f1.tla new file mode 100644 index 0000000000..62bcb30de2 --- /dev/null +++ b/spec/light-client/accountability/MC_n4_f1.tla @@ -0,0 +1,46 @@ +----------------------------- MODULE MC_n4_f1 ------------------------------- +CONSTANT + \* @type: ROUND -> PROCESS; + Proposer + +\* the variables declared in TendermintAcc3 +VARIABLES + \* @type: PROCESS -> ROUND; + round, + \* @type: PROCESS -> STEP; + step, + \* @type: PROCESS -> VALUE; + decision, + \* @type: PROCESS -> VALUE; + lockedValue, + \* @type: PROCESS -> ROUND; + lockedRound, + \* @type: PROCESS -> VALUE; + 
validValue, + \* @type: PROCESS -> ROUND; + validRound, + \* @type: ROUND -> Set(PROPMESSAGE); + msgsPropose, + \* @type: ROUND -> Set(PREMESSAGE); + msgsPrevote, + \* @type: ROUND -> Set(PREMESSAGE); + msgsPrecommit, + \* @type: Set(MESSAGE); + evidence, + \* @type: ACTION; + action + +INSTANCE TendermintAccDebug_004_draft WITH + Corr <- {"c1", "c2", "c3"}, + Faulty <- {"f1"}, + N <- 4, + T <- 1, + ValidValues <- { "v0", "v1" }, + InvalidValues <- {"v2"}, + MaxRound <- 2 + +\* run Apalache with --cinit=ConstInit +ConstInit == \* the proposer is arbitrary -- works for safety + Proposer \in [Rounds -> AllProcs] + +============================================================================= diff --git a/spec/light-client/accountability/MC_n4_f2.tla b/spec/light-client/accountability/MC_n4_f2.tla new file mode 100644 index 0000000000..baab2a21d5 --- /dev/null +++ b/spec/light-client/accountability/MC_n4_f2.tla @@ -0,0 +1,46 @@ +----------------------------- MODULE MC_n4_f2 ------------------------------- +CONSTANT + \* @type: ROUND -> PROCESS; + Proposer + +\* the variables declared in TendermintAcc3 +VARIABLES + \* @type: PROCESS -> ROUND; + round, + \* @type: PROCESS -> STEP; + step, + \* @type: PROCESS -> VALUE; + decision, + \* @type: PROCESS -> VALUE; + lockedValue, + \* @type: PROCESS -> ROUND; + lockedRound, + \* @type: PROCESS -> VALUE; + validValue, + \* @type: PROCESS -> ROUND; + validRound, + \* @type: ROUND -> Set(PROPMESSAGE); + msgsPropose, + \* @type: ROUND -> Set(PREMESSAGE); + msgsPrevote, + \* @type: ROUND -> Set(PREMESSAGE); + msgsPrecommit, + \* @type: Set(MESSAGE); + evidence, + \* @type: ACTION; + action + +INSTANCE TendermintAccDebug_004_draft WITH + Corr <- {"c1", "c2"}, + Faulty <- {"f3", "f4"}, + N <- 4, + T <- 1, + ValidValues <- { "v0", "v1" }, + InvalidValues <- {"v2"}, + MaxRound <- 2 + +\* run Apalache with --cinit=ConstInit +ConstInit == \* the proposer is arbitrary -- works for safety + Proposer \in [Rounds -> AllProcs] + +============================================================================= diff --git a/spec/light-client/accountability/MC_n4_f2_amnesia.tla b/spec/light-client/accountability/MC_n4_f2_amnesia.tla new file mode 100644 index 0000000000..940903a76a --- /dev/null +++ b/spec/light-client/accountability/MC_n4_f2_amnesia.tla @@ -0,0 +1,62 @@ +---------------------- MODULE MC_n4_f2_amnesia ------------------------------- +EXTENDS Sequences + +CONSTANT + \* @type: ROUND -> PROCESS; + Proposer + +\* the variables declared in TendermintAcc3 +VARIABLES + \* @type: PROCESS -> ROUND; + round, + \* @type: PROCESS -> STEP; + step, + \* @type: PROCESS -> VALUE; + decision, + \* @type: PROCESS -> VALUE; + lockedValue, + \* @type: PROCESS -> ROUND; + lockedRound, + \* @type: PROCESS -> VALUE; + validValue, + \* @type: PROCESS -> ROUND; + validRound, + \* @type: ROUND -> Set(PROPMESSAGE); + msgsPropose, + \* @type: ROUND -> Set(PREMESSAGE); + msgsPrevote, + \* @type: ROUND -> Set(PREMESSAGE); + msgsPrecommit, + \* @type: Set(MESSAGE); + evidence, + \* @type: ACTION; + action + +\* the variable declared in TendermintAccTrace3 +VARIABLE + \* @type: TRACE; + toReplay + +INSTANCE TendermintAccTrace_004_draft WITH + Corr <- {"c1", "c2"}, + Faulty <- {"f3", "f4"}, + N <- 4, + T <- 1, + ValidValues <- { "v0", "v1" }, + InvalidValues <- {"v2"}, + MaxRound <- 2, + Trace <- << + "UponProposalInPropose", + "UponProposalInPrevoteOrCommitAndPrevote", + "UponProposalInPrecommitNoDecision", + "OnRoundCatchup", + "UponProposalInPropose", + 
"UponProposalInPrevoteOrCommitAndPrevote", + "UponProposalInPrecommitNoDecision" + >> + +\* run Apalache with --cinit=ConstInit +ConstInit == \* the proposer is arbitrary -- works for safety + Proposer \in [Rounds -> AllProcs] + +============================================================================= diff --git a/spec/light-client/accountability/MC_n4_f3.tla b/spec/light-client/accountability/MC_n4_f3.tla new file mode 100644 index 0000000000..d4c64e6d01 --- /dev/null +++ b/spec/light-client/accountability/MC_n4_f3.tla @@ -0,0 +1,46 @@ +----------------------------- MODULE MC_n4_f3 ------------------------------- +CONSTANT + \* @type: ROUND -> PROCESS; + Proposer + +\* the variables declared in TendermintAcc3 +VARIABLES + \* @type: PROCESS -> ROUND; + round, + \* @type: PROCESS -> STEP; + step, + \* @type: PROCESS -> VALUE; + decision, + \* @type: PROCESS -> VALUE; + lockedValue, + \* @type: PROCESS -> ROUND; + lockedRound, + \* @type: PROCESS -> VALUE; + validValue, + \* @type: PROCESS -> ROUND; + validRound, + \* @type: ROUND -> Set(PROPMESSAGE); + msgsPropose, + \* @type: ROUND -> Set(PREMESSAGE); + msgsPrevote, + \* @type: ROUND -> Set(PREMESSAGE); + msgsPrecommit, + \* @type: Set(MESSAGE); + evidence, + \* @type: ACTION; + action + +INSTANCE TendermintAccDebug_004_draft WITH + Corr <- {"c1"}, + Faulty <- {"f2", "f3", "f4"}, + N <- 4, + T <- 1, + ValidValues <- { "v0", "v1" }, + InvalidValues <- {"v2"}, + MaxRound <- 2 + +\* run Apalache with --cinit=ConstInit +ConstInit == \* the proposer is arbitrary -- works for safety + Proposer \in [Rounds -> AllProcs] + +============================================================================= diff --git a/spec/light-client/accountability/MC_n5_f1.tla b/spec/light-client/accountability/MC_n5_f1.tla new file mode 100644 index 0000000000..3d7ff979ed --- /dev/null +++ b/spec/light-client/accountability/MC_n5_f1.tla @@ -0,0 +1,46 @@ +----------------------------- MODULE MC_n5_f1 ------------------------------- +CONSTANT + \* @type: ROUND -> PROCESS; + Proposer + +\* the variables declared in TendermintAcc3 +VARIABLES + \* @type: PROCESS -> ROUND; + round, + \* @type: PROCESS -> STEP; + step, + \* @type: PROCESS -> VALUE; + decision, + \* @type: PROCESS -> VALUE; + lockedValue, + \* @type: PROCESS -> ROUND; + lockedRound, + \* @type: PROCESS -> VALUE; + validValue, + \* @type: PROCESS -> ROUND; + validRound, + \* @type: ROUND -> Set(PROPMESSAGE); + msgsPropose, + \* @type: ROUND -> Set(PREMESSAGE); + msgsPrevote, + \* @type: ROUND -> Set(PREMESSAGE); + msgsPrecommit, + \* @type: Set(MESSAGE); + evidence, + \* @type: ACTION; + action + +INSTANCE TendermintAccDebug_004_draft WITH + Corr <- {"c1", "c2", "c3", "c4"}, + Faulty <- {"f5"}, + N <- 5, + T <- 1, + ValidValues <- { "v0", "v1" }, + InvalidValues <- {"v2"}, + MaxRound <- 2 + +\* run Apalache with --cinit=ConstInit +ConstInit == \* the proposer is arbitrary -- works for safety + Proposer \in [Rounds -> AllProcs] + +============================================================================= diff --git a/spec/light-client/accountability/MC_n5_f2.tla b/spec/light-client/accountability/MC_n5_f2.tla new file mode 100644 index 0000000000..24400dc07f --- /dev/null +++ b/spec/light-client/accountability/MC_n5_f2.tla @@ -0,0 +1,46 @@ +----------------------------- MODULE MC_n5_f2 ------------------------------- +CONSTANT + \* @type: ROUND -> PROCESS; + Proposer + +\* the variables declared in TendermintAcc3 +VARIABLES + \* @type: PROCESS -> ROUND; + round, + \* @type: PROCESS -> STEP; + step, + 
\* @type: PROCESS -> VALUE; + decision, + \* @type: PROCESS -> VALUE; + lockedValue, + \* @type: PROCESS -> ROUND; + lockedRound, + \* @type: PROCESS -> VALUE; + validValue, + \* @type: PROCESS -> ROUND; + validRound, + \* @type: ROUND -> Set(PROPMESSAGE); + msgsPropose, + \* @type: ROUND -> Set(PREMESSAGE); + msgsPrevote, + \* @type: ROUND -> Set(PREMESSAGE); + msgsPrecommit, + \* @type: Set(MESSAGE); + evidence, + \* @type: ACTION; + action + +INSTANCE TendermintAccDebug_004_draft WITH + Corr <- {"c1", "c2", "c3"}, + Faulty <- {"f4", "f5"}, + N <- 5, + T <- 1, + ValidValues <- { "v0", "v1" }, + InvalidValues <- {"v2"}, + MaxRound <- 2 + +\* run Apalache with --cinit=ConstInit +ConstInit == \* the proposer is arbitrary -- works for safety + Proposer \in [Rounds -> AllProcs] + +============================================================================= diff --git a/spec/light-client/accountability/MC_n6_f1.tla b/spec/light-client/accountability/MC_n6_f1.tla new file mode 100644 index 0000000000..a58f8c78a8 --- /dev/null +++ b/spec/light-client/accountability/MC_n6_f1.tla @@ -0,0 +1,46 @@ +----------------------------- MODULE MC_n6_f1 ------------------------------- +CONSTANT + \* @type: ROUND -> PROCESS; + Proposer + +\* the variables declared in TendermintAcc3 +VARIABLES + \* @type: PROCESS -> ROUND; + round, + \* @type: PROCESS -> STEP; + step, + \* @type: PROCESS -> VALUE; + decision, + \* @type: PROCESS -> VALUE; + lockedValue, + \* @type: PROCESS -> ROUND; + lockedRound, + \* @type: PROCESS -> VALUE; + validValue, + \* @type: PROCESS -> ROUND; + validRound, + \* @type: ROUND -> Set(PROPMESSAGE); + msgsPropose, + \* @type: ROUND -> Set(PREMESSAGE); + msgsPrevote, + \* @type: ROUND -> Set(PREMESSAGE); + msgsPrecommit, + \* @type: Set(MESSAGE); + evidence, + \* @type: ACTION; + action + +INSTANCE TendermintAccDebug_004_draft WITH + Corr <- {"c1", "c2", "c3", "c4", "c5"}, + Faulty <- {"f6"}, + N <- 4, + T <- 1, + ValidValues <- { "v0", "v1" }, + InvalidValues <- {"v2"}, + MaxRound <- 2 + +\* run Apalache with --cinit=ConstInit +ConstInit == \* the proposer is arbitrary -- works for safety + Proposer \in [Rounds -> AllProcs] + +============================================================================= diff --git a/spec/light-client/accountability/README.md b/spec/light-client/accountability/README.md new file mode 100644 index 0000000000..a6eda7e674 --- /dev/null +++ b/spec/light-client/accountability/README.md @@ -0,0 +1,308 @@ +--- +order: 1 +parent: + title: Accountability + order: 4 +--- + +# Fork accountability + +## Problem Statement + +Tendermint consensus guarantees the following specifications for all heights: + +* agreement -- no two correct full nodes decide differently. +* validity -- the decided block satisfies the predefined predicate *valid()*. +* termination -- all correct full nodes eventually decide, + +If the faulty validators have less than 1/3 of voting power in the current validator set. In the case where this assumption +does not hold, each of the specification may be violated. + +The agreement property says that for a given height, any two correct validators that decide on a block for that height decide on the same block. That the block was indeed generated by the blockchain, can be verified starting from a trusted (genesis) block, and checking that all subsequent blocks are properly signed. + +However, faulty nodes may forge blocks and try to convince users (light clients) that the blocks had been correctly generated. 
+ +In addition, Tendermint agreement might be violated in the case where 1/3 or more of the voting power belongs to faulty validators: Two correct validators decide on different blocks. The latter case motivates the term "fork": as Tendermint consensus also agrees on the next validator set, correct validators may have decided on disjoint next validator sets, and the chain branches into two or more partitions (possibly having faulty validators in common) and each branch continues to generate blocks independently of the other. + +We say that a fork is a case in which there are two commits for different blocks at the same height of the blockchain. The problem is to ensure that in those cases we are able to detect faulty validators (and not mistakenly accuse correct validators), and thereby incentivize validators to behave according to the protocol specification. + +**Conceptual Limit.** In order to prove misbehavior of a node, we have to show that the behavior deviates from correct behavior with respect to a given algorithm. Thus, an algorithm that detects misbehavior of nodes executing some algorithm *A* must be defined with respect to algorithm *A*. In our case, *A* is Tendermint consensus (+ other protocols in the infrastructure; e.g., full nodes and the Light Client). If the consensus algorithm is changed/updated/optimized in the future, we have to check whether changes to the accountability algorithm are also required. All the discussions in this document are thus inherently specific to Tendermint consensus and the Light Client specification. + +**Q:** Should we distinguish between agreement for validators and agreement for full nodes? The case where all correct validators agree on a block, but a correct full node decides on a different block, seems to be slightly less severe than the case where two correct validators decide on different blocks. Still, if a contaminated full node becomes a validator, that may be problematic later on. Also, it is not clear how gossiping is impaired if a contaminated full node is on a different branch. + +*Remark.* In the case where 1/3 or more of the voting power belongs to faulty validators, validity and termination can also be broken. Termination can be broken if faulty processes just do not send the messages that are needed to make progress. Due to asynchrony, this is not punishable, because faulty validators can always claim they never received the messages that would have forced them to send messages. + +## The Misbehavior of Faulty Validators + +Forks are the result of faulty validators deviating from the protocol. In principle several such deviations can be detected without a fork actually occurring: + +1. double proposal: A faulty proposer proposes two different values (blocks) for the same height and the same round in Tendermint consensus. + +2. double signing: Tendermint consensus forces correct validators to prevote and precommit for at most one value per round. In case a faulty validator sends multiple prevote and/or precommit messages for different values for the same height/round, this is a misbehavior. + +3. lunatic validator: Tendermint consensus forces correct validators to prevote and precommit only for values *v* that satisfy *valid(v)*. If faulty validators prevote and precommit for *v* although *valid(v)=false* this is misbehavior. + +*Remark.* In isolation, Point 3 is an attack on validity (rather than agreement). However, the prevotes and precommits can then also be used to forge blocks. + +4. amnesia: Tendermint consensus has a locking mechanism.
If a validator has some value v locked, then it can only prevote/precommit for v or nil. Sending a prevote/precommit message for a different value v' (that is not nil) while holding a lock on value v is misbehavior. + +5. spurious messages: In Tendermint consensus most of the message send instructions are guarded by threshold guards, e.g., one needs to receive *2f + 1* prevote messages to send a precommit. Faulty validators may send a precommit without having received the prevote messages. + +Independently of a fork happening, punishing this behavior might be important to prevent forks altogether. This should keep attackers from misbehaving: if less than 1/3 of the voting power is faulty, this misbehavior is detectable but will not lead to a safety violation. Thus, unless they have 1/3 or more (or in some cases more than 2/3) of the voting power, attackers have an incentive not to misbehave. If attackers control too much voting power, we have to deal with forks, as discussed in this document. + +## Two types of forks + +* Fork-Full. Two correct validators decide on different blocks for the same height. Since the next validator sets are also decided upon, the correct validators may be partitioned to participate in two distinct branches of the forked chain. + +As in this case we have two different blocks (both having the same right/no right to exist), a central system invariant (one block per height decided by correct validators) is violated. As full nodes are contaminated in this case, the contamination can also spread to light clients. However, even without breaking this system invariant, light clients can be subject to a fork: + +* Fork-Light. All correct validators decide on the same block for height *h*, but faulty processes (validators or not) forge a different block for that height, in order to fool users (who use the light client). + +# Attack scenarios + +## On-chain attacks + +### Equivocation (one round) + +There are several scenarios in which forks might happen. The first is double signing within a round. + +* F1. Equivocation: faulty validators sign multiple vote messages (prevote and/or precommit) for different values *during the same round r* at a given height h. + +### Flip-flopping + +Tendermint consensus implements a locking mechanism: If a correct validator *p* receives a proposal for value v and *2f + 1* prevotes for a value *id(v)* in round *r*, it locks *v* and remembers *r*. In this case, *p* also sends a precommit message for *id(v)*, which later may serve as proof that *p* locked *v*. +In subsequent rounds, *p* only sends prevote messages for a value it had previously locked. However, it is possible to change the locked value if, in a future round *r' > r*, the process receives a proposal and *2f + 1* prevotes for a different value *v'*. In this case, *p* could send a prevote/precommit for *id(v')*. This algorithmic feature can be exploited in two ways: + +* F2. Faulty Flip-flopping (Amnesia): faulty validators precommit some value *id(v)* in round *r* (value *v* is locked in round *r*) and then prevote for a different value *id(v')* in a higher round *r' > r* without previously correctly unlocking value *v*. In this case faulty processes "forget" that they have locked value *v* and prevote some other value in the following rounds. +Some correct validators might have decided on *v* in *r*, and other correct validators decide on *v'* in *r'*. Here we can have branching on the main chain (Fork-Full). + +* F3.
Correct Flip-flopping (Back to the past): There are some precommit messages signed by (correct) validators for value *id(v)* in round *r*. Still, *v* is not decided upon, and all processes move on to the next round. Then correct validators (correctly) lock and decide a different value *v'* in some round *r' > r*. And the correct validators continue; there is no branching on the main chain. +However, faulty validators may use the correct precommit messages from round *r* together with a posteriori generated faulty precommit messages for round *r* to forge a block for a value that was not decided on the main chain (Fork-Light). + +## Off-chain attacks + +F1-F3 may contaminate the state of full nodes (and even validators). Contaminated (but otherwise correct) full nodes may thus communicate faulty blocks to light clients. +Similarly, without actually interfering with the main chain, we can have the following: + +* F4. Phantom validators: faulty validators vote (sign prevote and precommit messages) in heights in which they are not part of the validator sets (at the main chain). + +* F5. Lunatic validators: faulty validators sign vote messages to support an (arbitrary) application state that is different from the application state that resulted from valid state transitions. + +## Types of victims + +We consider three types of potential attack victims: + +* FN: full node +* LCS: light client with sequential header verification +* LCB: light client with bisection based header verification + +F1 and F2 can be used by faulty validators to actually create multiple branches on the blockchain. That means that correctly operating full nodes decide on different blocks for the same height. Until a fork is detected locally by a full node (by receiving evidence from others or by some other local check that fails), the full node can spread corrupted blocks to light clients. + +*Remark.* If full nodes take a branch different from the one taken by the validators, the liveness of the gossip protocol may be affected. We should eventually look at this more closely. However, as it does not influence safety it is not a primary concern. + +F3 is similar to F1, except that no two correct validators decide on different blocks. It may still be the case that full nodes become affected. + +In addition, without creating a fork on the main chain, light clients can be contaminated by more than a third of validators that are faulty and sign a forged header. +F4 cannot fool correct full nodes as they know the current validator set. Similarly, LCS know who the validators are. Hence, F4 is an attack against LCB, which does not necessarily know the complete prefix of headers (Fork-Light), as it trusts a header that is signed by at least one correct validator (trusting period method). + +The following table gives an overview of how the different attacks may affect different nodes. F1-F3 are *on-chain* attacks so they can corrupt the state of full nodes. Then if a light client (LCS or LCB) contacts a full node to obtain headers (or blocks), the corrupted state may propagate to the light client. + +F4 and F5 are *off-chain*, that is, these attacks cannot be used to corrupt the state of full nodes (which have sufficient knowledge on the state of the chain to not be fooled).
+ +| Attack | FN | LCS | LCB | +|:------:|:------:|:------:|:------:| +| F1 | direct | FN | FN | +| F2 | direct | FN | FN | +| F3 | direct | FN | FN | +| F4 | | | direct | +| F5 | | | direct | + +**Q:** Light clients are more vulnerable than full nodes, because the former only verify headers but do not execute transactions. What kind of certainty is gained by a full node that executes a transaction? + +As a full node verifies all transactions, it can only be +contaminated by an attack if the blockchain itself violates its invariant (one block per height), that is, in case of a fork that leads to branching. + +## Detailed Attack Scenarios + +### Equivocation based attacks + +In the case of equivocation based attacks, faulty validators sign multiple votes (prevote and/or precommit) in the same +round of some height. This attack can be executed on both full nodes and light clients. It requires 1/3 or more of the voting power to be executed. + +#### Scenario 1: Equivocation on the main chain + +Validators: + +* CA - a set of correct validators with less than 1/3 of the voting power +* CB - a set of correct validators with less than 1/3 of the voting power +* CA and CB are disjoint +* F - a set of faulty validators with 1/3 or more voting power + +Observe that this setting violates the Tendermint failure model. + +Execution: + +* A faulty proposer proposes block A to CA +* A faulty proposer proposes block B to CB +* Validators from the sets CA and CB prevote for A and B, respectively. +* Faulty validators from the set F prevote both for A and B. +* The faulty prevote messages + * for A arrive at CA long before the B messages + * for B arrive at CB long before the A messages +* Therefore correct validators from the sets CA and CB will observe +more than 2/3 of prevotes for A and B and precommit for A and B, respectively. +* Faulty validators from the set F precommit both values A and B. +* Thus, we have more than 2/3 commits for both A and B. + +Consequences: + +* Creating evidence of misbehavior is simple in this case, as we have multiple messages signed by the same faulty processes for different values in the same round. + +* We have to ensure that these different messages reach a correct process (full node, monitor?), which can submit evidence. + +* This is an attack on the full node level (Fork-Full). +* It also extends to the light clients. +* For both we need a detection and recovery mechanism. + +#### Scenario 2: Equivocation to a light client (LCS) + +Validators: + +* a set F of faulty validators with more than 2/3 of the voting power. + +Execution: + +* for the main chain F behaves nicely +* F coordinates to sign a block B that is different from the one on the main chain. +* the light client obtains B and trusts it, as it is signed by more than 2/3 of the voting power. + +Consequences: + +Once equivocation is used to attack a light client, it opens space +for different kinds of attacks, as the application state can be diverged in any direction. For example, an attacker can modify the validator set such that it contains only validators that do not have any stake bonded. Note that after a light client is fooled by a fork, an attacker can change the application state and the validator set arbitrarily. + +In order to detect such an (equivocation-based) attack, the light client would need to cross-check its state with some correct validator (or to obtain a hash of the state from the main chain using out-of-band channels).
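+ +In the TLA+ specification added in this change set (TendermintAcc_004_draft.tla), equivocation evidence is exactly this pattern: two distinct messages of the same type, signed by the same process for the same round. + +```tla +EquivocationBy(p) == + \E m1, m2 \in evidence: + /\ m1 /= m2 + /\ m1.src = p + /\ m2.src = p + /\ m1.round = m2.round + /\ m1.type = m2.type +```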
+ +*Remark.* The light client would be able to create evidence of misbehavior, but this would require pulling potentially a lot of data from correct full nodes. Maybe we need to figure out a different architecture where a light client that is attacked will push all its data for the current unbonding period to a correct node that will inspect this data and submit corresponding evidence. There are also architectures that assume a special role (sometimes called fisherman) whose goal is to collect as much useful data as possible from the network, to do analysis and create evidence transactions. That functionality is outside the scope of this document. + +*Remark.* The difference between LCS and LCB might only be in the amount of voting power needed to convince a light client about an arbitrary state. In the case of LCB, where the security threshold is at its minimum, an attacker can arbitrarily modify the application state with 1/3 or more of the voting power, while in the case of LCS it requires more than 2/3 of the voting power. + +### Flip-flopping: Amnesia based attacks + +In the case of amnesia, faulty validators lock some value *v* in some round *r*, and then vote for a different value *v'* in higher rounds without correctly unlocking value *v*. This attack can be used on both full nodes and light clients. + +#### Scenario 3: At most 2/3 of faults + +Validators: + +* a set F of faulty validators with 1/3 or more but at most 2/3 of the voting power +* a set C of correct validators + +Execution: + +* Faulty validators commit (without exposing it on the main chain) a block A in round *r* by collecting more than 2/3 of the + voting power (containing correct and faulty validators). +* All validators (correct and faulty) reach a round *r' > r*. +* Some correct validators in C do not lock any value before round *r'*. +* The faulty validators in F deviate from Tendermint consensus by ignoring that they locked A in *r*, and propose a different block B in *r'*. +* As the validators in C that have not locked any value find B acceptable, they accept the proposal for B and commit a block B. + +*Remark.* In this case, the 1/3 or more faulty validators do not need to commit equivocation (F1), as they only vote once per round in the execution. + +Detecting faulty validators in the case of such an attack can be done by the fork accountability mechanism described in: . + +If a light client is attacked using this attack with 1/3 or more of the voting power (and less than 2/3), the attacker cannot change the application state arbitrarily. Rather, the attacker is limited to a state a correct validator finds acceptable: In the execution above, correct validators still find the value acceptable; however, the block the light client trusts deviates from the one on the main chain. + +#### Scenario 4: More than 2/3 of faults + +In case there is an attack with more than 2/3 of the voting power, an attacker can arbitrarily change the application state. + +Validators: + +* a set F1 of faulty validators with 1/3 or more of the voting power +* a set F2 of faulty validators with less than 1/3 of the voting power + +Execution + +* Similar to Scenario 3 (however, messages by correct validators are not needed) +* The faulty validators in F1 lock value A in round *r* +* They sign a different value in follow-up rounds +* F2 does not lock A in round *r* + +Consequences: + +* The validators in F1 will be detectable by the fork accountability mechanisms. +* The validators in F2 cannot be detected using this mechanism.
+Only if they signed something that conflicts with the application can this be used against them; otherwise they have not done anything incorrect. +* This case is not covered by the report as it only assumes at most 2/3 of faulty validators. + +**Q:** Do we need to define a special kind of attack for the case where a validator signs an arbitrary state? It seems that detecting such an attack requires a different mechanism that would need, as evidence, a sequence of blocks that led to that state. This might be very tricky to implement. + +### Back to the past + +In this kind of attack, faulty validators take advantage of the fact that they did not sign messages in some of the past rounds. Due to the asynchronous network in which Tendermint operates, we cannot easily differentiate between such an attack and a delayed message. This kind of attack can be used against both full nodes and light clients. + +#### Scenario 5 + +Validators: + +* C1 - a set of correct validators with over 1/3 of the voting power +* C2 - a set of correct validators with 1/3 of the voting power +* C1 and C2 are disjoint +* F - a set of faulty validators with less than 1/3 of the voting power +* one additional faulty process *q* +* F and *q* violate the Tendermint failure model. + +Execution: + +* in a round *r* of height *h* we have C1 precommitting a value A, +* C2 precommits nil, +* F does not send any message +* *q* precommits nil. +* In some round *r' > r*, F and *q* and C2 commit some other value B different from A. +* F and *q* "go back to the past" and sign precommit messages for value A in round *r*. +* Together with the precommit messages of C1 this is sufficient for a commit for value A. + +Consequences: + +* Only a single faulty validator that previously precommitted nil performed equivocation, while the other 1/3 of faulty validators actually executed an attack that has exactly the same sequence of messages as part of an amnesia attack. Detecting this kind of attack boils down to mechanisms for equivocation and amnesia. + +**Q:** Should we keep this as a separate kind of attack? It seems that equivocation, amnesia and phantom validators are the only kinds of attacks we need to support, and this gives us security also in other cases. This would not be surprising, as equivocation and amnesia are attacks that follow from the protocol, and the phantom attack is not really an attack on Tendermint but rather on the Proof of Stake module. + +### Phantom validators + +In the case of phantom validators, processes that are not part of the current validator set but are still bonded (as the attack happens during their unbonding period) can be part of the attack by signing vote messages. This attack can be executed against both full nodes and light clients. + +#### Scenario 6 + +Validators: + +* F -- a set of faulty validators that are not part of the validator set on the main chain at height *h + k* + +Execution: + +* There is a fork, and there exist two different headers for height *h + k*, with different validator sets: + * VS2 on the main chain + * forged header VS2', signed by F (and others) + +* a light client trusts a header for height *h* (and the corresponding validator set VS1). +* As part of bisection header verification, it verifies the header at height *h + k* with the new validator set VS2'. + +Consequences: + +* To detect this, a node needs to see both the forged header and the canonical header from the chain.
+* If this is the case, detecting this kind of attack is easy, as it just requires verifying whether processes are signing messages in heights in which they are not part of the validator set. + +**Remark.** We can have phantom-validator-based attacks as a follow-up of an equivocation- or amnesia-based attack, where the forked state contains validators that are not part of the validator set on the main chain. In this case, they keep signing messages contributing to the forked chain (the wrong branch) although they are not part of the validator set on the main chain. This attack can also be used to attack a full node during a period of time in which it is eclipsed. + +**Remark.** Phantom validator evidence has been removed from the implementation as it was deemed, although possibly a plausible form of evidence, not relevant. Any attack on +the light client involving a phantom validator will need to have been initiated by 1/3+ lunatic +validators that can forge a new validator set that includes the phantom validator. Only in +that case will the light client accept the phantom validator's vote. We need only worry about +punishing the 1/3+ lunatic cabal, which is the root cause of the attack. + +### Lunatic validator + +A lunatic validator agrees to sign commit messages for an arbitrary application state. This is used to attack light clients. +Note that detecting this behavior requires application knowledge. Detecting this behavior can probably be done by +referring to the block before the one in which the attack happened. + +**Q:** Can we say that in this case a validator declines to check if a proposed value is valid before voting for it? diff --git a/spec/light-client/accountability/Synopsis.md b/spec/light-client/accountability/Synopsis.md new file mode 100644 index 0000000000..76da3868c7 --- /dev/null +++ b/spec/light-client/accountability/Synopsis.md @@ -0,0 +1,105 @@ + +# Synopsis + + A TLA+ specification of a simplified Tendermint consensus, tuned for + fork accountability. The simplifications are as follows: + +- the protocol runs for one height, that is, one-shot consensus + +- this specification focuses on safety, so timeouts are modelled + with non-determinism + +- the proposer function is non-deterministic, no fairness is assumed + +- the messages by the faulty processes are injected right in the initial states + +- every process has the voting power of 1 + +- hashes are modelled as identity + + Having the above assumptions in mind, the specification follows the pseudo-code + of the Tendermint paper: <https://arxiv.org/abs/1807.04938> + + Byzantine processes can demonstrate arbitrary behavior, including + no communication. However, we have to show that under the collective evidence + collected by the correct processes, at least `f+1` Byzantine processes demonstrate + one of the following behaviors: + +- Equivocation: a Byzantine process sends two different values + in the same round. + +- Amnesia: a Byzantine process locks a value, although it has locked + another value in the past.
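+ +These behaviors are formalized in [TendermintAcc_004_draft](TendermintAcc_004_draft.tla). For instance, amnesia by a process `p` is captured as a PRECOMMIT for one value followed by a PREVOTE for a different value in a later round, with no intermediate prevote quorum for the new value that would justify the switch: + +```tla +AmnesiaBy(p) == + \E r1, r2 \in Rounds: + /\ r1 < r2 + /\ \E v1, v2 \in ValidValues: + /\ v1 /= v2 + /\ [type |-> "PRECOMMIT", src |-> p, round |-> r1, id |-> Id(v1)] \in evidence + /\ [type |-> "PREVOTE", src |-> p, round |-> r2, id |-> Id(v2)] \in evidence + /\ \A r \in { rnd \in Rounds: r1 <= rnd /\ rnd < r2 }: + LET prevotes == { m \in evidence: m.type = "PREVOTE" /\ m.round = r /\ m.id = Id(v2) } IN + Cardinality(prevotes) < THRESHOLD2 +```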
+ +# TLA+ modules + +- [TendermintAcc_004_draft](TendermintAcc_004_draft.tla) is the protocol + specification, + +- [TendermintAccInv_004_draft](TendermintAccInv_004_draft.tla) contains an + inductive invariant for establishing the protocol safety as well as the + forking cases, + +- `MC_n_f`, e.g., [MC_n4_f1](MC_n4_f1.tla), contains fixed constants for + model checking with the [Apalache model + checker](https://github.com/informalsystems/apalache), + +- [TendermintAccTrace_004_draft](TendermintAccTrace_004_draft.tla) shows how + to restrict the execution space to a fixed sequence of actions (e.g., to + instantiate a counterexample), + +- [TendermintAccDebug_004_draft](TendermintAccDebug_004_draft.tla) contains + useful definitions for debugging the protocol specification with TLC and + Apalache. + +# Reasoning about fork scenarios + +The theorem statements can be found in +[TendermintAccInv_004_draft.tla](TendermintAccInv_004_draft.tla). + +First, we would like to show that `TypedInv` is an inductive invariant. +Formally, the statement looks as follows: + +```tla +THEOREM TypedInvIsInductive == + \/ FaultyQuorum + \//\ Init => TypedInv + /\ TypedInv /\ [Next]_vars => TypedInv' +``` + +When over two-thirds of processes are faulty, `TypedInv` is not inductive. +However, there is no hope to repair the protocol in this case. We run +[Apalache](https://github.com/informalsystems/apalache) to prove this theorem +only for fixed instances of 4 to 5 validators. Apalache does not parse theorem +statements at the moment, so we run Apalache using a shell script. To find a +parameterized argument, one has to use a theorem prover, e.g., TLAPS. + +Second, we would like to show that the invariant implies `Agreement`, that is, +no fork, provided that less than one third of the processes are faulty. By combining +this theorem with the previous theorem, we conclude that the protocol indeed +satisfies Agreement under the condition `LessThanThirdFaulty`. + +```tla +THEOREM AgreementWhenLessThanThirdFaulty == + LessThanThirdFaulty /\ TypedInv => Agreement +``` + +Third, in the general case, we either have no fork, or one of two fork scenarios: + +```tla +THEOREM AgreementOrFork == + ~FaultyQuorum /\ TypedInv => Accountability +``` + +# Model checking results + +Check the report on [model checking with Apalache](./results/001indinv-apalache-report.md). + +To run the model checking experiments, use the script: + +```console +./run.sh +``` + +This script assumes that the Apalache build is available in +`~/devl/apalache-unstable`. diff --git a/spec/light-client/accountability/TendermintAccDebug_004_draft.tla b/spec/light-client/accountability/TendermintAccDebug_004_draft.tla new file mode 100644 index 0000000000..9281b87265 --- /dev/null +++ b/spec/light-client/accountability/TendermintAccDebug_004_draft.tla @@ -0,0 +1,101 @@ +------------------ MODULE TendermintAccDebug_004_draft ------------------------- +(* + A few definitions that we use for debugging TendermintAcc3, which do not belong + to the specification itself. + + * Version 3. Modular and parameterized definitions. + + Igor Konnov, 2020. + *) + +EXTENDS TendermintAccInv_004_draft + +\* make them parameters? +NFaultyProposals == 0 \* the number of injected faulty PROPOSE messages +NFaultyPrevotes == 6 \* the number of injected faulty PREVOTE messages +NFaultyPrecommits == 6 \* the number of injected faulty PRECOMMIT messages + +\* Given a set of allowed messages Msgs, this operator produces a function from +\* rounds to sets of messages.
+\* Importantly, there will be exactly k messages in the image of msgFun. +\* We use this action to produce k faults in an initial state. +\* @type: (ROUND -> Set(MESSAGE), Set(MESSAGE), Int) => Bool; +ProduceFaults(msgFun, From, k) == + \E f \in [1..k -> From]: + msgFun = [r \in Rounds |-> {m \in {f[i]: i \in 1..k}: m.round = r}] + +\* As TLC explodes with faults, we may have initial states without faults +InitNoFaults == + /\ round = [p \in Corr |-> 0] + /\ step = [p \in Corr |-> "PROPOSE"] + /\ decision = [p \in Corr |-> NilValue] + /\ lockedValue = [p \in Corr |-> NilValue] + /\ lockedRound = [p \in Corr |-> NilRound] + /\ validValue = [p \in Corr |-> NilValue] + /\ validRound = [p \in Corr |-> NilRound] + /\ msgsPropose = [r \in Rounds |-> EmptyMsgSet] + /\ msgsPrevote = [r \in Rounds |-> EmptyMsgSet] + /\ msgsPrecommit = [r \in Rounds |-> EmptyMsgSet] + /\ evidence = EmptyMsgSet + +(* + A specialized version of Init that injects NFaultyProposals proposals, + NFaultyPrevotes prevotes, NFaultyPrecommits precommits by the faulty processes + *) +InitFewFaults == + /\ round = [p \in Corr |-> 0] + /\ step = [p \in Corr |-> "PROPOSE"] + /\ decision = [p \in Corr |-> NilValue] + /\ lockedValue = [p \in Corr |-> NilValue] + /\ lockedRound = [p \in Corr |-> NilRound] + /\ validValue = [p \in Corr |-> NilValue] + /\ validRound = [p \in Corr |-> NilRound] + /\ ProduceFaults(msgsPrevote', + [type: {"PREVOTE"}, src: Faulty, round: Rounds, id: Values], + NFaultyPrevotes) + /\ ProduceFaults(msgsPrecommit', + [type: {"PRECOMMIT"}, src: Faulty, round: Rounds, id: Values], + NFaultyPrecommits) + /\ ProduceFaults(msgsPropose', + [type: {"PROPOSAL"}, src: Faulty, round: Rounds, + proposal: Values, validRound: Rounds \cup {NilRound}], + NFaultyProposals) + /\ evidence = EmptyMsgSet + +\* Add faults incrementally +NextWithFaults == + \* either the protocol makes a step + \/ Next + \* or a faulty process sends a message + \//\ UNCHANGED <<round, step, decision, lockedValue, lockedRound, validValue, validRound, evidence, action>> + /\ \E p \in Faulty: + \E r \in Rounds: + \//\ UNCHANGED <<msgsPrevote, msgsPrecommit>> + /\ \E proposal \in ValidValues \union {NilValue}: + \E vr \in RoundsOrNil: + BroadcastProposal(p, r, proposal, vr) + \//\ UNCHANGED <<msgsPropose, msgsPrecommit>> + /\ \E id \in ValidValues \union {NilValue}: + BroadcastPrevote(p, r, id) + \//\ UNCHANGED <<msgsPropose, msgsPrevote>> + /\ \E id \in ValidValues \union {NilValue}: + BroadcastPrecommit(p, r, id) + +(******************************** PROPERTIES ***************************************) +\* simple reachability properties to see that the spec is progressing +NoPrevote == \A p \in Corr: step[p] /= "PREVOTE" + +NoPrecommit == \A p \in Corr: step[p] /= "PRECOMMIT" + +NoValidPrecommit == + \A r \in Rounds: + \A m \in msgsPrecommit[r]: + m.id = NilValue \/ m.src \in Faulty + +NoHigherRounds == \A p \in Corr: round[p] < 1 + +NoDecision == \A p \in Corr: decision[p] = NilValue + +============================================================================= + diff --git a/spec/light-client/accountability/TendermintAccInv_004_draft.tla b/spec/light-client/accountability/TendermintAccInv_004_draft.tla new file mode 100644 index 0000000000..2eeec1fb24 --- /dev/null +++ b/spec/light-client/accountability/TendermintAccInv_004_draft.tla @@ -0,0 +1,376 @@ +------------------- MODULE TendermintAccInv_004_draft -------------------------- +(* + An inductive invariant for TendermintAcc3, which captures the forked + and non-forked cases. + + * Version 3. Modular and parameterized definitions. + * Version 2. Bugfixes in the spec and an inductive invariant. + + Igor Konnov, 2020.
+ *) + +EXTENDS TendermintAcc_004_draft + +(************************** TYPE INVARIANT ***********************************) +(* first, we define the sets of all potential messages *) +\* @type: Set(PROPMESSAGE); +AllProposals == + [type: {"PROPOSAL"}, + src: AllProcs, + round: Rounds, + proposal: ValuesOrNil, + validRound: RoundsOrNil] + +\* @type: Set(PREMESSAGE); +AllPrevotes == + [type: {"PREVOTE"}, + src: AllProcs, + round: Rounds, + id: ValuesOrNil] + +\* @type: Set(PREMESSAGE); +AllPrecommits == + [type: {"PRECOMMIT"}, + src: AllProcs, + round: Rounds, + id: ValuesOrNil] + +(* the standard type invariant -- importantly, it is inductive *) +TypeOK == + /\ round \in [Corr -> Rounds] + /\ step \in [Corr -> { "PROPOSE", "PREVOTE", "PRECOMMIT", "DECIDED" }] + /\ decision \in [Corr -> ValidValues \union {NilValue}] + /\ lockedValue \in [Corr -> ValidValues \union {NilValue}] + /\ lockedRound \in [Corr -> RoundsOrNil] + /\ validValue \in [Corr -> ValidValues \union {NilValue}] + /\ validRound \in [Corr -> RoundsOrNil] + /\ msgsPropose \in [Rounds -> SUBSET AllProposals] + /\ BenignRoundsInMessages(msgsPropose) + /\ msgsPrevote \in [Rounds -> SUBSET AllPrevotes] + /\ BenignRoundsInMessages(msgsPrevote) + /\ msgsPrecommit \in [Rounds -> SUBSET AllPrecommits] + /\ BenignRoundsInMessages(msgsPrecommit) + /\ evidence \in SUBSET (AllProposals \union AllPrevotes \union AllPrecommits) + /\ action \in { + "Init", + "InsertProposal", + "UponProposalInPropose", + "UponProposalInProposeAndPrevote", + "UponQuorumOfPrevotesAny", + "UponProposalInPrevoteOrCommitAndPrevote", + "UponQuorumOfPrecommitsAny", + "UponProposalInPrecommitNoDecision", + "OnTimeoutPropose", + "OnQuorumOfNilPrevotes", + "OnRoundCatchup" + } + +(************************** INDUCTIVE INVARIANT *******************************) +EvidenceContainsMessages == + \* evidence contains only the messages from: + \* msgsPropose, msgsPrevote, and msgsPrecommit + \A m \in evidence: + LET r == m.round + t == m.type + IN + CASE t = "PROPOSAL" -> m \in msgsPropose[r] + [] t = "PREVOTE" -> m \in msgsPrevote[r] + [] OTHER -> m \in msgsPrecommit[r] + +NoFutureMessagesForLargerRounds(p) == + \* a correct process does not send messages for the future rounds + \A r \in { rr \in Rounds: rr > round[p] }: + /\ \A m \in msgsPropose[r]: m.src /= p + /\ \A m \in msgsPrevote[r]: m.src /= p + /\ \A m \in msgsPrecommit[r]: m.src /= p + +NoFutureMessagesForCurrentRound(p) == + \* a correct process does not send messages in the future + LET r == round[p] IN + /\ Proposer[r] = p \/ \A m \in msgsPropose[r]: m.src /= p + /\ \/ step[p] \in {"PREVOTE", "PRECOMMIT", "DECIDED"} + \/ \A m \in msgsPrevote[r]: m.src /= p + /\ \/ step[p] \in {"PRECOMMIT", "DECIDED"} + \/ \A m \in msgsPrecommit[r]: m.src /= p + +\* the correct processes never send future messages +AllNoFutureMessagesSent == + \A p \in Corr: + /\ NoFutureMessagesForCurrentRound(p) + /\ NoFutureMessagesForLargerRounds(p) + +\* a correct process in the PREVOTE state has sent a PREVOTE message +IfInPrevoteThenSentPrevote(p) == + step[p] = "PREVOTE" => + \E m \in msgsPrevote[round[p]]: + /\ m.id \in ValidValues \cup { NilValue } + /\ m.src = p + +AllIfInPrevoteThenSentPrevote == + \A p \in Corr: IfInPrevoteThenSentPrevote(p) + +\* a correct process in the PRECOMMIT state has sent a PRECOMMIT message +IfInPrecommitThenSentPrecommit(p) == + step[p] = "PRECOMMIT" => + \E m \in msgsPrecommit[round[p]]: + /\ m.id \in ValidValues \cup { NilValue } + /\ m.src = p + +AllIfInPrecommitThenSentPrecommit == + \A p \in Corr: 
IfInPrecommitThenSentPrecommit(p) + +\* a process is in the DECIDED state if and only if it has decided on a valid value +IfInDecidedThenValidDecision(p) == + step[p] = "DECIDED" <=> decision[p] \in ValidValues + +AllIfInDecidedThenValidDecision == + \A p \in Corr: IfInDecidedThenValidDecision(p) + +\* a decided process should have received a proposal on its decision +IfInDecidedThenReceivedProposal(p) == + step[p] = "DECIDED" => + \E r \in Rounds: \* r is not necessarily round[p] + /\ \E m \in msgsPropose[r] \intersect evidence: + /\ m.src = Proposer[r] + /\ m.proposal = decision[p] + \* not inductive: /\ m.src \in Corr => (m.validRound <= r) + +AllIfInDecidedThenReceivedProposal == + \A p \in Corr: + IfInDecidedThenReceivedProposal(p) + +\* a decided process has received two-thirds of precommit messages +IfInDecidedThenReceivedTwoThirds(p) == + step[p] = "DECIDED" => + \E r \in Rounds: + LET PV == + { m \in msgsPrecommit[r] \intersect evidence: m.id = decision[p] } + IN + Cardinality(PV) >= THRESHOLD2 + +AllIfInDecidedThenReceivedTwoThirds == + \A p \in Corr: + IfInDecidedThenReceivedTwoThirds(p) + +\* for a round r, there is a proposal by the round proposer for a valid round vr +ProposalInRound(r, proposedVal, vr) == + \E m \in msgsPropose[r]: + /\ m.src = Proposer[r] + /\ m.proposal = proposedVal + /\ m.validRound = vr + +TwoThirdsPrevotes(vr, v) == + LET PV == { mm \in msgsPrevote[vr] \intersect evidence: mm.id = v } IN + Cardinality(PV) >= THRESHOLD2 + +\* if a process sends a PREVOTE, then there are three possibilities: +\* 1) the process is faulty, 2) the PREVOTE contains Nil, +\* 3) there is a proposal in an earlier (valid) round and two thirds of PREVOTES +IfSentPrevoteThenReceivedProposalOrTwoThirds(r) == + \A mpv \in msgsPrevote[r]: + \/ mpv.src \in Faulty + \* lockedRound and lockedValue is beyond my comprehension + \/ mpv.id = NilValue + \//\ mpv.src \in Corr + /\ mpv.id /= NilValue + /\ \/ ProposalInRound(r, mpv.id, NilRound) + \/ \E vr \in { rr \in Rounds: rr < r }: + /\ ProposalInRound(r, mpv.id, vr) + /\ TwoThirdsPrevotes(vr, mpv.id) + +AllIfSentPrevoteThenReceivedProposalOrTwoThirds == + \A r \in Rounds: + IfSentPrevoteThenReceivedProposalOrTwoThirds(r) + +\* if a correct process has sent a PRECOMMIT, then there are two thirds, +\* either on a valid value, or a nil value +IfSentPrecommitThenReceivedTwoThirds == + \A r \in Rounds: + \A mpc \in msgsPrecommit[r]: + mpc.src \in Corr => + \/ /\ mpc.id \in ValidValues + /\ LET PV == + { m \in msgsPrevote[r] \intersect evidence: m.id = mpc.id } + IN + Cardinality(PV) >= THRESHOLD2 + \/ /\ mpc.id = NilValue + /\ Cardinality(msgsPrevote[r]) >= THRESHOLD2 + +\* if a correct process has sent a precommit message in a round, it should +\* have sent a prevote +IfSentPrecommitThenSentPrevote == + \A r \in Rounds: + \A mpc \in msgsPrecommit[r]: + mpc.src \in Corr => + \E m \in msgsPrevote[r]: + m.src = mpc.src + +\* there is a locked round if and only if there is a locked value +LockedRoundIffLockedValue(p) == + (lockedRound[p] = NilRound) <=> (lockedValue[p] = NilValue) + +AllLockedRoundIffLockedValue == + \A p \in Corr: + LockedRoundIffLockedValue(p) + +\* when a process locked a round, it must have sent a precommit on the locked value.
+IfLockedRoundThenSentCommit(p) == + lockedRound[p] /= NilRound + => \E r \in { rr \in Rounds: rr <= round[p] }: + \E m \in msgsPrecommit[r]: + m.src = p /\ m.id = lockedValue[p] + +AllIfLockedRoundThenSentCommit == + \A p \in Corr: + IfLockedRoundThenSentCommit(p) + +\* a process always locks the latest round for which it has sent a PRECOMMIT +LatestPrecommitHasLockedRound(p) == + LET pPrecommits == + {mm \in UNION { msgsPrecommit[r]: r \in Rounds }: mm.src = p /\ mm.id /= NilValue } + IN + pPrecommits /= {} + => LET latest == + CHOOSE m \in pPrecommits: + \A m2 \in pPrecommits: + m2.round <= m.round + IN + /\ lockedRound[p] = latest.round + /\ lockedValue[p] = latest.id + +AllLatestPrecommitHasLockedRound == + \A p \in Corr: + LatestPrecommitHasLockedRound(p) + +\* Every correct process sends only one value or NilValue. +\* This test has quantifier alternation -- a threat to all decision procedures. +\* Luckily, the sets Corr and ValidValues are small. +\* @type: (ROUND, ROUND -> Set(PREMESSAGE)) => Bool; +NoEquivocationByCorrect(r, msgs) == + \A p \in Corr: + \E v \in ValidValues \union {NilValue}: + \A m \in msgs[r]: + \/ m.src /= p + \/ m.id = v + +\* a proposer never sends two values +\* @type: (ROUND, ROUND -> Set(PROPMESSAGE)) => Bool; +ProposalsByProposer(r, msgs) == + \* if the proposer is not faulty, it sends only one value + \E v \in ValidValues: + \A m \in msgs[r]: + \/ m.src \in Faulty + \/ m.src = Proposer[r] /\ m.proposal = v + +AllNoEquivocationByCorrect == + \A r \in Rounds: + /\ ProposalsByProposer(r, msgsPropose) + /\ NoEquivocationByCorrect(r, msgsPrevote) + /\ NoEquivocationByCorrect(r, msgsPrecommit) + +\* construct the set of the message senders +\* @type: (Set(MESSAGE)) => Set(PROCESS); +Senders(M) == { m.src: m \in M } + +\* The final piece by Josef Widder: +\* if T + 1 processes precommit on the same value in a round, +\* then in the future rounds there are less than 2T + 1 prevotes for another value +PrecommitsLockValue == + \A r \in Rounds: + \A v \in ValidValues \union {NilValue}: + \/ LET Precommits == {m \in msgsPrecommit[r]: m.id = v} + IN + Cardinality(Senders(Precommits)) < THRESHOLD1 + \/ \A fr \in { rr \in Rounds: rr > r }: \* future rounds + \A w \in (ValuesOrNil) \ {v}: + LET Prevotes == {m \in msgsPrevote[fr]: m.id = w} + IN + Cardinality(Senders(Prevotes)) < THRESHOLD2 + +\* a combination of all lemmas +Inv == + /\ EvidenceContainsMessages + /\ AllNoFutureMessagesSent + /\ AllIfInPrevoteThenSentPrevote + /\ AllIfInPrecommitThenSentPrecommit + /\ AllIfInDecidedThenReceivedProposal + /\ AllIfInDecidedThenReceivedTwoThirds + /\ AllIfInDecidedThenValidDecision + /\ AllLockedRoundIffLockedValue + /\ AllIfLockedRoundThenSentCommit + /\ AllLatestPrecommitHasLockedRound + /\ AllIfSentPrevoteThenReceivedProposalOrTwoThirds + /\ IfSentPrecommitThenSentPrevote + /\ IfSentPrecommitThenReceivedTwoThirds + /\ AllNoEquivocationByCorrect + /\ PrecommitsLockValue + +\* this is the inductive invariant we would like to check +TypedInv == TypeOK /\ Inv + +\* UNUSED FOR SAFETY +ValidRoundNotSmallerThanLockedRound(p) == + validRound[p] >= lockedRound[p] + +\* UNUSED FOR SAFETY +ValidRoundIffValidValue(p) == + (validRound[p] = NilRound) <=> (validValue[p] = NilValue) + +\* UNUSED FOR SAFETY +AllValidRoundIffValidValue == + \A p \in Corr: ValidRoundIffValidValue(p) + +\* if validRound is defined, then there are two-thirds of PREVOTEs +IfValidRoundThenTwoThirds(p) == + \/ validRound[p] = NilRound + \/ LET PV == { m \in msgsPrevote[validRound[p]]: m.id = validValue[p] } IN + 
Cardinality(PV) >= THRESHOLD2 + +\* UNUSED FOR SAFETY +AllIfValidRoundThenTwoThirds == + \A p \in Corr: IfValidRoundThenTwoThirds(p) + +\* a valid round can only be set to a valid value that was proposed earlier +IfValidRoundThenProposal(p) == + \/ validRound[p] = NilRound + \/ \E m \in msgsPropose[validRound[p]]: + m.proposal = validValue[p] + +\* UNUSED FOR SAFETY +AllIfValidRoundThenProposal == + \A p \in Corr: IfValidRoundThenProposal(p) + +(******************************** THEOREMS ***************************************) +(* Under this condition, the faulty processes can decide alone *) +FaultyQuorum == Cardinality(Faulty) >= THRESHOLD2 + +(* The standard condition of the Tendermint security model *) +LessThanThirdFaulty == N > 3 * T /\ Cardinality(Faulty) <= T + +(* + TypedInv is an inductive invariant, provided that there is no faulty quorum. + We run Apalache to prove this theorem only for fixed instances of 4 to 10 processes. + (We run Apalache manually, as it does not parse theorem statements at the moment.) + To get a parameterized argument, one has to use a theorem prover, e.g., TLAPS. + *) +THEOREM TypedInvIsInductive == + \/ FaultyQuorum \* if there are 2 * T + 1 faulty processes, we give up + \//\ Init => TypedInv + /\ TypedInv /\ [Next]_vars => TypedInv' + +(* + There should be no fork when there are less than 1/3 faulty processes. + *) +THEOREM AgreementWhenLessThanThirdFaulty == + LessThanThirdFaulty /\ TypedInv => Agreement + +(* + In a more general case, when there are less than 2/3 faulty processes, + there is either Agreement (no fork), or two scenarios exist: + equivocation by Faulty, or amnesia by Faulty. + *) +THEOREM AgreementOrFork == + ~FaultyQuorum /\ TypedInv => Accountability + +============================================================================= + diff --git a/spec/light-client/accountability/TendermintAccTrace_004_draft.tla b/spec/light-client/accountability/TendermintAccTrace_004_draft.tla new file mode 100644 index 0000000000..bbc708063a --- /dev/null +++ b/spec/light-client/accountability/TendermintAccTrace_004_draft.tla @@ -0,0 +1,37 @@ +------------------ MODULE TendermintAccTrace_004_draft ------------------------- +(* + When Apalache is running too slowly and we have an idea of a counterexample, + we use this module to restrict the behaviors only to certain actions. + Once the whole trace is replayed, the system deadlocks. + + Version 1. + + Igor Konnov, 2020. + *) + +EXTENDS Sequences, Apalache, TendermintAcc_004_draft + +\* a sequence of action names that should appear in the given order, +\* excluding "Init" +CONSTANT + \* @type: TRACE; + Trace + +VARIABLE + \* @type: TRACE; + toReplay + +TraceInit == + /\ toReplay = Trace + /\ action' := "Init" + /\ Init + +TraceNext == + /\ Len(toReplay) > 0 + /\ toReplay' = Tail(toReplay) + \* Here is the trick. We restrict the action to the expected one, + \* so the other actions will be pruned + /\ action' := Head(toReplay) + /\ Next + +================================================================================ diff --git a/spec/light-client/accountability/TendermintAcc_004_draft.tla b/spec/light-client/accountability/TendermintAcc_004_draft.tla new file mode 100644 index 0000000000..c542f4641b --- /dev/null +++ b/spec/light-client/accountability/TendermintAcc_004_draft.tla @@ -0,0 +1,596 @@ +-------------------- MODULE TendermintAcc_004_draft --------------------------- +(* + A TLA+ specification of a simplified Tendermint consensus, tuned for + fork accountability.
The simplifications are as follows: + + - the protocol runs for one height, that is, it is one-shot consensus + + - this specification focuses on safety, so timeouts are modelled + with non-determinism + + - the proposer function is non-deterministic, no fairness is assumed + + - the messages by the faulty processes are injected right in the initial states + + - every process has the voting power of 1 + + - hashes are modelled as identity + + Having the above assumptions in mind, the specification follows the pseudo-code + of the Tendermint paper: https://arxiv.org/abs/1807.04938 + + Byzantine processes can demonstrate arbitrary behavior, including + no communication. We show that if agreement is violated, then the Byzantine + processes demonstrate one of the two behaviours: + + - Equivocation: a Byzantine process may send two different values + in the same round. + + - Amnesia: a Byzantine process may lock a value without unlocking + the previous value that it has locked in the past. + + * Version 4. Remove defective processes, fix bugs, collect global evidence. + * Version 3. Modular and parameterized definitions. + * Version 2. Bugfixes in the spec and an inductive invariant. + * Version 1. A preliminary specification. + + Zarko Milosevic, Igor Konnov, Informal Systems, 2019-2020. + *) + +EXTENDS Integers, FiniteSets, typedefs + +(********************* PROTOCOL PARAMETERS **********************************) +CONSTANTS + \* @type: Set(PROCESS); + Corr, \* the set of correct processes + \* @type: Set(PROCESS); + Faulty, \* the set of Byzantine processes, may be empty + \* @type: Int; + N, \* the total number of processes: correct and Byzantine + \* @type: Int; + T, \* an upper bound on the number of Byzantine processes + \* @type: Set(VALUE); + ValidValues, \* the set of valid values, proposed both by correct and faulty + \* @type: Set(VALUE); + InvalidValues, \* the set of invalid values, never proposed by the correct ones + \* @type: ROUND; + MaxRound, \* the maximal round number + \* @type: ROUND -> PROCESS; + Proposer \* the proposer function from Rounds to AllProcs + +ASSUME(N = Cardinality(Corr \union Faulty)) + +(*************************** DEFINITIONS ************************************) +AllProcs == Corr \union Faulty \* the set of all processes +\* @type: Set(ROUND); +Rounds == 0..MaxRound \* the set of potential rounds +\* @type: ROUND; +NilRound == -1 \* a special value to denote a nil round, outside of Rounds +RoundsOrNil == Rounds \union {NilRound} +Values == ValidValues \union InvalidValues \* the set of all values +\* @type: VALUE; +NilValue == "None" \* a special value to denote a nil value, outside of Values +ValuesOrNil == Values \union {NilValue} + +\* a value hash is modeled as identity +\* @type: (t) => t; +Id(v) == v + +\* The validity predicate +IsValid(v) == v \in ValidValues + +\* the two thresholds that are used in the algorithm +THRESHOLD1 == T + 1 \* at least one process is not faulty +THRESHOLD2 == 2 * T + 1 \* a quorum when having N > 3 * T + +(********************* TYPE ANNOTATIONS FOR APALACHE **************************) + +\* An empty set of messages +\* @type: Set(MESSAGE); +EmptyMsgSet == {} + +(********************* PROTOCOL STATE VARIABLES ******************************) +VARIABLES + \* @type: PROCESS -> ROUND; + round, \* a process round number: Corr -> Rounds + \* @type: PROCESS -> STEP; + step, \* a process step: Corr -> { "PROPOSE", "PREVOTE", "PRECOMMIT", "DECIDED" } + \* @type: PROCESS -> VALUE; + decision, \* process decision: Corr ->
ValuesOrNil + \* @type: PROCESS -> VALUE; + lockedValue, \* a locked value: Corr -> ValuesOrNil + \* @type: PROCESS -> ROUND; + lockedRound, \* a locked round: Corr -> RoundsOrNil + \* @type: PROCESS -> VALUE; + validValue, \* a valid value: Corr -> ValuesOrNil + \* @type: PROCESS -> ROUND; + validRound \* a valid round: Corr -> RoundsOrNil + +\* book-keeping variables +VARIABLES + \* @type: ROUND -> Set(PROPMESSAGE); + msgsPropose, \* PROPOSE messages broadcast in the system, Rounds -> Messages + \* @type: ROUND -> Set(PREMESSAGE); + msgsPrevote, \* PREVOTE messages broadcast in the system, Rounds -> Messages + \* @type: ROUND -> Set(PREMESSAGE); + msgsPrecommit, \* PRECOMMIT messages broadcast in the system, Rounds -> Messages + \* @type: Set(MESSAGE); + evidence, \* the messages that were used by the correct processes to make transitions + \* @type: ACTION; + action \* we use this variable to see which action was taken + +(* to see a type invariant, check TendermintAccInv3 *) + +\* a handy definition used in UNCHANGED +vars == <<round, step, decision, lockedValue, lockedRound, validValue, validRound, msgsPropose, msgsPrevote, msgsPrecommit, evidence, action>> + +(********************* PROTOCOL INITIALIZATION ******************************) +\* @type: (ROUND) => Set(PROPMESSAGE); +FaultyProposals(r) == + [ + type : {"PROPOSAL"}, + src : Faulty, + round : {r}, + proposal : Values, + validRound: RoundsOrNil + ] + +\* @type: Set(PROPMESSAGE); +AllFaultyProposals == + [ + type : {"PROPOSAL"}, + src : Faulty, + round : Rounds, + proposal : Values, + validRound: RoundsOrNil + ] + +\* @type: (ROUND) => Set(PREMESSAGE); +FaultyPrevotes(r) == + [ + type : {"PREVOTE"}, + src : Faulty, + round: {r}, + id : Values + ] + +\* @type: Set(PREMESSAGE); +AllFaultyPrevotes == + [ + type : {"PREVOTE"}, + src : Faulty, + round: Rounds, + id : Values + ] + +\* @type: (ROUND) => Set(PREMESSAGE); +FaultyPrecommits(r) == + [ + type : {"PRECOMMIT"}, + src : Faulty, + round: {r}, + id : Values + ] + +\* @type: Set(PREMESSAGE); +AllFaultyPrecommits == + [ + type : {"PRECOMMIT"}, + src : Faulty, + round: Rounds, + id : Values + ] + +\* @type: (ROUND -> Set(MESSAGE)) => Bool; +BenignRoundsInMessages(msgfun) == + \* the message function never contains a message for a wrong round + \A r \in Rounds: + \A m \in msgfun[r]: + r = m.round + +\* The initial states of the protocol. Some faults can be in the system already.
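+\* Concretely, Init below draws msgsPropose, msgsPrevote, and msgsPrecommit as +\* arbitrary subsets of AllFaultyProposals, AllFaultyPrevotes, and AllFaultyPrecommits, +\* restricted to benign rounds via BenignRoundsInMessages.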
+Init == + /\ round = [p \in Corr |-> 0] + /\ step = [p \in Corr |-> "PROPOSE"] + /\ decision = [p \in Corr |-> NilValue] + /\ lockedValue = [p \in Corr |-> NilValue] + /\ lockedRound = [p \in Corr |-> NilRound] + /\ validValue = [p \in Corr |-> NilValue] + /\ validRound = [p \in Corr |-> NilRound] + /\ msgsPropose \in [Rounds -> SUBSET AllFaultyProposals] + /\ msgsPrevote \in [Rounds -> SUBSET AllFaultyPrevotes] + /\ msgsPrecommit \in [Rounds -> SUBSET AllFaultyPrecommits] + /\ BenignRoundsInMessages(msgsPropose) + /\ BenignRoundsInMessages(msgsPrevote) + /\ BenignRoundsInMessages(msgsPrecommit) + /\ evidence = EmptyMsgSet + /\ action = "Init" + +(************************ MESSAGE PASSING ********************************) +\* @type: (PROCESS, ROUND, VALUE, ROUND) => Bool; +BroadcastProposal(pSrc, pRound, pProposal, pValidRound) == + LET + \* @type: PROPMESSAGE; + newMsg == + [ + type |-> "PROPOSAL", + src |-> pSrc, + round |-> pRound, + proposal |-> pProposal, + validRound |-> pValidRound + ] + IN + msgsPropose' = [msgsPropose EXCEPT ![pRound] = msgsPropose[pRound] \union {newMsg}] + +\* @type: (PROCESS, ROUND, VALUE) => Bool; +BroadcastPrevote(pSrc, pRound, pId) == + LET + \* @type: PREMESSAGE; + newMsg == + [ + type |-> "PREVOTE", + src |-> pSrc, + round |-> pRound, + id |-> pId + ] + IN + msgsPrevote' = [msgsPrevote EXCEPT ![pRound] = msgsPrevote[pRound] \union {newMsg}] + +\* @type: (PROCESS, ROUND, VALUE) => Bool; +BroadcastPrecommit(pSrc, pRound, pId) == + LET + \* @type: PREMESSAGE; + newMsg == + [ + type |-> "PRECOMMIT", + src |-> pSrc, + round |-> pRound, + id |-> pId + ] + IN + msgsPrecommit' = [msgsPrecommit EXCEPT ![pRound] = msgsPrecommit[pRound] \union {newMsg}] + + +(********************* PROTOCOL TRANSITIONS ******************************) +\* lines 12-13 +StartRound(p, r) == + /\ step[p] /= "DECIDED" \* a decided process does not participate in consensus + /\ round' = [round EXCEPT ![p] = r] + /\ step' = [step EXCEPT ![p] = "PROPOSE"] + +\* lines 14-19, a proposal may be sent later +\* @type: (PROCESS) => Bool; +InsertProposal(p) == + LET r == round[p] IN + /\ p = Proposer[r] + /\ step[p] = "PROPOSE" + \* if the proposer is sending a proposal, then there are no other proposals + \* by the correct processes for the same round + /\ \A m \in msgsPropose[r]: m.src /= p + /\ \E v \in ValidValues: + LET + \* @type: VALUE; + proposal == + IF validValue[p] /= NilValue + THEN validValue[p] + ELSE v + IN BroadcastProposal(p, round[p], proposal, validRound[p]) + /\ UNCHANGED <<round, step, decision, lockedValue, lockedRound, validValue, validRound, msgsPrevote, msgsPrecommit, evidence>> + /\ action' = "InsertProposal" + +\* lines 22-27 +UponProposalInPropose(p) == + \E v \in Values: + /\ step[p] = "PROPOSE" (* line 22 *) + /\ LET + \* @type: PROPMESSAGE; + msg == + [ + type |-> "PROPOSAL", + src |-> Proposer[round[p]], + round |-> round[p], + proposal |-> v, + validRound |-> NilRound + ] + IN + /\ msg \in msgsPropose[round[p]] \* line 22 + /\ evidence' = {msg} \union evidence + /\ LET mid == (* line 23 *) + IF IsValid(v) /\ (lockedRound[p] = NilRound \/ lockedValue[p] = v) + THEN Id(v) + ELSE NilValue + IN + BroadcastPrevote(p, round[p], mid) \* lines 24-26 + /\ step' = [step EXCEPT ![p] = "PREVOTE"] + /\ UNCHANGED <<round, decision, lockedValue, lockedRound, validValue, validRound, msgsPropose, msgsPrecommit>> + /\ action' = "UponProposalInPropose" + +\* lines 28-33 +UponProposalInProposeAndPrevote(p) == + \E v \in Values, vr \in Rounds: + /\ step[p] = "PROPOSE" /\ 0 <= vr /\ vr < round[p] \* line 28, the while part + /\ LET + \* @type: PROPMESSAGE; + msg == + [ + type |-> "PROPOSAL", + src |-> Proposer[round[p]], + round |-> round[p], + proposal |-> v, + validRound |-> vr + ] + IN + 
/\ msg \in msgsPropose[round[p]] \* line 28 + /\ LET PV == { m \in msgsPrevote[vr]: m.id = Id(v) } IN + /\ Cardinality(PV) >= THRESHOLD2 \* line 28 + /\ evidence' = PV \union {msg} \union evidence + /\ LET mid == (* line 29 *) + IF IsValid(v) /\ (lockedRound[p] <= vr \/ lockedValue[p] = v) + THEN Id(v) + ELSE NilValue + IN + BroadcastPrevote(p, round[p], mid) \* lines 24-26 + /\ step' = [step EXCEPT ![p] = "PREVOTE"] + /\ UNCHANGED <<round, decision, lockedValue, lockedRound, validValue, validRound, msgsPropose, msgsPrecommit>> + /\ action' = "UponProposalInProposeAndPrevote" + + \* lines 34-35 + lines 61-64 (onTimeoutPrevote) +UponQuorumOfPrevotesAny(p) == + /\ step[p] = "PREVOTE" \* line 34 and 61 + /\ \E MyEvidence \in SUBSET msgsPrevote[round[p]]: + \* find the unique voters in the evidence + LET Voters == { m.src: m \in MyEvidence } IN + \* compare the number of the unique voters against the threshold + /\ Cardinality(Voters) >= THRESHOLD2 \* line 34 + /\ evidence' = MyEvidence \union evidence + /\ BroadcastPrecommit(p, round[p], NilValue) + /\ step' = [step EXCEPT ![p] = "PRECOMMIT"] + /\ UNCHANGED <<round, decision, lockedValue, lockedRound, validValue, validRound, msgsPropose, msgsPrevote>> + /\ action' = "UponQuorumOfPrevotesAny" + +\* lines 36-46 +UponProposalInPrevoteOrCommitAndPrevote(p) == + \E v \in ValidValues, vr \in RoundsOrNil: + /\ step[p] \in {"PREVOTE", "PRECOMMIT"} \* line 36 + /\ LET + \* @type: PROPMESSAGE; + msg == + [ + type |-> "PROPOSAL", + src |-> Proposer[round[p]], + round |-> round[p], + proposal |-> v, + validRound |-> vr + ] + IN + /\ msg \in msgsPropose[round[p]] \* line 36 + /\ LET PV == { m \in msgsPrevote[round[p]]: m.id = Id(v) } IN + /\ Cardinality(PV) >= THRESHOLD2 \* line 36 + /\ evidence' = PV \union {msg} \union evidence + /\ IF step[p] = "PREVOTE" + THEN \* lines 38-41: + /\ lockedValue' = [lockedValue EXCEPT ![p] = v] + /\ lockedRound' = [lockedRound EXCEPT ![p] = round[p]] + /\ BroadcastPrecommit(p, round[p], Id(v)) + /\ step' = [step EXCEPT ![p] = "PRECOMMIT"] + ELSE + UNCHANGED <<lockedValue, lockedRound, msgsPrecommit, step>> + \* lines 42-43 + /\ validValue' = [validValue EXCEPT ![p] = v] + /\ validRound' = [validRound EXCEPT ![p] = round[p]] + /\ UNCHANGED <<round, decision, msgsPropose, msgsPrevote>> + /\ action' = "UponProposalInPrevoteOrCommitAndPrevote" + +\* lines 47-48 + 65-67 (onTimeoutPrecommit) +UponQuorumOfPrecommitsAny(p) == + /\ \E MyEvidence \in SUBSET msgsPrecommit[round[p]]: + \* find the unique committers in the evidence + LET Committers == { m.src: m \in MyEvidence } IN + \* compare the number of the unique committers against the threshold + /\ Cardinality(Committers) >= THRESHOLD2 \* line 47 + /\ evidence' = MyEvidence \union evidence + /\ round[p] + 1 \in Rounds + /\ StartRound(p, round[p] + 1) + /\ UNCHANGED <<decision, lockedValue, lockedRound, validValue, validRound, msgsPropose, msgsPrevote, msgsPrecommit>> + /\ action' = "UponQuorumOfPrecommitsAny" + +\* lines 49-54 +UponProposalInPrecommitNoDecision(p) == + /\ decision[p] = NilValue \* line 49 + /\ \E v \in ValidValues (* line 50*) , r \in Rounds, vr \in RoundsOrNil: + /\ LET + \* @type: PROPMESSAGE; + msg == + [ + type |-> "PROPOSAL", + src |-> Proposer[r], + round |-> r, + proposal |-> v, + validRound |-> vr + ] + IN + /\ msg \in msgsPropose[r] \* line 49 + /\ LET PV == { m \in msgsPrecommit[r]: m.id = Id(v) } IN + /\ Cardinality(PV) >= THRESHOLD2 \* line 49 + /\ evidence' = PV \union {msg} \union evidence + /\ decision' = [decision EXCEPT ![p] = v] \* update the decision, line 51 + \* The original algorithm does not have 'DECIDED', but it increments the height. + \* We introduced 'DECIDED' here to prevent the process from changing its decision.
+       /\ step' = [step EXCEPT ![p] = "DECIDED"]
+       /\ UNCHANGED <<round, lockedValue, lockedRound, validValue,
+                      validRound, msgsPropose, msgsPrevote, msgsPrecommit>>
+       /\ action' = "UponProposalInPrecommitNoDecision"
+
+\* the actions below are not essential for safety, but added for completeness
+
+\* lines 20-21 + 57-60
+OnTimeoutPropose(p) ==
+  /\ step[p] = "PROPOSE"
+  /\ p /= Proposer[round[p]]
+  /\ BroadcastPrevote(p, round[p], NilValue)
+  /\ step' = [step EXCEPT ![p] = "PREVOTE"]
+  /\ UNCHANGED <<round, decision, evidence, lockedValue, lockedRound,
+                 validValue, validRound, msgsPropose, msgsPrecommit>>
+  /\ action' = "OnTimeoutPropose"
+
+\* lines 44-46
+OnQuorumOfNilPrevotes(p) ==
+  /\ step[p] = "PREVOTE"
+  /\ LET PV == { m \in msgsPrevote[round[p]]: m.id = Id(NilValue) } IN
+     /\ Cardinality(PV) >= THRESHOLD2 \* line 36
+     /\ evidence' = PV \union evidence
+     /\ BroadcastPrecommit(p, round[p], Id(NilValue))
+     /\ step' = [step EXCEPT ![p] = "PRECOMMIT"]
+     /\ UNCHANGED <<round, decision, lockedValue, lockedRound,
+                    validValue, validRound, msgsPropose, msgsPrevote>>
+     /\ action' = "OnQuorumOfNilPrevotes"
+
+\* lines 55-56
+OnRoundCatchup(p) ==
+  \E r \in {rr \in Rounds: rr > round[p]}:
+    LET RoundMsgs == msgsPropose[r] \union msgsPrevote[r] \union msgsPrecommit[r] IN
+    \E MyEvidence \in SUBSET RoundMsgs:
+        LET Faster == { m.src: m \in MyEvidence } IN
+        /\ Cardinality(Faster) >= THRESHOLD1
+        /\ evidence' = MyEvidence \union evidence
+        /\ StartRound(p, r)
+        /\ UNCHANGED <<decision, lockedValue, lockedRound, validValue,
+                       validRound, msgsPropose, msgsPrevote, msgsPrecommit>>
+        /\ action' = "OnRoundCatchup"
+
+(*
+ * A system transition. In this specification, the system may eventually deadlock,
+ * e.g., when all processes decide. This is expected behavior, as we focus on safety.
+ *)
+Next ==
+  \E p \in Corr:
+    \/ InsertProposal(p)
+    \/ UponProposalInPropose(p)
+    \/ UponProposalInProposeAndPrevote(p)
+    \/ UponQuorumOfPrevotesAny(p)
+    \/ UponProposalInPrevoteOrCommitAndPrevote(p)
+    \/ UponQuorumOfPrecommitsAny(p)
+    \/ UponProposalInPrecommitNoDecision(p)
+    \* the actions below are not essential for safety, but added for completeness
+    \/ OnTimeoutPropose(p)
+    \/ OnQuorumOfNilPrevotes(p)
+    \/ OnRoundCatchup(p)
+
+
+(**************************** FORK SCENARIOS ***************************)
+
+\* equivocation by a process p
+EquivocationBy(p) ==
+   \E m1, m2 \in evidence:
+      /\ m1 /= m2
+      /\ m1.src = p
+      /\ m2.src = p
+      /\ m1.round = m2.round
+      /\ m1.type = m2.type
+
+\* amnesic behavior by a process p
+AmnesiaBy(p) ==
+    \E r1, r2 \in Rounds:
+      /\ r1 < r2
+      /\ \E v1, v2 \in ValidValues:
+        /\ v1 /= v2
+        /\ [
+             type |-> "PRECOMMIT",
+             src |-> p,
+             round |-> r1,
+             id |-> Id(v1)
+           ] \in evidence
+        /\ [
+             type |-> "PREVOTE",
+             src |-> p,
+             round |-> r2,
+             id |-> Id(v2)
+           ] \in evidence
+        /\ \A r \in { rnd \in Rounds: r1 <= rnd /\ rnd < r2 }:
+            LET prevotes ==
+                { m \in evidence:
+                    m.type = "PREVOTE" /\ m.round = r /\ m.id = Id(v2) }
+            IN
+            Cardinality(prevotes) < THRESHOLD2
+
+(******************************** PROPERTIES ***************************************)
+
+\* the safety property -- agreement
+Agreement ==
+  \A p, q \in Corr:
+    \/ decision[p] = NilValue
+    \/ decision[q] = NilValue
+    \/ decision[p] = decision[q]
+
+\* the protocol validity
+Validity ==
+    \A p \in Corr: decision[p] \in ValidValues \union {NilValue}
+
+(*
+  The protocol safety. Two cases are possible:
+    1. There is no fork, that is, Agreement holds true.
+    2. A subset of faulty processes demonstrates equivocation or amnesia.
+ *)
+Accountability ==
+    \/ Agreement
+    \/ \E Detectable \in SUBSET Faulty:
+        /\ Cardinality(Detectable) >= THRESHOLD1
+        /\ \A p \in Detectable:
+            EquivocationBy(p) \/ AmnesiaBy(p)
+
+(****************** FALSE INVARIANTS TO PRODUCE EXAMPLES ***********************)
+
+\* This property is violated. You can check it to see how amnesic behavior
+\* appears in the evidence variable.
+NoAmnesia ==
+    \A p \in Faulty: ~AmnesiaBy(p)
+
+\* This property is violated. You can check it to see an example of equivocation.
+NoEquivocation ==
+    \A p \in Faulty: ~EquivocationBy(p)
+
+\* This property is violated. You can check it to see an example of agreement.
+\* It is not exactly ~Agreement, as we do not want to see the states where
+\* decision[p] = NilValue
+NoAgreement ==
+  \A p, q \in Corr:
+    (p /= q /\ decision[p] /= NilValue /\ decision[q] /= NilValue)
+        => decision[p] /= decision[q]
+
+\* Either agreement holds, or the faulty processes indeed demonstrate amnesia.
+\* This property is violated. A counterexample should demonstrate equivocation.
+AgreementOrAmnesia ==
+    Agreement \/ (\A p \in Faulty: AmnesiaBy(p))
+
+\* We expect this property to be violated. It shows us a protocol run,
+\* where one faulty process demonstrates amnesia without equivocation.
+\* However, the absence of amnesia
+\* is a tough constraint for Apalache. It has not reported a counterexample
+\* for n=4,f=2, length <= 5.
+ShowMeAmnesiaWithoutEquivocation ==
+    (~Agreement /\ \E p \in Faulty: ~EquivocationBy(p))
+        => \A p \in Faulty: ~AmnesiaBy(p)
+
+\* This property is violated on n=4,f=2, length=4 in less than 10 min.
+\* Two faulty processes may demonstrate amnesia without equivocation.
+AmnesiaImpliesEquivocation ==
+    (\E p \in Faulty: AmnesiaBy(p)) => (\E q \in Faulty: EquivocationBy(q))
+
+(*
+  This property is violated. You can check it to see that all correct processes
+  may reach MaxRound without making a decision.
+ *)
+NeverUndecidedInMaxRound ==
+    LET AllInMax == \A p \in Corr: round[p] = MaxRound
+        AllDecided == \A p \in Corr: decision[p] /= NilValue
+    IN
+    AllInMax => AllDecided
+
+=============================================================================
+
diff --git a/spec/light-client/accountability/results/001indinv-apalache-mem-log.svg b/spec/light-client/accountability/results/001indinv-apalache-mem-log.svg
new file mode 100644
index 0000000000..5821418da4
--- /dev/null
+++ b/spec/light-client/accountability/results/001indinv-apalache-mem-log.svg
@@ -0,0 +1,1063 @@
+[SVG plot: memory usage (logarithmic scale), generated 2020-12-11 by Matplotlib v3.3.3; vector data omitted]
diff --git a/spec/light-client/accountability/results/001indinv-apalache-mem.svg b/spec/light-client/accountability/results/001indinv-apalache-mem.svg
new file mode 100644
index 0000000000..dc7213eaed
--- /dev/null
+++ b/spec/light-client/accountability/results/001indinv-apalache-mem.svg
@@ -0,0 +1,1141 @@
+[SVG plot: memory usage (linear scale), generated 2020-12-11 by Matplotlib v3.3.3; vector data omitted]
diff --git a/spec/light-client/accountability/results/001indinv-apalache-ncells.svg b/spec/light-client/accountability/results/001indinv-apalache-ncells.svg
new file mode 100644
index 0000000000..20c49f4f19
--- /dev/null
+++ b/spec/light-client/accountability/results/001indinv-apalache-ncells.svg
@@ -0,0 +1,1015 @@
+[SVG plot: number of arena cells, generated 2020-12-11 by Matplotlib v3.3.3; vector data omitted]
diff --git a/spec/light-client/accountability/results/001indinv-apalache-nclauses.svg b/spec/light-client/accountability/results/001indinv-apalache-nclauses.svg
new file mode 100644
index 0000000000..86d19143bf
--- /dev/null
+++ b/spec/light-client/accountability/results/001indinv-apalache-nclauses.svg
@@ -0,0 +1,1133 @@
+[SVG plot: number of SMT clauses, generated 2020-12-11 by Matplotlib v3.3.3; vector data omitted]
diff --git a/spec/light-client/accountability/results/001indinv-apalache-report.md b/spec/light-client/accountability/results/001indinv-apalache-report.md
new file mode 100644
index 0000000000..0c14742c53
--- /dev/null
+++ b/spec/light-client/accountability/results/001indinv-apalache-report.md
@@ -0,0 +1,61 @@
+# Results of 001indinv-apalache
+
+## 1. Awesome plots
+
+### 1.1. Time (logarithmic scale)
+
+![time-log](001indinv-apalache-time-log.svg "Time Log")
+
+### 1.2. Time (linear)
+
+![time](001indinv-apalache-time.svg "Time")
+
+### 1.3. Memory (logarithmic scale)
+
+![mem-log](001indinv-apalache-mem-log.svg "Memory Log")
+
+### 1.4. Memory (linear)
+
+![mem](001indinv-apalache-mem.svg "Memory")
+
+### 1.5. Number of arena cells (linear)
+
+![ncells](001indinv-apalache-ncells.svg "Number of arena cells")
+
+### 1.6. Number of SMT clauses (linear)
+
+![nclauses](001indinv-apalache-nclauses.svg "Number of SMT clauses")
+
+## 2. Input parameters
+
+no  | filename       | tool       | timeout   | init       | inv              | next   | args
+----|----------------|------------|-----------|------------|------------------|--------|------------------------------
+1   | MC_n4_f1.tla   | apalache   | 10h       | TypedInv   | TypedInv         |        | --length=1 --cinit=ConstInit
+2   | MC_n4_f2.tla   | apalache   | 10h       | TypedInv   | TypedInv         |        | --length=1 --cinit=ConstInit
+3   | MC_n5_f1.tla   | apalache   | 10h       | TypedInv   | TypedInv         |        | --length=1 --cinit=ConstInit
+4   | MC_n5_f2.tla   | apalache   | 10h       | TypedInv   | TypedInv         |        | --length=1 --cinit=ConstInit
+5   | MC_n4_f1.tla   | apalache   | 20h       | Init       | TypedInv         |        | --length=0 --cinit=ConstInit
+6   | MC_n4_f2.tla   | apalache   | 20h       | Init       | TypedInv         |        | --length=0 --cinit=ConstInit
+7   | MC_n5_f1.tla   | apalache   | 20h       | Init       | TypedInv         |        | --length=0 --cinit=ConstInit
+8   | MC_n5_f2.tla   | apalache   | 20h       | Init       | TypedInv         |        | --length=0 --cinit=ConstInit
+9   | MC_n4_f1.tla   | apalache   | 20h       | TypedInv   | Agreement        |        | --length=0 --cinit=ConstInit
+10  | MC_n4_f2.tla   | apalache   | 20h       | TypedInv   | Accountability   |        | --length=0 --cinit=ConstInit
+11  | MC_n5_f1.tla   | apalache   | 20h       | TypedInv   | Agreement        |        | --length=0 --cinit=ConstInit
+12  | MC_n5_f2.tla   | apalache   | 20h       | TypedInv   | Accountability   |        | --length=0 --cinit=ConstInit
+
+## 3. Detailed results: 001indinv-apalache-unstable.csv
+
+01:no  | 02:tool    | 03:status   | 04:time_sec   | 05:depth   | 05:mem_kb   | 10:ninit_trans   | 11:ninit_trans   | 12:ncells   | 13:nclauses   | 14:navg_clause_len
+-------|------------|-------------|---------------|------------|-------------|------------------|------------------|-------------|---------------|--------------------
+1      | apalache   | NoError     | 11m           | 1          | 3.0GB       | 0                | 0                | 217K        | 1.0M          | 89
+2      | apalache   | NoError     | 11m           | 1          | 3.0GB       | 0                | 0                | 207K        | 1.0M          | 88
+3      | apalache   | NoError     | 16m           | 1          | 4.0GB       | 0                | 0                | 311K        | 2.0M          | 101
+4      | apalache   | NoError     | 14m           | 1          | 3.0GB       | 0                | 0                | 290K        | 1.0M          | 103
+5      | apalache   | NoError     | 9s            | 0          | 563MB       | 0                | 0                | 2.0K        | 14K           | 42
+6      | apalache   | NoError     | 10s           | 0          | 657MB       | 0                | 0                | 2.0K        | 28K           | 43
+7      | apalache   | NoError     | 8s            | 0          | 635MB       | 0                | 0                | 2.0K        | 17K           | 44
+8      | apalache   | NoError     | 10s           | 0          | 667MB       | 0                | 0                | 3.0K        | 32K           | 45
+9      | apalache   | NoError     | 5m05s         | 0          | 2.0GB       | 0                | 0                | 196K        | 889K          | 108
+10     | apalache   | NoError     | 8m08s         | 0          | 6.0GB       | 0                | 0                | 2.0M        | 3.0M          | 34
+11     | apalache   | NoError     | 9m09s         | 0          | 3.0GB       | 0                | 0                | 284K        | 1.0M          | 128
+12     | apalache   | NoError     | 14m           | 0          | 7.0GB       | 0                | 0                | 4.0M        | 5.0M          | 38
diff --git a/spec/light-client/accountability/results/001indinv-apalache-time-log.svg b/spec/light-client/accountability/results/001indinv-apalache-time-log.svg
new file mode 100644
index 0000000000..458d67c6c3
--- /dev/null
+++ b/spec/light-client/accountability/results/001indinv-apalache-time-log.svg
@@ -0,0 +1,1134 @@
+[SVG plot: time (logarithmic scale), generated 2020-12-11 by Matplotlib v3.3.3; vector data omitted]
diff --git a/spec/light-client/accountability/results/001indinv-apalache-time.svg b/spec/light-client/accountability/results/001indinv-apalache-time.svg
new file mode 100644
index 0000000000..a5db5a8b59
--- /dev/null
+++ b/spec/light-client/accountability/results/001indinv-apalache-time.svg
@@ -0,0 +1,957 @@
+[SVG plot: time (linear scale), generated 2020-12-11 by Matplotlib v3.3.3; vector data omitted]
diff --git a/spec/light-client/accountability/results/001indinv-apalache-unstable.csv b/spec/light-client/accountability/results/001indinv-apalache-unstable.csv
new file mode 100644
index 0000000000..db1a060938
--- /dev/null
+++ b/spec/light-client/accountability/results/001indinv-apalache-unstable.csv
@@ -0,0 +1,13 @@
+01:no,02:tool,03:status,04:time_sec,05:depth,05:mem_kb,10:ninit_trans,11:ninit_trans,12:ncells,13:nclauses,14:navg_clause_len
+1,apalache,NoError,704,1,3215424,0,0,217385,1305718,89
+2,apalache,NoError,699,1,3195020,0,0,207969,1341979,88
+3,apalache,NoError,1018,1,4277060,0,0,311798,2028544,101
+4,apalache,NoError,889,1,4080012,0,0,290989,1951616,103
+5,apalache,NoError,9,0,577100,0,0,2045,14655,42
+6,apalache,NoError,10,0,673772,0,0,2913,28213,43
+7,apalache,NoError,8,0,651008,0,0,2214,17077,44
+8,apalache,NoError,10,0,683188,0,0,3082,32651,45
+9,apalache,NoError,340,0,3053848,0,0,196943,889859,108
+10,apalache,NoError,517,0,6424536,0,0,2856378,3802779,34
+11,apalache,NoError,587,0,4028516,0,0,284369,1343296,128
+12,apalache,NoError,880,0,7881148,0,0,4382556,5778072,38
diff --git a/spec/light-client/accountability/run.sh b/spec/light-client/accountability/run.sh
new file mode 100755
index 0000000000..75e57a5f86
--- /dev/null
+++ b/spec/light-client/accountability/run.sh
@@ -0,0 +1,9 @@
+#!/bin/sh
+#
+# The script to run all experiments at once
+
+export SCRIPTS_DIR=~/devl/apalache-tests/scripts
+export BUILDS="unstable"
+export BENCHMARK=001indinv-apalache
+export RUN_SCRIPT=./run-all.sh # alternatively, use ./run-parallel.sh
+make -e -f ~/devl/apalache-tests/Makefile.common
diff --git a/spec/light-client/accountability/typedefs.tla b/spec/light-client/accountability/typedefs.tla
new file mode 100644
index 0000000000..5b4f7de52c
--- /dev/null
+++ b/spec/light-client/accountability/typedefs.tla
@@ -0,0 +1,36 @@
+-------------------- MODULE typedefs ---------------------------
+(*
+  @typeAlias: PROCESS = Str;
+  @typeAlias: VALUE = Str;
+  @typeAlias: STEP = Str;
+  @typeAlias: ROUND = Int;
+  @typeAlias: ACTION = Str;
+  @typeAlias: TRACE = Seq(Str);
+  @typeAlias: PROPMESSAGE =
+  [
+    type: STEP,
+    src: PROCESS,
+    round: ROUND,
+    proposal: VALUE,
+    validRound: ROUND
+  ];
+  @typeAlias: PREMESSAGE =
+  [
+    type: STEP,
+    src: PROCESS,
+    round: ROUND,
+    id: VALUE
+  ];
+  @typeAlias: MESSAGE =
+  [
+    type: STEP,
+    src: PROCESS,
+    round: ROUND,
+    proposal: VALUE,
+    validRound: ROUND,
+    id: VALUE
+  ];
+*)
+TypeAliases == TRUE
+
+=============================================================================
\ No newline at end of file
diff --git a/spec/light-client/assets/light-node-image.png b/spec/light-client/assets/light-node-image.png
new file mode 100644
index 0000000000..f0b93c6e41
Binary files /dev/null and b/spec/light-client/assets/light-node-image.png differ
diff --git a/spec/light-client/attacks/Blockchain_003_draft.tla b/spec/light-client/attacks/Blockchain_003_draft.tla
new file mode 100644
index 0000000000..fb6e6e8e87
--- /dev/null
+++ b/spec/light-client/attacks/Blockchain_003_draft.tla
@@ -0,0 +1,166 @@
+------------------------ MODULE Blockchain_003_draft -----------------------------
+(*
+  This is a high-level specification of Tendermint blockchain
+  that is designed specifically for the light client.
+  Validators have the voting power of one. If you want to model various
+  voting powers, introduce multiple copies of the same validator
+  (do not forget to give them unique names though).
+ *)
+EXTENDS Integers, FiniteSets, Apalache
+
+Min(a, b) == IF a < b THEN a ELSE b
+
+CONSTANT
+  AllNodes,
+    (* a set of all nodes that can act as validators (correct and faulty) *)
+  ULTIMATE_HEIGHT,
+    (* a maximal height that can be ever reached (modelling artifact) *)
+  TRUSTING_PERIOD
+    (* the period within which the validators are trusted *)
+
+Heights == 1..ULTIMATE_HEIGHT (* possible heights *)
+
+(* A commit is just a set of nodes who have committed the block *)
+Commits == SUBSET AllNodes
+
+(* The set of all block headers that can be on the blockchain.
+   This is a simplified version of the Block data structure in the actual implementation. *)
+BlockHeaders == [
+  height: Heights,
+    \* the block height
+  time: Int,
+    \* the block timestamp in some integer units
+  lastCommit: Commits,
+    \* the nodes who have voted on the previous block, the set itself instead of a hash
+  (* in the implementation, only the hashes of V and NextV are stored in a block,
+     as V and NextV are stored in the application state *)
+  VS: SUBSET AllNodes,
+    \* the validators of this block. We store the validators instead of the hash.
+  NextVS: SUBSET AllNodes
+    \* the validators of the next block. We store the next validators instead of the hash.
+]
+
+(* A signed header is just a header together with a set of commits *)
+LightBlocks == [header: BlockHeaders, Commits: Commits]
+
+VARIABLES
+  refClock,
+    (* the current global time in integer units as perceived by the reference chain *)
+  blockchain,
+    (* A sequence of BlockHeaders, which gives us a bird view of the blockchain. *)
+  Faulty
+    (* A set of faulty nodes, which can act as validators. We assume that the set
+       of faulty processes is non-decreasing. If a process has recovered, it should
+       connect using a different id. *)
+
+(* all variables, to be used with UNCHANGED *)
+vars == <<refClock, blockchain, Faulty>>
+
+(* The set of all correct nodes in a state *)
+Corr == AllNodes \ Faulty
+
+(* APALACHE annotations *)
+a <: b == a \* type annotation
+
+NT == STRING
+NodeSet(S) == S <: {NT}
+EmptyNodeSet == NodeSet({})
+
+BT == [height |-> Int, time |-> Int, lastCommit |-> {NT}, VS |-> {NT}, NextVS |-> {NT}]
+
+LBT == [header |-> BT, Commits |-> {NT}]
+(* end of APALACHE annotations *)
+
+(****************************** BLOCKCHAIN ************************************)
+
+(* the header is still within the trusting period *)
+InTrustingPeriod(header) ==
+    refClock < header.time + TRUSTING_PERIOD
+
+(*
+ Given a set of validators pVS and a set of nodes pNodes \subseteq AllNodes,
+ test whether pNodes has more than 2/3 of the voting power among the
+ validators in pVS (recall that every validator has the voting power of one).
+ *)
+TwoThirds(pVS, pNodes) ==
+    LET TP == Cardinality(pVS)
+        SP == Cardinality(pVS \intersect pNodes)
+    IN
+    3 * SP > 2 * TP \* when thinking in real numbers, not integers: SP > 2.0 / 3.0 * TP
+
+(*
+ Given a set of faulty nodes, test whether the faulty validators in pVS
+ have strictly less than the given ratio of the total voting power in pVS.
+
+ Parameters:
+   - pFaultyNodes is a set of nodes that are considered faulty
+   - pVS is a set of all validators, maybe including Faulty, intersecting with it, etc.
+   - maxRatio is a pair <<a, b>> that limits the ratio a / b of the faulty
+     validators from above (exclusive)
+ *)
+FaultyValidatorsFewerThan(pFaultyNodes, pVS, maxRatio) ==
+    LET FN == pFaultyNodes \intersect pVS \* faulty nodes in pVS
+        CN == pVS \ pFaultyNodes          \* correct nodes in pVS
+        CP == Cardinality(CN)             \* power of the correct nodes
+        FP == Cardinality(FN)             \* power of the faulty nodes
+    IN
+    \* CP + FP = TP is the total voting power
+    LET TP == CP + FP IN
+    FP * maxRatio[2] < TP * maxRatio[1]
+
+(* Can a block be produced by a correct peer, or an authenticated Byzantine peer *)
+IsLightBlockAllowedByDigitalSignatures(ht, block) ==
+    \/ block.header = blockchain[ht] \* signed by correct and faulty (maybe)
+    \/ /\ block.Commits \subseteq Faulty
+       /\ block.header.height = ht
+       /\ block.header.time >= 0 \* signed only by faulty
+
+(*
+ Initialize the blockchain to the ultimate height right in the initial states.
+ We pick the faulty validators statically, but that should not affect the light client.
+
+ Parameters:
+   - pMaxFaultyRatioExclusive is a pair <<a, b>> that bounds the number of
+     faulty validators in each block by the ratio a / b (exclusive)
+ *)
+InitToHeight(pMaxFaultyRatioExclusive) ==
+  /\ \E Nodes \in SUBSET AllNodes:
+      Faulty := Nodes \* pick a subset of nodes to be faulty
+  \* pick the validator sets and last commits
+  /\ \E vs, lastCommit \in [Heights -> SUBSET AllNodes]:
+     \E timestamp \in [Heights -> Int]:
+        \* refClock is at least as early as the timestamp in the last block
+        /\ \E tm \in Int:
+            refClock := tm /\ tm >= timestamp[ULTIMATE_HEIGHT]
+        \* the genesis starts on day 1
+        /\ timestamp[1] = 1
+        /\ vs[1] = AllNodes
+        /\ lastCommit[1] = EmptyNodeSet
+        /\ \A h \in Heights \ {1}:
+          /\ lastCommit[h] \subseteq vs[h - 1]   \* the non-validators cannot commit
+          /\ TwoThirds(vs[h - 1], lastCommit[h]) \* the commit has >2/3 of validator votes
+          \* the faulty validators have the power below the threshold
+          /\ FaultyValidatorsFewerThan(Faulty, vs[h], pMaxFaultyRatioExclusive)
+          /\ timestamp[h] > timestamp[h - 1]     \* the time grows monotonically
+          /\ timestamp[h] < timestamp[h - 1] + TRUSTING_PERIOD \* but not too fast
+        \* form the block chain out of validator sets and commits (this makes apalache faster)
+        /\ blockchain := [h \in Heights |->
+             [height |-> h,
+              time |-> timestamp[h],
+              VS |-> vs[h],
+              NextVS |-> IF h < ULTIMATE_HEIGHT THEN vs[h + 1] ELSE AllNodes,
+              lastCommit |-> lastCommit[h]]
+             ] \******
+
+(********************* BLOCKCHAIN ACTIONS ********************************)
+(*
+  Advance the clock by zero or more time units.
+ *)
+AdvanceTime ==
+  /\ \E tm \in Int: tm >= refClock /\ refClock' = tm
+  /\ UNCHANGED <<blockchain, Faulty>>
+
+=============================================================================
+\* Modification History
+\* Last modified Wed Jun 10 14:10:54 CEST 2020 by igor
+\* Created Fri Oct 11 15:45:11 CEST 2019 by igor
diff --git a/spec/light-client/attacks/Isolation_001_draft.tla b/spec/light-client/attacks/Isolation_001_draft.tla
new file mode 100644
index 0000000000..7406b89422
--- /dev/null
+++ b/spec/light-client/attacks/Isolation_001_draft.tla
@@ -0,0 +1,159 @@
+----------------------- MODULE Isolation_001_draft ----------------------------
+(**
+ * The specification of attacker isolation at a full node,
+ * when it has received evidence from the light client.
+ * We check that the isolation spec produces a set of validators
+ * that have more than 1/3 of the voting power.
+ *
+ * It follows the English specification:
+ *
+ * https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/attacks/isolate-attackers_001_draft.md
+ *
+ * The assumptions made in this specification:
+ *
+ *  - the voting power of every validator is 1
+ *    (add more validators, if you need more validators)
+ *
+ *  - the Tendermint security model is violated
+ *    (there are Byzantine validators who signed a conflicting block)
+ *
+ * Igor Konnov, Zarko Milosevic, Josef Widder, Informal Systems, 2020
+ *)
+
+
+EXTENDS Integers, FiniteSets, Apalache
+
+\* algorithm parameters
+CONSTANTS
+  AllNodes,
+    (* a set of all nodes that can act as validators (correct and faulty) *)
+  COMMON_HEIGHT,
+    (* an index of the block header that two peers agree upon *)
+  CONFLICT_HEIGHT,
+    (* an index of the block header that two peers disagree upon *)
+  TRUSTING_PERIOD,
+    (* the period within which the validators are trusted *)
+  FAULTY_RATIO
+    (* a pair <<a, b>> that limits the ratio of faulty validators in the blockchain
+       from above (exclusive). The Tendermint security model prescribes 1 / 3. *)
+
+VARIABLES
+  blockchain,       (* the chain at the full node *)
+  refClock,         (* the reference clock at the full node *)
+  Faulty,           (* the set of faulty validators *)
+  conflictingBlock, (* evidence that two peers reported conflicting blocks *)
+  state,            (* the state of the attack isolation machine at the full node *)
+  attackers         (* the set of the identified attackers *)
+
+vars == <<blockchain, refClock, Faulty, conflictingBlock, state, attackers>>
+
+\* instantiate the chain at the full node
+ULTIMATE_HEIGHT == CONFLICT_HEIGHT + 1
+BC == INSTANCE Blockchain_003_draft
+
+\* use the light client API
+TRUSTING_HEIGHT == COMMON_HEIGHT
+TARGET_HEIGHT == CONFLICT_HEIGHT
+
+LC == INSTANCE LCVerificationApi_003_draft
+        WITH localClock <- refClock, REAL_CLOCK_DRIFT <- 0, CLOCK_DRIFT <- 0
+
+\* old-style type annotations in apalache
+a <: b == a
+
+\* [LCAI-NONVALID-OUTPUT.1::TLA.1]
+ViolatesValidity(header1, header2) ==
+    \/ header1.VS /= header2.VS
+    \/ header1.NextVS /= header2.NextVS
+    \/ header1.height /= header2.height
+    \/ header1.time /= header2.time
+    (* The English specification also checks the fields that we do not have
+       at this level of abstraction:
+       - header1.ConsensusHash != header2.ConsensusHash or
+       - header1.AppHash != header2.AppHash or
+       - header1.LastResultsHash != header2.LastResultsHash
+     *)
+
+Init ==
+    /\ state := "init"
+    \* Pick an arbitrary blockchain from 1 to COMMON_HEIGHT + 1.
+    /\ BC!InitToHeight(FAULTY_RATIO) \* initializes blockchain, Faulty, and refClock
+    /\ attackers := {} <: {STRING}   \* attackers are unknown
+    \* Receive an arbitrary evidence.
+    \* Instantiate the light block fields one by one,
+    \* to avoid combinatorial explosion of records.
+    /\ \E time \in Int:
+       \E VS, NextVS, lastCommit, Commits \in SUBSET AllNodes:
+        LET conflicting ==
+            [ Commits |-> Commits,
+              header |->
+                [height |-> CONFLICT_HEIGHT,
+                 time |-> time,
+                 VS |-> VS,
+                 NextVS |-> NextVS,
+                 lastCommit |-> lastCommit] ]
+        IN
+        LET refBlock == [ header |-> blockchain[COMMON_HEIGHT],
+                          Commits |-> blockchain[COMMON_HEIGHT + 1].lastCommit ]
+        IN
+        /\ "SUCCESS" = LC!ValidAndVerifiedUntimed(refBlock, conflicting)
+        \* More than a third of the next validators in the common reference block
+        \* are faulty. That is a precondition for a fork.
+        /\ 3 * Cardinality(Faulty \intersect refBlock.header.NextVS)
+                > Cardinality(refBlock.header.NextVS)
+        \* correct validators cannot sign an invalid block
+        /\ ViolatesValidity(conflicting.header, refBlock.header)
+            => conflicting.Commits \subseteq Faulty
+        /\ conflictingBlock := conflicting
+
+
+\* This is a specification of isolateMisbehavingProcesses.
+\*
+\* [LCAI-FUNC-MAIN.1::TLA.1]
+Next ==
+  /\ state = "init"
+  \* Extract the rounds from the reference block and the conflicting block.
+  \* In this specification, we just pick rounds non-deterministically.
+  \* The English specification calls RoundOf on the blocks.
+  /\ \E referenceRound, evidenceRound \in Int:
+    /\ referenceRound >= 0 /\ evidenceRound >= 0
+    /\ LET reference == blockchain[CONFLICT_HEIGHT]
+           referenceCommit == blockchain[CONFLICT_HEIGHT + 1].lastCommit
+           evidenceHeader == conflictingBlock.header
+           evidenceCommit == conflictingBlock.Commits
+       IN
+       IF ViolatesValidity(reference, evidenceHeader)
+       THEN /\ attackers' := blockchain[COMMON_HEIGHT].NextVS \intersect evidenceCommit
+            /\ state' := "Lunatic"
+       ELSE IF referenceRound = evidenceRound
+       THEN /\ attackers' := referenceCommit \intersect evidenceCommit
+            /\ state' := "Equivocation"
+       ELSE
+            \* This property is shown in property
+            \* Accountability of TendermintAcc3.tla
+            /\ state' := "Amnesia"
+            /\ \E Attackers \in SUBSET (Faulty \intersect reference.VS):
+                /\ 3 * Cardinality(Attackers) > Cardinality(reference.VS)
+                /\ attackers' := Attackers
+  /\ blockchain' := blockchain
+  /\ refClock' := refClock
+  /\ Faulty' := Faulty
+  /\ conflictingBlock' := conflictingBlock
+
+(********************************** INVARIANTS *******************************)
+
+\* This invariant ensures that the attackers have
+\* more than 1/3 of the voting power
+\*
+\* [LCAI-INV-Output.1::TLA-DETECTION-COMPLETENESS.1]
+DetectionCompleteness ==
+    state /= "init" =>
+        3 * Cardinality(attackers) > Cardinality(blockchain[CONFLICT_HEIGHT].VS)
+
+\* This invariant ensures that only the faulty validators are detected
+\*
+\* [LCAI-INV-Output.1::TLA-DETECTION-ACCURACY.1]
+DetectionAccuracy ==
+    attackers \subseteq Faulty
+
+==============================================================================
diff --git a/spec/light-client/attacks/LCVerificationApi_003_draft.tla b/spec/light-client/attacks/LCVerificationApi_003_draft.tla
new file mode 100644
index 0000000000..909eab92b8
--- /dev/null
+++ b/spec/light-client/attacks/LCVerificationApi_003_draft.tla
@@ -0,0 +1,192 @@
+-------------------- MODULE LCVerificationApi_003_draft --------------------------
+(**
+ * The common interface of the light client verification and detection.
+ *)
+EXTENDS Integers, FiniteSets
+
+\* the parameters of Light Client
+CONSTANTS
+  TRUSTING_PERIOD,
+    (* the period within which the validators are trusted *)
+  CLOCK_DRIFT,
+    (* the assumed precision of the clock *)
+  REAL_CLOCK_DRIFT,
+    (* the actual clock drift, which under normal circumstances should not
+       be larger than CLOCK_DRIFT (otherwise, there will be a bug) *)
+  FAULTY_RATIO
+    (* a pair <<a, b>> that limits the ratio of faulty validators in the blockchain
+       from above (exclusive). The Tendermint security model prescribes 1 / 3.
+     *)
+
+VARIABLES
+  localClock (* current time as measured by the light client *)
+
+(* the header is still within the trusting period *)
+InTrustingPeriodLocal(header) ==
+    \* note that the assumption about the drift reduces the period of trust
+    localClock < header.time + TRUSTING_PERIOD - CLOCK_DRIFT
+
+(* the header is still within the trusting period, even if the clock can go backwards *)
+InTrustingPeriodLocalSurely(header) ==
+    \* note that the assumption about the drift reduces the period of trust
+    localClock < header.time + TRUSTING_PERIOD - 2 * CLOCK_DRIFT
+
+(* ensure that the local clock does not drift far away from the global clock *)
+IsLocalClockWithinDrift(local, global) ==
+    /\ global - REAL_CLOCK_DRIFT <= local
+    /\ local <= global + REAL_CLOCK_DRIFT
+
+(**
+ * Check that the commits in an untrusted block form more than 1/3 of the
+ * next validators in a trusted header.
+ *)
+SignedByOneThirdOfTrusted(trusted, untrusted) ==
+    LET TP == Cardinality(trusted.header.NextVS)
+        SP == Cardinality(untrusted.Commits \intersect trusted.header.NextVS)
+    IN
+    3 * SP > TP
+
+(**
+ The first part of the precondition of ValidAndVerified, which does not take
+ the current time into account.
+
+ [LCV-FUNC-VALID.1::TLA-PRE-UNTIMED.1]
+ *)
+ValidAndVerifiedPreUntimed(trusted, untrusted) ==
+    LET thdr == trusted.header
+        uhdr == untrusted.header
+    IN
+    /\ thdr.height < uhdr.height
+    \* the trusted block has been created earlier
+    /\ thdr.time < uhdr.time
+    /\ untrusted.Commits \subseteq uhdr.VS
+    /\ LET TP == Cardinality(uhdr.VS)
+           SP == Cardinality(untrusted.Commits)
+       IN
+       3 * SP > 2 * TP
+    /\ thdr.height + 1 = uhdr.height => thdr.NextVS = uhdr.VS
+    (* As we do not have explicit hashes we ignore these three checks of the English spec:
+
+       1. "trusted.Commit is a commit for the header trusted.Header,
+          i.e. it contains the correct hash of the header".
+       2. untrusted.Validators = hash(untrusted.Header.Validators)
+       3. untrusted.NextValidators = hash(untrusted.Header.NextValidators)
+     *)
+
+(**
+ Check the precondition of ValidAndVerified, including the time checks.
+
+ [LCV-FUNC-VALID.1::TLA-PRE.1]
+ *)
+ValidAndVerifiedPre(trusted, untrusted, checkFuture) ==
+    LET thdr == trusted.header
+        uhdr == untrusted.header
+    IN
+    /\ InTrustingPeriodLocal(thdr)
+    \* The untrusted block is not from the future (modulo clock drift).
+    \* Do the check, if it is required.
+    /\ checkFuture => uhdr.time < localClock + CLOCK_DRIFT
+    /\ ValidAndVerifiedPreUntimed(trusted, untrusted)
+
+
+(**
+ Check, whether an untrusted block is valid and verifiable w.r.t. a trusted header.
+ This test does not take the current time into account, but only looks at the block structure.
+
+ [LCV-FUNC-VALID.1::TLA-UNTIMED.1]
+ *)
+ValidAndVerifiedUntimed(trusted, untrusted) ==
+    IF ~ValidAndVerifiedPreUntimed(trusted, untrusted)
+    THEN "INVALID"
+    ELSE IF untrusted.header.height = trusted.header.height + 1
+             \/ SignedByOneThirdOfTrusted(trusted, untrusted)
+         THEN "SUCCESS"
+         ELSE "NOT_ENOUGH_TRUST"
+
+(**
+ Check, whether an untrusted block is valid and verifiable w.r.t. a trusted header.
+
+ [LCV-FUNC-VALID.1::TLA.1]
+ *)
+ValidAndVerified(trusted, untrusted, checkFuture) ==
+    IF ~ValidAndVerifiedPre(trusted, untrusted, checkFuture)
+    THEN "INVALID"
+    ELSE IF ~InTrustingPeriodLocal(untrusted.header)
+         (* We leave the following test for documentation purposes.
+            The implementation should do this test, as signature verification may be slow.
+            In the TLA+ specification, ValidAndVerified happens in no time.
+          *)
+         THEN "FAILED_TRUSTING_PERIOD"
+         ELSE IF untrusted.header.height = trusted.header.height + 1
+                  \/ SignedByOneThirdOfTrusted(trusted, untrusted)
+              THEN "SUCCESS"
+              ELSE "NOT_ENOUGH_TRUST"
+
+
+(**
+ The invariant of the light store that is not related to the blockchain
+ *)
+LightStoreInv(fetchedLightBlocks, lightBlockStatus) ==
+    \A lh, rh \in DOMAIN fetchedLightBlocks:
+        \* for every pair of stored headers that have been verified
+        \/ lh >= rh
+        \/ lightBlockStatus[lh] /= "StateVerified"
+        \/ lightBlockStatus[rh] /= "StateVerified"
+        \* either there is a header between them
+        \/ \E mh \in DOMAIN fetchedLightBlocks:
+            lh < mh /\ mh < rh /\ lightBlockStatus[mh] = "StateVerified"
+        \* or the left header is outside the trusting period, so no guarantees
+        \/ LET lhdr == fetchedLightBlocks[lh]
+               rhdr == fetchedLightBlocks[rh]
+           IN
+           \* we can verify the right one using the left one
+           "SUCCESS" = ValidAndVerifiedUntimed(lhdr, rhdr)
+
+(**
+ Correctness states that all the obtained headers are exactly like in the blockchain.
+
+ It is always the case that every verified header in LightStore was generated by
+ an instance of Tendermint consensus.
+
+ [LCV-DIST-SAFE.1::CORRECTNESS-INV.1]
+ *)
+CorrectnessInv(blockchain, fetchedLightBlocks, lightBlockStatus) ==
+    \A h \in DOMAIN fetchedLightBlocks:
+        lightBlockStatus[h] = "StateVerified" =>
+            fetchedLightBlocks[h].header = blockchain[h]
+
+(**
+ * When the light client terminates, there are no failed blocks.
+ * (Otherwise, someone lied to us.)
+ *)
+NoFailedBlocksOnSuccessInv(fetchedLightBlocks, lightBlockStatus) ==
+    \A h \in DOMAIN fetchedLightBlocks:
+        lightBlockStatus[h] /= "StateFailed"
+
+(**
+ The expected post-condition of VerifyToTarget.
+ *)
+VerifyToTargetPost(blockchain, isPeerCorrect,
+                   fetchedLightBlocks, lightBlockStatus,
+                   trustedHeight, targetHeight, finalState) ==
+  LET trustedHeader == fetchedLightBlocks[trustedHeight].header IN
+  \* The light client is not lying to us about the trusted block.
+  \* It is straightforward to detect.
+  /\ lightBlockStatus[trustedHeight] = "StateVerified"
+  /\ trustedHeight \in DOMAIN fetchedLightBlocks
+  /\ trustedHeader = blockchain[trustedHeight]
+  \* the invariants we have found in the light client verification
+  \* there is a problem with trusting period
+  /\ isPeerCorrect
+      => CorrectnessInv(blockchain, fetchedLightBlocks, lightBlockStatus)
+  \* a correct peer should not fail the light client,
+  \* if the trusted block is in the trusting period
+  /\ isPeerCorrect /\ InTrustingPeriodLocalSurely(trustedHeader)
+      => finalState = "finishedSuccess"
+  /\ finalState = "finishedSuccess" =>
+      /\ lightBlockStatus[targetHeight] = "StateVerified"
+      /\ targetHeight \in DOMAIN fetchedLightBlocks
+      /\ NoFailedBlocksOnSuccessInv(fetchedLightBlocks, lightBlockStatus)
+      /\ LightStoreInv(fetchedLightBlocks, lightBlockStatus)
+
+
+==================================================================================
diff --git a/spec/light-client/attacks/MC_5_3.tla b/spec/light-client/attacks/MC_5_3.tla
new file mode 100644
index 0000000000..552de49aee
--- /dev/null
+++ b/spec/light-client/attacks/MC_5_3.tla
@@ -0,0 +1,18 @@
+------------------------- MODULE MC_5_3 -------------------------------------
+
+AllNodes == {"n1", "n2", "n3", "n4", "n5"}
+COMMON_HEIGHT == 1
+CONFLICT_HEIGHT == 3
+TRUSTING_PERIOD == 1400 \* two weeks, one day is 100 time units :-)
+FAULTY_RATIO == <<1, 2>> \* < 1 / 2 faulty validators
+
+VARIABLES
+  blockchain,       \* the reference blockchain
+  refClock,         \* current time in the reference blockchain
+  Faulty,           \* the set of faulty validators
+  state,            \* the state of the light client detector
+  conflictingBlock, \* evidence that two peers reported conflicting blocks
+  attackers
+
+INSTANCE Isolation_001_draft
+============================================================================
diff --git a/spec/light-client/attacks/isolate-attackers_001_draft.md b/spec/light-client/attacks/isolate-attackers_001_draft.md
new file mode 100644
index 0000000000..7a374f1139
--- /dev/null
+++ b/spec/light-client/attacks/isolate-attackers_001_draft.md
@@ -0,0 +1,222 @@
+
+# Lightclient Attackers Isolation
+
+> Warning: This is the beginning of an unfinished draft. Don't continue reading!
+
+Adversarial nodes may have an incentive to lie to a lightclient about the state of a Tendermint blockchain. An attempt to do so is called an attack. Light client [verification][verification] checks incoming data by checking a so-called "commit", which is a forwarded set of signed messages that is (supposedly) produced during the execution of Tendermint consensus. Thus, an attack boils down to creating and signing Tendermint consensus messages in deviation from the Tendermint consensus algorithm rules.
+
+As Tendermint consensus and light client verification are safe under the assumption of more than 2/3 of correct voting power per block [[TMBC-FM-2THIRDS]][TMBC-FM-2THIRDS-link], this implies that if there was an attack then [[TMBC-FM-2THIRDS]][TMBC-FM-2THIRDS-link] was violated, that is, there is a block such that
+
+- validators deviated from the protocol, and
+- these validators represent more than 1/3 of the voting power in that block.
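+
+As a minimal sketch (not part of the specification itself), the "more than 1/3 of the voting power" comparison is usually done in integer arithmetic to avoid rounding; the helper name below is hypothetical:
+
+```go
+// moreThanOneThird is a hypothetical helper showing the integer form of the
+// "more than 1/3 of the voting power" test: with unit voting power, a set of
+// signers exceeds one third of the total power iff 3*signers > total.
+func moreThanOneThird(signers, total int64) bool {
+    return 3*signers > total
+}
+```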
+
+In the case of an [attack][node-based-attack-characterization], the lightclient [attack detection mechanism][detection] computes data, so-called evidence [[LC-DATA-EVIDENCE.1]][LC-DATA-EVIDENCE-link], that can be used
+
+- to prove that there has been an attack [[TMBC-LC-EVIDENCE-DATA.1]][TMBC-LC-EVIDENCE-DATA-link] and
+- as a basis to find the actual nodes that deviated from the Tendermint protocol.
+
+This specification considers how a full node in a Tendermint blockchain can isolate a set of attackers that launched the attack. The set should satisfy
+
+- the set does not contain a correct validator
+- the set contains validators that represent more than 1/3 of the voting power of a block that is still within the unbonding period
+
+# Outline
+
+**TODO** when preparing a version for broader review.
+
+# Part I - Basics
+
+For definitions of data structures used here, in particular LightBlocks [[LCV-DATA-LIGHTBLOCK.1]](https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification_002_draft.md#lcv-data-lightblock1), cf. [Light Client Verification][verification].
+
+# Part II - Definition of the Problem
+
+The specification of the [detection mechanism][detection] describes
+
+- what is a light client attack,
+- conditions under which the detector will detect a light client attack,
+- and the format of the output data, called evidence, in the case an attack is detected. The format is defined in
+[[LC-DATA-EVIDENCE.1]][LC-DATA-EVIDENCE-link] and looks as follows
+
+```go
+type LightClientAttackEvidence struct {
+    ConflictingBlock LightBlock
+    CommonHeight     int64
+}
+```
+
+The isolator is a function that gets as input evidence `ev`
+and a prefix of the blockchain `bc` at least up to height `ev.ConflictingBlock.Header.Height + 1`. The output is a set of *peerIDs* of validators.
+
+We assume that the full node is synchronized with the blockchain and has reached the height `ev.ConflictingBlock.Header.Height + 1`.
+
+#### **[FN-INV-Output.1]**
+
+When an output is generated it satisfies the following properties:
+
+- If
+    - `bc[CommonHeight].bfttime` is within the unbonding period w.r.t. the time at the full node,
+    - `ev.ConflictingBlock.Header != bc[ev.ConflictingBlock.Header.Height]`
+    - Validators in `ev.ConflictingBlock.Commit` represent more than 1/3 of the voting power in `bc[ev.CommonHeight].NextValidators`
+- Then: A set of validators in `bc[CommonHeight].NextValidators` that
+    - represent more than 1/3 of the voting power in `bc[ev.commonHeight].NextValidators`
+    - signed Tendermint consensus messages for height `ev.ConflictingBlock.Header.Height` by violating the Tendermint consensus protocol.
+- Else: the empty set.
+
+# Part IV - Protocol
+
+Here we discuss how to solve the problem of isolating misbehaving processes. We describe the function `isolateMisbehavingProcesses` as well as all the helper functions below. In [Part V](#part-v---Completeness), we discuss why the solution is complete based on results from analysis with automated tools.
+
+## Isolation
+
+### Outline
+
+> Describe solution (in English), decomposition into functions, where communication to other components happens.
+
+#### **[LCAI-FUNC-MAIN.1]**
+
+```go
+func isolateMisbehavingProcesses(ev LightClientAttackEvidence, bc Blockchain) []ValidatorAddress {
+
+    reference := bc[ev.conflictingBlock.Header.Height].Header
+    ev_header := ev.conflictingBlock.Header
+
+    ref_commit := bc[ev.conflictingBlock.Header.Height + 1].Header.LastCommit // + 1 !!
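+    // The commit for the block at height H is stored in the LastCommit field
+    // of the block at height H + 1; that is why we index at Height + 1 above.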
+    ev_commit := ev.conflictingBlock.Commit
+
+    if violatesTMValidity(reference, ev_header) {
+        // lunatic light client attack
+        signatories := Signers(ev.ConflictingBlock.Commit)
+        bonded_vals := Addresses(bc[ev.CommonHeight].NextValidators)
+        return intersection(signatories, bonded_vals)
+
+    }
+    // If this point is reached the validator sets in reference and ev_header are identical
+    else if RoundOf(ref_commit) == RoundOf(ev_commit) {
+        // equivocation light client attack
+        return intersection(Signers(ref_commit), Signers(ev_commit))
+    }
+    else {
+        // amnesia light client attack
+        return IsolateAmnesiaAttacker(ev, bc)
+    }
+}
+```
+
+- Implementation comment
+    - If the full node has only reached height `ev.conflictingBlock.Header.Height` then `bc[ev.conflictingBlock.Header.Height + 1].Header.LastCommit` refers to the locally stored commit for this height. (This commit must be present by the precondition on `length(bc)`.)
+    - We check in the precondition that the unbonding period is not expired. However, since time moves on, before handing the validators over to the Cosmos SDK, the time needs to be checked again to satisfy the contract which requires that only bonded validators are reported. This passing of validators to the SDK is out of scope of this specification.
+- Expected precondition
+    - `length(bc) >= ev.conflictingBlock.Header.Height`
+    - `ValidAndVerifiedUnbonding(bc[ev.CommonHeight], ev.ConflictingBlock) == SUCCESS`
+    - `ev.ConflictingBlock.Header != bc[ev.ConflictingBlock.Header.Height]`
+    - TODO: input light blocks pass basic validation
+- Expected postcondition
+    - [[FN-INV-Output.1]](#FN-INV-Output1) holds
+- Error condition
+    - returns an error if the precondition is violated.
+
+### Details of the Functions
+
+#### **[LCAI-FUNC-VVU.1]**
+
+```go
+func ValidAndVerifiedUnbonding(trusted LightBlock, untrusted LightBlock) Result
+```
+
+- Conditions are identical to [[LCV-FUNC-VALID.2]][LCV-FUNC-VALID.link] except the precondition "*trusted.Header.Time > now - trustingPeriod*" is substituted with
+    - `trusted.Header.Time > now - UnbondingPeriod`
+
+#### **[LCAI-FUNC-NONVALID.1]**
+
+```go
+func violatesTMValidity(ref Header, ev Header) bool
+```
+
+- Implementation remarks
+    - checks whether the evidence header `ev` violates the validity property of Tendermint Consensus, by checking against a reference header
+- Expected precondition
+    - `ref.Height == ev.Height`
+- Expected postcondition
+    - returns the evaluation of the following disjunction
+    **[[LCAI-NONVALID-OUTPUT.1]]** ==
+    `ref.ValidatorsHash != ev.ValidatorsHash` or
+    `ref.NextValidatorsHash != ev.NextValidatorsHash` or
+    `ref.ConsensusHash != ev.ConsensusHash` or
+    `ref.AppHash != ev.AppHash` or
+    `ref.LastResultsHash != ev.LastResultsHash`
+
+```go
+func IsolateAmnesiaAttacker(ev LightClientAttackEvidence, bc Blockchain) []ValidatorAddress
+```
+
+- Implementation remarks
+    **TODO:** What should we do here? Refer to the accountability doc?
+- Expected postcondition
+    **TODO:** What should we do here? Refer to the accountability doc?
+
+```go
+func RoundOf(commit Commit) int64
+```
+
+- Expected precondition
+    - `commit` is well-formed. In particular all votes are from the same round `r`.
+- Expected postcondition
+    - returns the round `r` that is encoded in all the votes of the commit
+
+```go
+func Signers(commit Commit) []ValidatorAddress
+```
+
+- Expected postcondition
+    - returns all validator addresses in `commit`
+
+```go
+func Addresses(vals []Validator) []ValidatorAddress
+```
+
+- Expected postcondition
+    - returns all validator addresses in `vals`
+
+# Part V - Completeness
+
+As discussed in the beginning of this document, an attack boils down to creating and signing Tendermint consensus messages in deviation from the Tendermint consensus algorithm rules.
+The main function `isolateMisbehavingProcesses` distinguishes three kinds of wrongly signed messages, namely,
+
+- lunatic: signing invalid blocks
+- equivocation: double-signing valid blocks in the same consensus round
+- amnesia: signing conflicting blocks in different consensus rounds, without having seen a quorum of messages that would have allowed them to do so.
+
+The question is whether this captures all attacks.
+First observe that the first check in `isolateMisbehavingProcesses` is `violatesTMValidity`. It takes care of lunatic attacks. If this check passes, that is, if `violatesTMValidity` returns `FALSE`, this means that [LCAI-NONVALID-OUTPUT.1] evaluates to false, which implies that `ref.ValidatorsHash = ev.ValidatorsHash`. Hence, after `violatesTMValidity`, all the involved validators are the ones from the blockchain. It is thus sufficient to analyze one instance of Tendermint consensus with a fixed group membership (set of validators). Also it is sufficient to consider two different valid consensus values, that is, binary consensus.
+
+**TODO** we have analyzed Tendermint consensus with TLA+ and have accompanied Galois in an independent study of the protocol based on [Ivy proofs](https://github.com/tendermint/spec/tree/master/ivy-proofs).
+
+# References
+
+[[supervisor]] The specification of the light client supervisor.
+
+[[verification]] The specification of the light client verification protocol.
+
+[[detection]] The specification of the light client attack detection mechanism.
+
+[supervisor]:
+https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/supervisor/supervisor_001_draft.md
+
+[verification]: https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification_002_draft.md
+
+[detection]:
+https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/detection/detection_003_reviewed.md
+
+[LC-DATA-EVIDENCE-link]:
+https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/detection/detection_003_reviewed.md#lc-data-evidence1
+
+[TMBC-LC-EVIDENCE-DATA-link]:
+https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/detection/detection_003_reviewed.md#tmbc-lc-evidence-data1
+
+[node-based-attack-characterization]:
+https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/detection/detection_003_reviewed.md#node-based-characterization-of-attacks
+
+[TMBC-FM-2THIRDS-link]: https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification_002_draft.md#tmbc-fm-2thirds1
+
+[LCV-FUNC-VALID.link]: https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification_002_draft.md#lcv-func-valid2
diff --git a/spec/light-client/attacks/isolate-attackers_002_reviewed.md b/spec/light-client/attacks/isolate-attackers_002_reviewed.md
new file mode 100644
index 0000000000..482fec3352
--- /dev/null
+++ b/spec/light-client/attacks/isolate-attackers_002_reviewed.md
@@ -0,0 +1,225 @@
+
+# Lightclient Attackers Isolation
+
+Adversarial nodes may have an incentive to lie to a lightclient about the state of a Tendermint blockchain. An attempt to do so is called an attack. Light client [verification][verification] checks incoming data by checking a so-called "commit", which is a forwarded set of signed messages that is (supposedly) produced during the execution of Tendermint consensus. Thus, an attack boils down to creating and signing Tendermint consensus messages in deviation from the Tendermint consensus algorithm rules.
+
+As Tendermint consensus and light client verification are safe under the assumption of more than 2/3 of correct voting power per block [[TMBC-FM-2THIRDS]][TMBC-FM-2THIRDS-link], this implies that if there was an attack then [[TMBC-FM-2THIRDS]][TMBC-FM-2THIRDS-link] was violated, that is, there is a block such that
+
+- validators deviated from the protocol, and
+- these validators represent more than 1/3 of the voting power in that block.
+
+In the case of an [attack][node-based-attack-characterization], the lightclient [attack detection mechanism][detection] computes data, so-called evidence [[LC-DATA-EVIDENCE.1]][LC-DATA-EVIDENCE-link], that can be used
+
+- to prove that there has been an attack [[TMBC-LC-EVIDENCE-DATA.1]][TMBC-LC-EVIDENCE-DATA-link] and
+- as a basis to find the actual nodes that deviated from the Tendermint protocol.
+
+This specification considers how a full node in a Tendermint blockchain can isolate a set of attackers that launched the attack. The set should satisfy
+
+- the set does not contain a correct validator
+- the set contains validators that represent more than 1/3 of the voting power of a block that is still within the unbonding period
+
+# Outline
+
+After providing the [problem statement](#Part-I---Basics-and-Definition-of-the-Problem), we specify the [isolator function](#Part-II---Protocol) and close with a discussion of its [correctness](#Part-III---Completeness), which is based on computer-aided analysis of Tendermint Consensus.
+# Part I - Basics and Definition of the Problem
+
+For definitions of data structures used here, in particular LightBlocks [[LCV-DATA-LIGHTBLOCK.1]](https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification_002_draft.md#lcv-data-lightblock1), we refer to the specification of [Light Client Verification][verification].
+
+The specification of the [detection mechanism][detection] describes
+
+- what is a light client attack,
+- conditions under which the detector will detect a light client attack,
+- and the format of the output data, called evidence, in the case an attack is detected. The format is defined in
+[[LC-DATA-EVIDENCE.1]][LC-DATA-EVIDENCE-link] and looks as follows
+
+```go
+type LightClientAttackEvidence struct {
+    ConflictingBlock LightBlock
+    CommonHeight     int64
+}
+```
+
+The isolator is a function that gets as input evidence `ev`
+and a prefix of the blockchain `bc` at least up to height `ev.ConflictingBlock.Header.Height + 1`. The output is a set of *peerIDs* of validators.
+
+We assume that the full node is synchronized with the blockchain and has reached the height `ev.ConflictingBlock.Header.Height + 1`.
+
+#### **[LCAI-INV-Output.1]**
+
+When an output is generated it satisfies the following properties:
+
+- If
+    - `bc[CommonHeight].bfttime` is within the unbonding period w.r.t. the time at the full node,
+    - `ev.ConflictingBlock.Header != bc[ev.ConflictingBlock.Header.Height]`
+    - Validators in `ev.ConflictingBlock.Commit` represent more than 1/3 of the voting power in `bc[ev.CommonHeight].NextValidators`
+- Then: The output is a set of validators in `bc[CommonHeight].NextValidators` that
+    - represent more than 1/3 of the voting power in `bc[ev.commonHeight].NextValidators`
+    - signed Tendermint consensus messages for height `ev.ConflictingBlock.Header.Height` by violating the Tendermint consensus protocol.
+- Else: the empty set.
+
+# Part II - Protocol
+
+Here we discuss how to solve the problem of isolating misbehaving processes. We describe the function `isolateMisbehavingProcesses` as well as all the helper functions below. In [Part III](#part-III---Completeness), we discuss why the solution is complete based on results from analysis with automated tools.
+
+## Isolation
+
+### Outline
+
+We first check whether the conflicting block can indeed be verified from the common height. We then check whether it was a lunatic attack (violating validity). If this is not the case, we check for equivocation. If this also is not the case, we start the on-chain [accountability protocol](https://docs.google.com/document/d/11ZhMsCj3y7zIZz4udO9l25xqb0kl7gmWqNpGVRzOeyY/edit).
+
+#### **[LCAI-FUNC-MAIN.1]**
+
+```go
+func isolateMisbehavingProcesses(ev LightClientAttackEvidence, bc Blockchain) []ValidatorAddress {
+
+    reference := bc[ev.conflictingBlock.Header.Height].Header
+    ev_header := ev.conflictingBlock.Header
+
+    ref_commit := bc[ev.conflictingBlock.Header.Height + 1].Header.LastCommit // + 1 !!
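+    // LastCommit of the block at height H + 1 holds the commit for height H,
+    // hence the "+ 1" in the index above.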
+    ev_commit := ev.ConflictingBlock.Commit
+
+    if violatesTMValidity(reference, ev_header) {
+        // lunatic light client attack
+        signatories := Signers(ev.ConflictingBlock.Commit)
+        bonded_vals := Addresses(bc[ev.CommonHeight].NextValidators)
+        return intersection(signatories, bonded_vals)
+    }
+    // If this point is reached, the validator sets in reference and ev_header are identical
+    if RoundOf(ref_commit) == RoundOf(ev_commit) {
+        // equivocation light client attack
+        return intersection(Signers(ref_commit), Signers(ev_commit))
+    }
+    // amnesia light client attack
+    return IsolateAmnesiaAttacker(ev, bc)
+}
+```
+
+- Implementation comment
+    - If the full node has only reached height `ev.ConflictingBlock.Header.Height` then `bc[ev.ConflictingBlock.Header.Height + 1].Header.LastCommit` refers to the locally stored commit for this height. (This commit must be present by the precondition on `length(bc)`.)
+    - We check in the precondition that the unbonding period is not expired. However, since time moves on, before handing the validators over to the Cosmos SDK, the time needs to be checked again to satisfy the contract, which requires that only bonded validators are reported. This passing of validators to the SDK is out of the scope of this specification.
+- Expected precondition
+    - `length(bc) >= ev.ConflictingBlock.Header.Height`
+    - `ValidAndVerifiedUnbonding(bc[ev.CommonHeight], ev.ConflictingBlock) == SUCCESS`
+    - `ev.ConflictingBlock.Header != bc[ev.ConflictingBlock.Header.Height]`
+    - `ev.ConflictingBlock` satisfies basic validation (in particular all signed messages in the Commit are from the same round)
+- Expected postcondition
+    - [[LCAI-INV-Output.1]](#LCAI-INV-Output1) holds
+- Error condition
+    - returns an error if the precondition is violated.
+
+### Details of the Functions
+
+#### **[LCAI-FUNC-VVU.1]**
+
+```go
+func ValidAndVerifiedUnbonding(trusted LightBlock, untrusted LightBlock) Result
+```
+
+- Conditions are identical to [[LCV-FUNC-VALID.2]][LCV-FUNC-VALID.link] except the precondition "*trusted.Header.Time > now - trustingPeriod*" is substituted with
+    - `trusted.Header.Time > now - UnbondingPeriod`
+
+#### **[LCAI-FUNC-NONVALID.1]**
+
+```go
+func violatesTMValidity(ref Header, ev Header) bool
+```
+
+- Implementation remarks
+    - checks whether the evidence header `ev` violates the validity property of Tendermint Consensus, by checking against a reference header
+- Expected precondition
+    - `ref.Height == ev.Height`
+- Expected postcondition
+    - returns the evaluation of the following disjunction
+    **[LCAI-NONVALID-OUTPUT.1]** ==
+    `ref.ValidatorsHash != ev.ValidatorsHash` or
+    `ref.NextValidatorsHash != ev.NextValidatorsHash` or
+    `ref.ConsensusHash != ev.ConsensusHash` or
+    `ref.AppHash != ev.AppHash` or
+    `ref.LastResultsHash != ev.LastResultsHash`
+
+```go
+func IsolateAmnesiaAttacker(ev LightClientAttackEvidence, bc Blockchain) []ValidatorAddress
+```
+
+- Implementation remarks
+    - This triggers the [query/response protocol](https://docs.google.com/document/d/11ZhMsCj3y7zIZz4udO9l25xqb0kl7gmWqNpGVRzOeyY/edit).
+- Expected postcondition
+    - returns attackers according to [LCAI-INV-Output.1].
+
+```go
+func RoundOf(commit Commit) int
+```
+
+- Expected precondition
+    - `commit` is well-formed. In particular all votes are from the same round `r`.
+- Expected postcondition
+    - returns the round `r` that is encoded in all the votes of the commit
+- Error condition
+    - reports an error if the precondition is violated
+
+```go
+func Signers(commit Commit) []ValidatorAddress
+```
+
+- Expected postcondition
+    - returns all validator addresses in `commit`
+
+```go
+func Addresses(vals []Validator) []ValidatorAddress
+```
+
+- Expected postcondition
+    - returns all validator addresses in `vals`
+
+# Part III - Completeness
+
+As discussed in the beginning of this document, an attack boils down to creating and signing Tendermint consensus messages in deviation from the Tendermint consensus algorithm rules.
+The main function `isolateMisbehavingProcesses` distinguishes three kinds of wrongly signed messages, namely,
+
+- lunatic: signing invalid blocks
+- equivocation: double-signing valid blocks in the same consensus round
+- amnesia: signing conflicting blocks in different consensus rounds, without having seen a quorum of messages that would have allowed them to do so.
+
+The question is whether this captures all attacks.
+First observe that the first check in `isolateMisbehavingProcesses` is `violatesTMValidity`. It takes care of lunatic attacks. If this check passes, that is, if `violatesTMValidity` returns `FALSE`, this means that [[LCAI-NONVALID-OUTPUT.1]](#LCAI-FUNC-NONVALID1) evaluates to false, which implies that `ref.ValidatorsHash = ev.ValidatorsHash`. Hence, after `violatesTMValidity`, all the involved validators are the ones from the blockchain. It is thus sufficient to analyze one instance of Tendermint consensus with a fixed group membership (set of validators). Also, as we have two different blocks for the same height, it is sufficient to consider two different valid consensus values, that is, binary consensus.
+
+For this fixed group membership, we have analyzed the attacks using the TLA+ specification of [Tendermint Consensus in TLA+][tendermint-accountability]. We checked that indeed the only possible scenarios that can lead to a violation of agreement are **equivocation** and **amnesia**. An independent study by Galois of the protocol based on [Ivy proofs](https://github.com/tendermint/spec/tree/master/ivy-proofs) led to the same conclusion.
+
+# References
+
+[[supervisor]] The specification of the light client supervisor.
+
+[[verification]] The specification of the light client verification protocol.
+
+[[detection]] The specification of the light client attack detection mechanism.
+
+[[tendermint-accountability]] TLA+ specification to check the types of attacks
+
+[tendermint-accountability]:
+https://github.com/tendermint/spec/blob/master/rust-spec/tendermint-accountability/README.md
+
+[supervisor]:
+https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/supervisor/supervisor_001_draft.md
+
+[verification]: https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification_002_draft.md
+
+[detection]:
+https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/detection/detection_003_reviewed.md
+
+[LC-DATA-EVIDENCE-link]:
+https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/detection/detection_003_reviewed.md#lc-data-evidence1
+
+[TMBC-LC-EVIDENCE-DATA-link]:
+https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/detection/detection_003_reviewed.md#tmbc-lc-evidence-data1
+
+[node-based-attack-characterization]:
+https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/detection/detection_003_reviewed.md#node-based-characterization-of-attacks
+
+[TMBC-FM-2THIRDS-link]: https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification_002_draft.md#tmbc-fm-2thirds1
+
+[LCV-FUNC-VALID.link]: https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification_002_draft.md#lcv-func-valid2
diff --git a/spec/light-client/attacks/notes-on-evidence-handling.md b/spec/light-client/attacks/notes-on-evidence-handling.md
new file mode 100644
index 0000000000..4b7d819191
--- /dev/null
+++ b/spec/light-client/attacks/notes-on-evidence-handling.md
@@ -0,0 +1,219 @@
+
+# Light client attacks
+
+We define a light client attack as detection of conflicting headers for a given height that can be verified
+starting from the trusted light block. A light client attack is defined in the context of the interactions of
+a light client with two peers. One of the peers (called the primary) defines a trace of verified light blocks
+(primary trace) that is checked against the trace of the other peer (called the witness), which we call the
+witness trace.
+
+A light client attack is defined by the primary and witness traces
+that have a common root (the same trusted light block for a common height) but form
+conflicting branches (the traces end at the same height but with different headers).
+Note that conflicting branches can be arbitrarily large, as the branches continue to diverge after
+the bifurcation point. We propose an approach that allows us to define a valid light client attack
+with only a common light block and a single conflicting light block. We rely on the assumption
+that the primary is under suspicion (therefore not trusted) and that the witness plays a
+supporting role in detecting and processing an attack (therefore trusted). Therefore, once a light client
+detects an attack, it needs to send the witness only the missing data (common height
+and conflicting light block), as the witness already has its own trace. Keeping light client attack data of constant size
+saves bandwidth and reduces the attack surface. As we will explain below, although in the context of
+light client core
+[verification](https://github.com/informalsystems/tendermint-rs/tree/master/docs/spec/lightclient/verification)
+the roles of primary and witness are clearly defined,
+in case of an attack, we run the same attack detection procedure twice with the roles swapped.
+The rationale is that the light client does not know which peer is correct (i.e., which one is on the right main branch),
+so it tries to create and submit attack evidence to both peers.
+
+Light client attack evidence consists of a conflicting light block and a common height.
+
+```go
+type LightClientAttackEvidence struct {
+    ConflictingBlock LightBlock
+    CommonHeight     int64
+}
+```
+
+A full node can validate light client attack evidence by executing the following procedure:
+
+```go
+func IsValid(lcaEvidence LightClientAttackEvidence, bc Blockchain) bool {
+    commonBlock = GetLightBlock(bc, lcaEvidence.CommonHeight)
+    if commonBlock == nil {
+        return false
+    }
+
+    // Note that trustingPeriod in ValidAndVerified is set to UNBONDING_PERIOD
+    verdict = ValidAndVerified(commonBlock, lcaEvidence.ConflictingBlock)
+    conflictingHeight = lcaEvidence.ConflictingBlock.Header.Height
+
+    return verdict == OK && bc[conflictingHeight].Header != lcaEvidence.ConflictingBlock.Header
+}
+```
+
+## Light client attack creation
+
+Given a trusted light block `trusted`, a light node executes the bisection algorithm to verify header
+`untrusted` at some height `h`. If the bisection algorithm succeeds, then the header `untrusted` is verified.
+Headers that are downloaded as part of the bisection algorithm are stored in a light store and are also in
+the verified state. Therefore, after the bisection algorithm successfully terminates, we have a trace of
+light blocks (`[]LightBlock`) obtained from the primary, which we call the primary trace.
+
+### Primary trace
+
+The following invariant holds for the primary trace:
+
+- Given a `trusted` light block, target height `h`, and `primary_trace` (`[]LightBlock`):
+  *primary_trace[0] == trusted* and *primary_trace[len(primary_trace)-1].Height == h* and
+  successive light blocks pass the light client verification logic.
+
+### Witness with a conflicting header
+
+The verified header at height `h` is cross-checked with every witness as part of
+[detection](https://github.com/informalsystems/tendermint-rs/tree/master/docs/spec/lightclient/detection).
+If a witness returns a conflicting header at height `h`, the following procedure is executed to verify
+whether the conflicting header comes from a valid trace and, if that is the case, to create attack evidence:
+
+#### Helper functions
+
+We assume the following helper functions:
+
+```go
+// Returns trace of verified light blocks starting from rootHeight and ending with targetHeight.
+Trace(lightStore LightStore, rootHeight int64, targetHeight int64) []LightBlock
+
+// Returns the validator set for the given height
+GetValidators(bc Blockchain, height int64) []Validator
+
+// Returns validator addresses for the given validators
+GetAddresses(vals []Validator) []ValidatorAddress
+```
+
+```go
+func DetectLightClientAttacks(primary PeerID,
+                              primary_trace []LightBlock,
+                              witness PeerID) (LightClientAttackEvidence, LightClientAttackEvidence) {
+    primary_lca_evidence, witness_trace = DetectLightClientAttack(primary_trace, witness)
+
+    witness_lca_evidence = nil
+    if witness_trace != nil {
+        witness_lca_evidence, _ = DetectLightClientAttack(witness_trace, primary)
+    }
+    return primary_lca_evidence, witness_lca_evidence
+}
+
+func DetectLightClientAttack(trace []LightBlock, peer PeerID) (LightClientAttackEvidence, []LightBlock) {
+
+    lightStore = new LightStore().Update(trace[0], StateTrusted)
+
+    for i in 1..len(trace)-1 {
+        lightStore, result = VerifyToTarget(peer, lightStore, trace[i].Header.Height)
+
+        if result == ResultFailure {
+            return nil, nil
+        }
+
+        current = lightStore.Get(trace[i].Header.Height)
+
+        // if the obtained header is the same as in the trace, we continue with the next height
+        if current.Header == trace[i].Header {
+            continue
+        }
+
+        // we have identified a conflicting header
+        commonBlock = trace[i-1]
+        conflictingBlock = trace[i]
+
+        return LightClientAttackEvidence{conflictingBlock, commonBlock.Header.Height},
+               Trace(lightStore, trace[i-1].Header.Height, trace[i].Header.Height)
+    }
+    return nil, nil
+}
+```
+
+## Evidence handling
+
+As part of on-chain evidence handling, a full node identifies misbehaving processes and informs
+the application, so they can be slashed. Note that only bonded validators should
+be reported to the application. There are three types of attacks that can be executed against
+the Tendermint light client:
+
+- lunatic attack
+- equivocation attack, and
+- amnesia attack.
+
+We now specify the evidence handling logic.
+
+```go
+func detectMisbehavingProcesses(lcAttackEvidence LightClientAttackEvidence, bc Blockchain) []ValidatorAddress {
+    assume IsValid(lcAttackEvidence, bc)
+
+    conflictingBlock = lcAttackEvidence.ConflictingBlock
+    // the block at the same height on the main chain; it defines validity
+    current = bc[conflictingBlock.Header.Height]
+
+    // lunatic light client attack
+    if !isValidBlock(current.Header, conflictingBlock.Header) {
+        conflictingCommit = lcAttackEvidence.ConflictingBlock.Commit
+        bondedValidators = GetNextValidators(bc, lcAttackEvidence.CommonHeight)
+
+        return getSigners(conflictingCommit) intersection GetAddresses(bondedValidators)
+
+    // equivocation light client attack
+    } else if current.Header.Round == conflictingBlock.Header.Round {
+        conflictingCommit = lcAttackEvidence.ConflictingBlock.Commit
+        trustedCommit = bc[conflictingBlock.Header.Height+1].LastCommit
+
+        return getSigners(trustedCommit) intersection getSigners(conflictingCommit)
+
+    // amnesia light client attack
+    } else {
+        return HandleAmnesiaAttackEvidence(lcAttackEvidence, bc)
+    }
+}
+
+// Block validity in this context is defined by the trusted header.
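+// The five hashes compared below tie a header to the application state and
+// the validator sets recorded on the main chain; if any of them differs from
+// the trusted header at the same height, the conflicting header cannot stem
+// from a valid execution of the state machine (lunatic attack).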
+func isValidBlock(trusted Header, conflicting Header) bool {
+    return trusted.ValidatorsHash == conflicting.ValidatorsHash &&
+           trusted.NextValidatorsHash == conflicting.NextValidatorsHash &&
+           trusted.ConsensusHash == conflicting.ConsensusHash &&
+           trusted.AppHash == conflicting.AppHash &&
+           trusted.LastResultsHash == conflicting.LastResultsHash
+}
+
+func getSigners(commit Commit) []ValidatorAddress {
+    var signers []ValidatorAddress
+    for _, commitSig := range commit.Signatures {
+        if commitSig.BlockIDFlag == BlockIDFlagCommit {
+            signers = append(signers, commitSig.ValidatorAddress)
+        }
+    }
+    return signers
+}
+```
+
+Note that amnesia attack evidence handling involves more complex processing, i.e., it cannot be
+decided based on the amnesia attack evidence alone. We explain in the following section a protocol
+for handling amnesia attack evidence.
+
+### Amnesia attack evidence handling
+
+Detecting faulty processes in case of an amnesia attack is more complex and cannot be inferred
+purely from the attack evidence data. In this case, in order to detect misbehaving processes we need
+access to the votes that processes sent and received during the conflicting height. Therefore, amnesia handling assumes that
+validators persist all votes received and sent during multi-round heights (an amnesia attack
+is only possible at heights that execute over multiple rounds, i.e., commit round > 0).
+
+To simplify the description of the algorithm, we assume the existence of a trusted oracle, called the monitor, that
+drives the algorithm and outputs the faulty processes at the end. The monitor can be implemented in a
+distributed setting as an on-chain module. The algorithm works as follows:
+ 1) The monitor sends a votesets request to the validators of the conflicting height. Validators
+    are expected to send their votesets within a predefined timeout.
+ 2) Upon receiving the votesets request, validators send their votesets to the monitor.
+ 3) Validators that have not sent their votesets within the timeout are considered faulty.
+ 4) The votesets are preprocessed. That means that the received votesets are analyzed
+    and each valid vote sent by process p is added to the voteset of the sender p. This phase ensures that
+    votes sent by faulty processes that were observed by at least one correct validator cannot be excluded from the analysis.
+ 5) The voteset of every validator is analyzed independently to decide whether the validator is correct or faulty.
+    A validator is considered faulty if at least one of the following invalid transitions is found in its voteset:
+    - More than one PREVOTE message is sent in a round
+    - More than one PRECOMMIT message is sent in a round
+    - A PRECOMMIT message is sent without receiving +2/3 of voting-power equivalent
+      appropriate PREVOTE messages
+    - A PREVOTE message is sent for the value V’ in round r’ while the PRECOMMIT message had
+      been sent for the value V in round r by the same process (r’ > r) and there are no
+      +2/3 of voting-power equivalent PREVOTE(vr, V’) messages (vr ≥ 0 and vr > r and vr < r’)
+      as the justification for sending PREVOTE(r’, V’)
diff --git a/spec/light-client/detection/004bmc-apalache-ok.csv b/spec/light-client/detection/004bmc-apalache-ok.csv
new file mode 100644
index 0000000000..bf4f53ea2a
--- /dev/null
+++ b/spec/light-client/detection/004bmc-apalache-ok.csv
@@ -0,0 +1,10 @@
+no;filename;tool;timeout;init;inv;next;args
+1;LCD_MC3_3_faulty.tla;apalache;1h;;CommonHeightOnEvidenceInv;;--length=10
+2;LCD_MC3_3_faulty.tla;apalache;1h;;AccuracyInv;;--length=10
+3;LCD_MC3_3_faulty.tla;apalache;1h;;PrecisionInvLocal;;--length=10
+4;LCD_MC3_4_faulty.tla;apalache;1h;;CommonHeightOnEvidenceInv;;--length=10
+5;LCD_MC3_4_faulty.tla;apalache;1h;;AccuracyInv;;--length=10
+6;LCD_MC3_4_faulty.tla;apalache;1h;;PrecisionInvLocal;;--length=10
+7;LCD_MC4_4_faulty.tla;apalache;1h;;CommonHeightOnEvidenceInv;;--length=10
+8;LCD_MC4_4_faulty.tla;apalache;1h;;AccuracyInv;;--length=10
+9;LCD_MC4_4_faulty.tla;apalache;1h;;PrecisionInvLocal;;--length=10
diff --git a/spec/light-client/detection/005bmc-apalache-error.csv b/spec/light-client/detection/005bmc-apalache-error.csv
new file mode 100644
index 0000000000..1b9dd05ca9
--- /dev/null
+++ b/spec/light-client/detection/005bmc-apalache-error.csv
@@ -0,0 +1,4 @@
+no;filename;tool;timeout;init;inv;next;args
+1;LCD_MC3_3_faulty.tla;apalache;1h;;PrecisionInvGrayZone;;--length=10
+2;LCD_MC3_4_faulty.tla;apalache;1h;;PrecisionInvGrayZone;;--length=10
+3;LCD_MC4_4_faulty.tla;apalache;1h;;PrecisionInvGrayZone;;--length=10
diff --git a/spec/light-client/detection/Blockchain_003_draft.tla b/spec/light-client/detection/Blockchain_003_draft.tla
new file mode 100644
index 0000000000..2b37c1b181
--- /dev/null
+++ b/spec/light-client/detection/Blockchain_003_draft.tla
@@ -0,0 +1,164 @@
+------------------------ MODULE Blockchain_003_draft -----------------------------
+(*
+  This is a high-level specification of Tendermint blockchain
+  that is designed specifically for the light client.
+  Validators have the voting power of one. If you would like to model various
+  voting powers, introduce multiple copies of the same validator
+  (do not forget to give them unique names though).
+ *)
+EXTENDS Integers, FiniteSets
+
+Min(a, b) == IF a < b THEN a ELSE b
+
+CONSTANT
+    AllNodes,
+      (* a set of all nodes that can act as validators (correct and faulty) *)
+    ULTIMATE_HEIGHT,
+      (* a maximal height that can ever be reached (modelling artifact) *)
+    TRUSTING_PERIOD
+      (* the period within which the validators are trusted *)
+
+Heights == 1..ULTIMATE_HEIGHT   (* possible heights *)
+
+(* A commit is just a set of nodes who have committed the block *)
+Commits == SUBSET AllNodes
+
+(* The set of all block headers that can be on the blockchain.
+   This is a simplified version of the Block data structure in the actual implementation.
+ *)
+BlockHeaders == [
+  height: Heights,
+    \* the block height
+  time: Int,
+    \* the block timestamp in some integer units
+  lastCommit: Commits,
+    \* the nodes who have voted on the previous block, the set itself instead of a hash
+  (* in the implementation, only the hashes of V and NextV are stored in a block,
+     as V and NextV are stored in the application state *)
+  VS: SUBSET AllNodes,
+    \* the validators of this block. We store the validators instead of the hash.
+  NextVS: SUBSET AllNodes
+    \* the validators of the next block. We store the next validators instead of the hash.
+]
+
+(* A signed header is just a header together with a set of commits *)
+LightBlocks == [header: BlockHeaders, Commits: Commits]
+
+VARIABLES
+    refClock,
+      (* the current global time in integer units as perceived by the reference chain *)
+    blockchain,
+      (* a sequence of BlockHeaders, which gives us a bird's-eye view of the blockchain *)
+    Faulty
+      (* A set of faulty nodes, which can act as validators. We assume that the set
+         of faulty processes is non-decreasing. If a process has recovered, it should
+         connect using a different id. *)
+
+(* all variables, to be used with UNCHANGED *)
+vars == <<refClock, blockchain, Faulty>>
+
+(* The set of all correct nodes in a state *)
+Corr == AllNodes \ Faulty
+
+(* APALACHE annotations *)
+a <: b == a \* type annotation
+
+NT == STRING
+NodeSet(S) == S <: {NT}
+EmptyNodeSet == NodeSet({})
+
+BT == [height |-> Int, time |-> Int, lastCommit |-> {NT}, VS |-> {NT}, NextVS |-> {NT}]
+
+LBT == [header |-> BT, Commits |-> {NT}]
+(* end of APALACHE annotations *)
+
+(****************************** BLOCKCHAIN ************************************)
+
+(* the header is still within the trusting period *)
+InTrustingPeriod(header) ==
+    refClock < header.time + TRUSTING_PERIOD
+
+(*
+  Given a validator set pVS \subseteq AllNodes and a set of nodes pNodes,
+  test whether pNodes has more than 2/3 of the voting power among the
+  nodes in pVS (recall that every node has voting power one).
+ *)
+TwoThirds(pVS, pNodes) ==
+    LET TP == Cardinality(pVS)
+        SP == Cardinality(pVS \intersect pNodes)
+    IN
+    3 * SP > 2 * TP \* when thinking in real numbers, not integers: SP > 2.0 / 3.0 * TP
+
+(*
+  Given a set pFaultyNodes of faulty nodes, test whether the faulty validators
+  in the validator set pVS have less than maxRatio of the total voting power.
+
+  Parameters:
+    - pFaultyNodes is a set of nodes that are considered faulty
+    - pVS is a set of all validators, maybe including Faulty, intersecting with it, etc.
+    - maxRatio is a pair <<a, b>> that limits the ratio a / b of the faulty
+      validators from above (exclusive)
+ *)
+FaultyValidatorsFewerThan(pFaultyNodes, pVS, maxRatio) ==
+    LET FN == pFaultyNodes \intersect pVS \* faulty nodes in pVS
+        CN == pVS \ pFaultyNodes          \* correct nodes in pVS
+        CP == Cardinality(CN)             \* power of the correct nodes
+        FP == Cardinality(FN)             \* power of the faulty nodes
+    IN
+    \* CP + FP = TP is the total voting power
+    LET TP == CP + FP IN
+    FP * maxRatio[2] < TP * maxRatio[1]
+
+(* Can a block be produced by a correct peer, or an authenticated Byzantine peer *)
+IsLightBlockAllowedByDigitalSignatures(ht, block) ==
+    \/ block.header = blockchain[ht] \* signed by correct and faulty (maybe)
+    \/ /\ block.Commits \subseteq Faulty
+       /\ block.header.height = ht
+       /\ block.header.time >= 0 \* signed only by faulty
+
+(*
+  Initialize the blockchain to the ultimate height right in the initial states.
+  We pick the faulty validators statically, but that should not affect the light client.
+
+  Parameters:
+    - pMaxFaultyRatioExclusive is a pair <<a, b>> that bounds the ratio of
+      faulty validators in each block from above by a / b (exclusive)
+ *)
+InitToHeight(pMaxFaultyRatioExclusive) ==
+  /\ Faulty \in SUBSET AllNodes \* some nodes may fail
+  \* pick the validator sets and last commits
+  /\ \E vs, lastCommit \in [Heights -> SUBSET AllNodes]:
+     \E timestamp \in [Heights -> Int]:
+        \* refClock is at least as early as the timestamp in the last block
+        /\ \E tm \in Int: refClock = tm /\ tm >= timestamp[ULTIMATE_HEIGHT]
+        \* the genesis starts on day 1
+        /\ timestamp[1] = 1
+        /\ vs[1] = AllNodes
+        /\ lastCommit[1] = EmptyNodeSet
+        /\ \A h \in Heights \ {1}:
+          /\ lastCommit[h] \subseteq vs[h - 1]   \* the non-validators cannot commit
+          /\ TwoThirds(vs[h - 1], lastCommit[h]) \* the commit has >2/3 of validator votes
+          \* the faulty validators have the power below the threshold
+          /\ FaultyValidatorsFewerThan(Faulty, vs[h], pMaxFaultyRatioExclusive)
+          /\ timestamp[h] > timestamp[h - 1]     \* the time grows monotonically
+          /\ timestamp[h] < timestamp[h - 1] + TRUSTING_PERIOD \* but not too fast
+        \* form the block chain out of validator sets and commits (this makes apalache faster)
+        /\ blockchain = [h \in Heights |->
+             [height |-> h,
+              time |-> timestamp[h],
+              VS |-> vs[h],
+              NextVS |-> IF h < ULTIMATE_HEIGHT THEN vs[h + 1] ELSE AllNodes,
+              lastCommit |-> lastCommit[h]]
+          ] \******
+
+(********************* BLOCKCHAIN ACTIONS ********************************)
+(*
+  Advance the clock by zero or more time units.
+ *)
+AdvanceTime ==
+  /\ \E tm \in Int: tm >= refClock /\ refClock' = tm
+  /\ UNCHANGED <<blockchain, Faulty>>
+
+=============================================================================
+\* Modification History
+\* Last modified Wed Jun 10 14:10:54 CEST 2020 by igor
+\* Created Fri Oct 11 15:45:11 CEST 2019 by igor
diff --git a/spec/light-client/detection/LCD_MC3_3_faulty.tla b/spec/light-client/detection/LCD_MC3_3_faulty.tla
new file mode 100644
index 0000000000..cef1df4d37
--- /dev/null
+++ b/spec/light-client/detection/LCD_MC3_3_faulty.tla
@@ -0,0 +1,27 @@
+------------------------- MODULE LCD_MC3_3_faulty ---------------------------
+
+AllNodes == {"n1", "n2", "n3"}
+TRUSTED_HEIGHT == 1
+TARGET_HEIGHT == 3
+TRUSTING_PERIOD == 1400 \* two weeks, one day is 100 time units :-)
+CLOCK_DRIFT == 10       \* how much we assume the local clock is drifting
+REAL_CLOCK_DRIFT == 3   \* how much the local clock is actually drifting
+IS_PRIMARY_CORRECT == FALSE
+IS_SECONDARY_CORRECT == TRUE
+FAULTY_RATIO == <<2, 3>> \* < 2 / 3 faulty validators
+
+VARIABLES
+  blockchain,           (* the reference blockchain *)
+  localClock,           (* current time in the light client *)
+  refClock,             (* current time in the reference blockchain *)
+  Faulty,               (* the set of faulty validators *)
+  state,                (* the state of the light client detector *)
+  fetchedLightBlocks1,  (* a function from heights to LightBlocks *)
+  fetchedLightBlocks2,  (* a function from heights to LightBlocks *)
+  fetchedLightBlocks1b, (* a function from heights to LightBlocks *)
+  commonHeight,         (* the height that is trusted in CreateEvidenceForPeer *)
+  nextHeightToTry,      (* the index in CreateEvidenceForPeer *)
+  evidences
+
+INSTANCE LCDetector_003_draft
+============================================================================
diff --git a/spec/light-client/detection/LCD_MC3_4_faulty.tla b/spec/light-client/detection/LCD_MC3_4_faulty.tla
new file mode 100644
index 0000000000..06bcdee13a
--- /dev/null
+++ b/spec/light-client/detection/LCD_MC3_4_faulty.tla
@@ -0,0 +1,27 @@
+------------------------- MODULE LCD_MC3_4_faulty ---------------------------
+
+AllNodes == {"n1", "n2", "n3"}
+TRUSTED_HEIGHT == 1
+TARGET_HEIGHT == 4
+TRUSTING_PERIOD == 1400 \* two weeks, one day is 100 time units :-)
+CLOCK_DRIFT == 10       \* how much we assume the local clock is drifting
+REAL_CLOCK_DRIFT == 3   \* how much the local clock is actually drifting
+IS_PRIMARY_CORRECT == FALSE
+IS_SECONDARY_CORRECT == TRUE
+FAULTY_RATIO == <<2, 3>> \* < 2 / 3 faulty validators
+
+VARIABLES
+  blockchain,           (* the reference blockchain *)
+  localClock,           (* current time in the light client *)
+  refClock,             (* current time in the reference blockchain *)
+  Faulty,               (* the set of faulty validators *)
+  state,                (* the state of the light client detector *)
+  fetchedLightBlocks1,  (* a function from heights to LightBlocks *)
+  fetchedLightBlocks2,  (* a function from heights to LightBlocks *)
+  fetchedLightBlocks1b, (* a function from heights to LightBlocks *)
+  commonHeight,         (* the height that is trusted in CreateEvidenceForPeer *)
+  nextHeightToTry,      (* the index in CreateEvidenceForPeer *)
+  evidences
+
+INSTANCE LCDetector_003_draft
+============================================================================
diff --git a/spec/light-client/detection/LCD_MC4_4_faulty.tla b/spec/light-client/detection/LCD_MC4_4_faulty.tla
new file mode 100644
index 0000000000..fdb97d9616
--- /dev/null
+++ b/spec/light-client/detection/LCD_MC4_4_faulty.tla
@@ -0,0 +1,27 @@
+------------------------- MODULE LCD_MC4_4_faulty ---------------------------
+
+AllNodes == {"n1", "n2", "n3", "n4"}
+TRUSTED_HEIGHT == 1
+TARGET_HEIGHT == 4
+TRUSTING_PERIOD == 1400 \* two weeks, one day is 100 time units :-)
+CLOCK_DRIFT == 10       \* how much we assume the local clock is drifting
+REAL_CLOCK_DRIFT == 3   \* how much the local clock is actually drifting
+IS_PRIMARY_CORRECT == FALSE
+IS_SECONDARY_CORRECT == TRUE
+FAULTY_RATIO == <<2, 3>> \* < 2 / 3 faulty validators
+
+VARIABLES
+  blockchain,           (* the reference blockchain *)
+  localClock,           (* current time in the light client *)
+  refClock,             (* current time in the reference blockchain *)
+  Faulty,               (* the set of faulty validators *)
+  state,                (* the state of the light client detector *)
+  fetchedLightBlocks1,  (* a function from heights to LightBlocks *)
+  fetchedLightBlocks2,  (* a function from heights to LightBlocks *)
+  fetchedLightBlocks1b, (* a function from heights to LightBlocks *)
+  commonHeight,         (* the height that is trusted in CreateEvidenceForPeer *)
+  nextHeightToTry,      (* the index in CreateEvidenceForPeer *)
+  evidences
+
+INSTANCE LCDetector_003_draft
+============================================================================
diff --git a/spec/light-client/detection/LCD_MC5_5_faulty.tla b/spec/light-client/detection/LCD_MC5_5_faulty.tla
new file mode 100644
index 0000000000..fdbd87b8b9
--- /dev/null
+++ b/spec/light-client/detection/LCD_MC5_5_faulty.tla
@@ -0,0 +1,27 @@
+------------------------- MODULE LCD_MC5_5_faulty ---------------------------
+
+AllNodes == {"n1", "n2", "n3", "n4", "n5"}
+TRUSTED_HEIGHT == 1
+TARGET_HEIGHT == 5
+TRUSTING_PERIOD == 1400 \* two weeks, one day is 100 time units :-)
+CLOCK_DRIFT == 10       \* how much we assume the local clock is drifting
+REAL_CLOCK_DRIFT == 3   \* how much the local clock is actually drifting
+IS_PRIMARY_CORRECT == FALSE
+IS_SECONDARY_CORRECT == TRUE
+FAULTY_RATIO == <<2, 3>> \* < 2 / 3 faulty validators
+
+VARIABLES
+  blockchain,           (* the reference blockchain *)
+  localClock,           (* current time in the light client *)
+  refClock,             (* current time in the reference blockchain *)
+  Faulty,               (* the set of faulty validators *)
+  state,                (* the state of the light client detector *)
+  fetchedLightBlocks1,  (* a function from heights to LightBlocks *)
+  fetchedLightBlocks2,  (* a function from heights to LightBlocks *)
+  fetchedLightBlocks1b, (* a function from heights to LightBlocks *)
+  commonHeight,         (* the height that is trusted in CreateEvidenceForPeer *)
+  nextHeightToTry,      (* the index in CreateEvidenceForPeer *)
+  evidences
+
+INSTANCE LCDetector_003_draft
+============================================================================
diff --git a/spec/light-client/detection/LCDetector_003_draft.tla b/spec/light-client/detection/LCDetector_003_draft.tla
new file mode 100644
index 0000000000..e2d32e996f
--- /dev/null
+++ b/spec/light-client/detection/LCDetector_003_draft.tla
@@ -0,0 +1,373 @@
+-------------------------- MODULE LCDetector_003_draft -----------------------------
+(**
+ * This is a specification of the light client detector module.
+ * It follows the English specification:
+ *
+ * https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/detection/detection_003_reviewed.md
+ *
+ * The assumptions made in this specification:
+ *
+ * - the light client connects to one primary and one secondary peer
+ *
+ * - the light client has its own local clock that can drift from the reference clock
+ *   within the envelope [refClock - CLOCK_DRIFT, refClock + CLOCK_DRIFT].
+ *   The local clock may increase as well as decrease in the envelope
+ *   (similar to clock synchronization).
+ *
+ * - the ratio of the faulty validators is set as a parameter.
+ *
+ * Igor Konnov, Josef Widder, 2020
+ *)
+
+EXTENDS Integers
+
+\* the parameters of Light Client
+CONSTANTS
+  AllNodes,
+    (* a set of all nodes that can act as validators (correct and faulty) *)
+  TRUSTED_HEIGHT,
+    (* an index of the block header that the light client trusts by social consensus *)
+  TARGET_HEIGHT,
+    (* an index of the block header that the light client tries to verify *)
+  TRUSTING_PERIOD,
+    (* the period within which the validators are trusted *)
+  CLOCK_DRIFT,
+    (* the assumed precision of the clock *)
+  REAL_CLOCK_DRIFT,
+    (* the actual clock drift, which under normal circumstances should not
+       be larger than CLOCK_DRIFT (otherwise, there will be a bug) *)
+  FAULTY_RATIO,
+    (* a pair <<a, b>> that limits the ratio of faulty validators in the blockchain
+       from above (exclusive). The Tendermint security model prescribes 1 / 3.
+     *)
+  IS_PRIMARY_CORRECT,
+  IS_SECONDARY_CORRECT
+
+VARIABLES
+  blockchain,           (* the reference blockchain *)
+  localClock,           (* the local clock of the light client *)
+  refClock,             (* the reference clock in the reference blockchain *)
+  Faulty,               (* the set of faulty validators *)
+  state,                (* the state of the light client detector *)
+  fetchedLightBlocks1,  (* a function from heights to LightBlocks *)
+  fetchedLightBlocks2,  (* a function from heights to LightBlocks *)
+  fetchedLightBlocks1b, (* a function from heights to LightBlocks *)
+  commonHeight,         (* the height that is trusted in CreateEvidenceForPeer *)
+  nextHeightToTry,      (* the index in CreateEvidenceForPeer *)
+  evidences             (* a set of evidences *)
+
+vars == <<blockchain, localClock, refClock, Faulty, state,
+          fetchedLightBlocks1, fetchedLightBlocks2, fetchedLightBlocks1b,
+          commonHeight, nextHeightToTry, evidences>>
+
+\* (old) type annotations in Apalache
+a <: b == a
+
+
+\* instantiate a reference chain
+ULTIMATE_HEIGHT == TARGET_HEIGHT + 1
+BC == INSTANCE Blockchain_003_draft
+      WITH ULTIMATE_HEIGHT <- (TARGET_HEIGHT + 1)
+
+\* use the light client API
+LC == INSTANCE LCVerificationApi_003_draft
+
+\* evidence type
+ET == [peer |-> STRING, conflictingBlock |-> BC!LBT, commonHeight |-> Int]
+
+\* is the algorithm in the terminating state
+IsTerminated ==
+  state \in { <<"NoEvidence", "PRIMARY">>,
+              <<"NoEvidence", "SECONDARY">>,
+              <<"FaultyPeer", "PRIMARY">>,
+              <<"FaultyPeer", "SECONDARY">>,
+              <<"FoundEvidence", "PRIMARY">> }
+
+
+(********************************* Initialization ******************************)
+
+\* initialization for the light blocks data structure
+InitLightBlocks(lb, Heights) ==
+  \* BC!LightBlocks is an infinite set, as time is not restricted.
+  \* Hence, we initialize the light blocks by picking the sets inside.
+  \E vs, nextVS, lastCommit, commit \in [Heights -> SUBSET AllNodes]:
+    \* although [Heights -> Int] is an infinite set,
+    \* Apalache needs just one instance of this set, so it does not complain.
+    \E timestamp \in [Heights -> Int]:
+      LET hdr(h) ==
+            [height |-> h,
+             time |-> timestamp[h],
+             VS |-> vs[h],
+             NextVS |-> nextVS[h],
+             lastCommit |-> lastCommit[h]]
+      IN
+      LET lightHdr(h) ==
+            [header |-> hdr(h), Commits |-> commit[h]]
+      IN
+      lb = [ h \in Heights |-> lightHdr(h) ]
+
+\* initialize the detector algorithm
+Init ==
+  \* initialize the blockchain to TARGET_HEIGHT + 1
+  /\ BC!InitToHeight(FAULTY_RATIO)
+  /\ \E tm \in Int:
+       tm >= 0 /\ LC!IsLocalClockWithinDrift(tm, refClock) /\ localClock = tm
+  \* start with the secondary looking for evidence
+  /\ state = <<"Init", "SECONDARY">> /\ commonHeight = 0 /\ nextHeightToTry = 0
+  /\ evidences = {} <: {ET}
+  \* Precompute a possible result of light client verification for the primary.
+  \* It is the input to the detection algorithm.
+  /\ \E Heights1 \in SUBSET(TRUSTED_HEIGHT..TARGET_HEIGHT):
+       /\ TRUSTED_HEIGHT \in Heights1
+       /\ TARGET_HEIGHT \in Heights1
+       /\ InitLightBlocks(fetchedLightBlocks1, Heights1)
+       \* As we have a non-deterministic scheduler, for every trace that has
+       \* an unverified block, there is a filtered trace that only has verified
+       \* blocks. This is a deep observation.
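+       \* In other words, instead of executing the verification loop step by
+       \* step, we non-deterministically pick one of its possible outcomes and
+       \* constrain it by the postcondition VerifyToTargetPost below.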
+ /\ LET status == [h \in Heights1 |-> "StateVerified"] IN + LC!VerifyToTargetPost(blockchain, IS_PRIMARY_CORRECT, + fetchedLightBlocks1, status, + TRUSTED_HEIGHT, TARGET_HEIGHT, "finishedSuccess") + \* initialize the other data structures to the default values + /\ LET trustedBlock == blockchain[TRUSTED_HEIGHT] + trustedLightBlock == [header |-> trustedBlock, Commits |-> AllNodes] + IN + /\ fetchedLightBlocks2 = [h \in {TRUSTED_HEIGHT} |-> trustedLightBlock] + /\ fetchedLightBlocks1b = [h \in {TRUSTED_HEIGHT} |-> trustedLightBlock] + + +(********************************* Transitions ******************************) + +\* a block should contain a copy of the block from the reference chain, +\* with a matching commit +CopyLightBlockFromChain(block, height) == + LET ref == blockchain[height] + lastCommit == + IF height < ULTIMATE_HEIGHT + THEN blockchain[height + 1].lastCommit + \* for the ultimate block, which we never use, + \* as ULTIMATE_HEIGHT = TARGET_HEIGHT + 1 + ELSE blockchain[height].VS + IN + block = [header |-> ref, Commits |-> lastCommit] + +\* Either the primary is correct and the block comes from the reference chain, +\* or the block is produced by a faulty primary. +\* +\* [LCV-FUNC-FETCH.1::TLA.1] +FetchLightBlockInto(isPeerCorrect, block, height) == + IF isPeerCorrect + THEN CopyLightBlockFromChain(block, height) + ELSE BC!IsLightBlockAllowedByDigitalSignatures(height, block) + + +(** + * Pick the next height, for which there is a block. + *) +PickNextHeight(fetchedBlocks, height) == + LET largerHeights == { h \in DOMAIN fetchedBlocks: h > height } IN + IF largerHeights = ({} <: {Int}) + THEN -1 + ELSE CHOOSE h \in largerHeights: + \A h2 \in largerHeights: h <= h2 + + +(** + * Check, whether the target header matches at the secondary and primary. 
+ *)
+CompareLast ==
+  /\ state = <<"Init", "SECONDARY">>
+  \* fetch a block from the secondary:
+  \* non-deterministically pick a block that matches the constraints
+  /\ \E latest \in BC!LightBlocks:
+      \* for the moment, we ignore the possibility of a timeout when fetching a block
+      /\ FetchLightBlockInto(IS_SECONDARY_CORRECT, latest, TARGET_HEIGHT)
+      /\ IF latest.header = fetchedLightBlocks1[TARGET_HEIGHT].header
+         THEN \* if the headers match, CreateEvidence is not called
+           /\ state' = <<"NoEvidence", "SECONDARY">>
+           \* save the retrieved block for further analysis
+           /\ fetchedLightBlocks2' =
+                [h \in (DOMAIN fetchedLightBlocks2) \union {TARGET_HEIGHT} |->
+                   IF h = TARGET_HEIGHT THEN latest ELSE fetchedLightBlocks2[h]]
+           /\ UNCHANGED <<commonHeight, nextHeightToTry>>
+         ELSE \* prepare the parameters for CreateEvidence
+           /\ commonHeight' = TRUSTED_HEIGHT
+           /\ nextHeightToTry' = PickNextHeight(fetchedLightBlocks1, TRUSTED_HEIGHT)
+           /\ state' = IF nextHeightToTry' >= 0
+                       THEN <<"CreateEvidence", "SECONDARY">>
+                       ELSE <<"FaultyPeer", "SECONDARY">>
+           /\ UNCHANGED fetchedLightBlocks2
+
+  /\ UNCHANGED <<blockchain, Faulty, evidences,
+                 fetchedLightBlocks1, fetchedLightBlocks1b>>
+
+
+\* the actual loop in CreateEvidence
+CreateEvidence(peer, isPeerCorrect, refBlocks, targetBlocks) ==
+  /\ state = <<"CreateEvidence", peer>>
+  \* precompute a possible result of light client verification for the secondary
+  \* we have to introduce HeightRange, because Apalache can only handle a..b
+  \* for constant a and b
+  /\ LET HeightRange == { h \in TRUSTED_HEIGHT..TARGET_HEIGHT:
+                            commonHeight <= h /\ h <= nextHeightToTry } IN
+     \E HeightsRange \in SUBSET(HeightRange):
+       /\ commonHeight \in HeightsRange /\ nextHeightToTry \in HeightsRange
+       /\ InitLightBlocks(targetBlocks, HeightsRange)
+       \* As we have a non-deterministic scheduler, for every trace that has
+       \* an unverified block, there is a filtered trace that only has verified
+       \* blocks. This is a deep observation.
+       /\ \E result \in {"finishedSuccess", "finishedFailure"}:
+          LET targetStatus == [h \in HeightsRange |-> "StateVerified"] IN
+          \* call VerifyToTarget for (commonHeight, nextHeightToTry).
+          /\ LC!VerifyToTargetPost(blockchain, isPeerCorrect,
+                                   targetBlocks, targetStatus,
+                                   commonHeight, nextHeightToTry, result)
+          \* case 1: the peer has failed (or the trusting period has expired)
+          /\ \/ /\ result /= "finishedSuccess"
+                /\ state' = <<"FaultyPeer", peer>>
+                /\ UNCHANGED <<commonHeight, nextHeightToTry, evidences>>
+             \* case 2: success
+             \/ /\ result = "finishedSuccess"
+                /\ LET block1 == refBlocks[nextHeightToTry] IN
+                   LET block2 == targetBlocks[nextHeightToTry] IN
+                   IF block1.header /= block2.header
+                   THEN \* the target blocks do not match
+                     /\ state' = <<"FoundEvidence", peer>>
+                     /\ evidences' = evidences \union
+                          {[peer |-> peer,
+                            conflictingBlock |-> block1,
+                            commonHeight |-> commonHeight]}
+                     /\ UNCHANGED <<commonHeight, nextHeightToTry>>
+                   ELSE \* the target blocks match
+                     /\ nextHeightToTry' = PickNextHeight(refBlocks, nextHeightToTry)
+                     /\ commonHeight' = nextHeightToTry
+                     /\ state' = IF nextHeightToTry' >= 0
+                                 THEN state
+                                 ELSE <<"NoEvidence", peer>>
+                     /\ UNCHANGED evidences
+
+SwitchToPrimary ==
+  /\ state = <<"FoundEvidence", "SECONDARY">>
+  /\ nextHeightToTry' = PickNextHeight(fetchedLightBlocks2, commonHeight)
+  /\ state' = <<"CreateEvidence", "PRIMARY">>
+  /\ UNCHANGED <<blockchain, Faulty, evidences, commonHeight,
+                 fetchedLightBlocks1, fetchedLightBlocks2, fetchedLightBlocks1b>>
+
+
+CreateEvidenceForSecondary ==
+  /\ CreateEvidence("SECONDARY", IS_SECONDARY_CORRECT,
+                    fetchedLightBlocks1, fetchedLightBlocks2')
+  /\ UNCHANGED <<blockchain, Faulty,
+                 fetchedLightBlocks1, fetchedLightBlocks1b>>
+
+CreateEvidenceForPrimary ==
+  /\ CreateEvidence("PRIMARY", IS_PRIMARY_CORRECT,
+                    fetchedLightBlocks2,
+                    fetchedLightBlocks1b')
+  /\ UNCHANGED <<blockchain, Faulty,
+                 fetchedLightBlocks1, fetchedLightBlocks2>>
+
+(*
+  The local and global clocks can be updated. They can also drift from each other.
+  Note that the local clock can actually go backwards in time.
+  However, it still stays in the drift envelope
+  of [refClock - REAL_CLOCK_DRIFT, refClock + REAL_CLOCK_DRIFT].
+ *)
+AdvanceClocks ==
+  /\ \E tm \in Int:
+       tm >= refClock /\ refClock' = tm
+  /\ \E tm \in Int:
+       /\ tm >= localClock
+       /\ LC!IsLocalClockWithinDrift(tm, refClock')
+       /\ localClock' = tm
+
+(**
+  Execute AttackDetector for one secondary.
+
+  [LCD-FUNC-DETECTOR.2::LOOP.1]
+ *)
+Next ==
+  /\ AdvanceClocks
+  /\ \/ CompareLast
+     \/ CreateEvidenceForSecondary
+     \/ SwitchToPrimary
+     \/ CreateEvidenceForPrimary
+
+
+\* simple invariants to see the progress of the detector
+NeverNoEvidence == state[1] /= "NoEvidence"
+NeverFoundEvidence == state[1] /= "FoundEvidence"
+NeverFaultyPeer == state[1] /= "FaultyPeer"
+NeverCreateEvidence == state[1] /= "CreateEvidence"
+
+NeverFoundEvidencePrimary == state /= <<"FoundEvidence", "PRIMARY">>
+
+NeverReachTargetHeight == nextHeightToTry < TARGET_HEIGHT
+
+EvidenceWhenFaultyInv ==
+  (state[1] = "FoundEvidence") => (~IS_PRIMARY_CORRECT \/ ~IS_SECONDARY_CORRECT)
+
+NoEvidenceForCorrectInv ==
+  IS_PRIMARY_CORRECT /\ IS_SECONDARY_CORRECT => evidences = {} <: {ET}
+
+(**
+ * If we find evidence by peer A, peer B has indeed given us a corrupted
+ * header following the common height. Also, we have a verification trace by peer A.
+ *)
+CommonHeightOnEvidenceInv ==
+  \A e \in evidences:
+    LET conflicting == e.conflictingBlock IN
+    LET conflictingHeader == conflicting.header IN
+    \* the evidence by suspectingPeer can be verified by suspectingPeer in one step
+    LET SoundEvidence(suspectingPeer, peerBlocks) ==
+      \/ e.peer /= suspectingPeer
+      \* the conflicting block from another peer verifies against the common height
+      \/ /\ "SUCCESS" =
+              LC!ValidAndVerifiedUntimed(peerBlocks[e.commonHeight], conflicting)
+         \* and the headers of the same height by the two peers do not match
+         /\ peerBlocks[conflictingHeader.height].header /= conflictingHeader
+    IN
+    /\ SoundEvidence("PRIMARY", fetchedLightBlocks1b)
+    /\ SoundEvidence("SECONDARY", fetchedLightBlocks2)
+
+(**
+ * If the light client does not find evidence,
+ * then there is no attack on the light client.
+ *)
+AccuracyInv ==
+    (LC!InTrustingPeriodLocal(fetchedLightBlocks1[TARGET_HEIGHT].header)
+        /\ state = <<"NoEvidence", "SECONDARY">>)
+    =>
+    (fetchedLightBlocks1[TARGET_HEIGHT].header = blockchain[TARGET_HEIGHT]
+        /\ fetchedLightBlocks2[TARGET_HEIGHT].header = blockchain[TARGET_HEIGHT])
+
+(**
+ * The primary reports a corrupted block at the target height. If the secondary is
+ * correct and the algorithm has terminated, we should get the evidence.
+ * This property is violated due to clock drift. VerifyToTarget may fail with
+ * the correct secondary within the trusting period (due to clock drift, locally
+ * we think that we are outside of the trusting period).
+ *)
+PrecisionInvGrayZone ==
+    (/\ fetchedLightBlocks1[TARGET_HEIGHT].header /= blockchain[TARGET_HEIGHT]
+     /\ BC!InTrustingPeriod(blockchain[TRUSTED_HEIGHT])
+     /\ IS_SECONDARY_CORRECT
+     /\ IsTerminated)
+    =>
+    evidences /= {} <: {ET}
+
+(**
+ * The primary reports a corrupted block at the target height. If the secondary is
+ * correct and the algorithm has terminated, we should get the evidence.
+ * This invariant does not fail, as we are using the local clock to check the trusting
+ * period.
+ *)
+PrecisionInvLocal ==
+    (/\ fetchedLightBlocks1[TARGET_HEIGHT].header /= blockchain[TARGET_HEIGHT]
+     /\ LC!InTrustingPeriodLocalSurely(blockchain[TRUSTED_HEIGHT])
+     /\ IS_SECONDARY_CORRECT
+     /\ IsTerminated)
+    =>
+    evidences /= {} <: {ET}
+
+====================================================================================
diff --git a/spec/light-client/detection/LCVerificationApi_003_draft.tla b/spec/light-client/detection/LCVerificationApi_003_draft.tla
new file mode 100644
index 0000000000..909eab92b8
--- /dev/null
+++ b/spec/light-client/detection/LCVerificationApi_003_draft.tla
@@ -0,0 +1,192 @@
+-------------------- MODULE LCVerificationApi_003_draft --------------------------
+(**
+ * The common interface of the light client verification and detection.
+ *)
+EXTENDS Integers, FiniteSets
+
+\* the parameters of Light Client
+CONSTANTS
+  TRUSTING_PERIOD,
+    (* the period within which the validators are trusted *)
+  CLOCK_DRIFT,
+    (* the assumed precision of the clock *)
+  REAL_CLOCK_DRIFT,
+    (* the actual clock drift, which under normal circumstances should not
+       be larger than CLOCK_DRIFT (otherwise, there will be a bug) *)
+  FAULTY_RATIO
+    (* a pair <<a, b>> that limits the ratio of faulty validators in the blockchain
+       from above (exclusive). The Tendermint security model prescribes 1 / 3.
+     *)
+
+VARIABLES
+  localClock (* current time as measured by the light client *)
+
+(* the header is still within the trusting period *)
+InTrustingPeriodLocal(header) ==
+    \* note that the assumption about the drift reduces the period of trust
+    localClock < header.time + TRUSTING_PERIOD - CLOCK_DRIFT
+
+(* the header is still within the trusting period, even if the clock can go backwards *)
+InTrustingPeriodLocalSurely(header) ==
+    \* note that the assumption about the drift reduces the period of trust
+    localClock < header.time + TRUSTING_PERIOD - 2 * CLOCK_DRIFT
+
+(* ensure that the local clock does not drift far away from the global clock *)
+IsLocalClockWithinDrift(local, global) ==
+    /\ global - REAL_CLOCK_DRIFT <= local
+    /\ local <= global + REAL_CLOCK_DRIFT
+
+(**
+ * Check that the commits in an untrusted block form more than 1/3 of the next
+ * validators in a trusted header.
+ *)
+SignedByOneThirdOfTrusted(trusted, untrusted) ==
+  LET TP == Cardinality(trusted.header.NextVS)
+      SP == Cardinality(untrusted.Commits \intersect trusted.header.NextVS)
+  IN
+  3 * SP > TP
+
+(**
+ The first part of the precondition of ValidAndVerified, which does not take
+ the current time into account.
+
+ [LCV-FUNC-VALID.1::TLA-PRE-UNTIMED.1]
+ *)
+ValidAndVerifiedPreUntimed(trusted, untrusted) ==
+  LET thdr == trusted.header
+      uhdr == untrusted.header
+  IN
+  /\ thdr.height < uhdr.height
+  \* the trusted block has been created earlier
+  /\ thdr.time < uhdr.time
+  /\ untrusted.Commits \subseteq uhdr.VS
+  /\ LET TP == Cardinality(uhdr.VS)
+         SP == Cardinality(untrusted.Commits)
+     IN
+     3 * SP > 2 * TP
+  /\ thdr.height + 1 = uhdr.height => thdr.NextVS = uhdr.VS
+  (* As we do not have explicit hashes we ignore these three checks of the English spec:
+
+     1. "trusted.Commit is a commit for the header trusted.Header,
+        i.e., it contains the correct hash of the header".
+     2. untrusted.Validators = hash(untrusted.Header.Validators)
+     3. untrusted.NextValidators = hash(untrusted.Header.NextValidators)
+   *)
+
+(**
+ Check the precondition of ValidAndVerified, including the time checks.
+
+ [LCV-FUNC-VALID.1::TLA-PRE.1]
+ *)
+ValidAndVerifiedPre(trusted, untrusted, checkFuture) ==
+  LET thdr == trusted.header
+      uhdr == untrusted.header
+  IN
+  /\ InTrustingPeriodLocal(thdr)
+  \* The untrusted block is not from the future (modulo clock drift).
+  \* Do the check, if it is required.
+  /\ checkFuture => uhdr.time < localClock + CLOCK_DRIFT
+  /\ ValidAndVerifiedPreUntimed(trusted, untrusted)
+
+
+(**
+ Check whether an untrusted block is valid and verifiable w.r.t. a trusted header.
+ This test does not take the current time into account; it only looks at the block structure.
+
+ [LCV-FUNC-VALID.1::TLA-UNTIMED.1]
+ *)
+ValidAndVerifiedUntimed(trusted, untrusted) ==
+  IF ~ValidAndVerifiedPreUntimed(trusted, untrusted)
+  THEN "INVALID"
+  ELSE IF untrusted.header.height = trusted.header.height + 1
+          \/ SignedByOneThirdOfTrusted(trusted, untrusted)
+       THEN "SUCCESS"
+       ELSE "NOT_ENOUGH_TRUST"
+
+(**
+ Check whether an untrusted block is valid and verifiable w.r.t. a trusted header.
+
+ [LCV-FUNC-VALID.1::TLA.1]
+ *)
+ValidAndVerified(trusted, untrusted, checkFuture) ==
+  IF ~ValidAndVerifiedPre(trusted, untrusted, checkFuture)
+  THEN "INVALID"
+  ELSE IF ~InTrustingPeriodLocal(untrusted.header)
+       (* We leave the following test in for documentation purposes.
+          The implementation should do this test, as signature verification may be slow.
+          In the TLA+ specification, ValidAndVerified happens in no time.
+        *)
+       THEN "FAILED_TRUSTING_PERIOD"
+       ELSE IF untrusted.header.height = trusted.header.height + 1
+               \/ SignedByOneThirdOfTrusted(trusted, untrusted)
+            THEN "SUCCESS"
+            ELSE "NOT_ENOUGH_TRUST"
+
+
+(**
+ The invariant of the light store that is not related to the blockchain
+ *)
+LightStoreInv(fetchedLightBlocks, lightBlockStatus) ==
+  \A lh, rh \in DOMAIN fetchedLightBlocks:
+    \* for every pair of stored headers that have been verified
+    \/ lh >= rh
+    \/ lightBlockStatus[lh] /= "StateVerified"
+    \/ lightBlockStatus[rh] /= "StateVerified"
+    \* either there is a header between them
+    \/ \E mh \in DOMAIN fetchedLightBlocks:
+         lh < mh /\ mh < rh /\ lightBlockStatus[mh] = "StateVerified"
+    \* or the left header is outside the trusting period, so no guarantees
+    \/ LET lhdr == fetchedLightBlocks[lh]
+           rhdr == fetchedLightBlocks[rh]
+       IN
+       \* we can verify the right one using the left one
+       "SUCCESS" = ValidAndVerifiedUntimed(lhdr, rhdr)
+
+(**
+ Correctness states that all the obtained headers are exactly like in the blockchain.
+
+ It is always the case that every verified header in LightStore was generated by
+ an instance of Tendermint consensus.
+
+ [LCV-DIST-SAFE.1::CORRECTNESS-INV.1]
+ *)
+CorrectnessInv(blockchain, fetchedLightBlocks, lightBlockStatus) ==
+  \A h \in DOMAIN fetchedLightBlocks:
+    lightBlockStatus[h] = "StateVerified" =>
+      fetchedLightBlocks[h].header = blockchain[h]
+
+(**
+ * When the light client terminates, there are no failed blocks.
+ * (Otherwise, someone lied to us.)
+ *)
+NoFailedBlocksOnSuccessInv(fetchedLightBlocks, lightBlockStatus) ==
+  \A h \in DOMAIN fetchedLightBlocks:
+    lightBlockStatus[h] /= "StateFailed"
+
+(**
+ The expected post-condition of VerifyToTarget.
+ *)
+VerifyToTargetPost(blockchain, isPeerCorrect,
+                   fetchedLightBlocks, lightBlockStatus,
+                   trustedHeight, targetHeight, finalState) ==
+  LET trustedHeader == fetchedLightBlocks[trustedHeight].header IN
+  \* The light client is not lying to us about the trusted block.
+  \* It is straightforward to detect.
+  /\ lightBlockStatus[trustedHeight] = "StateVerified"
+  /\ trustedHeight \in DOMAIN fetchedLightBlocks
+  /\ trustedHeader = blockchain[trustedHeight]
+  \* the invariants we have found in the light client verification
+  \* there is a problem with trusting period
+  /\ isPeerCorrect
+     => CorrectnessInv(blockchain, fetchedLightBlocks, lightBlockStatus)
+  \* a correct peer should not fail the light client,
+  \* if the trusted block is in the trusting period
+  /\ isPeerCorrect /\ InTrustingPeriodLocalSurely(trustedHeader)
+     => finalState = "finishedSuccess"
+  /\ finalState = "finishedSuccess" =>
+     /\ lightBlockStatus[targetHeight] = "StateVerified"
+     /\ targetHeight \in DOMAIN fetchedLightBlocks
+     /\ NoFailedBlocksOnSuccessInv(fetchedLightBlocks, lightBlockStatus)
+     /\ LightStoreInv(fetchedLightBlocks, lightBlockStatus)
+
+
+==================================================================================
diff --git a/spec/light-client/detection/README.md b/spec/light-client/detection/README.md
new file mode 100644
index 0000000000..f9c0820c0c
--- /dev/null
+++ b/spec/light-client/detection/README.md
@@ -0,0 +1,75 @@
+---
+order: 1
+parent:
+  title: Fork Detection
+  order: 2
+---
+
+# Tendermint fork detection and IBC fork detection
+
+## Status
+
+This is a work in progress.
+This directory captures the ongoing work and discussion on fork
+detection both in the context of a Tendermint light node and in the
+context of IBC. It contains the following files:
+
+### detection.md
+
+A draft of the light node fork detection, including the "proof of fork"
+definition, that is, the data structure used to submit evidence to full
+nodes.
+
+### [discussions.md](./discussions.md)
+
+A collection of ideas and intuitions from recent discussions:
+
+- the outcome of recent discussion
+- a sketch of the light client supervisor to provide the context in
+  which fork detection happens
+- a discussion about lightstore semantics
+
+### [req-ibc-detection.md](./req-ibc-detection.md)
+
+- a collection of requirements for fork detection in the IBC
+  context. In particular it contains a section "Required Changes in
+  ICS 007" with necessary updates to ICS 007 to support Tendermint
+  fork detection
+
+### [draft-functions.md](./draft-functions.md)
+
+In order to address the collected requirements, we started to sketch
+some functions that we will need in the future when we specify in more
+detail the
+
+- fork detection
+- proof of fork generation
+- proof of fork verification
+
+on the following components:
+
+- IBC on-chain components
+- Relayer
+
+### TODOs
+
+We decided to merge the files while there are still open points to
+address, to record the current state and move forward. In particular,
+the following points need to be addressed:
+
+-
+
+-
+
+-
+
+-
+
+Most likely we will write a specification on the light client
+supervisor along the outcomes of
+
+-
+
+that also addresses initialization
+
+-
diff --git a/spec/light-client/detection/detection_001_reviewed.md b/spec/light-client/detection/detection_001_reviewed.md
new file mode 100644
index 0000000000..cebf9aebbb
--- /dev/null
+++ b/spec/light-client/detection/detection_001_reviewed.md
@@ -0,0 +1,790 @@
+
+
+# ***This is an unfinished draft. Comments are welcome!***
+
+**TODO:** We will need to do small adaptations to the verification
+spec to reflect the semantics in the LightStore (verified, trusted,
+untrusted, etc. not needed anymore). In more detail:
+
+- The state of the Lightstore needs to go. Functions like `LatestVerified` can
+keep the name but will ignore state as it will not exist anymore.
+
+- The verification spec should be adapted so that the second parameter of
+`VerifyToTarget` is a lightblock; also a new version number for the function tag;
+
+- We should clarify what the expectation of `VerifyToTarget` is, so that if it
+returns TimeoutError the peer can be assumed faulty. I guess that
+VerifyToTarget with a correct full node should never terminate with
+TimeoutError.
+
+- We need to introduce a new version number for the new
+specification. So we should decide how
+  to handle that.
+
+# Light Client Attack Detector
+
+In this specification, we strengthen the light client to be resistant
+against so-called light client attacks. In a light client attack, all
+the correct Tendermint full nodes agree on the sequence of generated
+blocks (no fork), but a set of faulty full nodes attack a light client
+by generating (signing) a block that deviates from the block of the
+same height on the blockchain. In order to do so, some of these faulty
+full nodes must have been validators before and violate
+[[TMBC-FM-2THIRDS]](TMBC-FM-2THIRDS-link), as otherwise, if
+[[TMBC-FM-2THIRDS]](TMBC-FM-2THIRDS-link) held,
+[verification](verification) would satisfy
+[[LCV-SEQ-SAFE.1]](LCV-SEQ-SAFE-link).
+ +An attack detector (or detector for short) is a mechanism that is used +by the light client [supervisor](supervisor) after +[verification](verification) of a new light block +with the primary, to cross-check the newly learned light block with +other peers (secondaries). It expects as input a light block with some +height *root* (that serves as a root of trust), and a verification +trace (a sequence of lightblocks) that the primary provided. + +In case the detector observes a light client attack, it computes +evidence data that can be used by Tendermint full nodes to isolate a +set of faulty full nodes that are still within the unbonding period +(more than 1/3 of the voting power of the validator set at some block of the chain), +and report them via ABCI to the application of a Tendermint blockchain +in order to punish faulty nodes. + +## Context of this document + +The light client [verification](verification) specification is +designed for the Tendermint failure model (1/3 assumption) +[[TMBC-FM-2THIRDS]](TMBC-FM-2THIRDS-link). It is safe under this +assumption, and live if it can reliably (that is, no message loss, no +duplication, and eventually delivered) and timely communicate with a +correct full node. If [[TMBC-FM-2THIRDS]](TMBC-FM-2THIRDS-link) assumption is violated, the light client +can be fooled to trust a light block that was not generated by +Tendermint consensus. + +This specification, the attack detector, is a "second line of +defense", in case the 1/3 assumption is violated. Its goal is to +detect a light client attack (conflicting light blocks) and collect +evidence. However, it is impractical to probe all full nodes. At this +time we consider a simple scheme of maintaining an address book of +known full nodes from which a small subset (e.g., 4) are chosen +initially to communicate with. More involved book keeping with +probabilistic guarantees can be considered at later stages of the +project. + +The light client maintains a simple address book containing addresses +of full nodes that it can pick as primary and secondaries. To obtain +a new light block, the light client first does +[verification](verification) with the primary, and then cross-checks +the light block (and the trace of light blocks that led to it) with +the secondaries using this specification. + +## Tendermint Consensus and Light Client Attacks + +In this section we will give some mathematical definitions of what we +mean by light client attacks (that are considered in this +specification) and how they differ from main-chain forks. To this end +we start by defining some properties of the sequence of blocks that is +decided upon by Tendermint consensus in normal operation (if the +Tendermint failure model holds +[[TMBC-FM-2THIRDS]](TMBC-FM-2THIRDS-link)), +and then define different +deviations that correspond to attack scenarios. + +#### **[TMBC-GENESIS.1]** + +Let *Genesis* be the agreed-upon initial block (file). + +#### **[TMBC-FUNC-SIGN.1]** + +Let *b* and *c* be two light blocks with *b.Header.Height + 1 = +c.Header.Height*. We define the predicate **signs(b,c)** to hold +iff *c.Header.LastCommit* is in *PossibleCommit(b)*. +[[TMBC-SOUND-DISTR-POSS-COMMIT.1]](TMBC-SOUND-DISTR-POSS-COMMIT-link). + +> The above encodes sequential verification, that is, intuitively, +> b.Header.NextValidators = c.Header.Validators and 2/3 of +> these Validators signed c? + +#### **[TMBC-FUNC-SUPPORT.1]** + +Let *b* and *c* be two light blocks. 
We define the predicate +**supports(b,c,t)** to hold iff + +- *t - trustingPeriod < b.Header.Time < t* +- the voting power in *b.NextValidators* of nodes in *c.Commit* + is more than 1/3 of *TotalVotingPower(b.Header.NextValidators)* + +> That is, if the [Tendermint failure model](TMBC-FM-2THIRDS-link) +> holds, then *c* has been signed by at least one correct full node, cf. +> [[TMBC-VAL-CONTAINS-CORR.1]](TMBC-VAL-CONTAINS-CORR-link). +> The following formalizes that *b* was properly generated by +> Tendermint; *b* can be traced back to genesis + +#### **[TMBC-SEQ-ROOTED.1]** + +Let *b* be a light block. +We define *sequ-rooted(b)* iff for all *i*, *1 <= i < h = b.Header.Height*, +there exist light blocks *a(i)* s.t. + +- *a(1) = Genesis* and +- *a(h) = b* and +- *signs( a(i) , a(i+1) )*. + +> The following formalizes that *c* is trusted based on *b* in +> skipping verification. Observe that we do not require here (yet) +> that *b* was properly generated. + +#### **[TMBC-SKIP-TRACE.1]** + +Let *b* and *c* be light blocks. We define *skip-trace(b,c,t)* if at +time t there exists an *h* and a sequence *a(1)*, ... *a(h)* s.t. + +- *a(1) = b* and +- *a(h) = c* and +- *supports( a(i), a(i+1), t)*, for all i, *1 <= i < h*. + +We call such a sequence *a(1)*, ... *a(h)* a **verification trace**. + +> The following formalizes that two light blocks of the same height +> should agree on the content of the header. Observe that *b* and *c* +> may disagree on the Commit. This is a special case if the canonical +> commit has not been decided on, that is, if b.Header.Height is the +> maximum height of all blocks decided upon by Tendermint at this +> moment. + +#### **[TMBC-SIGN-SKIP-MATCH.1]** + +Let *a*, *b*, *c*, be light blocks and *t* a time, we define +*sign-skip-match(a,b,c,t) = true* iff the following implication +evaluates to true: + +- *sequ-rooted(a)* and +- *b.Header.Height = c.Header.Height* and +- *skip-trace(a,b,t)* +- *skip-trace(a,c,t)* + +implies *b.Header = c.Header*. + +> Observe that *sign-skip-match* is defined via an implication. If it +> evaluates to false this means that the left-hand-side of the +> implication evaluates to true, and the right-hand-side evaluates to +> false. In particular, there are two **different** headers *b* and +> *c* that both can be verified from a common block *a* from the +> chain. Thus, the following describes an attack. + +#### **[TMBC-ATTACK.1]** + +If there exists three light blocks a, b, and c, with +*sign-skip-match(a,b,c,t) = false* then we have an *attack*. We say +we have **an attack at height** *b.Header.Height* and write +*attack(a,b,c,t)*. + +> The lightblock *a* need not be unique, that is, there may be +> several blocks that satisfy the above requirement for the same +> blocks *b* and *c*. + +[[TMBC-ATTACK.1]](#TMBC-ATTACK1) is a formalization of the violation +of the agreement property based on the result of consensus, that is, +the generated blocks. + +**Remark.** +Violation of agreement is only possible if more than 1/3 of the validators (or +next validators) of some previous block deviated from the protocol. The +upcoming "accountability" specification will describe how to compute +a set of at least 1/3 faulty nodes from two conflicting blocks. [] + +There are different ways to characterize forks +and attack scenarios. This specification uses the "node-based +characterization of attacks" which focuses on what kinds of nodes are +affected (light nodes vs. full nodes). 
For future reference and +discussion we also provide a +"block-based characterization of attacks" below. + +### Node-based characterization of attacks + +#### **[TMBC-MC-FORK.1]** + +We say there is a (main chain) fork at time *t* if + +- there are two correct full nodes *i* and *j* and +- *i* is different from *j* and +- *i* has decided on *b* and +- *j* has decided on *c* and +- there exist *a* such that *attack(a,b,c,t)*. + +#### **[TMBC-LC-ATTACK.1]** + +We say there is a light client attack at time *t*, if + +- there is **no** (main chain) fork [[TMBC-MC-FORK.1]](#TMBC-MC-FORK1), and +- there exist nodes that have computed light blocks *b* and *c* and +- there exist *a* such that *attack(a,b,c,t)*. + +We say the attack is at height *a.Header.Height*. + +> In this specification we consider detection of light client +> attacks. Intuitively, the case we consider is that +> light block *b* is the one from the +> blockchain, and some attacker has computed *c* and tries to wrongly +> convince +> the light client that *c* is the block from the chain. + +#### **[TMBC-LC-ATTACK-EVIDENCE.1]** + +We consider the following case of a light client attack +[[TMBC-LC-ATTACK.1]](#TMBC-LC-ATTACK1): + +- *attack(a,b,c,t)* +- there is a peer p1 that has a sequence *chain* of blocks from *a* to *b* +- *skip-trace(a,c,t)*: by [[TMBC-SKIP-TRACE.1]](#TMBC-SKIP-TRACE1) there is a + verification trace *v* of the form *a = v(1)*, ... *v(h) = c* + +Evidence for p1 (that proves an attack) consists for index i +of v(i) and v(i+1) such that + +- E1(i). v(i) is equal to the block of *chain* at height v(i).Height, and +- E2(i). v(i+1) that is different from the block of *chain* at + height v(i+1).height + +> Observe p1 can +> +> - check that v(i+1) differs from its block at that height, and +> - verify v(i+1) in one step from v(i) as v is a verification trace. + +**Proposition.** In the case of attack, evidence exists. +*Proof.* First observe that + +- (A). (NOT E2(i)) implies E1(i+1) + +Now by contradiction assume there is no evidence. Thus + +- for all i, we have NOT E1(i) or NOT E2(i) +- for i = 1 we have E1(1) and thus NOT E2(1) + thus by induction on i, by (A) we have for all i that **E1(i)** +- from attack we have E2(h-1), and as there is no evidence for + i = h - 1 we get **NOT E1(h-1)**. Contradiction. +QED. + +#### **[TMBC-LC-EVIDENCE-DATA.1]** + +To prove the attack to p1, because of Point E1, it is sufficient to +submit + +- v(i).Height (rather than v(i)). +- v(i+1) + +This information is *evidence for height v(i).Height*. + +### Block-based characterization of attacks + +In this section we provide a different characterization of attacks. It +is not defined on the nodes that are affected but purely on the +content of the blocks. In that sense these definitions are less +operational. + +> They might be relevant for a closer analysis of fork scenarios on the +> chain, which is out of the scope of this specification. + +#### **[TMBC-SIGN-UNIQUE.1]** + +Let *b* and *c* be light blocks, we define the predicate +*sign-unique(b,c)* to evaluate to true iff the following implication +evaluates to true: + +- *b.Header.Height = c.Header.Height* and +- *sequ-rooted(b)* and +- *sequ-rooted(c)* + +implies *b = c*. + +#### **[TMBC-BLOCKS-MCFORK.1]** + +If there exists two light blocks b and c, with *sign-unique(b,c) = +false* then we have a *fork*. + +> The difference of the above definition to +> [[TMBC-MC-FORK.1]](#TMBC-MC-FORK1) is subtle. 
The latter requires a +> full node being affected by a bad block while +> [[TMBC-BLOCKS-MCFORK.1]](#TMBC-BLOCKS-MCFORK1) just requires that a +> bad block exists, possibly in memory of an attacker. +> The following captures a light client fork. There is no fork up to +> the height of block b. However, c is of that height, is different, +> and passes skipping verification. It is a stricter property than +> [[TMBC-LC-ATTACK.1]](#TMBC-LC-ATTACK1), as +> [[TMBC-LC-ATTACK.1]](#TMBC-LC-ATTACK1) requires that no correct full +> node is affected. + +#### **[TMBC-BLOCKS-LCFORK.1]** + +Let *a*, *b*, *c*, be light blocks and *t* a time. We define +*light-client-fork(a,b,c,t)* iff + +- *sign-skip-match(a,b,c,t) = false* and +- *sequ-rooted(b)* and +- *b* is "unique", that is, for all *d*, *sequ-rooted(d)* and + *d.Header.Height = b.Header.Height* implies *d = b* + +> Finally, let us also define bogus blocks that have no support. +> Observe that bogus is even defined if there is a fork. +> Also, for the definition it would be sufficient to restrict *a* to +> *a.height < b.height* (which is implied by the definitions which +> unfold until *supports()*). + +#### **[TMBC-BOGUS.1]** + +Let *b* be a light block and *t* a time. We define *bogus(b,t)* iff + +- *sequ-rooted(b) = false* and +- for all *a*, *sequ-rooted(a)* implies *skip-trace(a,b,t) = false* + +### Informal Problem statement + +There is no sequential specification: the detector only makes sense +in a distributed systems where some nodes misbehave. + +We work under the assumption that full nodes and validators are +responsible for detecting attacks on the main chain, and the evidence +reactor takes care of broadcasting evidence to communicate +misbehaving nodes via ABCI to the application, and halt the chain in +case of a fork. The point of this specification is to shield a light +clients against attacks that cannot be detected by full nodes, and +are fully addressed at light clients (and consequently IBC relayers, +which use the light client protocols to observe the state of a +blockchain). In order to provide full nodes the incentive to follow +the protocols when communicating with the light client, this +specification also considers the generation of evidence that will +also be processed by the Tendermint blockchain. + +#### **[LCD-IP-MODEL.1]** + +The detector is designed under the assumption that + +- [[TMBC-FM-2THIRDS]](TMBC-FM-2THIRDS-link) may be violated +- there is no fork on the main chain. + +> As a result some faulty full nodes may launch an attack on a light +> client. + +The following requirements are operational in that they describe how +things should be done, rather than what should be done. However, they +do not constitute temporal logic verification conditions. For those, +see [LCD-DIST-*] below. 
+ +The detector is called in the [supervisor](supervisor) as follows + +```go +Evidences := AttackDetector(root_of_trust, verifiedLS);` +``` + +where + +- `root-of-trust` is a light block that is trusted (that is, +except upon initialization, the primary and the secondaries +agreed on in the past), and +- `verifiedLS` is a lightstore that contains a verification trace that + starts from a lightblock that can be verified with the + `root-of-trust` in one step and ends with a lightblock of the height + requested by the user +- `Evidences` is a list of evidences for misbehavior + +#### **[LCD-IP-STATEMENT.1]** + +Whenever AttackDetector is called, the detector should for each +secondary try to replay the verification trace `verifiedLS` with the +secondary + +- in case replaying leads to detection of a light client attack + (one of the lightblocks differ from the one in verifiedLS with + the same height), we should return evidence +- if the secondary cannot provide a verification trace, we have no + proof for an attack. Block *b* may be bogus. In this case the + secondary is faulty and it should be replaced. + +## Assumptions/Incentives/Environment + +It is not in the interest of faulty full nodes to talk to the +detector as long as the detector is connected to at least one +correct full node. This would only increase the likelihood of +misbehavior being detected. Also we cannot punish them easily +(cheaply). The absence of a response need not be the fault of the full +node. + +Correct full nodes have the incentive to respond, because the +detector may help them to understand whether their header is a good +one. We can thus base liveness arguments of the detector on +the assumptions that correct full nodes reliably talk to the +detector. + +### Assumptions + +#### **[LCD-A-CorrFull.1]** + +At all times there is at least one correct full +node among the primary and the secondaries. + +> For this version of the detection we take this assumption. It +> allows us to establish the invariant that the lightblock +> `root-of-trust` is always the one from the blockchain, and we can +> use it as starting point for the evidence computation. Moreover, it +> allows us to establish the invariant at the supervisor that any +> lightblock in the (top-level) lightstore is from the blockchain. +> In the future we might design a lightclient based on the assumption +> that at least in regular intervals the lightclient is connected to a +> correct full node. This will require the detector to reconsider +> `root-of-trust`, and remove lightblocks from the top-level +> lightstore. + +#### **[LCD-A-RelComm.1]** + +Communication between the detector and a correct full node is +reliable and bounded in time. Reliable communication means that +messages are not lost, not duplicated, and eventually delivered. There +is a (known) end-to-end delay *Delta*, such that if a message is sent +at time *t* then it is received and processed by time *t + Delta*. +This implies that we need a timeout of at least *2 Delta* for remote +procedure calls to ensure that the response of a correct peer arrives +before the timeout expires. 
+ +## Definitions + +### Evidence + +Following the definition of +[[TMBC-LC-ATTACK-EVIDENCE.1]](#TMBC-LC-ATTACK-EVIDENCE1), by evidence +we refer to a variable of the following type + +#### **[LC-DATA-EVIDENCE.1]** + +```go +type LightClientAttackEvidence struct { + ConflictingBlock LightBlock + CommonHeight int64 +} +``` + +As the above data is computed for a specific peer, the following +data structure wraps the evidence and adds the peerID. + +#### **[LC-DATA-EVIDENCE-INT.1]** + +```go +type InternalEvidence struct { + Evidence LightClientAttackEvidence + Peer PeerID +} +``` + +#### **[LC-SUMBIT-EVIDENCE.1]** + +```go +func submitEvidence(Evidences []InternalEvidence) +``` + +- Expected postcondition + - for each `ev` in `Evidences`: submit `ev.Evidence` to `ev.Peer` + +--- + +### LightStore + +Lightblocks and LightStores are defined in the verification +specification [LCV-DATA-LIGHTBLOCK.1] and [LCV-DATA-LIGHTSTORE.1]. See +the [verification specification][verification] for details. + +## (Distributed) Problem statement + +> As the attack detector is there to reduce the impact of faulty +> nodes, and faulty nodes imply that there is a distributed system, +> there is no sequential specification to which this distributed +> problem statement may refer to. + +The detector gets as input a trusted lightblock called *root* and an +auxiliary lightstore called *primary_trace* with lightblocks that have +been verified before, and that were provided by the primary. + +#### **[LCD-DIST-INV-ATTACK.1]** + +If the detector returns evidence for height *h* +[[TMBC-LC-EVIDENCE-DATA.1]](#TMBC-LC-EVIDENCE-DATA1), then there is an +attack at height *h*. [[TMBC-LC-ATTACK.1]](#TMBC-LC-ATTACK1) + +#### **[LCD-DIST-INV-STORE.1]** + +If the detector does not return evidence, then *primary_trace* +contains only blocks from the blockchain. + +#### **[LCD-DIST-LIVE.1]** + +The detector eventually terminates. + +#### **[LCD-DIST-TERM-NORMAL.1]** + +If + +- the *primary_trace* contains only blocks from the blockchain, and +- there is no attack, and +- *Secondaries* is always non-empty, and +- the age of *root* is always less than the trusting period, + +then the detector does not return evidence. + +#### **[LCD-DIST-TERM-ATTACK.1]** + +If + +- there is an attack, and +- a secondary reports a block that conflicts + with one of the blocks in *primary_trace*, and +- *Secondaries* is always non-empty, and +- the age of *root* is always less than the trusting period, + +then the detector returns evidence. + +> Observe that above we require that "a secondary reports a block that +> conflicts". If there is an attack, but no secondary tries to launch +> it against the detector (or the message from the secondary is lost +> by the network), then there is nothing to detect for us. + +#### **[LCD-DIST-SAFE-SECONDARY.1]** + +No correct secondary is ever replaced. + +#### **[LCD-DIST-SAFE-BOGUS.1]** + +If + +- a secondary reports a bogus lightblock, +- the age of *root* is always less than the trusting period, + +then the secondary is replaced before the detector terminates. + +> The above property is quite operational ("reports"), but it captures +> quite closely the requirement. As the +> detector only makes sense in a distributed setting, and does +> not have a sequential specification, less "pure" +> specification are acceptable. 
+ +# Protocol + +## Functions and Data defined in other Specifications + +### From the supervisor + +```go +Replace_Secondary(addr Address, root-of-trust LightBlock) +``` + +### From the verifier + +```go +func VerifyToTarget(primary PeerID, root LightBlock, + targetHeight Height) (LightStore, Result) +``` + +> Note: the above differs from the current version in the second +> parameter. verification will be revised. + +Observe that `VerifyToTarget` does communication with the secondaries +via the function [FetchLightBlock](fetch). + +### Shared data of the light client + +- a pool of full nodes *FullNodes* that have not been contacted before +- peer set called *Secondaries* +- primary + +> Note that the lightStore is not needed to be shared. + +## Outline + +The problem laid out is solved by calling the function `AttackDetector` +with a lightstore that contains a light block that has just been +verified by the verifier. + +Then `AttackDetector` downloads headers from the secondaries. In case +a conflicting header is downloaded from a secondary, +`CreateEvidenceForPeer` which computes evidence in the case that +indeed an attack is confirmed. It could be that the secondary reports +a bogus block, which means that there need not be an attack, and the +secondary is replaced. + +## Details of the functions + +#### **[LCD-FUNC-DETECTOR.1]:** + +```go +func AttackDetector(root LightBlock, primary_trace []LightBlock) + ([]InternalEvidence) { + + Evidences := new []InternalEvidence; + + for each secondary in Secondaries { + // we replay the primary trace with the secondary, in + // order to generate evidence that we can submit to the + // secodary. We return the evidence + the trace the + // secondary told us that spans the evidence at its local store + + EvidenceForSecondary, newroot, secondary_trace, result := + CreateEvidenceForPeer(secondary, + root, + primary_trace); + if result == FaultyPeer { + Replace_Secondary(root); + } + else if result == FoundEvidence { + // the conflict is not bogus + Evidences.Add(EvidenceForSecondary); + // we replay the secondary trace with the primary, ... + EvidenceForPrimary, _, result := + CreateEvidenceForPeer(primary, + newroot, + secondary_trace); + if result == FoundEvidence { + Evidences.Add(EvidenceForPrimary); + } + // At this point we do not care about the other error + // codes. We already have generated evidence for an + // attack and need to stop the lightclient. It does not + // help to call replace_primary. Also we will use the + // same primary to check with other secondaries in + // later iterations of the loop + } + // In the case where the secondary reports NoEvidence + // we do nothing + } + return Evidences; +} +``` + +- Expected precondition + - root and primary trace are a verification trace +- Expected postcondition + - solves the problem statement (if attack found, then evidence is reported) +- Error condition + - `ErrorTrustExpired`: fails if root expires (outside trusting + period) [[LCV-INV-TP.1]](LCV-INV-TP1-link) + - `ErrorNoPeers`: if no peers are left to replace secondaries, and + no evidence was found before that happened + +--- + +```go +func CreateEvidenceForPeer(peer PeerID, root LightBlock, trace LightStore) + (Evidence, LightBlock, LightStore, result) { + + common := root; + + for i in 1 .. 
len(trace) { + auxLS, result := VerifyToTarget(peer, common, trace[i].Header.Height) + + if result != ResultSuccess { + // something went wrong; peer did not provide a verifyable block + return (nil, nil, nil, FaultyPeer) + } + else { + if auxLS.LatestVerified().Header != trace[i].Header { + // the header reported by the peer differs from the + // reference header in trace but both could be + // verified from common in one step. + // we can create evidence for submission to the secondary + ev := new InternalEvidence; + ev.Evidence.ConflictingBlock := trace[i]; + ev.Evidence.CommonHeight := common.Height; + ev.Peer := peer + return (ev, common, auxLS, FoundEvidence) + } + else { + // the peer agrees with the trace, we move common forward + // we could delete auxLS as it will be overwritten in + // the next iteration + common := trace[i] + } + } + } + return (nil, nil, nil, NoEvidence) +} +``` + +- Expected precondition + - root and trace are a verification trace +- Expected postcondition + - finds evidence where trace and peer diverge +- Error condition + - `ErrorTrustExpired`: fails if root expires (outside trusting + period) [[LCV-INV-TP.1]](LCV-INV-TP1-link) + - If `VerifyToTarget` returns error but root is not expired then return + `FaultyPeer` + +--- + +## Correctness arguments + +#### Argument for [[LCD-DIST-INV-ATTACK.1]](#LCD-DIST-INV-ATTACK1) + +Under the assumption that root and trace are a verification trace, +when in `CreateEvidenceForPeer` the detector the detector creates +evidence, then the lightclient has seen two different headers (one via +`trace` and one via `VerifyToTarget` for the same height that can both +be verified in one step. + +#### Argument for [[LCD-DIST-INV-STORE.1]](#LCD-DIST-INV-STORE1) + +We assume that there is at least one correct peer, and there is no +fork. As a result the correct peer has the correct sequence of +blocks. Since the primary_trace is checked block-by-block also against +each secondary, and at no point evidence was generated that means at +no point there were conflicting blocks. + +#### Argument for [[LCD-DIST-LIVE.1]](#LCD-DIST-LIVE1) + +At the latest when [[LCV-INV-TP.1]](LCV-INV-TP1-link) is violated, +`AttackDetector` terminates. + +#### Argument for [[LCD-DIST-TERM-NORMAL.1]](#LCD-DIST-TERM-NORMAL1) + +As there are finitely many peers, eventually the main loop +terminates. As there is no attack no evidence can be generated. + +#### Argument for [[LCD-DIST-TERM-ATTACK.1]](#LCD-DIST-TERM-ATTACK1) + +Argument similar to [[LCD-DIST-TERM-NORMAL.1]](#LCD-DIST-TERM-NORMAL1) + +#### Argument for [[LCD-DIST-SAFE-SECONDARY.1]](#LCD-DIST-SAFE-SECONDARY1) + +Secondaries are only replaced if they time-out or if they report bogus +blocks. The former is ruled out by the timing assumption, the latter +by correct peers only reporting blocks from the chain. + +#### Argument for [[LCD-DIST-SAFE-BOGUS.1]](#LCD-DIST-SAFE-BOGUS1) + +Once a bogus block is recognized as such the secondary is removed. + +# References + +> links to other specifications/ADRs this document refers to + +[[verification]] The specification of the light client verification. + +[[supervisor]] The specification of the light client supervisor. 
+ +[verification]: https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification.md + +[supervisor]: https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/supervisor/supervisor.md + +[block]: https://github.com/tendermint/spec/blob/d46cd7f573a2c6a2399fcab2cde981330aa63f37/spec/core/data_structures.md + +[TMBC-FM-2THIRDS-link]: https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification.md#tmbc-fm-2thirds1 + +[TMBC-SOUND-DISTR-POSS-COMMIT-link]: https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification.md#tmbc-sound-distr-poss-commit1 + +[LCV-SEQ-SAFE-link]:https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification.md#lcv-seq-safe1 + +[TMBC-VAL-CONTAINS-CORR-link]: +https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification.md#tmbc-val-contains-corr1 + +[fetch]: +https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification.md#lcv-func-fetch1 + +[LCV-INV-TP1-link]: +https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification.md#lcv-inv-tp1 diff --git a/spec/light-client/detection/detection_003_reviewed.md b/spec/light-client/detection/detection_003_reviewed.md new file mode 100644 index 0000000000..ae00cba065 --- /dev/null +++ b/spec/light-client/detection/detection_003_reviewed.md @@ -0,0 +1,841 @@ + + +# Light Client Attack Detector + +In this specification, we strengthen the light client to be resistant +against so-called light client attacks. In a light client attack, all +the correct Tendermint full nodes agree on the sequence of generated +blocks (no fork), but a set of faulty full nodes attack a light client +by generating (signing) a block that deviates from the block of the +same height on the blockchain. In order to do so, some of these faulty +full nodes must have been validators before and violate the assumption +of more than two thirds of "correct voting power" +[[TMBC-FM-2THIRDS]][TMBC-FM-2THIRDS-link], as otherwise, if +[[TMBC-FM-2THIRDS]][TMBC-FM-2THIRDS-link] would hold, +[verification][verification] would satisfy +[[LCV-SEQ-SAFE.1]][LCV-SEQ-SAFE-link]. + +An attack detector (or detector for short) is a mechanism that is used +by the light client [supervisor][supervisor] after +[verification][verification] of a new light block +with the primary, to cross-check the newly learned light block with +other peers (secondaries). It expects as input a light block with some +height *root* (that serves as a root of trust), and a verification +trace (a sequence of lightblocks) that the primary provided. + +In case the detector observes a light client attack, it computes +evidence data that can be used by Tendermint full nodes to isolate a +set of faulty full nodes that are still within the unbonding period +(more than 1/3 of the voting power of the validator set at some block +of the chain), and report them via ABCI (application/blockchain +interface) +to the application of a +Tendermint blockchain in order to punish faulty nodes. + +## Context of this document + +The light client [verification][verification] specification is +designed for the Tendermint failure model (1/3 assumption) +[[TMBC-FM-2THIRDS]][TMBC-FM-2THIRDS-link]. It is safe under this +assumption, and live if it can reliably (that is, no message loss, no +duplication, and eventually delivered) and timely communicate with a +correct full node. 
If [[TMBC-FM-2THIRDS]][TMBC-FM-2THIRDS-link] +assumption is violated, the light client can be fooled to trust a +light block that was not generated by Tendermint consensus. + +This specification, the attack detector, is a "second line of +defense", in case the 1/3 assumption is violated. Its goal is to +detect a light client attack (conflicting light blocks) and collect +evidence. However, it is impractical to probe all full nodes. At this +time we consider a simple scheme of maintaining an address book of +known full nodes from which a small subset (e.g., 4) are chosen +initially to communicate with. More involved book keeping with +probabilistic guarantees can be considered at later stages of the +project. + +The light client maintains a simple address book containing addresses +of full nodes that it can pick as primary and secondaries. To obtain +a new light block, the light client first does +[verification][verification] with the primary, and then cross-checks +the light block (and the trace of light blocks that led to it) with +the secondaries using this specification. + +# Outline + +- [Part I](#part-i---Tendermint-Consensus-and-Light-Client-Attacks): + Formal definitions of lightclient attacks, based on basic + properties of Tendermint consensus. + - [Node-based characterization of + attacks](#Node-based-characterization-of-attacks). The + definition of attacks used in the problem statement of + this specification. + + - [Block-based characterization of attacks](#Block-based-characterization-of-attacks). Alternative definitions + provided for future reference. + +- [Part II](#part-ii---problem-statement): Problem statement of + lightclient attack detection + + - [Informal Problem Statement](#informal-problem-statement) + - [Assumptions](#Assumptions) + - [Definitions](#definitions) + - [Distributed Problem statement](#Distributed-Problem-statement) + +- [Part III](#part-iii---protocol): The protocol + + - [Functions and Data defined in other Specifications](#Functions-and-Data-defined-in-other-Specifications) + - [Outline of Solution](#Outline-of-solution) + - [Details of the functions](#Details-of-the-functions) + - [Correctness arguments](#Correctness-arguments) + +# Part I - Tendermint Consensus and Light Client Attacks + +In this section we will give some mathematical definitions of what we +mean by light client attacks (that are considered in this +specification) and how they differ from main-chain forks. To this end, +we start by defining some properties of the sequence of blocks that is +decided upon by Tendermint consensus in normal operation (if the +Tendermint failure model holds +[[TMBC-FM-2THIRDS]][TMBC-FM-2THIRDS-link]), +and then define different +deviations that correspond to attack scenarios. We consider the notion +of [light blocks][LCV-LB-link] and [headers][LVC-HD-link]. + +#### **[TMBC-GENESIS.1]** + +Let *Genesis* be the agreed-upon initial block (file). + +#### **[TMBC-FUNC-SIGN.1]** + +Let *b* and *c* be two light blocks with *b.Header.Height + 1 = +c.Header.Height*. We define the predicate **signs(b,c)** to hold +iff *c.Header.LastCommit* is in *PossibleCommit(b)*. +[[TMBC-SOUND-DISTR-POSS-COMMIT.1]][TMBC-SOUND-DISTR-POSS-COMMIT-link]. + +> The above encodes sequential verification, that is, intuitively, +> b.Header.NextValidators = c.Header.Validators and 2/3 of +> these Validators signed c. + +#### **[TMBC-FUNC-SUPPORT.1]** + +Let *b* and *c* be two light blocks. 
We define the predicate +**supports(b,c,t)** to hold iff + +- *t - trustingPeriod < b.Header.Time < t* +- the voting power in *b.NextValidators* of nodes in *c.Commit* + is more than 1/3 of *TotalVotingPower(b.Header.NextValidators)* + +> That is, if the [Tendermint failure model][TMBC-FM-2THIRDS-link] +> holds, then *c* has been signed by at least one correct full node, cf. +> [[TMBC-VAL-CONTAINS-CORR.1]][TMBC-VAL-CONTAINS-CORR-link]. +> The following formalizes that *b* was properly generated by +> Tendermint; *b* can be traced back to genesis. + +#### **[TMBC-SEQ-ROOTED.1]** + +Let *b* be a light block. +We define *sequ-rooted(b)* iff for all *i*, *1 <= i < h = b.Header.Height*, +there exist light blocks *a(i)* s.t. + +- *a(1) = Genesis* and +- *a(h) = b* and +- *signs( a(i) , a(i+1) )*. + +> The following formalizes that *c* is trusted based on *b* in +> skipping verification. Observe that we do not require here (yet) +> that *b* was properly generated. + +#### **[TMBC-SKIP-TRACE.1]** + +Let *b* and *c* be light blocks. We define *skip-trace(b,c,t)* if at +time t there exists an integer *h* and a sequence *a(1)*, ... *a(h)* s.t. + +- *a(1) = b* and +- *a(h) = c* and +- *supports( a(i), a(i+1), t)*, for all i, *1 <= i < h*. + +We call such a sequence *a(1)*, ... *a(h)* a **verification trace**. + +> The following formalizes that two light blocks of the same height +> should agree on the content of the header. Observe that *b* and *c* +> may disagree on the Commit. This is a special case if the canonical +> commit has not been decided on yet, that is, if b.Header.Height is the +> maximum height of all blocks decided upon by Tendermint at this +> moment. + +#### **[TMBC-SIGN-SKIP-MATCH.1]** + +Let *a*, *b*, *c*, be light blocks and *t* a time, we define +*sign-skip-match(a,b,c,t) = true* iff the following implication +evaluates to true: + +- *sequ-rooted(a)* and +- *b.Header.Height = c.Header.Height* and +- *skip-trace(a,b,t)* +- *skip-trace(a,c,t)* + +implies *b.Header = c.Header*. + +> Observe that *sign-skip-match* is defined via an implication. If it +> evaluates to false this means that the left-hand-side of the +> implication evaluates to true, and the right-hand-side evaluates to +> false. In particular, there are two **different** headers *b* and +> *c* that both can be verified from a common block *a* from the +> chain. Thus, the following describes an attack. + +#### **[TMBC-ATTACK.1]** + +If there exists three light blocks a, b, and c, with +*sign-skip-match(a,b,c,t) = false* then we have an *attack*. We say +we have **an attack at height** *b.Header.Height* and write +*attack(a,b,c,t)*. + +> The lightblock *a* need not be unique, that is, there may be +> several blocks that satisfy the above requirement for the same +> blocks *b* and *c*. + +[[TMBC-ATTACK.1]](#TMBC-ATTACK1) is a formalization of the violation +of the agreement property based on the result of consensus, that is, +the generated blocks. + +**Remark.** +Violation of agreement is only possible if more than 1/3 of the validators (or +next validators) of some previous block deviated from the protocol. The +upcoming "accountability" specification will describe how to compute +a set of at least 1/3 faulty nodes from two conflicting blocks. [] + +There are different ways to characterize forks +and attack scenarios. This specification uses the "node-based +characterization of attacks" which focuses on what kinds of nodes are +affected (light nodes vs. full nodes). 
For future reference and +discussion we also provide a +"block-based characterization of attacks" below. + +## Node-based characterization of attacks + +#### **[TMBC-MC-FORK.1]** + +We say there is a (main chain) fork at time *t* if + +- there are two correct full nodes *i* and *j* and +- *i* is different from *j* and +- *i* has decided on *b* and +- *j* has decided on *c* and +- there exist *a* such that *attack(a,b,c,t)*. + +#### **[TMBC-LC-ATTACK.1]** + +We say there is a light client attack at time *t*, if + +- there is **no** (main chain) fork [[TMBC-MC-FORK.1]](#TMBC-MC-FORK1), and +- there exist nodes that have computed light blocks *b* and *c* and +- there exist *a* such that *attack(a,b,c,t)*. + +We say the attack is at height *a.Header.Height*. + +> In this specification we consider detection of light client +> attacks. Intuitively, the case we consider is that +> light block *b* is the one from the +> blockchain, and some attacker has computed *c* and tries to wrongly +> convince +> the light client that *c* is the block from the chain. + +#### **[TMBC-LC-ATTACK-EVIDENCE.1]** + +We consider the following case of a light client attack +[[TMBC-LC-ATTACK.1]](#TMBC-LC-ATTACK1): + +- *attack(a,b,c,t)* +- there is a peer p1 that has a sequence *chain* of blocks from *a* to *b* +- *skip-trace(a,c,t)*: by [[TMBC-SKIP-TRACE.1]](#TMBC-SKIP-TRACE1) there is a + verification trace *v* of the form *a = v(1)*, ... *v(h) = c* + +Evidence for p1 (that proves an attack to p1) consists for index i +of v(i) and v(i+1) such that + +- E1(i). v(i) is equal to the block of *chain* at height v(i).Height, and +- E2(i). v(i+1) that is different from the block of *chain* at + height v(i+1).height + +> Observe p1 can +> +> - check that v(i+1) differs from its block at that height, and +> - verify v(i+1) in one step from v(i) as v is a verification trace. + +#### **[TMBC-LC-EVIDENCE-DATA.1]** + +To prove the attack to p1, because of Point E1, it is sufficient to +submit + +- v(i).Height (rather than v(i)). +- v(i+1) + +This information is *evidence for height v(i).Height*. + +## Block-based characterization of attacks + +In this section we provide a different characterization of attacks. It +is not defined on the nodes that are affected but purely on the +content of the blocks. In that sense these definitions are less +operational. + +> They might be relevant for a closer analysis of fork scenarios on the +> chain, which is out of the scope of this specification. + +#### **[TMBC-SIGN-UNIQUE.1]** + +Let *b* and *c* be light blocks, we define the predicate +*sign-unique(b,c)* to evaluate to true iff the following implication +evaluates to true: + +- *b.Header.Height = c.Header.Height* and +- *sequ-rooted(b)* and +- *sequ-rooted(c)* + +implies *b = c*. + +#### **[TMBC-BLOCKS-MCFORK.1]** + +If there exists two light blocks b and c, with *sign-unique(b,c) = +false* then we have a *fork*. + +> The difference of the above definition to +> [[TMBC-MC-FORK.1]](#TMBC-MC-FORK1) is subtle. The latter requires a +> full node being affected by a bad block while +> [[TMBC-BLOCKS-MCFORK.1]](#TMBC-BLOCKS-MCFORK1) just requires that a +> bad block exists, possibly in memory of an attacker. +> The following captures a light client fork. There is no fork up to +> the height of block b. However, c is of that height, is different, +> and passes skipping verification. 
It is a stricter property than +> [[TMBC-LC-ATTACK.1]](#TMBC-LC-ATTACK1), as +> [[TMBC-LC-ATTACK.1]](#TMBC-LC-ATTACK1) requires that no correct full +> node is affected. + +#### **[TMBC-BLOCKS-LCFORK.1]** + +Let *a*, *b*, *c*, be light blocks and *t* a time. We define +*light-client-fork(a,b,c,t)* iff + +- *sign-skip-match(a,b,c,t) = false* and +- *sequ-rooted(b)* and +- *b* is "unique", that is, for all *d*, *sequ-rooted(d)* and + *d.Header.Height = b.Header.Height* implies *d = b* + +> Finally, let us also define bogus blocks that have no support. +> Observe that bogus is even defined if there is a fork. +> Also, for the definition it would be sufficient to restrict *a* to +> *a.height < b.height* (which is implied by the definitions which +> unfold until *supports()*). + +#### **[TMBC-BOGUS.1]** + +Let *b* be a light block and *t* a time. We define *bogus(b,t)* iff + +- *sequ-rooted(b) = false* and +- for all *a*, *sequ-rooted(a)* implies *skip-trace(a,b,t) = false* + +# Part II - Problem Statement + +## Informal Problem statement + +There is no sequential specification: the detector only makes sense +in a distributed systems where some nodes misbehave. + +We work under the assumption that full nodes and validators are +responsible for detecting attacks on the main chain, and the evidence +reactor takes care of broadcasting evidence to communicate +misbehaving nodes via ABCI to the application, and halt the chain in +case of a fork. The point of this specification is to shield a light +clients against attacks that cannot be detected by full nodes, and +are fully addressed at light clients (and consequently IBC relayers, +which use the light client protocols to observe the state of a +blockchain). In order to provide full nodes the incentive to follow +the protocols when communicating with the light client, this +specification also considers the generation of evidence that will +also be processed by the Tendermint blockchain. + +#### **[LCD-IP-MODEL.1]** + +The detector is designed under the assumption that + +- [[TMBC-FM-2THIRDS]][TMBC-FM-2THIRDS-link] may be violated +- there is no fork on the main chain. + +> As a result some faulty full nodes may launch an attack on a light +> client. + +The following requirements are operational in that they describe how +things should be done, rather than what should be done. However, they +do not constitute temporal logic verification conditions. For those, +see [LCD-DIST-*] below. + +The detector is called in the [supervisor][supervisor] as follows + +```go +Evidences := AttackDetector(root_of_trust, verifiedLS);` +``` + +where + +- `root-of-trust` is a light block that is trusted (that is, +except upon initialization, the primary and the secondaries +agreed on in the past), and +- `verifiedLS` is a lightstore that contains a verification trace that + starts from a lightblock that can be verified with the + `root-of-trust` in one step and ends with a lightblock of the height + requested by the user +- `Evidences` is a list of evidences for misbehavior + +#### **[LCD-IP-STATEMENT.1]** + +Whenever AttackDetector is called, the detector should for each +secondary cross check the largest header in verifiedLS with the +corresponding header of the same height provided by the secondary. 
If +there is a deviation, the detector should +try to replay the verification trace `verifiedLS` with the +secondary + +- in case replaying leads to detection of a light client attack + (one of the lightblocks differ from the one in verifiedLS with + the same height), we should return evidence +- if the secondary cannot provide a verification trace, we have no + proof for an attack. Block *b* may be bogus. In this case the + secondary is faulty and it should be replaced. + +## Assumptions + +It is not in the interest of faulty full nodes to talk to the +detector as long as the detector is connected to at least one +correct full node. This would only increase the likelihood of +misbehavior being detected. Also we cannot punish them easily +(cheaply). The absence of a response need not be the fault of the full +node. + +Correct full nodes have the incentive to respond, because the +detector may help them to understand whether their header is a good +one. We can thus base liveness arguments of the detector on +the assumptions that correct full nodes reliably talk to the +detector. + +#### **[LCD-A-CorrFull.1]** + +At all times there is at least one correct full +node among the primary and the secondaries. + +> For this version of the detection we take this assumption. It +> allows us to establish the invariant that the lightblock +> `root-of-trust` is always the one from the blockchain, and we can +> use it as starting point for the evidence computation. Moreover, it +> allows us to establish the invariant at the supervisor that any +> lightblock in the (top-level) lightstore is from the blockchain. +> In the future we might design a lightclient based on the assumption +> that at least in regular intervals the lightclient is connected to a +> correct full node. This will require the detector to reconsider +> `root-of-trust`, and remove lightblocks from the top-level +> lightstore. + +#### **[LCD-A-RelComm.1]** + +Communication between the detector and a correct full node is +reliable and bounded in time. Reliable communication means that +messages are not lost, not duplicated, and eventually delivered. There +is a (known) end-to-end delay *Delta*, such that if a message is sent +at time *t* then it is received and processed by time *t + Delta*. +This implies that we need a timeout of at least *2 Delta* for remote +procedure calls to ensure that the response of a correct peer arrives +before the timeout expires. + +## Definitions + +### Evidence + +Following the definition of +[[TMBC-LC-ATTACK-EVIDENCE.1]](#TMBC-LC-ATTACK-EVIDENCE1), by evidence +we refer to a variable of the following type + +#### **[LC-DATA-EVIDENCE.1]** + +```go +type LightClientAttackEvidence struct { + ConflictingBlock LightBlock + CommonHeight int64 + + // Evidence also includes application specific data which is not + // part of verification but is sent to the application once the + // evidence gets committed on chain. +} +``` + +As the above data is computed for a specific peer, the following +data structure wraps the evidence and adds the peerID. 
+ +#### **[LC-DATA-EVIDENCE-INT.1]** + +```go +type InternalEvidence struct { + Evidence LightClientAttackEvidence + Peer PeerID +} +``` + +#### **[LC-SUMBIT-EVIDENCE.1]** + +```go +func submitEvidence(Evidences []InternalEvidence) +``` + +- Expected postcondition + - for each `ev` in `Evidences`: submit `ev.Evidence` to `ev.Peer` + +--- + +### LightStore + +Lightblocks and LightStores are defined in the verification +specification [[LCV-DATA-LIGHTBLOCK.1]][LCV-LB-link] +and [[LCV-DATA-LIGHTSTORE.2]][LCV-LS-link]. See +the [verification specification][verification] for details. + +## Distributed Problem statement + +> As the attack detector is there to reduce the impact of faulty +> nodes, and faulty nodes imply that there is a distributed system, +> there is no sequential specification to which this distributed +> problem statement may refer to. + +The detector gets as input a trusted lightblock called *root* and an +auxiliary lightstore called *primary_trace* with lightblocks that have +been verified before, and that were provided by the primary. + +#### **[LCD-DIST-INV-ATTACK.1]** + +If the detector returns evidence for height *h* +[[TMBC-LC-EVIDENCE-DATA.1]](#TMBC-LC-EVIDENCE-DATA1), then there is an +attack at height *h*. [[TMBC-LC-ATTACK.1]](#TMBC-LC-ATTACK1) + +#### **[LCD-DIST-INV-STORE.1]** + +If the detector does not return evidence, then *primary_trace* +contains only blocks from the blockchain. + +#### **[LCD-DIST-LIVE.1]** + +The detector eventually terminates. + +#### **[LCD-DIST-TERM-NORMAL.1]** + +If + +- the *primary_trace* contains only blocks from the blockchain, and +- there is no attack, and +- *Secondaries* is always non-empty, and +- the age of *root* is always less than the trusting period, + +then the detector does not return evidence. + +#### **[LCD-DIST-TERM-ATTACK.1]** + +If + +- there is an attack, and +- a secondary reports a block that conflicts + with one of the blocks in *primary_trace*, and +- *Secondaries* is always non-empty, and +- the age of *root* is always less than the trusting period, + +then the detector returns evidence. + +> Observe that above we require that "a secondary reports a block that +> conflicts". If there is an attack, but no secondary tries to launch +> it against the detector (or the message from the secondary is lost +> by the network), then there is nothing to detect for us. + +#### **[LCD-DIST-SAFE-SECONDARY.1]** + +No correct secondary is ever replaced. + +#### **[LCD-DIST-SAFE-BOGUS.1]** + +If + +- a secondary reports a bogus lightblock, +- the age of *root* is always less than the trusting period, + +then the secondary is replaced before the detector terminates. + +> The above property is quite operational (e.g., the usage of +> "reports"), but it captures closely the requirement. As the +> detector only makes sense in a distributed setting, and does not +> have a sequential specification, a less "pure" specification are +> acceptable. + +# Part III - Protocol + +## Functions and Data defined in other Specifications + +### From the [supervisor][supervisor] + +[[LC-FUNC-REPLACE-SECONDARY.1]][repl] + +```go +Replace_Secondary(addr Address, root-of-trust LightBlock) +``` + +### From the [verifier][verification] + +[[LCV-FUNC-MAIN.2]][vtt] + +```go +func VerifyToTarget(primary PeerID, root LightBlock, + targetHeight Height) (LightStore, Result) +``` + +Observe that `VerifyToTarget` does communication with the secondaries +via the function [FetchLightBlock][fetch]. 
+ +### Shared data of the light client + +- a pool of full nodes *FullNodes* that have not been contacted before +- peer set called *Secondaries* +- primary + +> Note that the lightStore is not needed to be shared. + +## Outline of solution + +The problem laid out is solved by calling the function `AttackDetector` +with a lightstore that contains a light block that has just been +verified by the verifier. + +Then `AttackDetector` downloads headers from the secondaries. In case +a conflicting header is downloaded from a secondary, it calls +`CreateEvidenceForPeer` which computes evidence in the case that +indeed an attack is confirmed. It could be that the secondary reports +a bogus block, which means that there need not be an attack, and the +secondary is replaced. + +## Details of the functions + +#### **[LCD-FUNC-DETECTOR.2]:** + +```go +func AttackDetector(root LightBlock, primary_trace []LightBlock) + ([]InternalEvidence) { + + Evidences := new []InternalEvidence; + + for each secondary in Secondaries { + lb, result := FetchLightBlock(secondary,primary_trace.Latest().Header.Height); + if result != ResultSuccess { + Replace_Secondary(root); + } + else if lb.Header != primary_trace.Latest().Header { + + // we replay the primary trace with the secondary, in + // order to generate evidence that we can submit to the + // secondary. We return the evidence + the trace the + // secondary told us that spans the evidence at its local store + + EvidenceForSecondary, newroot, secondary_trace, result := + CreateEvidenceForPeer(secondary, + root, + primary_trace); + if result == FaultyPeer { + Replace_Secondary(root); + } + else if result == FoundEvidence { + // the conflict is not bogus + Evidences.Add(EvidenceForSecondary); + // we replay the secondary trace with the primary, ... + EvidenceForPrimary, _, result := + CreateEvidenceForPeer(primary, + newroot, + secondary_trace); + if result == FoundEvidence { + Evidences.Add(EvidenceForPrimary); + } + // At this point we do not care about the other error + // codes. We already have generated evidence for an + // attack and need to stop the lightclient. It does not + // help to call replace_primary. Also we will use the + // same primary to check with other secondaries in + // later iterations of the loop + } + // In the case where the secondary reports NoEvidence + // after initially it reported a conflicting header. + // secondary is faulty + Replace_Secondary(root); + } + } + return Evidences; +} +``` + +- Expected precondition + - root and primary trace are a verification trace +- Expected postcondition + - solves the problem statement (if attack found, then evidence is reported) +- Error condition + - `ErrorTrustExpired`: fails if root expires (outside trusting + period) [[LCV-INV-TP.1]][LCV-INV-TP1-link] + - `ErrorNoPeers`: if no peers are left to replace secondaries, and + no evidence was found before that happened + +--- + +```go +func CreateEvidenceForPeer(peer PeerID, root LightBlock, trace LightStore) + (Evidence, LightBlock, LightStore, result) { + + common := root; + + for i in 1 .. len(trace) { + auxLS, result := VerifyToTarget(peer, common, trace[i].Header.Height) + + if result != ResultSuccess { + // something went wrong; peer did not provide a verifiable block + return (nil, nil, nil, FaultyPeer) + } + else { + if auxLS.LatestVerified().Header != trace[i].Header { + // the header reported by the peer differs from the + // reference header in trace but both could be + // verified from common in one step. 
+ // we can create evidence for submission to the secondary + ev := new InternalEvidence; + ev.Evidence.ConflictingBlock := trace[i]; + // CommonHeight is used to indicate the type of attack + // if the CommonHeight != ConflictingBlock.Height this + // is by definition a lunatic attack else it is an + // equivocation attack + ev.Evidence.CommonHeight := common.Height; + ev.Peer := peer + return (ev, common, auxLS, FoundEvidence) + } + else { + // the peer agrees with the trace, we move common forward. + // we could delete auxLS as it will be overwritten in + // the next iteration + common := trace[i] + } + } + } + return (nil, nil, nil, NoEvidence) +} +``` + +- Expected precondition + - root and trace are a verification trace +- Expected postcondition + - finds evidence where trace and peer diverge +- Error condition + - `ErrorTrustExpired`: fails if root expires (outside trusting + period) [[LCV-INV-TP.1]][LCV-INV-TP1-link] + - If `VerifyToTarget` returns error but root is not expired then return + `FaultyPeer` + +--- + +## Correctness arguments + +#### On the existence of evidence + +**Proposition.** In the case of attack, +evidence [[TMBC-LC-ATTACK-EVIDENCE.1]](#TMBC-LC-ATTACK-EVIDENCE1) + exists. +*Proof.* First observe that + +- (A). (NOT E2(i)) implies E1(i+1) + +Now by contradiction assume there is no evidence. Thus + +- for all i, we have NOT E1(i) or NOT E2(i) +- for i = 1 we have E1(1) and thus NOT E2(1) + thus by induction on i, by (A) we have for all i that **E1(i)** +- from attack we have E2(h-1), and as there is no evidence for + i = h - 1 we get **NOT E1(h-1)**. Contradiction. +QED. + +#### Argument for [[LCD-DIST-INV-ATTACK.1]](#LCD-DIST-INV-ATTACK1) + +Under the assumption that root and trace are a verification trace, +when in `CreateEvidenceForPeer` the detector creates +evidence, then the lightclient has seen two different headers (one via +`trace` and one via `VerifyToTarget`) for the same height that can both +be verified in one step. + +#### Argument for [[LCD-DIST-INV-STORE.1]](#LCD-DIST-INV-STORE1) + +We assume that there is at least one correct peer, and there is no +fork. As a result, the correct peer has the correct sequence of +blocks. Since the primary_trace is checked block-by-block also against +each secondary, and at no point evidence was generated that means at +no point there were conflicting blocks. + +#### Argument for [[LCD-DIST-LIVE.1]](#LCD-DIST-LIVE1) + +At the latest when [[LCV-INV-TP.1]][LCV-INV-TP1-link] is violated, +`AttackDetector` terminates. + +#### Argument for [[LCD-DIST-TERM-NORMAL.1]](#LCD-DIST-TERM-NORMAL1) + +As there are finitely many peers, eventually the main loop +terminates. As there is no attack no evidence can be generated. + +#### Argument for [[LCD-DIST-TERM-ATTACK.1]](#LCD-DIST-TERM-ATTACK1) + +Argument similar to [[LCD-DIST-TERM-NORMAL.1]](#LCD-DIST-TERM-NORMAL1) + +#### Argument for [[LCD-DIST-SAFE-SECONDARY.1]](#LCD-DIST-SAFE-SECONDARY1) + +Secondaries are only replaced if they time-out or if they report bogus +blocks. The former is ruled out by the timing assumption, the latter +by correct peers only reporting blocks from the chain. + +#### Argument for [[LCD-DIST-SAFE-BOGUS.1]](#LCD-DIST-SAFE-BOGUS1) + +Once a bogus block is recognized as such the secondary is removed. + +# References + +> links to other specifications/ADRs this document refers to + +[[verification]] The specification of the light client verification. + +[[supervisor]] The specification of the light client supervisor. 
+
+[verification]: https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification_002_draft.md
+
+[supervisor]: https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/supervisor/supervisor_001_draft.md
+
+[block]: https://github.com/tendermint/spec/blob/d46cd7f573a2c6a2399fcab2cde981330aa63f37/spec/core/data_structures.md
+
+[TMBC-FM-2THIRDS-link]: https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification_002_draft.md#tmbc-fm-2thirds1
+
+[TMBC-SOUND-DISTR-POSS-COMMIT-link]: https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification_002_draft.md#tmbc-sound-distr-poss-commit1
+
+[LCV-SEQ-SAFE-link]: https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification_002_draft.md#lcv-seq-safe1
+
+[TMBC-VAL-CONTAINS-CORR-link]:
+https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification_002_draft.md#tmbc-val-contains-corr1
+
+[fetch]:
+https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification_002_draft.md#lcv-func-fetch1
+
+[LCV-INV-TP1-link]:
+https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification_002_draft.md#lcv-inv-tp1
+
+[LCV-LB-link]:
+https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification_002_draft.md#lcv-data-lightblock1
+
+[LCV-LS-link]:
+https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification_002_draft.md#lcv-data-lightstore2
+
+[LVC-HD-link]:
+https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification_002_draft.md#tmbc-header-fields2
+
+[repl]:
+https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/supervisor/supervisor_001_draft.md#lc-func-replace-secondary1
+
+[vtt]:
+https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/verification/verification_002_draft.md#lcv-func-main2
diff --git a/spec/light-client/detection/discussions.md b/spec/light-client/detection/discussions.md
new file mode 100644
index 0000000000..82702dd69d
--- /dev/null
+++ b/spec/light-client/detection/discussions.md
@@ -0,0 +1,178 @@
+# Results of Discussions and Decisions
+
+- Generating a minimal proof of fork (as suggested in [Issue #5083](https://github.com/tendermint/tendermint/issues/5083)) is too costly at the light client
+    - we do not know all lightblocks from the primary
+    - therefore there are many scenarios; we might even need to ask
+      the primary again for additional lightblocks to isolate the
+      branch.
+
+> For instance, the light node starts with a block at height 1 and the
+> primary provides a block of height 10 that the light node can
+> verify immediately. In cross-checking, a secondary now provides a
+> conflicting header b10 of height 10 that needs another header b5
+> of height 5 to
+> verify. Now, in order for the light node to convince the primary:
+>
+> - The light node cannot just send b5, as it is not clear whether
+>   the fork happened before or after 5
+> - The light node cannot just send b10, as the primary would also
+>   need b5 for verification
+> - In order to minimize the evidence, the light node may try to
+>   figure out where the branch happens, e.g., by asking the primary
+>   for height 5 (it might be that more queries are required, also
+>   to the secondary). However, assuming that in this scenario the
+>   primary is faulty, it may not respond.
+
+  As the main goal is to catch misbehavior of the primary,
+  evidence generation and punishment must not depend on its
+  cooperation. So the moment we have proof of fork (even if it
+  contains several light blocks) we should submit right away.
+
+- decision: a "full" proof of fork consists of two traces that originate in the
+  same lightblock and lead to conflicting headers of the same height.
+
+- For submission of proof of fork, we may do some optimizations; for
+  instance, we might just submit a trace of lightblocks that verifies a block
+  different from the one the full node knows (we do not send the trace
+  the primary gave us back to the primary)
+
+- The light client attack is via the primary. Thus we try to
+  catch if the primary installs a bad light block
+    - We do not check secondary against secondary
+    - For each secondary, we check the primary against one secondary
+
+- Observe that just two blocks for the same height are not
+sufficient proof of fork.
+One of the blocks may be bogus [TMBC-BOGUS.1], which does
+not constitute slashable behavior.
+This leads to the question of whether the light node should try to do
+fork detection on its initial block (from subjective
+initialization). This could be done by doing backwards verification
+(with the hashes) until a bifurcation block is found.
+While there are scenarios where a
+fork could be found, there is also the scenario where a faulty full
+node feeds the light node with bogus light blocks and forces the light
+node to check hashes until a bogus chain is out of the trusting period.
+As a result, the light client
+should not try to detect a fork for its initial header. **The initial
+header must be trusted as is.**
+
+# Light Client Sequential Supervisor
+
+**TODO:** decide where (into which specification) to put the
+following:
+
+We describe the context in which the fork detector is called by giving
+a sequential version of the supervisor function.
+Roughly, it alternates between two phases, namely:
+
+- Light Client Verification. As a result, a header of the required
+  height has been downloaded from and verified with the primary.
+- Light Client Fork Detection. As a result, the header has been
+  cross-checked with the secondaries. In case there is a fork we
+  submit "proof of fork" and exit.
+
+#### **[LC-FUNC-SUPERVISOR.1]:**
+
+```go
+func Sequential-Supervisor () (Error) {
+    loop {
+        // get the next height
+        nextHeight := input();
+
+        // Verify
+        result := NoResult;
+        while result != ResultSuccess {
+            lightStore, result := VerifyToTarget(primary, lightStore, nextHeight);
+            if result == ResultFailure {
+                // pick new primary (promote a secondary to primary)
+                // and delete all lightblocks above
+                // LastTrusted (they have not been cross-checked)
+                Replace_Primary();
+            }
+        }
+
+        // Cross-check
+        PoFs := Forkdetector(lightStore, PoFs);
+        if PoFs.Empty {
+            // no fork detected with secondaries, we trust the new
+            // lightblock
+            lightStore.Update(testedLB, StateTrusted);
+        }
+        else {
+            // there is a fork, we submit the proofs and exit
+            for _, p := range PoFs {
+                SubmitProofOfFork(p);
+            }
+            return(ErrorFork);
+        }
+    }
+}
+```
+
+**TODO:** finish conditions
+
+- Implementation remark
+- Expected precondition
+    - *lightStore* initialized with trusted header
+    - *PoFs* empty
+- Expected postcondition
+    - runs forever, or
+    - is terminated by the user and satisfies the LightStore invariant, or **TODO**
+    - has submitted proof of fork upon detecting a fork
+- Error condition
+    - none
+
+----
+
+# Semantics of the LightStore
+
+Currently, a lightblock in the lightstore can be in one of the
+following states:
+
+- StateUnverified
+- StateVerified
+- StateFailed
+- StateTrusted
+
+The intuition is that `StateVerified` captures that the lightblock has
+been verified with the primary, and `StateTrusted` is the state after
+successful cross-checking with the secondaries.
+
+Assuming there is **always one correct node among primary and
+secondaries**, and there is no fork on the blockchain, lightblocks that
+are in `StateTrusted` can be used by the user with the guarantee of
+"finality". If a block in `StateVerified` is used, it might be that
+detection later finds a fork, and a roll-back might be needed.
+
+**Remark:** The assumption of one correct node does not render
+verification useless. It is true that if the primary and the
+secondaries return the same block we may trust it. However, if there
+is a node that provides a different block, the light node still needs
+verification to understand whether there is a fork, or whether the
+different block is just bogus (without any support of some previous
+validator set).
+
+**Remark:** A light node may choose the full nodes it communicates
+with (the light node and the full node might even belong to the same
+stakeholder) so the assumption might be justified in some cases.
+
+In the future, we will make the following changes:
+
+- we assume that only from time to time, the light node is
+  connected to a correct full node
+- this means for some limited time, the light node might have no
+  means to defend against light client attacks
+- as a result we do not have finality
+- once the light node reconnects with a correct full node, it
+  should detect the light client attack and submit evidence.
+
+Under these assumptions, `StateTrusted` loses its meaning. As a
+result, it should be removed from the API. We suggest that we replace
+it with a flag "trusted" that can be used
+
+- internally for efficiency reasons (to maintain
+  [LCD-INV-TRUSTED-AGREED.1] until a fork is detected)
+- by the light client based on the "one correct full node" assumption
+
+----
diff --git a/spec/light-client/detection/draft-functions.md b/spec/light-client/detection/draft-functions.md
new file mode 100644
index 0000000000..c56594a533
--- /dev/null
+++ b/spec/light-client/detection/draft-functions.md
@@ -0,0 +1,289 @@
+# Draft of Functions for Fork Detection and Proof of Fork Submission
+
+This document collects drafts of functions for generating and
+submitting proof of fork in the IBC context
+
+- [IBC](#on---chain-ibc-component)
+
+- [Relayer](#relayer)
+
+## On-chain IBC Component
+
+> The following is a suggestion to change the function defined in ICS 007
+
+#### [TAG-IBC-MISBEHAVIOR.1]
+
+```go
+func checkMisbehaviourAndUpdateState(cs: ClientState, PoF: LightNodeProofOfFork)
+```
+
+**TODO:** finish conditions
+
+- Implementation remark
+- Expected precondition
+    - PoF.TrustedBlock.Header is equal to lightBlock on store with
+      same height
+    - both traces end with header of same height
+    - headers are different
+    - both traces are supported by PoF.TrustedBlock (`supports`
+      defined in [TMBC-FUNC]), that is, for `t = currentTimestamp()` (see
+      ICS 024)
+        - supports(PoF.TrustedBlock, PoF.PrimaryTrace[1], t)
+        - supports(PoF.PrimaryTrace[i], PoF.PrimaryTrace[i+1], t) for
+          *0 < i < length(PoF.PrimaryTrace)*
+        - supports(PoF.TrustedBlock, PoF.SecondaryTrace[1], t)
+        - supports(PoF.SecondaryTrace[i], PoF.SecondaryTrace[i+1], t) for
+          *0 < i < length(PoF.SecondaryTrace)*
+- Expected postcondition
+    - set cs.FrozenHeight to min(cs.FrozenHeight, PoF.TrustedBlock.Header.Height)
+- Error condition
+    - none
+
+----
+
+> The following is a suggestion to add functionality to ICS 002 and 007.
+> I suppose the above is the most efficient way to get the required
+> information. Another option is to subscribe to "header install"
+> events via CosmosSDK
+
+#### [TAG-IBC-HEIGHTS.1]
+
+```go
+func QueryHeightsRange(id, from, to) ([]Height)
+```
+
+- Expected postcondition
+    - returns all heights *h*, with *from <= h <= to*, for which the
+      IBC component has a consensus state.
+
+----
+
+> This function can be used if the relayer has no information about
+> the IBC component. This allows late-joining relayers to also
+> participate in fork detection and the generation of proof of
+> fork. Alternatively, we may also postulate that relayers are not
+> responsible for detecting forks for heights before they started (and
+> subscribed to the transactions reporting fresh headers being
+> installed at the IBC component).
+
+## Relayer
+
+### Auxiliary Functions to be implemented in the Light Client
+
+#### [LCV-LS-FUNC-GET-PREV.1]
+
+```go
+func (ls LightStore) GetPreviousVerified(height Height) (LightBlock, bool)
+```
+
+- Expected postcondition
+    - returns a verified LightBlock whose height is maximal among all
+      verified lightblocks with height smaller than `height`
+
+----
+
+### Relayer Submitting Proof of Fork to the IBC Component
+
+There are two ways the relayer can detect a fork:
+
+- by the fork detector of one of its lightclients
+- by checking the consensus state of the IBC component
+
+The following function ignores how the proof of fork was generated.
+It takes a proof of fork as input and computes a proof of fork that
+ will be accepted by the IBC component.
+
+The problem addressed here is that both the relayer's light client
+ and the IBC component have incomplete light stores that might
+ not have all light blocks in common.
+Hence the relayer has to figure out what the IBC component knows
+ (intuitively, a meeting point between the two lightstores
+ computed in `commonRoot`) and compute a proof of fork
+ (`extendPoF`) that the IBC component will accept based on its
+ knowledge.
+
+The auxiliary functions `commonRoot` and `extendPoF` are
+defined below.
+
+#### [TAG-SUBMIT-POF-IBC.1]
+
+```go
+func SubmitIBCProofOfFork(
+  lightStore LightStore,
+  PoF: LightNodeProofOfFork,
+  ibc IBCComponent) (Error) {
+    if ibc.queryChainConsensusState(PoF.TrustedBlock.Height) == PoF.TrustedBlock {
+        // IBC component has root of PoF on store, we can just submit
+        ibc.submitMisbehaviourToClient(ibc.id, PoF)
+        return Success
+        // not sure about the id parameter
+    }
+    else {
+        // the ibc component does not have the TrustedBlock and might
+        // even be on yet a different branch. We have to compute a PoF
+        // that the ibc component can verify based on its current
+        // knowledge
+
+        ibcLightBlock, lblock, _, result := commonRoot(lightStore, ibc, PoF.TrustedBlock)
+
+        if result == Success {
+            newPoF = extendPoF(ibcLightBlock, lblock, lightStore, PoF)
+            ibc.submitMisbehaviourToClient(ibc.id, newPoF)
+            return Success
+        }
+        else {
+            return CouldNotGeneratePoF
+        }
+    }
+}
+```
+
+**TODO:** finish conditions
+
+- Implementation remark
+- Expected precondition
+- Expected postcondition
+- Error condition
+    - none
+
+----
+
+### Auxiliary Functions at the Relayer
+
+> If the relayer detects a fork, it has to compute a proof of fork that
+> will convince the IBC component. That is, it has to compare the
+> relayer's local lightstore against the lightstore of the IBC
+> component, and find common ancestor lightblocks.
+
+#### [TAG-COMMON-ROOT.1]
+
+```go
+func commonRoot(lightStore LightStore, ibc IBCComponent, lblock
+LightBlock) (LightBlock, LightBlock, LightStore, Result) {
+
+    auxLS.Init
+
+    // first we ask for the heights the ibc component is aware of
+    ibcHeights = ibc.QueryHeightsRange(
+                     ibc.id,
+                     lightStore.LowestVerified().Height,
+                     lblock.Height - 1);
+    // this function does not exist yet. Alternatively, we may
+    // request all transactions that installed headers via CosmosSDK
+
+    for {
+        h, result = max(ibcHeights)
+        if result == Empty {
+            return (_, _, _, NoRoot)
+        }
+        ibcLightBlock = ibc.queryChainConsensusState(h)
+        auxLS.Update(ibcLightBlock, StateVerified);
+        connector, result := Connector(lightStore, ibcLightBlock, lblock.Header.Height)
+        if result == Success {
+            return (ibcLightBlock, connector, auxLS, Success)
+        }
+        else {
+            ibcHeights.remove(h)
+        }
+    }
+}
+```
+
+- Expected postcondition
+    - returns
+        - a lightBlock b1 from the IBC component, and
+        - a lightBlock b2
+          from the local lightStore with height less than
+          lblock.Header.Height, s.t. b1 supports b2, and
+        - a lightstore with the blocks downloaded from
+          the ibc component
+
+----
+
+#### [TAG-LS-FUNC-CONNECT.1]
+
+```go
+func Connector (lightStore LightStore, lb LightBlock, h Height) (LightBlock, bool)
+```
+
+- Expected postcondition
+    - returns a verified LightBlock from lightStore with height less
+      than *h* that can be
+      verified by lb in one step.
+
+**TODO:** for the above to work we need an invariant that all verified
+lightblocks form a chain of trust. Otherwise, we need a lightblock
+that has a chain of trust to height.
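+
+To make the postcondition concrete, the following is a minimal sketch
+of how `Connector` could be realized. It assumes a hypothetical
+lightstore iterator `VerifiedBelowDescending` (verified blocks below a
+given height, highest first) and the one-step check `supports` used
+above; neither name is part of the existing API.
+
+```go
+func Connector(lightStore LightStore, lb LightBlock, h Height) (LightBlock, bool) {
+    t := currentTimestamp() // as in ICS 024
+    // scan the verified blocks below h, highest first, and return the
+    // first one that lb can verify in a single (skipping) step
+    for _, b := range lightStore.VerifiedBelowDescending(h) {
+        if supports(lb, b, t) {
+            return b, true
+        }
+    }
+    // no verified block below h can be verified from lb in one step
+    return LightBlock{}, false
+}
+```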
+
+> Once the common root is found, a proof of fork that will be accepted
+> by the IBC component needs to be generated. This is done in the
+> following function.
+
+#### [TAG-EXTEND-POF.1]
+
+```go
+func extendPoF (root LightBlock,
+                connector LightBlock,
+                lightStore LightStore,
+                PoF LightNodeProofOfFork) (LightNodeProofOfFork)
+```
+
+- Implementation remark
+    - PoF is not sufficient to convince an IBC component, so we extend
+      the proof of fork farther into the past
+- Expected postcondition
+    - returns a newPoF:
+        - newPoF.TrustedBlock = root
+        - let prefix =
+          connector +
+          lightStore.Subtrace(connector.Header.Height, PoF.TrustedBlock.Header.Height-1) +
+          PoF.TrustedBlock
+        - newPoF.PrimaryTrace = prefix + PoF.PrimaryTrace
+        - newPoF.SecondaryTrace = prefix + PoF.SecondaryTrace
+
+### Detecting a fork at the IBC component
+
+The following function is assumed to be called regularly to check
+the latest consensus state of the IBC component. Alternatively, this
+logic can be executed whenever the relayer is informed (via an event)
+that a new header has been installed.
+
+#### [TAG-HANDLER-DETECT-FORK.1]
+
+```go
+func DetectIBCFork(ibc IBCComponent, lightStore LightStore) (LightNodeProofOfFork, Error) {
+    cs = ibc.queryClientState(ibc);
+    lb, found := lightStore.Get(cs.Header.Height)
+    if !found {
+        // TODO: need verify to target
+        lb, result = LightClient.Main(primary, lightStore, cs.Header.Height)
+        // [LCV-FUNC-IBCMAIN.1]
+        // TODO: decide what to do following the outcome of Issue #499
+
+        // I guess here we have to get into the light client
+
+    }
+    if cs != lb {
+        // IBC component disagrees with my primary.
+        // I fetch the common root
+        ibcLightBlock, lblock, ibcStore, result := commonRoot(lightStore, ibc, lb)
+        pof = new LightNodeProofOfFork;
+        pof.TrustedBlock := ibcLightBlock
+        pof.PrimaryTrace := ibcStore + cs
+        pof.SecondaryTrace := lightStore.Subtrace(lblock.Header.Height,
+                                                  lb.Header.Height);
+        return(pof, Fork)
+    }
+    return(nil, NoFork)
+}
+```
+
+**TODO:** finish conditions
+
+- Implementation remark
+    - we ask the handler for the latest check. Cross-check with the
+      chain. In case they deviate, we generate a PoF.
+    - we assume the IBC component is correct. It has verified the
+      consensus state
+- Expected precondition
+- Expected postcondition
diff --git a/spec/light-client/detection/req-ibc-detection.md b/spec/light-client/detection/req-ibc-detection.md
new file mode 100644
index 0000000000..da5dc7a2af
--- /dev/null
+++ b/spec/light-client/detection/req-ibc-detection.md
@@ -0,0 +1,347 @@
+
+# Requirements for Fork Detection in the IBC Context
+
+## What you need to know about IBC
+
+In the following, I distilled what I considered relevant from
+
+
+
+### Components and their interface
+
+#### Tendermint Blockchains
+
+> I assume you know what that is.
+
+#### An IBC/Tendermint correspondence
+
+| IBC Term | Tendermint-RS Spec Term | Comment |
+|----------|-------------------------|---------|
+| `CommitmentRoot` | AppState | app hash |
+| `ConsensusState` | Lightblock | not all fields are there. NextValidator is definitely needed |
+| `ClientState` | latest light block + configuration parameters (e.g., trusting period) + `frozenHeight` | NextValidators missing; what is `proofSpecs`?|
+| `frozenHeight` | height of fork | set when a fork is detected |
+| "would-have-been-fooled" | light node fork detection | light node may submit proof of fork to IBC component to halt it |
+| `Height` | (no epochs) | (epoch,height) pair in lexicographical order (`compare`) |
+| `Header` | ~signed header | validatorSet explicit (no hash); nextValidators missing |
+| `Evidence` | t.b.d. | definition unclear "which the light client would have considered valid". Data structure will need to change |
+| `verify` | `ValidAndVerified` | signature does not match perfectly (ClientState vs. LightBlock) + in `checkMisbehaviourAndUpdateState` it is unclear whether it uses traces or goes to h1 and h2 in one step |
+
+#### Some IBC links
+
+- [QueryConsensusState](https://github.com/cosmos/cosmos-sdk/blob/2651427ab4c6ea9f81d26afa0211757fc76cf747/x/ibc/02-client/client/utils/utils.go#L68)
+
+#### Required Changes in ICS 007
+
+- `assert(height > 0)` in the definition of `initialise` doesn't match the
+  definition of `Height` as an *(epoch,height)* pair.
+
+- `initialise` needs to be updated to the new data structures
+
+- `clientState.frozenHeight` semantics seem not totally consistent in the
+  document. E.g., `min` needs to be defined over an optional value in
+  `checkMisbehaviourAndUpdateState`. Also, if you are frozen, why do
+  you accept more evidence?
+
+- `checkValidityAndUpdateState`
+    - `verify`: it needs to be clarified that checkValidityAndUpdateState
+      does not perform "bisection" (as currently hinted in the text) but
+      performs a single step of "skipping verification", called
+      `ValidAndVerified`
+    - `assert (header.height > clientState.latestHeight)`: no old
+      headers can be installed. This might be OK, but we need to check the
+      interplay with misbehavior
+    - clientState needs to be updated according to the complete data
+      structure
+
+- `checkMisbehaviourAndUpdateState`: as evidence will contain a trace
+  (or two), the assertion that uses verify will need to change.
+
+- ICS 002 states w.r.t. `queryChainConsensusState` that "Note that
+  retrieval of past consensus states by height (as opposed to just the
+  current consensus state) is convenient but not required." For
+  Tendermint fork detection, this seems to be a necessity.
+
+- `Header` should become a lightblock
+
+- `Evidence` should become `LightNodeProofOfFork` [LCV-DATA-POF.1]
+
+- `upgradeClientState`: what is the semantics (in particular, what is
+  `height` doing?).
+
+- `checkMisbehaviourAndUpdateState(cs: ClientState, PoF:
+  LightNodeProofOfFork)` needs to be adapted
+
+#### Handler
+
+A blockchain runs a **handler** that passively collects information about
+  other blockchains. It can be thought of as a state machine that takes
+  input events.
+
+- the state includes a lightstore (I guess called `ConsensusState`
+  in IBC)
+
+- The following function is used to pass a header to a handler
+
+```go
+type checkValidityAndUpdateState = (Header) => Void
+```
+
+  For Tendermint, it will perform
+  `ValidAndVerified`, that is, it does the trusting period check and the
+  +1/3 check (+2/3 for sequential headers).
+  If it verifies a header, it adds it to its lightstore;
+  if the header does not pass verification, it drops it.
+  Right now it only accepts a header more recent than the latest
+  header, and drops older ones or ones that could not be verified.
+
+> The above paragraph captures what I believe is the current
+  logic of `checkValidityAndUpdateState`. It may be subject to
+  change. E.g., maintain a lightstore with state (unverified, verified)
+
+- The following function is used to pass "evidence" (this we
+  will need to make precise eventually) to a handler
+
+```go
+type checkMisbehaviourAndUpdateState = (bytes) => Void
+```
+
+  We have to design this, and the data that the handler can use to
+  check that there was some misbehavior (fork) in order to react to
+  it, e.g., flagging a situation and
+  stopping the protocol.
+
+- The following function is used to query the light store (`ConsensusState`)
+
+```go
+type queryChainConsensusState = (height: uint64) => ConsensusState
+```
+
+#### Relayer
+
+- The active components are called **relayers**.
+
+- a relayer contains light clients to two (or more?) blockchains
+
+- the relayer sends headers and data to the handler to invoke
+  `checkValidityAndUpdateState` and
+  `checkMisbehaviourAndUpdateState`. It may also query
+  `queryChainConsensusState`.
+
+- multiple relayers may talk to one handler. Some relayers might be
+  faulty. We assume the existence of at least one correct relayer.
+
+## Informal Problem Statement: Fork detection in IBC
+
+### Relayer requirement: Evidence for Handler
+
+- The relayer should provide the handler with
+  "evidence" that there was a fork.
+
+- The relayer can read the handler's consensus state. Thus the relayer can
+  feed the handler precisely the information the handler needs to detect a
+  fork. What this information is needs to be specified.
+
+- The information depends on the verification the handler does. It
+  might be necessary to provide a bisection proof (list of
+  lightblocks) so that the handler can verify, based on its local
+  lightstore, a header *h* that is conflicting with a header *h'* in the
+  local lightstore, that is, *h != h'* and *h.Height = h'.Height*
+
+### Relayer requirement: Fork detection
+
+Let's assume there is a fork at chain A. There are two ways the
+relayer can figure that out:
+
+1. as the relayer contains a light client for A, it also includes a fork
+   detector that can detect a fork.
+
+2. the relayer may also detect a fork by observing that the
+   handler for chain A (on chain B)
+   is on a different branch than the relayer
+
+- in both detection scenarios, the relayer should submit evidence to
+  full nodes of chain A where there is a fork. As we assume a fullnode
+  has a complete list of blocks, it is sufficient to send "Bucky's
+  evidence" (),
+  that is,
+    - two lightblocks from different branches
+
+    - a lightblock (perhaps just a height) from which both blocks
+      can be verified.
+
+- in scenario 2, the relayer must feed the A-handler (on chain B)
+  a proof of a fork on A so that chain B can react accordingly
+
+### Handler requirement
+
+- there are potentially many relayers, some correct, some faulty
+
+- a handler cannot trust the information provided by the relayer,
+  but must verify
+  (Доверя́й, но проверя́й — trust, but verify)
+
+- in case of a fork, we accept that the handler temporarily stores
+  headers (tagged as verified).
+
+- eventually, a handler should be informed
+  (`checkMisbehaviourAndUpdateState`)
+  by some relayer that it has
+  verified a header from a fork. Then the handler should do what is
+  required by IBC in this case (stop?)
+
+### Challenges in the handler requirement
+
+- handlers and relayers work on different lightstores. In principle,
+  the lightstores need not intersect in any heights a priori
+
+- if a relayer sees a header *h* it doesn't know at a handler (`queryChainConsensusState`), the
+  relayer needs to
+  verify that header. If it cannot do it locally based on downloaded
+  and verified (trusted?) light blocks, it might need to use
+  `VerifyToTarget` (bisection). To call `VerifyToTarget` we might keep
+  *h* in the lightstore. If verification fails, we need to download the
+  "alternative" header of height *h.Height* to generate evidence for
+  the handler.
+
+- we have to specify what precisely `queryChainConsensusState`
+  returns. It cannot be the complete lightstore. Is the last header enough?
+
+- we would like to assume that every now and then (smaller than the
+  trusting period) a correct relayer checks whether the handler is on a
+  different branch than the relayer.
+  And we would like that this is enough to achieve
+  the Handler requirement.
+
+    - here the correctness argument would be easy if a correct relayer is
+      based on a light client with a *trusted* state, that is, a light
+      client that never changes its opinion about what is trusted. Then, if such a
+      correct relayer checks in with a handler, it will detect a fork, and
+      act in time.
+
+    - if the light client does not provide this interface, in the case of
+      a fork, we need some assumption about a correct relayer being on a
+      different branch than the handler, and we need such a relayer to
+      check in not too late. Also,
+      what happens if the relayer's light client is forced to roll back
+      its lightstore?
+      Does it have to re-check all handlers?
+
+## On the interconnectedness of things
+
+In the broader discussion of so-called "fork accountability" there are
+several subproblems
+
+- Fork detection
+
+- Evidence creation and submission
+
+- Isolating misbehaving nodes (and reporting them for punishment over ABCI)
+
+### Fork detection
+
+The preliminary specification ./detection.md formalizes the notion of
+a fork. Roughly, a fork exists if there are two conflicting headers
+for the same height, where both are supported by bonded full nodes
+(that have been validators in the near past, that is, within the
+trusting period). We distinguish between a *fork on the chain*, where two
+conflicting blocks are signed by +2/3 of the validators of that
+height, and a *light client fork*, where one of the conflicting headers
+is not signed by +2/3 of the validators of the current height, but by +1/3 of the
+validators of some smaller height.
+
+In principle everyone can detect a fork
+
+- ./detection talks about the Tendermint light client with a focus on
+  light nodes. A relayer runs such light clients and may detect
+  forks in this way
+
+- in IBC, a relayer can see that a handler is on a conflicting branch
+    - the relayer should feed the handler the necessary information so
+      that it can halt
+    - the relayer should report the fork to a full node
+
+### Evidence creation and submission
+
+- the information sent from the relayer to the handler could be called
+  evidence, but this is perhaps a bad idea because the information sent to a
+  full node can also be called evidence. But this evidence might still
+  not be enough as the full node might need to run the "fork
+  accountability" protocol to generate evidence in the form of
+  consensus messages. So perhaps we should
+  introduce different terms for:
+
+    - proof of fork for the handler (basically consisting of lightblocks)
+    - proof of fork for a full node (basically consisting of (fewer) lightblocks)
+    - proof of misbehavior (consensus messages)
+
+### Isolating misbehaving nodes
+
+- this is the job of a full node.
+
+- might be subjective in the future: the protocol depends on what the
+  full node believes is the "correct" chain. Right now we postulate
+  that every full node is on the correct chain, that is, there is no
+  fork on the chain.
+
+- The full node figures out which nodes are
+    - lunatic
+    - double signing
+    - amnesic; **using the challenge-response protocol**
+
+- We do not punish "phantom" validators
+    - currently we understand a phantom validator as a node that
+        - signs a block for a height in which it is not in the
+          validator set
+        - is not part of the +1/3 of previous validators that
+          are used to support the header. Whether we call a validator
+          phantom might be subjective and depend on the header we
+          check against. The formalization actually seems not so
+          clear.
+    - they can only do something if there are +1/3 faulty validators
+      that are either lunatic, double signing, or amnesic.
+    - ABCI requires that we only report bonded validators. So if a
+      node is a "phantom", we would need to check whether the node is
+      bonded, which currently is expensive, as it requires checking
+      blocks from the last three weeks.
+    - in the future, with state sync, a correct node might be
+      convinced by faulty nodes that it is in the validator set. Then
+      it might appear to be "phantom" although it behaves correctly
+
+## Next steps
+
+> The following points are subject to my limited knowledge of the
+> state of the work on IBC. Some/most of it might already exist and we
+> will just need to bring everything together.
+
+- "proof of fork for a full node" defines a clean interface between
+  fork detection and misbehavior isolation. Since it should be produced
+  by protocols (the light client, the relayer), we should fix that
+  first.
+
+- Given the problems of not having a light client architecture spec,
+  for the relayer we should start with this. E.g.
+
+    - the relayer runs light clients for two chains
+    - the relayer regularly queries the consensus state of a handler
+    - the relayer needs to check the consensus state
+        - this involves local checks
+        - this involves calling the light client
+    - the relayer uses the light client to do IBC business (channels,
+      packets, connections, etc.)
+    - the relayer submits proof of fork to handlers and full nodes
+
+> the list is definitely not complete. I think part of this
+> (perhaps all) is
+> covered by what Anca presented recently.
+
+We will need to define what we expect from these components
+
+- for the parts where the relayer talks to the handler, we need to fix
+  the interface, and what the handler does
+
+- we write specs for these components.
diff --git a/spec/light-client/experiments.png b/spec/light-client/experiments.png
new file mode 100644
index 0000000000..94166ffa31
Binary files /dev/null and b/spec/light-client/experiments.png differ
diff --git a/spec/light-client/supervisor/supervisor_001_draft.md b/spec/light-client/supervisor/supervisor_001_draft.md
new file mode 100644
index 0000000000..eb43f6034d
--- /dev/null
+++ b/spec/light-client/supervisor/supervisor_001_draft.md
@@ -0,0 +1,639 @@
+
+# Draft of Light Client Supervisor for discussion
+
+## TODOs
+
+This specification is done in parallel with updates on the
+verification specification. So some hyperlinks have to be pointed to
+the correct files eventually.
+
+# Light Client Sequential Supervisor
+
+The light client implements a read operation of a
+[header](TMBC-HEADER-link) from the [blockchain](TMBC-SEQ-link), by
+communicating with full nodes, a so-called primary and several
+so-called witnesses. As some full nodes may be faulty, this
+functionality must be implemented in a fault-tolerant way.
+
+In the Tendermint blockchain, the validator set may change with every
+new block. The staking and unbonding mechanism induces a [security
+model](TMBC-FM-2THIRDS-link): starting at time *Time* of the
+[header](TMBC-HEADER-link),
+more than two-thirds of the next validators of a new block are correct
+for the duration of *TrustedPeriod*.
+
+[Light Client Verification](https://informal.systems) implements the fault-tolerant read
+operation designed for this security model. That is, it is safe if the
+model assumptions are satisfied and makes progress if it communicates
+with a correct primary.
+
+However, if the [security model](TMBC-FM-2THIRDS-link) is violated,
+faulty peers (that have been validators at some point in the past) may
+launch attacks on the Tendermint network, and on the light
+client. These attacks, as well as an axiomatization of blocks in
+general, are defined in [a document that contains the definitions that
+are currently in detection.md](https://informal.systems).
+
+If there is a light client attack (but no
+successful attack on the network), the safety of the verification step
+may be violated (as we operate outside its basic assumption).
+The light client also
+contains a defense mechanism against light client attacks, called detection.
+
+[Light Client Detection](https://informal.systems) implements a cross-check of the result
+of the verification step. If there is a light client attack, and the
+light client is connected to a correct peer, the light client as a
+whole is safe, that is, it will not operate on invalid
+blocks. However, in this case it cannot successfully read, as
+inconsistent blocks are in the system. Instead, the
+detection performs a distributed computation that results in so-called
+evidence. Evidence can be used to prove
+to a correct full node that there has been a
+light client attack.
+
+[Light Client Evidence Accountability](https://informal.systems) is a protocol run on a
+full node to check whether submitted evidence indeed proves the
+existence of a light client attack. Further, from the evidence and its
+own knowledge about the blockchain, the full node computes a set of
+bonded full nodes (that at some point had more than one third of the
+voting power) that participated in the attack that will be reported
+via ABCI to the application.
+
+In this document we specify
+
+- Initialization of the Light Client
+- The interaction of [verification](https://informal.systems) and [detection](https://informal.systems)
+
+The details of these two protocols are captured in their own
+documents, as is the [accountability](https://informal.systems) protocol.
+
+> Another related line is IBC attack detection and submission at the
+> relayer, as well as attack verification at the IBC handler. This
+> will call for yet another spec.
+
+# Status
+
+This document is work in progress. In order to develop the
+specification step-by-step,
+it assumes certain details of [verification](https://informal.systems) and
+[detection](https://informal.systems) that are not specified in the respective current
+versions yet. These inconsistencies will be addressed over several
+upcoming PRs.
+
+# Part I - Tendermint Blockchain
+
+See [verification spec](addLinksWhenDone)
+
+# Part II - Sequential Problem Definition
+
+#### **[LC-SEQ-INIT-LIVE.1]**
+
+Upon initialization, the light client gets as input a header of the
+blockchain, or the genesis file of the blockchain, and eventually
+stores a header of the blockchain.
+
+#### **[LC-SEQ-LIVE.1]**
+
+The light client gets a sequence of heights as inputs. For each input
+height *targetHeight*, it eventually stores the header of height
+*targetHeight*.
+
+#### **[LC-SEQ-SAFE.1]**
+
+The light client never stores a header which is not in the blockchain.
+
+# Part III - Light Client as Distributed System
+
+## Computational Model
+
+The light client communicates with remote processes only via the
+[verification](TODO) and the [detection](TODO) protocols. The
+respective assumptions are given there.
+
+## Distributed Problem Statement
+
+### Two Kinds of Liveness
+
+In case of light client attacks, the sequential problem statement
+cannot always be satisfied. The lightclient cannot decide which block
+is from the chain and which is not. As a result, the light client just
+creates evidence, submits it, and terminates.
+For the liveness property, we thus add the
+possibility that instead of adding a lightblock, we might also terminate
+in case there is an attack.
+
+#### **[LC-DIST-TERM.1]**
+
+The light client either runs forever or it *terminates on attack*.
+
+### Design choices
+
+#### [LC-DIST-STORE.1]
+
+The light client has a local data structure called LightStore
+that contains light blocks (that contain a header).
+
+> The light store exposes functions to query and update it. They are
+> specified [here](TODO:onceVerificationIsMerged).
+
+**TODO:** reference light store invariant [LCV-INV-LS-ROOT.2] once
+verification is merged
+
+#### **[LC-DIST-SAFE.1]**
+
+It is always the case that every header in *LightStore* was
+generated by an instance of Tendermint consensus.
+
+#### **[LC-DIST-LIVE.1]**
+
+Whenever the light client gets a new height *h* as input,
+
+- if there is
+no light client attack up to height *h*, then the lightclient
+eventually puts the lightblock of height *h* in the lightstore and
+waits for another input.
+- otherwise, that is, if there
+is a light client attack on height *h*, then the light client
+must perform one of the following:
+    - it terminates on attack.
+    - it eventually puts the lightblock of height *h* in the lightstore and
+      waits for another input.
+
+> Observe that the "existence of a lightclient attack" just means that some node has generated a conflicting block. It does not necessarily mean that a (faulty) peer sends such a block to "our" lightclient. Thus, even if there is an attack somewhere in the system, our lightclient might still continue to operate normally.
+
+### Solving the sequential specification
+
+[LC-DIST-SAFE.1] is guaranteed by the detector; in particular, it
+follows from
+[[LCD-DIST-INV-STORE.1]](TODO) and
+[[LCD-DIST-LIVE.1]](TODO).
+
+# Part IV - Light Client Supervisor Protocol
+
+We provide a specification for a sequential Light Client Supervisor.
+The local code for verification is presented as a sequential function
+`Sequential-Supervisor` to highlight the control flow of this
+functionality. Each lightblock is first verified with a primary, and then
+cross-checked with secondaries; if all goes well, the lightblock is
+added (with the attribute "trusted") to the
+lightstore. Intermediate lightblocks that were used to verify the target
+block but were not cross-checked are stored as "verified".
+
+> We note that if a different concurrency model is considered
+> for an implementation, the semantics of the lightstore might change:
+> In a concurrent implementation, we might do verification for some
+> height *h*, add the
+> lightblock to the lightstore, and start concurrent threads that
+>
+> - do verification for the next height *h' != h*
+> - do cross-checking for height *h*. If we find an attack, we remove
+>   *h* from the lightstore.
+> - the user might already start to use *h*
+>
+> Thus, this concurrency model changes the semantics of the
+> lightstore (not all lightblocks that are read by the user are
+> trusted; they may be removed if
+> we find a problem). Whether this is desirable, and whether the gain in
+> performance is worth it, we keep for future versions/discussion of
+> lightclient protocols.
+
+## Definitions
+
+### Peers
+
+#### **[LC-DATA-PEERS.1]:**
+
+A fixed set of full nodes is provided in the configuration upon
+initialization. Initially this set is partitioned into
+
+- one full node that is the *primary* (singleton set),
+- a set *Secondaries* (of fixed size, e.g., 3),
+- a set *FullNodes*; it excludes *primary* and *Secondaries* nodes.
+- A set *FaultyNodes* of nodes that the light client suspects of + being faulty; it is initially empty + +#### **[LC-INV-NODES.1]:** + +The detector shall maintain the following invariants: + +- *FullNodes \intersect Secondaries = {}* +- *FullNodes \intersect FaultyNodes = {}* +- *Secondaries \intersect FaultyNodes = {}* + +and the following transition invariant + +- *FullNodes' \union Secondaries' \union FaultyNodes' = FullNodes + \union Secondaries \union FaultyNodes* + +#### **[LC-FUNC-REPLACE-PRIMARY.1]:** + +```go +Replace_Primary(root-of-trust LightBlock) +``` + +- Implementation remark + - the primary is replaced by a secondary + - to maintain a constant size of secondaries, need to + - pick a new secondary *nsec* while ensuring [LC-INV-ROOT-AGREED.1] + - that is, we need to ensure that root-of-trust = FetchLightBlock(nsec, root-of-trust.Header.Height) +- Expected precondition + - *FullNodes* is nonempty +- Expected postcondition + - *primary* is moved to *FaultyNodes* + - a secondary *s* is moved from *Secondaries* to primary +- Error condition + - if precondition is violated + +#### **[LC-FUNC-REPLACE-SECONDARY.1]:** + +```go +Replace_Secondary(addr Address, root-of-trust LightBlock) +``` + +- Implementation remark + - maintain [LC-INV-ROOT-AGREED.1], that is, + ensure root-of-trust = FetchLightBlock(nsec, root-of-trust.Header.Height) +- Expected precondition + - *FullNodes* is nonempty +- Expected postcondition + - addr is moved from *Secondaries* to *FaultyNodes* + - an address *nsec* is moved from *FullNodes* to *Secondaries* +- Error condition + - if precondition is violated + +### Data Types + +The core data structure of the protocol is the LightBlock. + +#### **[LC-DATA-LIGHTBLOCK.1]** + +```go +type LightBlock struct { + Header Header + Commit Commit + Validators ValidatorSet + NextValidators ValidatorSet + Provider PeerID +} +``` + +#### **[LC-DATA-LIGHTSTORE.1]** + +LightBlocks are stored in a structure which stores all LightBlock from +initialization or received from peers. + +```go +type LightStore struct { + ... +} + +``` + +We use the functions that the LightStore exposes, which +are defined in the [verification specification](TODO). + +### Inputs + +The lightclient is initialized with LCInitData + +#### **[LC-DATA-INIT.1]** + +```go +type LCInitData struct { + lightBlock LightBlock + genesisDoc GenesisDoc +} +``` + +where only one of the components must be provided. `GenesisDoc` is +defined in the [Tendermint +Types](https://github.com/tendermint/tendermint/blob/master/types/genesis.go). + +#### **[LC-DATA-GENESIS.1]** + +```go +type GenesisDoc struct { + GenesisTime time.Time `json:"genesis_time"` + ChainID string `json:"chain_id"` + InitialHeight int64 `json:"initial_height"` + ConsensusParams *tmproto.ConsensusParams `json:"consensus_params,omitempty"` + Validators []GenesisValidator `json:"validators,omitempty"` + AppHash tmbytes.HexBytes `json:"app_hash"` + AppState json.RawMessage `json:"app_state,omitempty"` +} +``` + +We use the following function +`makeblock` so that we create a lightblock from the genesis +file in order to do verification based on the data from the genesis +file using the same verification function we use in normal operation. 
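+
+For concreteness, here is a minimal sketch of such a constructor. It
+mirrors the postconditions of [LC-FUNC-MAKEBLOCK.1] below; the field
+names follow [LC-DATA-LIGHTBLOCK.1] and [LC-DATA-GENESIS.1], and the
+Header fields are illustrative rather than a fixed API.
+
+```go
+func makeblock(genesisDoc GenesisDoc) (lightBlock LightBlock) {
+    // the initial block carries only data from the genesis file
+    lightBlock.Header.Height = genesisDoc.InitialHeight
+    lightBlock.Header.Time = genesisDoc.GenesisTime
+    // there is no previous block, so all links to the past are nil
+    lightBlock.Header.LastBlockID = nil
+    lightBlock.Header.LastCommit = nil
+    // before the first block, current and next validators coincide
+    lightBlock.Header.Validators = genesisDoc.Validators
+    lightBlock.Header.NextValidators = genesisDoc.Validators
+    lightBlock.Validators = genesisDoc.Validators
+    lightBlock.NextValidators = genesisDoc.Validators
+    // no commit exists for the initial block, and no peer provided it
+    lightBlock.Commit = nil
+    lightBlock.Provider = nil
+    return lightBlock
+}
+```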
+
+#### **[LC-FUNC-MAKEBLOCK.1]**
+
+```go
+func makeblock (genesisDoc GenesisDoc) (lightBlock LightBlock)
+```
+
+- Implementation remark
+    - none
+- Expected precondition
+    - none
+- Expected postcondition
+    - lightBlock.Header.Height = genesisDoc.InitialHeight
+    - lightBlock.Header.Time = genesisDoc.GenesisTime
+    - lightBlock.Header.LastBlockID = nil
+    - lightBlock.Header.LastCommit = nil
+    - lightBlock.Header.Validators = genesisDoc.Validators
+    - lightBlock.Header.NextValidators = genesisDoc.Validators
+    - lightBlock.Header.Data = nil
+    - lightBlock.Header.AppState = genesisDoc.AppState
+    - lightBlock.Header.LastResult = nil
+    - lightBlock.Commit = nil
+    - lightBlock.Validators = genesisDoc.Validators
+    - lightBlock.NextValidators = genesisDoc.Validators
+    - lightBlock.Provider = nil
+- Error condition
+    - none
+
+----
+
+### Configuration Parameters
+
+#### **[LC-INV-ROOT-AGREED.1]**
+
+In the Sequential-Supervisor, it is always the case that the primary
+and all secondaries agree on lightStore.Latest().
+
+### Assumptions
+
+We have to assume that the initialization data (the lightblock or the
+genesis file) are consistent with the blockchain. This is subjective
+initialization and it cannot be checked locally.
+
+### Invariants
+
+#### **[LC-INV-PEERLIST.1]:**
+
+The peer list contains a primary and a secondary.
+
+> If the invariant is violated, the light client does not have enough
+> peers to download headers from. As a result, the light client
+> needs to terminate in case this invariant is violated.
+
+## Supervisor
+
+### Outline
+
+The supervisor implements the functionality of the lightclient. It is
+initialized with a genesis file or with a lightblock the user
+trusts. This initialization is subjective, that is, the security of
+the lightclient is based on the validity of the input. If the genesis
+file or the lightblock deviate from the actual ones on the blockchain,
+the lightclient provides no guarantees.
+
+After initialization, the supervisor awaits an input, that is, the
+height of the next lightblock that should be obtained. Then it
+downloads, verifies, and cross-checks a lightblock, and if all tests
+go through, the light block (and possibly other lightblocks) are added
+to the lightstore, which is returned in an output event to the user.
+
+The following main loop does the interaction with the user (input,
+output) and calls the following two functions:
+
+- `InitLightClient`: it initializes the lightstore either with the
+  provided lightblock or with the lightblock that corresponds to the
+  first block generated by the blockchain (by the validators defined
+  by the genesis file)
+- `VerifyAndDetect`: takes as input a lightstore and a height and
+  returns the updated lightstore.
+
+#### **[LC-FUNC-SUPERVISOR.1]:**
+
+```go
+func Sequential-Supervisor (initData LCInitData) (Error) {
+
+    lightStore, result := InitLightClient(initData);
+    if result != OK {
+        return result;
+    }
+
+    loop {
+        // get the next height
+        nextHeight := input();
+
+        lightStore, result := VerifyAndDetect(lightStore, nextHeight);
+
+        if result == OK {
+            output(lightStore.Get(nextHeight));
+            // we only output a trusted lightblock
+        }
+        else {
+            return result
+        }
+        // QUESTION: is it OK to generate output event in normal case,
+        // and terminate with failure in the (light client) attack case?
+
+    }
+}
+```
+
+- Implementation remark
+    - infinite loop unless a light client attack is detected
+    - In typical implementations (e.g., the one in Rust),
+      there are multiple input actions:
+      `VerifytoLatest`, `LatestTrusted`, and `GetStatus`. The
+      information can be easily obtained from the lightstore, so that
+      we do not treat these requests explicitly here but just consider
+      the request for a block of a given height, which requires more
+      involved computation and communication.
+- Expected precondition
+    - *LCInitData* contains a genesis file or a lightblock.
+- Expected postcondition
+    - if a light client attack is detected: it stops and submits
+      evidence (in `InitLightClient` or `VerifyAndDetect`)
+    - otherwise: none. It runs forever.
+- Invariant: *lightStore* contains trusted lightblocks only.
+- Error condition
+    - if `InitLightClient` or `VerifyAndDetect` fails (if an attack is
+      detected, or if [LCV-INV-TP.1] is violated)
+
+----
+
+### Details of the Functions
+
+#### Initialization
+
+The light client is based on subjective initialization. It has to
+trust the initial data given to it by the user. It cannot do any
+attack detection. So either upon initialization we obtain a
+lightblock and just initialize the lightstore with it, or, in case of a
+genesis file, we download, verify, and cross-check the first block to
+initialize the lightstore with this first block. The reason is that
+we want to maintain [LCV-INV-TP.1] from the beginning.
+
+> If the lightclient is initialized with a lightblock, one might think
+> it may increase trust when one cross-checks the initial light
+> block. However, if a peer provides a conflicting
+> lightblock, the question is to distinguish the case of a
+> [bogus](https://informal.systems) block (upon which operation should proceed) from a
+> [light client attack](https://informal.systems) (upon which operation should stop). In
+> case of a bogus block, the lightclient might be forced to do
+> backwards verification until the blocks are out of the trusting
+> period, to make sure no previous validator set could have generated
+> the bogus block, which effectively opens up a DoS attack on the lightclient
+> without adding effective robustness.
+
+#### **[LC-FUNC-INIT.1]:**
+
+```go
+func InitLightClient (initData LCInitData) (LightStore, Error) {
+
+    if LCInitData.LightBlock != nil {
+        // we trust the provided initial block.
+        newBlock := LCInitData.LightBlock
+    }
+    else {
+        genesisBlock := makeblock(initData.genesisDoc);
+
+        result := NoResult;
+        while result != ResultSuccess {
+            current = FetchLightBlock(PeerList.primary(), genesisBlock.Header.Height + 1)
+            // QUESTION: is the height with "+1" OK?
+
+            if CANNOT_VERIFY == ValidAndVerify(genesisBlock, current) {
+                Replace_Primary();
+            }
+            else {
+                result = ResultSuccess
+            }
+        }
+
+        // cross-check
+        auxLS := new LightStore
+        auxLS.Add(current)
+        Evidences := AttackDetector(genesisBlock, auxLS)
+        if Evidences.Empty {
+            newBlock := current
+        }
+        else {
+            // [LC-SUBMIT-EVIDENCE.1]
+            submitEvidence(Evidences);
+            return(nil, ErrorAttack);
+        }
+    }
+
+    lightStore := new LightStore;
+    lightStore.Add(newBlock);
+    return (lightStore, OK);
+}
+
+```
+
+- Implementation remark
+    - none
+- Expected precondition
+    - *LCInitData* contains either a genesis file or a lightblock
+    - if genesis, it passes `ValidateAndComplete()`, see [Tendermint](https://informal.systems)
+- Expected postcondition
+    - *lightStore* initialized with trusted lightblock. It has either been
+      cross-checked (from genesis) or it has initial trust from the
+      user.
+- Error condition
+    - if precondition is violated
+    - empty peerList
+
+----
+
+#### Main verification and detection logic
+
+#### **[LC-FUNC-MAIN-VERIF-DETECT.1]:**
+
+```go
+func VerifyAndDetect (lightStore LightStore, targetHeight Height)
+                     (LightStore, Result) {
+
+    b1, r1 = lightStore.Get(targetHeight)
+    if r1 == true {
+        if b1.State == StateTrusted {
+            // block already there and trusted
+            return (lightStore, ResultSuccess)
+        }
+        else {
+            // We have a lightblock in the store, but it has not been
+            // cross-checked by now. We do that now.
+            root_of_trust, auxLS := lightStore.TraceTo(b1);
+
+            // Cross-check
+            Evidences := AttackDetector(root_of_trust, auxLS);
+            if Evidences.Empty {
+                // no attack detected, we trust the new lightblock
+                lightStore.Update(auxLS.Latest(),
+                                  StateTrusted,
+                                  auxLS.Latest().verification-root);
+                return (lightStore, OK);
+            }
+            else {
+                // there is an attack, we exit
+                submitEvidence(Evidences);
+                return(lightStore, ErrorAttack);
+            }
+        }
+    }
+
+    // get the lightblock with maximum height smaller than targetHeight
+    // would typically be the highest, if we always move forward
+    root_of_trust, r2 = lightStore.LatestPrevious(targetHeight);
+
+    if r2 == false {
+        // there is no lightblock from which we can do forward
+        // (skipping) verification. Thus we have to go backwards.
+        // No cross-check needed. We trust hashes. Therefore, we
+        // directly return the result
+        return Backwards(primary, lightStore.Lowest(), targetHeight)
+    }
+    else {
+        // Forward verification + detection
+        result := NoResult;
+        while result != ResultSuccess {
+            verifiedLS, result := VerifyToTarget(primary,
+                                                 root_of_trust,
+                                                 targetHeight);
+            if result == ResultFailure {
+                // pick new primary (promote a secondary to primary)
+                Replace_Primary(root_of_trust);
+            }
+            else if result == ResultExpired {
+                return (lightStore, result)
+            }
+        }
+
+        // Cross-check
+        Evidences := AttackDetector(root_of_trust, verifiedLS);
+        if Evidences.Empty {
+            // no attack detected, we trust the new lightblock
+            verifiedLS.Update(verifiedLS.Latest(),
+                              StateTrusted,
+                              verifiedLS.Latest().verification-root);
+            lightStore.store_chain(verifiedLS);
+            return (lightStore, OK);
+        }
+        else {
+            // there is an attack, we exit
+            return(lightStore, ErrorAttack);
+        }
+    }
+}
+```
+
+- Implementation remark
+    - none
+- Expected precondition
+    - none
+- Expected postcondition
+    - lightblock of height *targetHeight* (and possibly additional blocks) added to *lightStore*
+- Error condition
+    - an attack is detected
+    - [LC-INV-PEERLIST.1] is violated
+
+----
diff --git a/spec/light-client/supervisor/supervisor_001_draft.tla b/spec/light-client/supervisor/supervisor_001_draft.tla
new file mode 100644
index 0000000000..949a7c0200
--- /dev/null
+++ b/spec/light-client/supervisor/supervisor_001_draft.tla
@@ -0,0 +1,71 @@
+------------------------- MODULE supervisor_001_draft ------------------------
+(*
+This is the beginning of a spec that will eventually use the verification and detector APIs
+*)
+
+EXTENDS Integers, FiniteSets
+
+VARIABLES
+    state,
+    output
+
+vars == <<state, output>>
+
+CONSTANT
+    INITDATA
+
+Init ==
+    /\ state = "Init"
+    /\ output = "none"
+
+NextInit ==
+    /\ state = "Init"
+    /\ \/ state' = "EnterLoop"
+       \/ state' = "FailedToInitialize"
+    /\ UNCHANGED output
+
+NextVerifyToTarget ==
+    /\ state = "EnterLoop"
+    /\ \/ state' = "EnterLoop" \* replace primary
+       \/ state' = "EnterDetect"
+       \/ state' = "ExhaustedPeersPrimary"
+    /\ UNCHANGED output
+
+NextAttackDetector ==
+    /\ state = "EnterDetect"
+    /\ \/ state' = "NoEvidence"
+       \/ state' = "EvidenceFound"
+       \/ state' = "ExhaustedPeersSecondaries"
+    /\ UNCHANGED output
+
+NextVerifyAndDetect ==
+    \/ NextVerifyToTarget
+    \/ NextAttackDetector
+
+NextOutput ==
+    /\ state = "NoEvidence"
+    /\ state' = "EnterLoop"
+    /\ output' = "data" \* to generate a trace
+
+NextTerminated ==
+    /\ \/ state = "FailedToInitialize"
+       \/ state = "ExhaustedPeersPrimary"
+       \/ state = "EvidenceFound"
+       \/ state = "ExhaustedPeersSecondaries"
+    /\ UNCHANGED vars
+
+Next ==
+    \/ NextInit
+    \/ NextVerifyAndDetect
+    \/ NextOutput
+    \/ NextTerminated
+
+InvEnoughPeers ==
+    /\ state /= "ExhaustedPeersPrimary"
+    /\ state /= "ExhaustedPeersSecondaries"
+
+
+=============================================================================
+\* Modification History
+\* Last modified Sun Oct 18 11:48:45 CEST 2020 by widder
+\* Created Sun Oct 18 11:18:53 CEST 2020 by widder
diff --git a/spec/light-client/supervisor/supervisor_002_draft.md b/spec/light-client/supervisor/supervisor_002_draft.md
new file mode 100644
index 0000000000..691196ac55
--- /dev/null
+++ b/spec/light-client/supervisor/supervisor_002_draft.md
@@ -0,0 +1,131 @@
+# Draft of Light Client Supervisor for discussion
+
+## Modification to the initialization
+
+The lightclient is initialized with LCInitData.
+
+### **[LC-DATA-INIT.2]**
+
+```go
+type LCInitData struct {
+    TrustedBlock   LightBlock
+    Genesis        GenesisDoc
+    TrustedHash    []byte
+    TrustedHeight  int64
+}
+```
+
+where only one of the components must be provided. `GenesisDoc` is
+defined in the [Tendermint
+Types](https://github.com/tendermint/tendermint/blob/master/types/genesis.go).
+
+### Initialization
+
+The light client is based on subjective initialization. It has to
+trust the initial data given to it by the user. It cannot perform any
+detection of an attack yet; instead, it requires an initial point of trust.
+There are three forms of initial data which are used to obtain the
+first trusted block:
+
+- A trusted block from a prior initialization
+- A trusted height and hash
+- A genesis file
+
+The golang light client implementation checks this initial data in that
+order; first attempting to find a trusted block from the trusted store,
+then acquiring a light block from the primary at the trusted height and matching
+the hash, or finally checking for a genesis file to verify the initial header.
+
+The light client doesn't need to check if the trusted block is within the
+trusted period because it already trusts it; however, if the light block is
+outside the trust period, there is a higher chance the light client won't be
+able to verify anything.
+
+Cross-checking this trusted block with providers upon initialization is helpful
+for ensuring that the node is responsive and correctly configured, but it does not
+increase trust, since proving that a conflicting block constitutes a
+[light client attack](https://github.com/tendermint/tendermint/blob/master/spec/light-client/detection/detection_003_reviewed.md#tmbc-lc-attack1)
+and not just a [bogus](https://github.com/tendermint/tendermint/blob/master/spec/light-client/detection/detection_003_reviewed.md#tmbc-bogus1) block could require
+performing backwards verification beyond the trusted period, a fruitless
+endeavour.
+
+However, on the premise that it is better to fail early than late, the golang
+light client implementation performs a consistency check on all providers
+and errors if one returns a different header, allowing the user
+the opportunity to reinitialize.
+
+#### **[LC-FUNC-INIT.2]:**
+
+```go
+func InitLightClient(initData LCInitData) (LightStore, Error) {
+    var initialBlock LightBlock
+
+    switch {
+    case LCInitData.TrustedBlock != nil:
+        // we trust the block from a prior initialization
+        initialBlock = LCInitData.TrustedBlock
+
+    case LCInitData.TrustedHash != nil:
+        untrustedBlock := FetchLightBlock(PeerList.Primary(), LCInitData.TrustedHeight)
+
+        // verify that the hashes match
+        if untrustedBlock.Hash() != LCInitData.TrustedHash {
+            return nil, Error("Primary returned block with different hash")
+        }
+        // after checking the hash we now trust the block
+        initialBlock = untrustedBlock
+
+    case LCInitData.Genesis != nil:
+        untrustedBlock := FetchLightBlock(PeerList.Primary(), LCInitData.Genesis.InitialHeight)
+
+        // verify that 2/3+ of the validator set signed the untrustedBlock
+        if err := VerifyCommitFull(untrustedBlock.Commit, LCInitData.Genesis.Validators); err != nil {
+            return nil, err
+        }
+
+        // we can now trust the block
+        initialBlock = untrustedBlock
+
+    default:
+        return nil, Error("No initial data was provided")
+    }
+
+    // This is done in the golang version but is optional and not strictly part of the protocol
+    if err := CrossCheck(initialBlock, PeerList.Witnesses()); err != nil {
+        return nil, err
+    }
+
+    // initialize light store
+    lightStore := new LightStore;
+    lightStore.Add(initialBlock);
+    return (lightStore, OK);
+}
+
+func CrossCheck(lb LightBlock, witnesses []Provider) error {
+    for _, witness := range witnesses {
+        witnessBlock := FetchLightBlock(witness, lb.Height)
+
+        if witnessBlock.Hash() != lb.Hash() {
+            return Error("Witness has different block")
+        }
+    }
+    return OK
+}
+```
+
+- Implementation remark
+    - none
+- Expected precondition
+    - *LCInitData* contains either a genesis file or a lightblock
+    - if a genesis file is given, it passes `ValidateAndComplete()`; see [Tendermint](https://informal.systems)
+- Expected postcondition
+    - *lightStore* initialized with trusted lightblock. It has either been
+      cross-checked (from genesis) or it has initial trust from the
+      user.
+- Error condition + - if precondition is violated + - empty peerList + +---- + diff --git a/spec/light-client/verification/001bmc-apalache.csv b/spec/light-client/verification/001bmc-apalache.csv new file mode 100644 index 0000000000..8d5ad8ea3a --- /dev/null +++ b/spec/light-client/verification/001bmc-apalache.csv @@ -0,0 +1,49 @@ +no,filename,tool,timeout,init,inv,next,args +1,MC4_3_correct.tla,apalache,1h,,PositiveBeforeTrustedHeaderExpires,,--length=30 +2,MC4_3_correct.tla,apalache,1h,,CorrectnessInv,,--length=30 +3,MC4_3_correct.tla,apalache,1h,,PrecisionInv,,--length=30 +4,MC4_3_correct.tla,apalache,1h,,SuccessOnCorrectPrimaryAndChainOfTrust,,--length=30 +5,MC4_3_correct.tla,apalache,1h,,NoFailedBlocksOnSuccessInv,,--length=30 +6,MC4_3_correct.tla,apalache,1h,,StoredHeadersAreVerifiedOrNotTrustedInv,,--length=30 +7,MC4_3_correct.tla,apalache,1h,,CorrectPrimaryAndTimeliness,,--length=30 +8,MC4_3_correct.tla,apalache,1h,,Complexity,,--length=30 +9,MC4_3_faulty.tla,apalache,1h,,PositiveBeforeTrustedHeaderExpires,,--length=30 +10,MC4_3_faulty.tla,apalache,1h,,CorrectnessInv,,--length=30 +11,MC4_3_faulty.tla,apalache,1h,,PrecisionInv,,--length=30 +12,MC4_3_faulty.tla,apalache,1h,,SuccessOnCorrectPrimaryAndChainOfTrust,,--length=30 +13,MC4_3_faulty.tla,apalache,1h,,NoFailedBlocksOnSuccessInv,,--length=30 +14,MC4_3_faulty.tla,apalache,1h,,StoredHeadersAreVerifiedOrNotTrustedInv,,--length=30 +15,MC4_3_faulty.tla,apalache,1h,,CorrectPrimaryAndTimeliness,,--length=30 +16,MC4_3_faulty.tla,apalache,1h,,Complexity,,--length=30 +17,MC5_5_correct.tla,apalache,1h,,PositiveBeforeTrustedHeaderExpires,,--length=30 +18,MC5_5_correct.tla,apalache,1h,,CorrectnessInv,,--length=30 +19,MC5_5_correct.tla,apalache,1h,,PrecisionInv,,--length=30 +20,MC5_5_correct.tla,apalache,1h,,SuccessOnCorrectPrimaryAndChainOfTrust,,--length=30 +21,MC5_5_correct.tla,apalache,1h,,NoFailedBlocksOnSuccessInv,,--length=30 +22,MC5_5_correct.tla,apalache,1h,,StoredHeadersAreVerifiedOrNotTrustedInv,,--length=30 +23,MC5_5_correct.tla,apalache,1h,,CorrectPrimaryAndTimeliness,,--length=30 +24,MC5_5_correct.tla,apalache,1h,,Complexity,,--length=30 +25,MC5_5_faulty.tla,apalache,1h,,PositiveBeforeTrustedHeaderExpires,,--length=30 +26,MC5_5_faulty.tla,apalache,1h,,CorrectnessInv,,--length=30 +27,MC5_5_faulty.tla,apalache,1h,,PrecisionInv,,--length=30 +28,MC5_5_faulty.tla,apalache,1h,,SuccessOnCorrectPrimaryAndChainOfTrust,,--length=30 +29,MC5_5_faulty.tla,apalache,1h,,NoFailedBlocksOnSuccessInv,,--length=30 +30,MC5_5_faulty.tla,apalache,1h,,StoredHeadersAreVerifiedOrNotTrustedInv,,--length=30 +31,MC5_5_faulty.tla,apalache,1h,,CorrectPrimaryAndTimeliness,,--length=30 +32,MC5_5_faulty.tla,apalache,1h,,Complexity,,--length=30 +33,MC7_5_faulty.tla,apalache,10h,,PositiveBeforeTrustedHeaderExpires,,--length=30 +34,MC7_5_faulty.tla,apalache,10h,,CorrectnessInv,,--length=30 +35,MC7_5_faulty.tla,apalache,10h,,PrecisionInv,,--length=30 +36,MC7_5_faulty.tla,apalache,10h,,SuccessOnCorrectPrimaryAndChainOfTrust,,--length=30 +37,MC7_5_faulty.tla,apalache,10h,,NoFailedBlocksOnSuccessInv,,--length=30 +38,MC7_5_faulty.tla,apalache,10h,,StoredHeadersAreVerifiedOrNotTrustedInv,,--length=30 +39,MC7_5_faulty.tla,apalache,10h,,CorrectPrimaryAndTimeliness,,--length=30 +40,MC7_5_faulty.tla,apalache,10h,,Complexity,,--length=30 +41,MC4_7_faulty.tla,apalache,10h,,PositiveBeforeTrustedHeaderExpires,,--length=30 +42,MC4_7_faulty.tla,apalache,10h,,CorrectnessInv,,--length=30 +43,MC4_7_faulty.tla,apalache,10h,,PrecisionInv,,--length=30 
+44,MC4_7_faulty.tla,apalache,10h,,SuccessOnCorrectPrimaryAndChainOfTrust,,--length=30 +45,MC4_7_faulty.tla,apalache,10h,,NoFailedBlocksOnSuccessInv,,--length=30 +46,MC4_7_faulty.tla,apalache,10h,,StoredHeadersAreVerifiedOrNotTrustedInv,,--length=30 +47,MC4_7_faulty.tla,apalache,10h,,CorrectPrimaryAndTimeliness,,--length=30 +48,MC4_7_faulty.tla,apalache,10h,,Complexity,,--length=30 diff --git a/spec/light-client/verification/002bmc-apalache-ok.csv b/spec/light-client/verification/002bmc-apalache-ok.csv new file mode 100644 index 0000000000..eb26aa89e5 --- /dev/null +++ b/spec/light-client/verification/002bmc-apalache-ok.csv @@ -0,0 +1,55 @@ +no;filename;tool;timeout;init;inv;next;args +1;MC4_3_correct.tla;apalache;1h;;TargetHeightOnSuccessInv;;--length=5 +2;MC4_3_correct.tla;apalache;1h;;StoredHeadersAreVerifiedOrNotTrustedInv;;--length=5 +3;MC4_3_correct.tla;apalache;1h;;CorrectnessInv;;--length=5 +4;MC4_3_correct.tla;apalache;1h;;NoTrustOnFaultyBlockInv;;--length=5 +5;MC4_3_correct.tla;apalache;1h;;ProofOfChainOfTrustInv;;--length=5 +6;MC4_3_correct.tla;apalache;1h;;NoFailedBlocksOnSuccessInv;;--length=5 +7;MC4_3_correct.tla;apalache;1h;;Complexity;;--length=5 +8;MC4_3_correct.tla;apalache;1h;;ApiPostInv;;--length=5 +9;MC4_4_correct.tla;apalache;1h;;TargetHeightOnSuccessInv;;--length=7 +10;MC4_4_correct.tla;apalache;1h;;CorrectnessInv;;--length=7 +11;MC4_4_correct.tla;apalache;1h;;NoTrustOnFaultyBlockInv;;--length=7 +12;MC4_4_correct.tla;apalache;1h;;ProofOfChainOfTrustInv;;--length=7 +13;MC4_4_correct.tla;apalache;1h;;NoFailedBlocksOnSuccessInv;;--length=7 +14;MC4_4_correct.tla;apalache;1h;;Complexity;;--length=7 +15;MC4_4_correct.tla;apalache;1h;;ApiPostInv;;--length=7 +16;MC4_5_correct.tla;apalache;1h;;TargetHeightOnSuccessInv;;--length=11 +17;MC4_5_correct.tla;apalache;1h;;CorrectnessInv;;--length=11 +18;MC4_5_correct.tla;apalache;1h;;NoTrustOnFaultyBlockInv;;--length=11 +19;MC4_5_correct.tla;apalache;1h;;ProofOfChainOfTrustInv;;--length=11 +20;MC4_5_correct.tla;apalache;1h;;NoFailedBlocksOnSuccessInv;;--length=11 +21;MC4_5_correct.tla;apalache;1h;;Complexity;;--length=11 +22;MC4_5_correct.tla;apalache;1h;;ApiPostInv;;--length=11 +23;MC5_5_correct.tla;apalache;1h;;TargetHeightOnSuccessInv;;--length=11 +24;MC5_5_correct.tla;apalache;1h;;CorrectnessInv;;--length=11 +25;MC5_5_correct.tla;apalache;1h;;NoTrustOnFaultyBlockInv;;--length=11 +26;MC5_5_correct.tla;apalache;1h;;ProofOfChainOfTrustInv;;--length=11 +27;MC5_5_correct.tla;apalache;1h;;NoFailedBlocksOnSuccessInv;;--length=11 +28;MC5_5_correct.tla;apalache;1h;;Complexity;;--length=11 +29;MC5_5_correct.tla;apalache;1h;;ApiPostInv;;--length=11 +30;MC4_3_faulty.tla;apalache;1h;;TargetHeightOnSuccessInv;;--length=5 +31;MC4_3_faulty.tla;apalache;1h;;StoredHeadersAreVerifiedOrNotTrustedInv;;--length=5 +32;MC4_3_faulty.tla;apalache;1h;;CorrectnessInv;;--length=5 +33;MC4_3_faulty.tla;apalache;1h;;NoTrustOnFaultyBlockInv;;--length=5 +34;MC4_3_faulty.tla;apalache;1h;;ProofOfChainOfTrustInv;;--length=5 +35;MC4_3_faulty.tla;apalache;1h;;NoFailedBlocksOnSuccessInv;;--length=5 +36;MC4_3_faulty.tla;apalache;1h;;Complexity;;--length=5 +37;MC4_3_faulty.tla;apalache;1h;;ApiPostInv;;--length=5 +38;MC4_4_faulty.tla;apalache;1h;;TargetHeightOnSuccessInv;;--length=7 +39;MC4_4_faulty.tla;apalache;1h;;StoredHeadersAreVerifiedOrNotTrustedInv;;--length=7 +40;MC4_4_faulty.tla;apalache;1h;;CorrectnessInv;;--length=7 +41;MC4_4_faulty.tla;apalache;1h;;NoTrustOnFaultyBlockInv;;--length=7 +42;MC4_4_faulty.tla;apalache;1h;;ProofOfChainOfTrustInv;;--length=7 
+43;MC4_4_faulty.tla;apalache;1h;;NoFailedBlocksOnSuccessInv;;--length=7 +44;MC4_4_faulty.tla;apalache;1h;;Complexity;;--length=7 +45;MC4_4_faulty.tla;apalache;1h;;ApiPostInv;;--length=7 +46;MC4_5_faulty.tla;apalache;1h;;TargetHeightOnSuccessInv;;--length=11 +47;MC4_5_faulty.tla;apalache;1h;;StoredHeadersAreVerifiedOrNotTrustedInv;;--length=11 +48;MC4_5_faulty.tla;apalache;1h;;CorrectnessInv;;--length=11 +49;MC4_5_faulty.tla;apalache;1h;;NoTrustOnFaultyBlockInv;;--length=11 +50;MC4_5_faulty.tla;apalache;1h;;ProofOfChainOfTrustInv;;--length=11 +51;MC4_5_faulty.tla;apalache;1h;;NoFailedBlocksOnSuccessInv;;--length=11 +52;MC4_5_faulty.tla;apalache;1h;;Complexity;;--length=11 +53;MC4_5_faulty.tla;apalache;1h;;ApiPostInv;;--length=11 + diff --git a/spec/light-client/verification/003bmc-apalache-error.csv b/spec/light-client/verification/003bmc-apalache-error.csv new file mode 100644 index 0000000000..ad5ef96548 --- /dev/null +++ b/spec/light-client/verification/003bmc-apalache-error.csv @@ -0,0 +1,45 @@ +no;filename;tool;timeout;init;inv;next;args +1;MC4_3_correct.tla;apalache;1h;;StoredHeadersAreVerifiedInv;;--length=5 +2;MC4_3_correct.tla;apalache;1h;;PositiveBeforeTrustedHeaderExpires;;--length=5 +3;MC4_3_correct.tla;apalache;1h;;CorrectPrimaryAndTimeliness;;--length=5 +4;MC4_3_correct.tla;apalache;1h;;PrecisionInv;;--length=5 +5;MC4_3_correct.tla;apalache;1h;;PrecisionBuggyInv;;--length=5 +6;MC4_3_correct.tla;apalache;1h;;SuccessOnCorrectPrimaryAndChainOfTrustGlobal;;--length=5 +7;MC4_3_correct.tla;apalache;1h;;SuccessOnCorrectPrimaryAndChainOfTrustLocal;;--length=5 +8;MC4_4_correct.tla;apalache;1h;;StoredHeadersAreVerifiedInv;;--length=7 +9;MC4_4_correct.tla;apalache;1h;;PositiveBeforeTrustedHeaderExpires;;--length=7 +10;MC4_4_correct.tla;apalache;1h;;CorrectPrimaryAndTimeliness;;--length=7 +11;MC4_4_correct.tla;apalache;1h;;PrecisionInv;;--length=7 +12;MC4_4_correct.tla;apalache;1h;;PrecisionBuggyInv;;--length=7 +13;MC4_4_correct.tla;apalache;1h;;SuccessOnCorrectPrimaryAndChainOfTrustGlobal;;--length=7 +14;MC4_4_correct.tla;apalache;1h;;SuccessOnCorrectPrimaryAndChainOfTrustLocal;;--length=7 +15;MC4_5_correct.tla;apalache;1h;;StoredHeadersAreVerifiedInv;;--length=11 +16;MC4_5_correct.tla;apalache;1h;;PositiveBeforeTrustedHeaderExpires;;--length=11 +17;MC4_5_correct.tla;apalache;1h;;CorrectPrimaryAndTimeliness;;--length=11 +18;MC4_5_correct.tla;apalache;1h;;PrecisionInv;;--length=11 +19;MC4_5_correct.tla;apalache;1h;;PrecisionBuggyInv;;--length=11 +20;MC4_5_correct.tla;apalache;1h;;SuccessOnCorrectPrimaryAndChainOfTrustGlobal;;--length=11 +21;MC4_5_correct.tla;apalache;1h;;SuccessOnCorrectPrimaryAndChainOfTrustLocal;;--length=11 +22;MC4_5_correct.tla;apalache;1h;;StoredHeadersAreVerifiedOrNotTrustedInv;;--length=11 +23;MC4_3_faulty.tla;apalache;1h;;StoredHeadersAreVerifiedInv;;--length=5 +24;MC4_3_faulty.tla;apalache;1h;;PositiveBeforeTrustedHeaderExpires;;--length=5 +25;MC4_3_faulty.tla;apalache;1h;;CorrectPrimaryAndTimeliness;;--length=5 +26;MC4_3_faulty.tla;apalache;1h;;PrecisionInv;;--length=5 +27;MC4_3_faulty.tla;apalache;1h;;PrecisionBuggyInv;;--length=5 +28;MC4_3_faulty.tla;apalache;1h;;SuccessOnCorrectPrimaryAndChainOfTrustGlobal;;--length=5 +29;MC4_3_faulty.tla;apalache;1h;;SuccessOnCorrectPrimaryAndChainOfTrustLocal;;--length=5 +30;MC4_4_faulty.tla;apalache;1h;;StoredHeadersAreVerifiedInv;;--length=7 +31;MC4_4_faulty.tla;apalache;1h;;PositiveBeforeTrustedHeaderExpires;;--length=7 +32;MC4_4_faulty.tla;apalache;1h;;CorrectPrimaryAndTimeliness;;--length=7 
+33;MC4_4_faulty.tla;apalache;1h;;PrecisionInv;;--length=7 +34;MC4_4_faulty.tla;apalache;1h;;PrecisionBuggyInv;;--length=7 +35;MC4_4_faulty.tla;apalache;1h;;SuccessOnCorrectPrimaryAndChainOfTrustGlobal;;--length=7 +36;MC4_4_faulty.tla;apalache;1h;;SuccessOnCorrectPrimaryAndChainOfTrustLocal;;--length=7 +37;MC4_5_faulty.tla;apalache;1h;;StoredHeadersAreVerifiedInv;;--length=11 +38;MC4_5_faulty.tla;apalache;1h;;PositiveBeforeTrustedHeaderExpires;;--length=11 +39;MC4_5_faulty.tla;apalache;1h;;CorrectPrimaryAndTimeliness;;--length=11 +40;MC4_5_faulty.tla;apalache;1h;;PrecisionInv;;--length=11 +41;MC4_5_faulty.tla;apalache;1h;;PrecisionBuggyInv;;--length=11 +42;MC4_5_faulty.tla;apalache;1h;;SuccessOnCorrectPrimaryAndChainOfTrustGlobal;;--length=11 +43;MC4_5_faulty.tla;apalache;1h;;SuccessOnCorrectPrimaryAndChainOfTrustLocal;;--length=11 +44;MC4_5_faulty.tla;apalache;1h;;StoredHeadersAreVerifiedOrNotTrustedInv;;--length=11 diff --git a/spec/light-client/verification/004bmc-apalache-ok.csv b/spec/light-client/verification/004bmc-apalache-ok.csv new file mode 100644 index 0000000000..bf4f53ea2a --- /dev/null +++ b/spec/light-client/verification/004bmc-apalache-ok.csv @@ -0,0 +1,10 @@ +no;filename;tool;timeout;init;inv;next;args +1;LCD_MC3_3_faulty.tla;apalache;1h;;CommonHeightOnEvidenceInv;;--length=10 +2;LCD_MC3_3_faulty.tla;apalache;1h;;AccuracyInv;;--length=10 +3;LCD_MC3_3_faulty.tla;apalache;1h;;PrecisionInvLocal;;--length=10 +4;LCD_MC3_4_faulty.tla;apalache;1h;;CommonHeightOnEvidenceInv;;--length=10 +5;LCD_MC3_4_faulty.tla;apalache;1h;;AccuracyInv;;--length=10 +6;LCD_MC3_4_faulty.tla;apalache;1h;;PrecisionInvLocal;;--length=10 +7;LCD_MC4_4_faulty.tla;apalache;1h;;CommonHeightOnEvidenceInv;;--length=10 +8;LCD_MC4_4_faulty.tla;apalache;1h;;AccuracyInv;;--length=10 +9;LCD_MC4_4_faulty.tla;apalache;1h;;PrecisionInvLocal;;--length=10 diff --git a/spec/light-client/verification/005bmc-apalache-error.csv b/spec/light-client/verification/005bmc-apalache-error.csv new file mode 100644 index 0000000000..1b9dd05ca9 --- /dev/null +++ b/spec/light-client/verification/005bmc-apalache-error.csv @@ -0,0 +1,4 @@ +no;filename;tool;timeout;init;inv;next;args +1;LCD_MC3_3_faulty.tla;apalache;1h;;PrecisionInvGrayZone;;--length=10 +2;LCD_MC3_4_faulty.tla;apalache;1h;;PrecisionInvGrayZone;;--length=10 +3;LCD_MC4_4_faulty.tla;apalache;1h;;PrecisionInvGrayZone;;--length=10 diff --git a/spec/light-client/verification/Blockchain_002_draft.tla b/spec/light-client/verification/Blockchain_002_draft.tla new file mode 100644 index 0000000000..f2ca5aba5a --- /dev/null +++ b/spec/light-client/verification/Blockchain_002_draft.tla @@ -0,0 +1,171 @@ +------------------------ MODULE Blockchain_002_draft ----------------------------- +(* + This is a high-level specification of Tendermint blockchain + that is designed specifically for the light client. + Validators have the voting power of one. If you like to model various + voting powers, introduce multiple copies of the same validator + (do not forget to give them unique names though). 
+ *)
+EXTENDS Integers, FiniteSets
+
+Min(a, b) == IF a < b THEN a ELSE b
+
+CONSTANT
+    AllNodes,
+      (* a set of all nodes that can act as validators (correct and faulty) *)
+    ULTIMATE_HEIGHT,
+      (* a maximal height that can be ever reached (modelling artifact) *)
+    TRUSTING_PERIOD
+      (* the period within which the validators are trusted *)
+
+Heights == 1..ULTIMATE_HEIGHT   (* possible heights *)
+
+(* A commit is just a set of nodes who have committed the block *)
+Commits == SUBSET AllNodes
+
+(* The set of all block headers that can be on the blockchain.
+   This is a simplified version of the Block data structure in the actual implementation. *)
+BlockHeaders == [
+  height: Heights,
+    \* the block height
+  time: Int,
+    \* the block timestamp in some integer units
+  lastCommit: Commits,
+    \* the nodes who have voted on the previous block, the set itself instead of a hash
+  (* in the implementation, only the hashes of V and NextV are stored in a block,
+     as V and NextV are stored in the application state *)
+  VS: SUBSET AllNodes,
+    \* the validators of this block. We store the validators instead of the hash.
+  NextVS: SUBSET AllNodes
+    \* the validators of the next block. We store the next validators instead of the hash.
+]
+
+(* A signed header is just a header together with a set of commits *)
+LightBlocks == [header: BlockHeaders, Commits: Commits]
+
+VARIABLES
+    now,
+        (* the current global time in integer units *)
+    blockchain,
+        (* A sequence of BlockHeaders, which gives us a bird's-eye view of the blockchain. *)
+    Faulty
+        (* A set of faulty nodes, which can act as validators. We assume that the set
+           of faulty processes is non-decreasing. If a process has recovered, it should
+           connect using a different id. *)
+
+(* all variables, to be used with UNCHANGED *)
+vars == <<now, blockchain, Faulty>>
+
+(* The set of all correct nodes in a state *)
+Corr == AllNodes \ Faulty
+
+(* APALACHE annotations *)
+a <: b == a \* type annotation
+
+NT == STRING
+NodeSet(S) == S <: {NT}
+EmptyNodeSet == NodeSet({})
+
+BT == [height |-> Int, time |-> Int, lastCommit |-> {NT}, VS |-> {NT}, NextVS |-> {NT}]
+
+LBT == [header |-> BT, Commits |-> {NT}]
+(* end of APALACHE annotations *)
+
+(****************************** BLOCKCHAIN ************************************)
+
+(* the header is still within the trusting period *)
+InTrustingPeriod(header) ==
+    now < header.time + TRUSTING_PERIOD
+
+(*
+ Test whether the nodes in pNodes have more than 2/3 of the voting power
+ among the nodes in pVS (every validator has voting power one).
+ *)
+TwoThirds(pVS, pNodes) ==
+    LET TP == Cardinality(pVS)
+        SP == Cardinality(pVS \intersect pNodes)
+    IN
+    3 * SP > 2 * TP \* when thinking in real numbers, not integers: SP > 2.0 / 3.0 * TP
+
+(*
+ Given a set of faulty nodes pFaultyNodes, test whether the correct nodes in pVS
+ hold more than 2/3 of the total voting power, i.e., more than twice the
+ voting power of the faulty nodes.
+ *)
+IsCorrectPower(pFaultyNodes, pVS) ==
+    LET FN == pFaultyNodes \intersect pVS   \* faulty nodes in pVS
+        CN == pVS \ pFaultyNodes            \* correct nodes in pVS
+        CP == Cardinality(CN)               \* power of the correct nodes
+        FP == Cardinality(FN)               \* power of the faulty nodes
+    IN
+    \* CP + FP = TP is the total voting power, so we write CP > 2.0 / 3 * TP as follows:
+    CP > 2 * FP \* Note: when FP = 0, this implies CP > 0.
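+
+(* Illustrative example (not part of the original spec): with
+   AllNodes = {n1, n2, n3, n4}, pFaultyNodes = {n4} and pVS = AllNodes,
+   we have CP = 3 and FP = 1, so CP > 2 * FP holds: the correct validators
+   hold more than 2/3 of the total voting power. *)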
+
+(* This is what we believe is the assumption about failures in Tendermint *)
+FaultAssumption(pFaultyNodes, pNow, pBlockchain) ==
+    \A h \in Heights:
+        pBlockchain[h].time + TRUSTING_PERIOD > pNow =>
+            IsCorrectPower(pFaultyNodes, pBlockchain[h].NextVS)
+
+(* Can a block be produced by a correct peer, or an authenticated Byzantine peer *)
+IsLightBlockAllowedByDigitalSignatures(ht, block) ==
+    \/ block.header = blockchain[ht] \* signed by correct and faulty (maybe)
+    \/ block.Commits \subseteq Faulty /\ block.header.height = ht /\ block.header.time >= 0 \* signed only by faulty
+
+(*
+  Initialize the blockchain to the ultimate height right in the initial states.
+  We pick the faulty validators statically, but that should not affect the light client.
+ *)
+InitToHeight ==
+  /\ Faulty \in SUBSET AllNodes \* some nodes may fail
+  \* pick the validator sets and last commits
+  /\ \E vs, lastCommit \in [Heights -> SUBSET AllNodes]:
+     \E timestamp \in [Heights -> Int]:
+        \* now is at least as early as the timestamp in the last block
+        /\ \E tm \in Int: now = tm /\ tm >= timestamp[ULTIMATE_HEIGHT]
+        \* the genesis starts on day 1
+        /\ timestamp[1] = 1
+        /\ vs[1] = AllNodes
+        /\ lastCommit[1] = EmptyNodeSet
+        /\ \A h \in Heights \ {1}:
+          /\ lastCommit[h] \subseteq vs[h - 1]   \* the non-validators cannot commit
+          /\ TwoThirds(vs[h - 1], lastCommit[h]) \* the commit has >2/3 of validator votes
+          /\ IsCorrectPower(Faulty, vs[h])       \* the correct validators have >2/3 of power
+          /\ timestamp[h] > timestamp[h - 1]     \* the time grows monotonically
+          /\ timestamp[h] < timestamp[h - 1] + TRUSTING_PERIOD \* but not too fast
+        \* form the block chain out of validator sets and commits (this makes apalache faster)
+        /\ blockchain = [h \in Heights |->
+             [height |-> h,
+              time |-> timestamp[h],
+              VS |-> vs[h],
+              NextVS |-> IF h < ULTIMATE_HEIGHT THEN vs[h + 1] ELSE AllNodes,
+              lastCommit |-> lastCommit[h]]
+             ] \******
+
+
+(* is the blockchain in the faulty zone where the Tendermint security model does not apply *)
+InFaultyZone ==
+    ~FaultAssumption(Faulty, now, blockchain)
+
+(********************* BLOCKCHAIN ACTIONS ********************************)
+(*
+  Advance the clock by zero or more time units.
+  *)
+AdvanceTime ==
+    \E tm \in Int: tm >= now /\ now' = tm
+    /\ UNCHANGED <<blockchain, Faulty>>
+
+(*
+  One more process fails. As a result, the blockchain may move into the faulty zone.
+  The light client is not using this action, as the faults are picked in the initial state.
+  However, this action may be useful when reasoning about fork detection.
+ *)
+OneMoreFault ==
+    /\ \E n \in AllNodes \ Faulty:
+        /\ Faulty' = Faulty \cup {n}
+        /\ Faulty' /= AllNodes \* at least one process remains non-faulty
+    /\ UNCHANGED <<now, blockchain>>
+=============================================================================
+\* Modification History
+\* Last modified Wed Jun 10 14:10:54 CEST 2020 by igor
+\* Created Fri Oct 11 15:45:11 CEST 2019 by igor
diff --git a/spec/light-client/verification/Blockchain_003_draft.tla b/spec/light-client/verification/Blockchain_003_draft.tla
new file mode 100644
index 0000000000..2b37c1b181
--- /dev/null
+++ b/spec/light-client/verification/Blockchain_003_draft.tla
@@ -0,0 +1,164 @@
+------------------------ MODULE Blockchain_003_draft -----------------------------
+(*
+  This is a high-level specification of Tendermint blockchain
+  that is designed specifically for the light client.
+  Validators have the voting power of one.
If you like to model various
+  voting powers, introduce multiple copies of the same validator
+  (do not forget to give them unique names though).
+ *)
+EXTENDS Integers, FiniteSets
+
+Min(a, b) == IF a < b THEN a ELSE b
+
+CONSTANT
+    AllNodes,
+      (* a set of all nodes that can act as validators (correct and faulty) *)
+    ULTIMATE_HEIGHT,
+      (* a maximal height that can be ever reached (modelling artifact) *)
+    TRUSTING_PERIOD
+      (* the period within which the validators are trusted *)
+
+Heights == 1..ULTIMATE_HEIGHT   (* possible heights *)
+
+(* A commit is just a set of nodes who have committed the block *)
+Commits == SUBSET AllNodes
+
+(* The set of all block headers that can be on the blockchain.
+   This is a simplified version of the Block data structure in the actual implementation. *)
+BlockHeaders == [
+  height: Heights,
+    \* the block height
+  time: Int,
+    \* the block timestamp in some integer units
+  lastCommit: Commits,
+    \* the nodes who have voted on the previous block, the set itself instead of a hash
+  (* in the implementation, only the hashes of V and NextV are stored in a block,
+     as V and NextV are stored in the application state *)
+  VS: SUBSET AllNodes,
+    \* the validators of this block. We store the validators instead of the hash.
+  NextVS: SUBSET AllNodes
+    \* the validators of the next block. We store the next validators instead of the hash.
+]
+
+(* A signed header is just a header together with a set of commits *)
+LightBlocks == [header: BlockHeaders, Commits: Commits]
+
+VARIABLES
+    refClock,
+        (* the current global time in integer units as perceived by the reference chain *)
+    blockchain,
+        (* A sequence of BlockHeaders, which gives us a bird's-eye view of the blockchain. *)
+    Faulty
+        (* A set of faulty nodes, which can act as validators. We assume that the set
+           of faulty processes is non-decreasing. If a process has recovered, it should
+           connect using a different id. *)
+
+(* all variables, to be used with UNCHANGED *)
+vars == <<refClock, blockchain, Faulty>>
+
+(* The set of all correct nodes in a state *)
+Corr == AllNodes \ Faulty
+
+(* APALACHE annotations *)
+a <: b == a \* type annotation
+
+NT == STRING
+NodeSet(S) == S <: {NT}
+EmptyNodeSet == NodeSet({})
+
+BT == [height |-> Int, time |-> Int, lastCommit |-> {NT}, VS |-> {NT}, NextVS |-> {NT}]
+
+LBT == [header |-> BT, Commits |-> {NT}]
+(* end of APALACHE annotations *)
+
+(****************************** BLOCKCHAIN ************************************)
+
+(* the header is still within the trusting period *)
+InTrustingPeriod(header) ==
+    refClock < header.time + TRUSTING_PERIOD
+
+(*
+ Test whether the nodes in pNodes have more than 2/3 of the voting power
+ among the nodes in pVS (every validator has voting power one).
+ *)
+TwoThirds(pVS, pNodes) ==
+    LET TP == Cardinality(pVS)
+        SP == Cardinality(pVS \intersect pNodes)
+    IN
+    3 * SP > 2 * TP \* when thinking in real numbers, not integers: SP > 2.0 / 3.0 * TP
+
+(*
+ Given a set of faulty nodes, test whether the faulty validators in pVS
+ hold strictly less than the given ratio of the total voting power.
+
+ Parameters:
+   - pFaultyNodes is a set of nodes that are considered faulty
+   - pVS is a set of all validators, maybe including Faulty, intersecting with it, etc.
+   - maxRatio is a pair <<a, b>> that limits the ratio a / b of the faulty
+     validators from above (exclusive)
+ *)
+FaultyValidatorsFewerThan(pFaultyNodes, pVS, maxRatio) ==
+    LET FN == pFaultyNodes \intersect pVS   \* faulty nodes in pVS
+        CN == pVS \ pFaultyNodes            \* correct nodes in pVS
+        CP == Cardinality(CN)               \* power of the correct nodes
+        FP == Cardinality(FN)               \* power of the faulty nodes
+    IN
+    \* CP + FP = TP is the total voting power
+    LET TP == CP + FP IN
+    FP * maxRatio[2] < TP * maxRatio[1]
+
+(* Can a block be produced by a correct peer, or an authenticated Byzantine peer *)
+IsLightBlockAllowedByDigitalSignatures(ht, block) ==
+    \/ block.header = blockchain[ht] \* signed by correct and faulty (maybe)
+    \/ /\ block.Commits \subseteq Faulty
+       /\ block.header.height = ht
+       /\ block.header.time >= 0 \* signed only by faulty
+
+(*
+  Initialize the blockchain to the ultimate height right in the initial states.
+  We pick the faulty validators statically, but that should not affect the light client.
+
+  Parameters:
+    - pMaxFaultyRatioExclusive is a pair <<a, b>> that bounds the number of
+      faulty validators in each block by the ratio a / b (exclusive)
+ *)
+InitToHeight(pMaxFaultyRatioExclusive) ==
+  /\ Faulty \in SUBSET AllNodes \* some nodes may fail
+  \* pick the validator sets and last commits
+  /\ \E vs, lastCommit \in [Heights -> SUBSET AllNodes]:
+     \E timestamp \in [Heights -> Int]:
+        \* refClock is at least as early as the timestamp in the last block
+        /\ \E tm \in Int: refClock = tm /\ tm >= timestamp[ULTIMATE_HEIGHT]
+        \* the genesis starts on day 1
+        /\ timestamp[1] = 1
+        /\ vs[1] = AllNodes
+        /\ lastCommit[1] = EmptyNodeSet
+        /\ \A h \in Heights \ {1}:
+          /\ lastCommit[h] \subseteq vs[h - 1]   \* the non-validators cannot commit
+          /\ TwoThirds(vs[h - 1], lastCommit[h]) \* the commit has >2/3 of validator votes
+          \* the faulty validators have the power below the threshold
+          /\ FaultyValidatorsFewerThan(Faulty, vs[h], pMaxFaultyRatioExclusive)
+          /\ timestamp[h] > timestamp[h - 1]     \* the time grows monotonically
+          /\ timestamp[h] < timestamp[h - 1] + TRUSTING_PERIOD \* but not too fast
+        \* form the block chain out of validator sets and commits (this makes apalache faster)
+        /\ blockchain = [h \in Heights |->
+             [height |-> h,
+              time |-> timestamp[h],
+              VS |-> vs[h],
+              NextVS |-> IF h < ULTIMATE_HEIGHT THEN vs[h + 1] ELSE AllNodes,
+              lastCommit |-> lastCommit[h]]
+             ] \******
+
+(********************* BLOCKCHAIN ACTIONS ********************************)
+(*
+  Advance the clock by zero or more time units.
+  *)
+AdvanceTime ==
+    /\ \E tm \in Int: tm >= refClock /\ refClock' = tm
+    /\ UNCHANGED <<blockchain, Faulty>>
+
+=============================================================================
+\* Modification History
+\* Last modified Wed Jun 10 14:10:54 CEST 2020 by igor
+\* Created Fri Oct 11 15:45:11 CEST 2019 by igor
diff --git a/spec/light-client/verification/Blockchain_A_1.tla b/spec/light-client/verification/Blockchain_A_1.tla
new file mode 100644
index 0000000000..70f59bf975
--- /dev/null
+++ b/spec/light-client/verification/Blockchain_A_1.tla
@@ -0,0 +1,171 @@
+------------------------ MODULE Blockchain_A_1 -----------------------------
+(*
+  This is a high-level specification of Tendermint blockchain
+  that is designed specifically for the light client.
+  Validators have the voting power of one. If you like to model various
+  voting powers, introduce multiple copies of the same validator
+  (do not forget to give them unique names though).
+ *)
+EXTENDS Integers, FiniteSets
+
+Min(a, b) == IF a < b THEN a ELSE b
+
+CONSTANT
+    AllNodes,
+      (* a set of all nodes that can act as validators (correct and faulty) *)
+    ULTIMATE_HEIGHT,
+      (* a maximal height that can be ever reached (modelling artifact) *)
+    TRUSTING_PERIOD
+      (* the period within which the validators are trusted *)
+
+Heights == 1..ULTIMATE_HEIGHT   (* possible heights *)
+
+(* A commit is just a set of nodes who have committed the block *)
+Commits == SUBSET AllNodes
+
+(* The set of all block headers that can be on the blockchain.
+   This is a simplified version of the Block data structure in the actual implementation. *)
+BlockHeaders == [
+  height: Heights,
+    \* the block height
+  time: Int,
+    \* the block timestamp in some integer units
+  lastCommit: Commits,
+    \* the nodes who have voted on the previous block, the set itself instead of a hash
+  (* in the implementation, only the hashes of V and NextV are stored in a block,
+     as V and NextV are stored in the application state *)
+  VS: SUBSET AllNodes,
+    \* the validators of this block. We store the validators instead of the hash.
+  NextVS: SUBSET AllNodes
+    \* the validators of the next block. We store the next validators instead of the hash.
+]
+
+(* A signed header is just a header together with a set of commits *)
+LightBlocks == [header: BlockHeaders, Commits: Commits]
+
+VARIABLES
+    now,
+        (* the current global time in integer units *)
+    blockchain,
+        (* A sequence of BlockHeaders, which gives us a bird's-eye view of the blockchain. *)
+    Faulty
+        (* A set of faulty nodes, which can act as validators. We assume that the set
+           of faulty processes is non-decreasing. If a process has recovered, it should
+           connect using a different id. *)
+
+(* all variables, to be used with UNCHANGED *)
+vars == <<now, blockchain, Faulty>>
+
+(* The set of all correct nodes in a state *)
+Corr == AllNodes \ Faulty
+
+(* APALACHE annotations *)
+a <: b == a \* type annotation
+
+NT == STRING
+NodeSet(S) == S <: {NT}
+EmptyNodeSet == NodeSet({})
+
+BT == [height |-> Int, time |-> Int, lastCommit |-> {NT}, VS |-> {NT}, NextVS |-> {NT}]
+
+LBT == [header |-> BT, Commits |-> {NT}]
+(* end of APALACHE annotations *)
+
+(****************************** BLOCKCHAIN ************************************)
+
+(* the header is still within the trusting period *)
+InTrustingPeriod(header) ==
+    now <= header.time + TRUSTING_PERIOD
+
+(*
+ Test whether the nodes in pNodes have more than 2/3 of the voting power
+ among the nodes in pVS (every validator has voting power one).
+ *)
+TwoThirds(pVS, pNodes) ==
+    LET TP == Cardinality(pVS)
+        SP == Cardinality(pVS \intersect pNodes)
+    IN
+    3 * SP > 2 * TP \* when thinking in real numbers, not integers: SP > 2.0 / 3.0 * TP
+
+(*
+ Given a set of faulty nodes pFaultyNodes, test whether the correct nodes in pVS
+ hold more than 2/3 of the total voting power, i.e., more than twice the
+ voting power of the faulty nodes.
+ *)
+IsCorrectPower(pFaultyNodes, pVS) ==
+    LET FN == pFaultyNodes \intersect pVS   \* faulty nodes in pVS
+        CN == pVS \ pFaultyNodes            \* correct nodes in pVS
+        CP == Cardinality(CN)               \* power of the correct nodes
+        FP == Cardinality(FN)               \* power of the faulty nodes
+    IN
+    \* CP + FP = TP is the total voting power, so we write CP > 2.0 / 3 * TP as follows:
+    CP > 2 * FP \* Note: when FP = 0, this implies CP > 0.
+
+(* This is what we believe is the assumption about failures in Tendermint *)
+FaultAssumption(pFaultyNodes, pNow, pBlockchain) ==
+    \A h \in Heights:
+        pBlockchain[h].time + TRUSTING_PERIOD > pNow =>
+            IsCorrectPower(pFaultyNodes, pBlockchain[h].NextVS)
+
+(* Can a block be produced by a correct peer, or an authenticated Byzantine peer *)
+IsLightBlockAllowedByDigitalSignatures(ht, block) ==
+    \/ block.header = blockchain[ht] \* signed by correct and faulty (maybe)
+    \/ block.Commits \subseteq Faulty /\ block.header.height = ht \* signed only by faulty
+
+(*
+  Initialize the blockchain to the ultimate height right in the initial states.
+  We pick the faulty validators statically, but that should not affect the light client.
+ *)
+InitToHeight ==
+  /\ Faulty \in SUBSET AllNodes \* some nodes may fail
+  \* pick the validator sets and last commits
+  /\ \E vs, lastCommit \in [Heights -> SUBSET AllNodes]:
+     \E timestamp \in [Heights -> Int]:
+        \* now is at least as early as the timestamp in the last block
+        /\ \E tm \in Int: now = tm /\ tm >= timestamp[ULTIMATE_HEIGHT]
+        \* the genesis starts on day 1
+        /\ timestamp[1] = 1
+        /\ vs[1] = AllNodes
+        /\ lastCommit[1] = EmptyNodeSet
+        /\ \A h \in Heights \ {1}:
+          /\ lastCommit[h] \subseteq vs[h - 1]   \* the non-validators cannot commit
+          /\ TwoThirds(vs[h - 1], lastCommit[h]) \* the commit has >2/3 of validator votes
+          /\ IsCorrectPower(Faulty, vs[h])       \* the correct validators have >2/3 of power
+          /\ timestamp[h] > timestamp[h - 1]     \* the time grows monotonically
+          /\ timestamp[h] < timestamp[h - 1] + TRUSTING_PERIOD \* but not too fast
+        \* form the block chain out of validator sets and commits (this makes apalache faster)
+        /\ blockchain = [h \in Heights |->
+             [height |-> h,
+              time |-> timestamp[h],
+              VS |-> vs[h],
+              NextVS |-> IF h < ULTIMATE_HEIGHT THEN vs[h + 1] ELSE AllNodes,
+              lastCommit |-> lastCommit[h]]
+             ] \******
+
+
+(* is the blockchain in the faulty zone where the Tendermint security model does not apply *)
+InFaultyZone ==
+    ~FaultAssumption(Faulty, now, blockchain)
+
+(********************* BLOCKCHAIN ACTIONS ********************************)
+(*
+  Advance the clock by zero or more time units.
+  *)
+AdvanceTime ==
+    \E tm \in Int: tm >= now /\ now' = tm
+    /\ UNCHANGED <<blockchain, Faulty>>
+
+(*
+  One more process fails. As a result, the blockchain may move into the faulty zone.
+  The light client is not using this action, as the faults are picked in the initial state.
+  However, this action may be useful when reasoning about fork detection.
+ *)
+OneMoreFault ==
+    /\ \E n \in AllNodes \ Faulty:
+        /\ Faulty' = Faulty \cup {n}
+        /\ Faulty' /= AllNodes \* at least one process remains non-faulty
+    /\ UNCHANGED <<now, blockchain>>
+=============================================================================
+\* Modification History
+\* Last modified Wed Jun 10 14:10:54 CEST 2020 by igor
+\* Created Fri Oct 11 15:45:11 CEST 2019 by igor
diff --git a/spec/light-client/verification/LCVerificationApi_003_draft.tla b/spec/light-client/verification/LCVerificationApi_003_draft.tla
new file mode 100644
index 0000000000..909eab92b8
--- /dev/null
+++ b/spec/light-client/verification/LCVerificationApi_003_draft.tla
@@ -0,0 +1,192 @@
+-------------------- MODULE LCVerificationApi_003_draft --------------------------
+(**
+ * The common interface of the light client verification and detection.
+ *)
+EXTENDS Integers, FiniteSets
+
+\* the parameters of Light Client
+CONSTANTS
+  TRUSTING_PERIOD,
+    (* the period within which the validators are trusted *)
+  CLOCK_DRIFT,
+    (* the assumed precision of the clock *)
+  REAL_CLOCK_DRIFT,
+    (* the actual clock drift, which under normal circumstances should not
+       be larger than CLOCK_DRIFT (otherwise, there will be a bug) *)
+  FAULTY_RATIO
+    (* a pair <<a, b>> that limits the ratio of faulty validators in the blockchain
+       from above (exclusive). Tendermint security model prescribes 1 / 3. *)
+
+VARIABLES
+  localClock (* current time as measured by the light client *)
+
+(* the header is still within the trusting period *)
+InTrustingPeriodLocal(header) ==
+    \* note that the assumption about the drift reduces the period of trust
+    localClock < header.time + TRUSTING_PERIOD - CLOCK_DRIFT
+
+(* the header is still within the trusting period, even if the clock can go backwards *)
+InTrustingPeriodLocalSurely(header) ==
+    \* note that the assumption about the drift reduces the period of trust
+    localClock < header.time + TRUSTING_PERIOD - 2 * CLOCK_DRIFT
+
+(* ensure that the local clock does not drift far away from the global clock *)
+IsLocalClockWithinDrift(local, global) ==
+    /\ global - REAL_CLOCK_DRIFT <= local
+    /\ local <= global + REAL_CLOCK_DRIFT
+
+(**
+ * Check that the commits in an untrusted block form more than 1/3 of the next
+ * validators in a trusted header.
+ *)
+SignedByOneThirdOfTrusted(trusted, untrusted) ==
+  LET TP == Cardinality(trusted.header.NextVS)
+      SP == Cardinality(untrusted.Commits \intersect trusted.header.NextVS)
+  IN
+  3 * SP > TP
+
+(**
+ The first part of the precondition of ValidAndVerified, which does not take
+ the current time into account.
+
+ [LCV-FUNC-VALID.1::TLA-PRE-UNTIMED.1]
+ *)
+ValidAndVerifiedPreUntimed(trusted, untrusted) ==
+  LET thdr == trusted.header
+      uhdr == untrusted.header
+  IN
+  /\ thdr.height < uhdr.height
+     \* the trusted block has been created earlier
+  /\ thdr.time < uhdr.time
+  /\ untrusted.Commits \subseteq uhdr.VS
+  /\ LET TP == Cardinality(uhdr.VS)
+         SP == Cardinality(untrusted.Commits)
+     IN
+     3 * SP > 2 * TP
+  /\ thdr.height + 1 = uhdr.height => thdr.NextVS = uhdr.VS
+  (* As we do not have explicit hashes we ignore these three checks of the English spec:
+
+     1. "trusted.Commit is a commit is for the header trusted.Header,
+        i.e. it contains the correct hash of the header".
+     2. untrusted.Validators = hash(untrusted.Header.Validators)
+     3. untrusted.NextValidators = hash(untrusted.Header.NextValidators)
+   *)
+
+(**
+ Check the precondition of ValidAndVerified, including the time checks.
+
+ [LCV-FUNC-VALID.1::TLA-PRE.1]
+ *)
+ValidAndVerifiedPre(trusted, untrusted, checkFuture) ==
+  LET thdr == trusted.header
+      uhdr == untrusted.header
+  IN
+  /\ InTrustingPeriodLocal(thdr)
+  \* The untrusted block is not from the future (modulo clock drift).
+  \* Do the check, if it is required.
+  /\ checkFuture => uhdr.time < localClock + CLOCK_DRIFT
+  /\ ValidAndVerifiedPreUntimed(trusted, untrusted)
+
+
+(**
+ Check whether an untrusted block is valid and verifiable w.r.t. a trusted header.
+ This test does not take the current time into account; it only looks at the block structure.
+
+ [LCV-FUNC-VALID.1::TLA-UNTIMED.1]
+ *)
+ValidAndVerifiedUntimed(trusted, untrusted) ==
+    IF ~ValidAndVerifiedPreUntimed(trusted, untrusted)
+    THEN "INVALID"
+    ELSE IF untrusted.header.height = trusted.header.height + 1
+             \/ SignedByOneThirdOfTrusted(trusted, untrusted)
+         THEN "SUCCESS"
+         ELSE "NOT_ENOUGH_TRUST"
+
+(**
+ Check whether an untrusted block is valid and verifiable w.r.t. a trusted header.
+
+ [LCV-FUNC-VALID.1::TLA.1]
+ *)
+ValidAndVerified(trusted, untrusted, checkFuture) ==
+    IF ~ValidAndVerifiedPre(trusted, untrusted, checkFuture)
+    THEN "INVALID"
+    ELSE IF ~InTrustingPeriodLocal(untrusted.header)
+    (* We leave the following test for the documentation purposes.
+       The implementation should do this test, as signature verification may be slow.
+       In the TLA+ specification, ValidAndVerified happens in no time.
+     *)
+    THEN "FAILED_TRUSTING_PERIOD"
+    ELSE IF untrusted.header.height = trusted.header.height + 1
+             \/ SignedByOneThirdOfTrusted(trusted, untrusted)
+         THEN "SUCCESS"
+         ELSE "NOT_ENOUGH_TRUST"
+
+
+(**
+ The invariant of the light store that is not related to the blockchain
+ *)
+LightStoreInv(fetchedLightBlocks, lightBlockStatus) ==
+    \A lh, rh \in DOMAIN fetchedLightBlocks:
+        \* for every pair of stored headers that have been verified
+        \/ lh >= rh
+        \/ lightBlockStatus[lh] /= "StateVerified"
+        \/ lightBlockStatus[rh] /= "StateVerified"
+        \* either there is a verified header between them
+        \/ \E mh \in DOMAIN fetchedLightBlocks:
+            lh < mh /\ mh < rh /\ lightBlockStatus[mh] = "StateVerified"
+        \* or we can verify the right one using the left one (ignoring time)
+        \/ LET lhdr == fetchedLightBlocks[lh]
+               rhdr == fetchedLightBlocks[rh]
+           IN
+           "SUCCESS" = ValidAndVerifiedUntimed(lhdr, rhdr)
+
+(**
+ Correctness states that all the obtained headers are exactly like in the blockchain.
+
+ It is always the case that every verified header in LightStore was generated by
+ an instance of Tendermint consensus.
+
+ [LCV-DIST-SAFE.1::CORRECTNESS-INV.1]
+ *)
+CorrectnessInv(blockchain, fetchedLightBlocks, lightBlockStatus) ==
+    \A h \in DOMAIN fetchedLightBlocks:
+        lightBlockStatus[h] = "StateVerified" =>
+            fetchedLightBlocks[h].header = blockchain[h]
+
+(**
+ * When the light client terminates, there are no failed blocks.
+ * (Otherwise, someone lied to us.)
+ *)
+NoFailedBlocksOnSuccessInv(fetchedLightBlocks, lightBlockStatus) ==
+    \A h \in DOMAIN fetchedLightBlocks:
+        lightBlockStatus[h] /= "StateFailed"
+
+(**
+ The expected post-condition of VerifyToTarget.
+ *)
+VerifyToTargetPost(blockchain, isPeerCorrect,
+                   fetchedLightBlocks, lightBlockStatus,
+                   trustedHeight, targetHeight, finalState) ==
+  LET trustedHeader == fetchedLightBlocks[trustedHeight].header IN
+  \* The light client is not lying to us about the trusted block.
+  \* This is straightforward to detect.
+  /\ lightBlockStatus[trustedHeight] = "StateVerified"
+  /\ trustedHeight \in DOMAIN fetchedLightBlocks
+  /\ trustedHeader = blockchain[trustedHeight]
+  \* the invariants we have found in the light client verification
+  \* (note: there is a known problem with the trusting period here)
+  /\ isPeerCorrect
+      => CorrectnessInv(blockchain, fetchedLightBlocks, lightBlockStatus)
+  \* a correct peer should not fail the light client,
+  \* if the trusted block is in the trusting period
+  /\ isPeerCorrect /\ InTrustingPeriodLocalSurely(trustedHeader)
+      => finalState = "finishedSuccess"
+  /\ finalState = "finishedSuccess" =>
+      /\ lightBlockStatus[targetHeight] = "StateVerified"
+      /\ targetHeight \in DOMAIN fetchedLightBlocks
+      /\ NoFailedBlocksOnSuccessInv(fetchedLightBlocks, lightBlockStatus)
+      /\ LightStoreInv(fetchedLightBlocks, lightBlockStatus)
+
+
+==================================================================================
diff --git a/spec/light-client/verification/Lightclient_002_draft.tla b/spec/light-client/verification/Lightclient_002_draft.tla
new file mode 100644
index 0000000000..32c807f6e6
--- /dev/null
+++ b/spec/light-client/verification/Lightclient_002_draft.tla
@@ -0,0 +1,465 @@
+-------------------------- MODULE Lightclient_002_draft ----------------------------
+(**
+ * A state-machine specification of the lite client, following the English spec:
+ *
+ * https://github.com/informalsystems/tendermint-rs/blob/master/docs/spec/lightclient/verification.md
+ *)
+
+EXTENDS Integers, FiniteSets
+
+\* the parameters of Light Client
+CONSTANTS
+  TRUSTED_HEIGHT,
+    (* an index of the block header that the light client trusts by social consensus *)
+  TARGET_HEIGHT,
+    (* an index of the block header that the light client tries to verify *)
+  TRUSTING_PERIOD,
+    (* the period within which the validators are trusted *)
+  IS_PRIMARY_CORRECT
+    (* is primary correct? *)
+
+VARIABLES       (* see TypeOK below for the variable types *)
+  state,        (* the current state of the light client *)
+  nextHeight,   (* the next height to explore by the light client *)
+  nprobes       (* the lite client iteration, or the number of block tests *)
+
+(* the light store *)
+VARIABLES
+  fetchedLightBlocks, (* a function from heights to LightBlocks *)
+  lightBlockStatus,   (* a function from heights to block statuses *)
+  latestVerified      (* the latest verified block *)
+
+(* the variables of the lite client *)
+lcvars == <<state, nextHeight, fetchedLightBlocks, lightBlockStatus, latestVerified>>
+
+(* the light client previous state components, used for monitoring *)
+VARIABLES
+  prevVerified,
+  prevCurrent,
+  prevNow,
+  prevVerdict
+
+InitMonitor(verified, current, now, verdict) ==
+  /\ prevVerified = verified
+  /\ prevCurrent = current
+  /\ prevNow = now
+  /\ prevVerdict = verdict
+
+NextMonitor(verified, current, now, verdict) ==
+  /\ prevVerified' = verified
+  /\ prevCurrent' = current
+  /\ prevNow' = now
+  /\ prevVerdict' = verdict
+
+
+(******************* Blockchain instance ***********************************)
+
+\* the parameters that are propagated into Blockchain
+CONSTANTS
+  AllNodes
+    (* a set of all nodes that can act as validators (correct and faulty) *)
+
+\* the state variables of Blockchain, see Blockchain.tla for the details
+VARIABLES now, blockchain, Faulty
+
+\* All the variables of Blockchain. For some reason, BC!vars does not work
+bcvars == <<now, blockchain, Faulty>>
+
+(* Create an instance of Blockchain.
+   We could write EXTENDS Blockchain, but then all the constants and state variables
+   would be hidden inside the Blockchain module.
+ *)
+ULTIMATE_HEIGHT == TARGET_HEIGHT + 1
+
+BC == INSTANCE Blockchain_002_draft WITH
+  now <- now, blockchain <- blockchain, Faulty <- Faulty
+
+(************************** Lite client ************************************)
+
+(* the heights on which the light client is working *)
+HEIGHTS == TRUSTED_HEIGHT..TARGET_HEIGHT
+
+(* the control states of the lite client *)
+States == { "working", "finishedSuccess", "finishedFailure" }
+
+(**
+ Check the precondition of ValidAndVerified.
+
+ [LCV-FUNC-VALID.1::TLA-PRE.1]
+ *)
+ValidAndVerifiedPre(trusted, untrusted) ==
+  LET thdr == trusted.header
+      uhdr == untrusted.header
+  IN
+  /\ BC!InTrustingPeriod(thdr)
+  /\ thdr.height < uhdr.height
+     \* the trusted block has been created earlier (no drift here)
+  /\ thdr.time < uhdr.time
+     \* the untrusted block is not from the future
+  /\ uhdr.time < now
+  /\ untrusted.Commits \subseteq uhdr.VS
+  /\ LET TP == Cardinality(uhdr.VS)
+         SP == Cardinality(untrusted.Commits)
+     IN
+     3 * SP > 2 * TP
+  /\ thdr.height + 1 = uhdr.height => thdr.NextVS = uhdr.VS
+  (* As we do not have explicit hashes we ignore these three checks of the English spec:
+
+     1. "trusted.Commit is a commit is for the header trusted.Header,
+        i.e. it contains the correct hash of the header".
+     2. untrusted.Validators = hash(untrusted.Header.Validators)
+     3. untrusted.NextValidators = hash(untrusted.Header.NextValidators)
+   *)
+
+(**
+ * Check that the commits in an untrusted block form more than 1/3 of the next
+ * validators in a trusted header.
+ *)
+SignedByOneThirdOfTrusted(trusted, untrusted) ==
+  LET TP == Cardinality(trusted.header.NextVS)
+      SP == Cardinality(untrusted.Commits \intersect trusted.header.NextVS)
+  IN
+  3 * SP > TP
+
+(**
+ Check whether an untrusted block is valid and verifiable w.r.t. a trusted header.
+
+ [LCV-FUNC-VALID.1::TLA.1]
+ *)
+ValidAndVerified(trusted, untrusted) ==
+    IF ~ValidAndVerifiedPre(trusted, untrusted)
+    THEN "INVALID"
+    ELSE IF ~BC!InTrustingPeriod(untrusted.header)
+    (* We leave the following test for the documentation purposes.
+       The implementation should do this test, as signature verification may be slow.
+       In the TLA+ specification, ValidAndVerified happens in no time.
+     *)
+    THEN "FAILED_TRUSTING_PERIOD"
+    ELSE IF untrusted.header.height = trusted.header.height + 1
+             \/ SignedByOneThirdOfTrusted(trusted, untrusted)
+         THEN "SUCCESS"
+         ELSE "NOT_ENOUGH_TRUST"
+
+(*
+ Initial states of the light client.
+ Initially, only the trusted light block is present.
+ *)
+LCInit ==
+    /\ state = "working"
+    /\ nextHeight = TARGET_HEIGHT
+    /\ nprobes = 0  \* no tests have been done so far
+    /\ LET trustedBlock == blockchain[TRUSTED_HEIGHT]
+           trustedLightBlock == [header |-> trustedBlock, Commits |-> AllNodes]
+       IN
+        \* initially, fetchedLightBlocks is a function of one element, i.e., TRUSTED_HEIGHT
+        /\ fetchedLightBlocks = [h \in {TRUSTED_HEIGHT} |-> trustedLightBlock]
+        \* initially, lightBlockStatus is a function of one element, i.e., TRUSTED_HEIGHT
+        /\ lightBlockStatus = [h \in {TRUSTED_HEIGHT} |-> "StateVerified"]
+        \* the latest verified block is the trusted block
+        /\ latestVerified = trustedLightBlock
+        /\ InitMonitor(trustedLightBlock, trustedLightBlock, now, "SUCCESS")
+
+\* block should contain a copy of the block from the reference chain, with a matching commit
+CopyLightBlockFromChain(block, height) ==
+    LET ref == blockchain[height]
+        lastCommit ==
+          IF height < ULTIMATE_HEIGHT
+          THEN blockchain[height + 1].lastCommit
+            \* for the ultimate block, which we never use, as ULTIMATE_HEIGHT = TARGET_HEIGHT + 1
+          ELSE blockchain[height].VS
+    IN
+    block = [header |-> ref, Commits |-> lastCommit]
+
+\* Either the primary is correct and the block comes from the reference chain,
+\* or the block is produced by a faulty primary.
+\*
+\* [LCV-FUNC-FETCH.1::TLA.1]
+FetchLightBlockInto(block, height) ==
+    IF IS_PRIMARY_CORRECT
+    THEN CopyLightBlockFromChain(block, height)
+    ELSE BC!IsLightBlockAllowedByDigitalSignatures(height, block)
+
+\* add a block into the light store
+\*
+\* [LCV-FUNC-UPDATE.1::TLA.1]
+LightStoreUpdateBlocks(lightBlocks, block) ==
+    LET ht == block.header.height IN
+    [h \in DOMAIN lightBlocks \union {ht} |->
+        IF h = ht THEN block ELSE lightBlocks[h]]
+
+\* update the state of a light block
+\*
+\* [LCV-FUNC-UPDATE.1::TLA.1]
+LightStoreUpdateStates(statuses, ht, blockState) ==
+    [h \in DOMAIN statuses \union {ht} |->
+        IF h = ht THEN blockState ELSE statuses[h]]
+
+\* Check whether newHeight is a possible next height for the light client.
+\*
+\* [LCV-FUNC-SCHEDULE.1::TLA.1]
+CanScheduleTo(newHeight, pLatestVerified, pNextHeight, pTargetHeight) ==
+    LET ht == pLatestVerified.header.height IN
+    \/ /\ ht = pNextHeight
+       /\ ht < pTargetHeight
+       /\ pNextHeight < newHeight
+       /\ newHeight <= pTargetHeight
+    \/ /\ ht < pNextHeight
+       /\ ht < pTargetHeight
+       /\ ht < newHeight
+       /\ newHeight < pNextHeight
+    \/ /\ ht = pTargetHeight
+       /\ newHeight = pTargetHeight
+
+\* The loop of VerifyToTarget.
+\*
+\* [LCV-FUNC-MAIN.1::TLA-LOOP.1]
+VerifyToTargetLoop ==
+      \* the loop condition is true
+    /\ latestVerified.header.height < TARGET_HEIGHT
+      \* pick a light block, which will be constrained later
+    /\ \E current \in BC!LightBlocks:
+        \* Get next LightBlock for verification
+        /\ IF nextHeight \in DOMAIN fetchedLightBlocks
+           THEN \* copy the block from the light store
+                /\ current = fetchedLightBlocks[nextHeight]
+                /\ UNCHANGED fetchedLightBlocks
+           ELSE \* retrieve a light block and save it in the light store
+                /\ FetchLightBlockInto(current, nextHeight)
+                /\ fetchedLightBlocks' = LightStoreUpdateBlocks(fetchedLightBlocks, current)
+        \* Record that one more probe has been done (for complexity and model checking)
+        /\ nprobes' = nprobes + 1
+        \* Verify the current block
+        /\ LET verdict == ValidAndVerified(latestVerified, current) IN
+           NextMonitor(latestVerified, current, now, verdict) /\
+           \* Decide whether/how to continue
+           CASE verdict = "SUCCESS" ->
+              /\ lightBlockStatus' = LightStoreUpdateStates(lightBlockStatus, nextHeight, "StateVerified")
+              /\ latestVerified' = current
+              /\ state' =
+                   IF latestVerified'.header.height < TARGET_HEIGHT
+                   THEN "working"
+                   ELSE "finishedSuccess"
+              /\ \E newHeight \in HEIGHTS:
+                 /\ CanScheduleTo(newHeight, current, nextHeight, TARGET_HEIGHT)
+                 /\ nextHeight' = newHeight
+
+           [] verdict = "NOT_ENOUGH_TRUST" ->
+              (*
+                do nothing: the light block current passed validation, but the validator
+                set is too different to verify it. We keep the state of
+                current at StateUnverified. For a later iteration, Schedule
+                might decide to try verification of that light block again.
+                *)
+              /\ lightBlockStatus' = LightStoreUpdateStates(lightBlockStatus, nextHeight, "StateUnverified")
+              /\ \E newHeight \in HEIGHTS:
+                 /\ CanScheduleTo(newHeight, latestVerified, nextHeight, TARGET_HEIGHT)
+                 /\ nextHeight' = newHeight
+              /\ UNCHANGED <<latestVerified, state>>
+
+           [] OTHER ->
+              \* verdict is some error code
+              /\ lightBlockStatus' = LightStoreUpdateStates(lightBlockStatus, nextHeight, "StateFailed")
+              /\ state' = "finishedFailure"
+              /\ UNCHANGED <<latestVerified, nextHeight>>
+
+\* The terminating condition of VerifyToTarget.
+\*
+\* [LCV-FUNC-MAIN.1::TLA-LOOPCOND.1]
+VerifyToTargetDone ==
+    /\ latestVerified.header.height >= TARGET_HEIGHT
+    /\ state' = "finishedSuccess"
+    /\ UNCHANGED <<nextHeight, nprobes>>
+    /\ UNCHANGED <<fetchedLightBlocks, lightBlockStatus, latestVerified>>
+
+(********************* Lite client + Blockchain *******************)
+Init ==
+    \* the blockchain is initialized immediately to the ULTIMATE_HEIGHT
+    /\ BC!InitToHeight
+    \* the light client starts
+    /\ LCInit
+
+(*
+ The system step is very simple.
+ The light client is either executing VerifyToTarget, or it has terminated.
+ (In the latter case, a model checker reports a deadlock.)
+ Simultaneously, the global clock may advance.
+ *)
+Next ==
+    /\ state = "working"
+    /\ (VerifyToTargetLoop \/ VerifyToTargetDone)
+    /\ BC!AdvanceTime \* the global clock is advanced by zero or more time units
+
+(************************* Types ******************************************)
+TypeOK ==
+    /\ state \in States
+    /\ nextHeight \in HEIGHTS
+    /\ latestVerified \in BC!LightBlocks
+    /\ \E HS \in SUBSET HEIGHTS:
+        /\ fetchedLightBlocks \in [HS -> BC!LightBlocks]
+        /\ lightBlockStatus
+             \in [HS -> {"StateVerified", "StateUnverified", "StateFailed"}]
+
+(************************* Properties ******************************************)
+
+(* The properties to check *)
+\* this invariant candidate is false
+NeverFinish ==
+    state = "working"
+
+\* this invariant candidate is false
+NeverFinishNegative ==
+    state /= "finishedFailure"
+
+\* This invariant holds true, when the primary is correct.
+\* This invariant candidate is false when the primary is faulty.
+NeverFinishNegativeWhenTrusted ==
+    (*(minTrustedHeight <= TRUSTED_HEIGHT)*)
+    BC!InTrustingPeriod(blockchain[TRUSTED_HEIGHT])
+      => state /= "finishedFailure"
+
+\* this invariant candidate is false
+NeverFinishPositive ==
+    state /= "finishedSuccess"
+
+(**
+ Correctness states that all the obtained headers are exactly like in the blockchain.
+
+ It is always the case that every verified header in LightStore was generated by
+ an instance of Tendermint consensus.
+
+ [LCV-DIST-SAFE.1::CORRECTNESS-INV.1]
+ *)
+CorrectnessInv ==
+    \A h \in DOMAIN fetchedLightBlocks:
+        lightBlockStatus[h] = "StateVerified" =>
+           fetchedLightBlocks[h].header = blockchain[h]
+
+(**
+ Check that the sequence of the headers in storedLightBlocks satisfies ValidAndVerified = "SUCCESS" pairwise
+ This property is easily violated, whenever a header cannot be trusted anymore.
+ *)
+StoredHeadersAreVerifiedInv ==
+    state = "finishedSuccess"
+        =>
+        \A lh, rh \in DOMAIN fetchedLightBlocks: \* for every pair of different stored headers
+            \/ lh >= rh
+               \* either there is a header between them
+            \/ \E mh \in DOMAIN fetchedLightBlocks:
+                lh < mh /\ mh < rh
+               \* or we can verify the right one using the left one
+            \/ "SUCCESS" = ValidAndVerified(fetchedLightBlocks[lh], fetchedLightBlocks[rh])
+
+\* An improved version of StoredHeadersAreVerifiedInv, assuming that a header may not be trusted.
+\* This invariant candidate is also violated,
+\* as there may be some unverified blocks left in the middle.
+StoredHeadersAreVerifiedOrNotTrustedInv ==
+    state = "finishedSuccess"
+        =>
+        \A lh, rh \in DOMAIN fetchedLightBlocks: \* for every pair of different stored headers
+            \/ lh >= rh
+               \* either there is a header between them
+            \/ \E mh \in DOMAIN fetchedLightBlocks:
+                lh < mh /\ mh < rh
+               \* or we can verify the right one using the left one
+            \/ "SUCCESS" = ValidAndVerified(fetchedLightBlocks[lh], fetchedLightBlocks[rh])
+               \* or the left header is outside the trusting period, so no guarantees
+            \/ ~BC!InTrustingPeriod(fetchedLightBlocks[lh].header)
+
+(**
+ * An improved version of StoredHeadersAreVerifiedOrNotTrustedInv,
+ * checking the property only for the verified headers.
+ * This invariant holds true.
+ *) +ProofOfChainOfTrustInv == + state = "finishedSuccess" + => + \A lh, rh \in DOMAIN fetchedLightBlocks: + \* for every pair of stored headers that have been verified + \/ lh >= rh + \/ lightBlockStatus[lh] = "StateUnverified" + \/ lightBlockStatus[rh] = "StateUnverified" + \* either there is a header between them + \/ \E mh \in DOMAIN fetchedLightBlocks: + lh < mh /\ mh < rh /\ lightBlockStatus[mh] = "StateVerified" + \* or the left header is outside the trusting period, so no guarantees + \/ ~(BC!InTrustingPeriod(fetchedLightBlocks[lh].header)) + \* or we can verify the right one using the left one + \/ "SUCCESS" = ValidAndVerified(fetchedLightBlocks[lh], fetchedLightBlocks[rh]) + +(** + * When the light client terminates, there are no failed blocks. (Otherwise, someone lied to us.) + *) +NoFailedBlocksOnSuccessInv == + state = "finishedSuccess" => + \A h \in DOMAIN fetchedLightBlocks: + lightBlockStatus[h] /= "StateFailed" + +\* This property states that whenever the light client finishes with a positive outcome, +\* the trusted header is still within the trusting period. +\* We expect this property to be violated. And Apalache shows us a counterexample. +PositiveBeforeTrustedHeaderExpires == + (state = "finishedSuccess") => BC!InTrustingPeriod(blockchain[TRUSTED_HEIGHT]) + +\* If the primary is correct and the initial trusted block has not expired, +\* then whenever the algorithm terminates, it reports "success" +CorrectPrimaryAndTimeliness == + (BC!InTrustingPeriod(blockchain[TRUSTED_HEIGHT]) + /\ state /= "working" /\ IS_PRIMARY_CORRECT) => + state = "finishedSuccess" + +(** + If the primary is correct and there is a trusted block that has not expired, + then whenever the algorithm terminates, it reports "success". + + [LCV-DIST-LIVE.1::SUCCESS-CORR-PRIMARY-CHAIN-OF-TRUST.1] + *) +SuccessOnCorrectPrimaryAndChainOfTrust == + (\E h \in DOMAIN fetchedLightBlocks: + lightBlockStatus[h] = "StateVerified" /\ BC!InTrustingPeriod(blockchain[h]) + /\ state /= "working" /\ IS_PRIMARY_CORRECT) => + state = "finishedSuccess" + +\* Lite Client Completeness: If header h was correctly generated by an instance +\* of Tendermint consensus (and its age is less than the trusting period), +\* then the lite client should eventually set trust(h) to true. +\* +\* Note that Completeness assumes that the lite client communicates with a correct full node. +\* +\* We decompose completeness into Termination (liveness) and Precision (safety). +\* Once again, Precision is an inverse version of the safety property in Completeness, +\* as A => B is logically equivalent to ~B => ~A. 
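+\*
+\* Illustrative note (not from the original spec): Completeness has the shape
+\* "correctly generated and fresh => eventually trusted"; PrecisionInv below
+\* checks the contrapositive safety part: if the client reports failure, then
+\* either the trusted header expired or some peer lied about a block.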
+PrecisionInv ==
+    (state = "finishedFailure")
+      => \/ ~BC!InTrustingPeriod(blockchain[TRUSTED_HEIGHT]) \* outside of the trusting period
+         \/ \E h \in DOMAIN fetchedLightBlocks:
+            LET lightBlock == fetchedLightBlocks[h] IN
+              \* the full node lied to the lite client about the block header
+              \/ lightBlock.header /= blockchain[h]
+              \* the full node lied to the lite client about the commits
+              \/ lightBlock.Commits /= lightBlock.header.VS
+
+\* the old invariant that was found to be buggy by TLC
+PrecisionBuggyInv ==
+    (state = "finishedFailure")
+      => \/ ~BC!InTrustingPeriod(blockchain[TRUSTED_HEIGHT]) \* outside of the trusting period
+         \/ \E h \in DOMAIN fetchedLightBlocks:
+            LET lightBlock == fetchedLightBlocks[h] IN
+            \* the full node lied to the lite client about the block header
+            lightBlock.header /= blockchain[h]
+
+\* the worst complexity
+Complexity ==
+    LET N == TARGET_HEIGHT - TRUSTED_HEIGHT + 1 IN
+    state /= "working" =>
+        (2 * nprobes <= N * (N - 1))
+
+(*
+ We omit termination, as the algorithm deadlocks in the end.
+ So termination can be demonstrated by finding a deadlock.
+ Of course, one has to analyze the deadlocked state and see that
+ the algorithm has indeed terminated there.
+*)
+=============================================================================
+\* Modification History
+\* Last modified Fri Jun 26 12:08:28 CEST 2020 by igor
+\* Created Wed Oct 02 16:39:42 CEST 2019 by igor
diff --git a/spec/light-client/verification/Lightclient_003_draft.tla b/spec/light-client/verification/Lightclient_003_draft.tla
new file mode 100644
index 0000000000..e17a88491b
--- /dev/null
+++ b/spec/light-client/verification/Lightclient_003_draft.tla
@@ -0,0 +1,493 @@
+-------------------------- MODULE Lightclient_003_draft ----------------------------
+(**
+ * A state-machine specification of the lite client verification,
+ * following the English spec:
+ *
+ * https://github.com/informalsystems/tendermint-rs/blob/master/docs/spec/lightclient/verification.md
+ *)
+
+EXTENDS Integers, FiniteSets
+
+\* the parameters of Light Client
+CONSTANTS
+  TRUSTED_HEIGHT,
+    (* an index of the block header that the light client trusts by social consensus *)
+  TARGET_HEIGHT,
+    (* an index of the block header that the light client tries to verify *)
+  TRUSTING_PERIOD,
+    (* the period within which the validators are trusted *)
+  CLOCK_DRIFT,
+    (* the assumed precision of the clock *)
+  REAL_CLOCK_DRIFT,
+    (* the actual clock drift, which under normal circumstances should not
+       be larger than CLOCK_DRIFT (otherwise, there will be a bug) *)
+  IS_PRIMARY_CORRECT,
+    (* is primary correct? *)
+  FAULTY_RATIO
+    (* a pair <<a, b>> that limits the ratio of faulty validators in the blockchain
+       from above (exclusive). Tendermint security model prescribes 1 / 3.
*)
+
+VARIABLES     (* see TypeOK below for the variable types *)
+  localClock, (* the local clock of the light client *)
+  state,      (* the current state of the light client *)
+  nextHeight, (* the next height to explore by the light client *)
+  nprobes     (* the lite client iteration, or the number of block tests *)
+
+(* the light store *)
+VARIABLES
+  fetchedLightBlocks, (* a function from heights to LightBlocks *)
+  lightBlockStatus,   (* a function from heights to block statuses *)
+  latestVerified      (* the latest verified block *)
+
+(* the variables of the lite client *)
+lcvars == <<localClock, state, nextHeight, fetchedLightBlocks, lightBlockStatus, latestVerified>>
+
+(* the light client previous state components, used for monitoring *)
+VARIABLES
+  prevVerified,
+  prevCurrent,
+  prevLocalClock,
+  prevVerdict
+
+InitMonitor(verified, current, pLocalClock, verdict) ==
+  /\ prevVerified = verified
+  /\ prevCurrent = current
+  /\ prevLocalClock = pLocalClock
+  /\ prevVerdict = verdict
+
+NextMonitor(verified, current, pLocalClock, verdict) ==
+  /\ prevVerified' = verified
+  /\ prevCurrent' = current
+  /\ prevLocalClock' = pLocalClock
+  /\ prevVerdict' = verdict
+
+
+(******************* Blockchain instance ***********************************)
+
+\* the parameters that are propagated into Blockchain
+CONSTANTS
+  AllNodes
+    (* a set of all nodes that can act as validators (correct and faulty) *)
+
+\* the state variables of Blockchain, see Blockchain.tla for the details
+VARIABLES refClock, blockchain, Faulty
+
+\* All the variables of Blockchain. For some reason, BC!vars does not work
+bcvars == <<refClock, blockchain, Faulty>>
+
+(* Create an instance of Blockchain.
+   We could write EXTENDS Blockchain, but then all the constants and state variables
+   would be hidden inside the Blockchain module.
+ *)
+ULTIMATE_HEIGHT == TARGET_HEIGHT + 1
+
+BC == INSTANCE Blockchain_003_draft WITH
+  refClock <- refClock, blockchain <- blockchain, Faulty <- Faulty
+
+(************************** Lite client ************************************)
+
+(* the heights on which the light client is working *)
+HEIGHTS == TRUSTED_HEIGHT..TARGET_HEIGHT
+
+(* the control states of the lite client *)
+States == { "working", "finishedSuccess", "finishedFailure" }
+
+\* The verification functions are implemented in the API
+API == INSTANCE LCVerificationApi_003_draft
+
+
+(*
+ Initial states of the light client.
+ Initially, only the trusted light block is present.
+ *)
+LCInit ==
+    /\ \E tm \in Int:
+        tm >= 0 /\ API!IsLocalClockWithinDrift(tm, refClock) /\ localClock = tm
+    /\ state = "working"
+    /\ nextHeight = TARGET_HEIGHT
+    /\ nprobes = 0 \* no tests have been done so far
+    /\ LET trustedBlock == blockchain[TRUSTED_HEIGHT]
+           trustedLightBlock == [header |-> trustedBlock, Commits |-> AllNodes]
+       IN
+        \* initially, fetchedLightBlocks is a function of one element, i.e., TRUSTED_HEIGHT
+        /\ fetchedLightBlocks = [h \in {TRUSTED_HEIGHT} |-> trustedLightBlock]
+        \* initially, lightBlockStatus is a function of one element, i.e., TRUSTED_HEIGHT
+        /\ lightBlockStatus = [h \in {TRUSTED_HEIGHT} |-> "StateVerified"]
+        \* the latest verified block is the trusted block
+        /\ latestVerified = trustedLightBlock
+        /\ InitMonitor(trustedLightBlock, trustedLightBlock, localClock, "SUCCESS")
+
+\* block should contain a copy of the block from the reference chain, with a matching commit
+CopyLightBlockFromChain(block, height) ==
+    LET ref == blockchain[height]
+        lastCommit ==
+          IF height < ULTIMATE_HEIGHT
+          THEN blockchain[height + 1].lastCommit
+          \* for the ultimate block, which we never use, as ULTIMATE_HEIGHT = TARGET_HEIGHT + 1
+          ELSE blockchain[height].VS
+    IN
+    block = [header |-> ref, Commits |-> lastCommit]
+
+\* Either the primary is correct and the block comes from the reference chain,
+\* or the block is produced by a faulty primary.
+\*
+\* [LCV-FUNC-FETCH.1::TLA.1]
+FetchLightBlockInto(block, height) ==
+    IF IS_PRIMARY_CORRECT
+    THEN CopyLightBlockFromChain(block, height)
+    ELSE BC!IsLightBlockAllowedByDigitalSignatures(height, block)
+
+\* add a block into the light store
+\*
+\* [LCV-FUNC-UPDATE.1::TLA.1]
+LightStoreUpdateBlocks(lightBlocks, block) ==
+    LET ht == block.header.height IN
+    [h \in DOMAIN lightBlocks \union {ht} |->
+        IF h = ht THEN block ELSE lightBlocks[h]]
+
+\* update the state of a light block
+\*
+\* [LCV-FUNC-UPDATE.1::TLA.1]
+LightStoreUpdateStates(statuses, ht, blockState) ==
+    [h \in DOMAIN statuses \union {ht} |->
+        IF h = ht THEN blockState ELSE statuses[h]]
+
+\* Check whether newHeight is a possible next height for the light client.
+\*
+\* [LCV-FUNC-SCHEDULE.1::TLA.1]
+CanScheduleTo(newHeight, pLatestVerified, pNextHeight, pTargetHeight) ==
+    LET ht == pLatestVerified.header.height IN
+    \/ /\ ht = pNextHeight
+       /\ ht < pTargetHeight
+       /\ pNextHeight < newHeight
+       /\ newHeight <= pTargetHeight
+    \/ /\ ht < pNextHeight
+       /\ ht < pTargetHeight
+       /\ ht < newHeight
+       /\ newHeight < pNextHeight
+    \/ /\ ht = pTargetHeight
+       /\ newHeight = pTargetHeight
+
+\* The loop of VerifyToTarget.
+\*
+\* [LCV-FUNC-MAIN.1::TLA-LOOP.1]
+VerifyToTargetLoop ==
+      \* the loop condition is true
+    /\ latestVerified.header.height < TARGET_HEIGHT
+      \* pick a light block, which will be constrained later
+    /\ \E current \in BC!LightBlocks:
+        \* Get next LightBlock for verification
+        /\ IF nextHeight \in DOMAIN fetchedLightBlocks
+           THEN \* copy the block from the light store
+                /\ current = fetchedLightBlocks[nextHeight]
+                /\ UNCHANGED fetchedLightBlocks
+           ELSE \* retrieve a light block and save it in the light store
+                /\ FetchLightBlockInto(current, nextHeight)
+                /\ fetchedLightBlocks' = LightStoreUpdateBlocks(fetchedLightBlocks, current)
+        \* Record that one more probe has been done (for complexity and model checking)
+        /\ nprobes' = nprobes + 1
+        \* Verify the current block
+        /\ LET verdict == API!ValidAndVerified(latestVerified, current, TRUE) IN
+           NextMonitor(latestVerified, current, localClock, verdict) /\
+           \* Decide whether/how to continue
+           CASE verdict = "SUCCESS" ->
+              /\ lightBlockStatus' = LightStoreUpdateStates(lightBlockStatus, nextHeight, "StateVerified")
+              /\ latestVerified' = current
+              /\ state' =
+                    IF latestVerified'.header.height < TARGET_HEIGHT
+                    THEN "working"
+                    ELSE "finishedSuccess"
+              /\ \E newHeight \in HEIGHTS:
+                 /\ CanScheduleTo(newHeight, current, nextHeight, TARGET_HEIGHT)
+                 /\ nextHeight' = newHeight
+
+           [] verdict = "NOT_ENOUGH_TRUST" ->
+              (*
+                do nothing: the light block current passed validation, but the validator
+                set is too different to verify it. We keep the state of
+                current at StateUnverified. For a later iteration, Schedule
+                might decide to try verification of that light block again.
+                *)
+              /\ lightBlockStatus' = LightStoreUpdateStates(lightBlockStatus, nextHeight, "StateUnverified")
+              /\ \E newHeight \in HEIGHTS:
+                 /\ CanScheduleTo(newHeight, latestVerified, nextHeight, TARGET_HEIGHT)
+                 /\ nextHeight' = newHeight
+              /\ UNCHANGED <<latestVerified, state>>
+
+           [] OTHER ->
+              \* verdict is some error code
+              /\ lightBlockStatus' = LightStoreUpdateStates(lightBlockStatus, nextHeight, "StateFailed")
+              /\ state' = "finishedFailure"
+              /\ UNCHANGED <<nextHeight, latestVerified>>
+
+\* The terminating condition of VerifyToTarget.
+\*
+\* [LCV-FUNC-MAIN.1::TLA-LOOPCOND.1]
+VerifyToTargetDone ==
+    /\ latestVerified.header.height >= TARGET_HEIGHT
+    /\ state' = "finishedSuccess"
+    /\ UNCHANGED <<nextHeight, nprobes, fetchedLightBlocks, lightBlockStatus, latestVerified>>
+    /\ UNCHANGED <<prevVerified, prevCurrent, prevLocalClock, prevVerdict>>
+
+(*
+ The local and global clocks can be updated. They can also drift from each other.
+ Note that the local clock can actually go backwards in time.
+ However, it still stays in the drift envelope
+ of [refClock - REAL_CLOCK_DRIFT, refClock + REAL_CLOCK_DRIFT].
+ *)
+AdvanceClocks ==
+    /\ BC!AdvanceTime
+    /\ \E tm \in Int:
+        /\ tm >= 0
+        /\ API!IsLocalClockWithinDrift(tm, refClock')
+        /\ localClock' = tm
+        \* if you like the clock to always grow monotonically, uncomment the next line:
+        \*/\ localClock' > localClock
+
+(********************* Lite client + Blockchain *******************)
+Init ==
+    \* the blockchain is initialized immediately to the ULTIMATE_HEIGHT
+    /\ BC!InitToHeight(FAULTY_RATIO)
+    \* the light client starts
+    /\ LCInit
+
+(*
+  The system step is very simple.
+  The light client is either executing VerifyToTarget, or it has terminated.
+  (In the latter case, a model checker reports a deadlock.)
+  Simultaneously, the global clock may advance.
+ *)
+Next ==
+    /\ state = "working"
+    /\ VerifyToTargetLoop \/ VerifyToTargetDone
+    /\ AdvanceClocks
+
+(************************* Types ******************************************)
+TypeOK ==
+    /\ state \in States
+    /\ localClock \in Nat
+    /\ refClock \in Nat
+    /\ nextHeight \in HEIGHTS
+    /\ latestVerified \in BC!LightBlocks
+    /\ \E HS \in SUBSET HEIGHTS:
+        /\ fetchedLightBlocks \in [HS -> BC!LightBlocks]
+        /\ lightBlockStatus
+            \in [HS -> {"StateVerified", "StateUnverified", "StateFailed"}]
+
+(************************* Properties ******************************************)
+
+(* The properties to check *)
+\* this invariant candidate is false
+NeverFinish ==
+    state = "working"
+
+\* this invariant candidate is false
+NeverFinishNegative ==
+    state /= "finishedFailure"
+
+\* This invariant holds true when the primary is correct.
+\* This invariant candidate is false when the primary is faulty.
+NeverFinishNegativeWhenTrusted ==
+    BC!InTrustingPeriod(blockchain[TRUSTED_HEIGHT])
+        => state /= "finishedFailure"
+
+\* this invariant candidate is false
+NeverFinishPositive ==
+    state /= "finishedSuccess"
+
+
+(**
+ Check that the target height has been reached upon successful termination.
+ *)
+TargetHeightOnSuccessInv ==
+    state = "finishedSuccess" =>
+        /\ TARGET_HEIGHT \in DOMAIN fetchedLightBlocks
+        /\ lightBlockStatus[TARGET_HEIGHT] = "StateVerified"
+
+(**
+ Correctness states that all the obtained headers are exactly like in the blockchain.
+
+ It is always the case that every verified header in LightStore was generated by
+ an instance of Tendermint consensus.
+
+ [LCV-DIST-SAFE.1::CORRECTNESS-INV.1]
+ *)
+CorrectnessInv ==
+    \A h \in DOMAIN fetchedLightBlocks:
+        lightBlockStatus[h] = "StateVerified" =>
+            fetchedLightBlocks[h].header = blockchain[h]
+
+(**
+ No faulty block was used to construct a proof. This invariant holds
+ only if FAULTY_RATIO < 1/3.
+ *)
+NoTrustOnFaultyBlockInv ==
+    (state = "finishedSuccess"
+        /\ fetchedLightBlocks[TARGET_HEIGHT].header = blockchain[TARGET_HEIGHT])
+        => CorrectnessInv
+
+(**
+ Check that the sequence of headers in fetchedLightBlocks satisfies ValidAndVerified = "SUCCESS" pairwise.
+ This property is easily violated whenever a header cannot be trusted anymore.
+ *)
+StoredHeadersAreVerifiedInv ==
+    state = "finishedSuccess"
+        =>
+        \A lh, rh \in DOMAIN fetchedLightBlocks: \* for every pair of different stored headers
+            \/ lh >= rh
+               \* either there is a header between them
+            \/ \E mh \in DOMAIN fetchedLightBlocks:
+                lh < mh /\ mh < rh
+               \* or we can verify the right one using the left one
+            \/ "SUCCESS" = API!ValidAndVerified(fetchedLightBlocks[lh],
+                                                fetchedLightBlocks[rh], FALSE)
+
+\* An improved version of StoredHeadersAreVerifiedInv,
+\* assuming that a header may not be trusted.
+\* This invariant candidate is also violated,
+\* as there may be some unverified blocks left in the middle.
+\* This property is violated under two conditions:
+\* (1) the primary is faulty and there are at least 4 blocks,
+\* (2) the primary is correct and there are at least 5 blocks.
+StoredHeadersAreVerifiedOrNotTrustedInv ==
+    state = "finishedSuccess"
+        =>
+        \A lh, rh \in DOMAIN fetchedLightBlocks: \* for every pair of different stored headers
+            \/ lh >= rh
+               \* either there is a header between them
+            \/ \E mh \in DOMAIN fetchedLightBlocks:
+                lh < mh /\ mh < rh
+               \* or we can verify the right one using the left one
+            \/ "SUCCESS" = API!ValidAndVerified(fetchedLightBlocks[lh],
+                                                fetchedLightBlocks[rh], FALSE)
+               \* or the left header is outside the trusting period, so no guarantees
+            \/ ~API!InTrustingPeriodLocal(fetchedLightBlocks[lh].header)
+
+(**
+ * An improved version of StoredHeadersAreVerifiedOrNotTrustedInv,
+ * checking the property only for the verified headers.
+ * This invariant holds true if REAL_CLOCK_DRIFT <= CLOCK_DRIFT.
+ *)
+ProofOfChainOfTrustInv ==
+    state = "finishedSuccess"
+        =>
+        \A lh, rh \in DOMAIN fetchedLightBlocks:
+                \* for every pair of stored headers that have been verified
+            \/ lh >= rh
+            \/ lightBlockStatus[lh] = "StateUnverified"
+            \/ lightBlockStatus[rh] = "StateUnverified"
+               \* either there is a header between them
+            \/ \E mh \in DOMAIN fetchedLightBlocks:
+                lh < mh /\ mh < rh /\ lightBlockStatus[mh] = "StateVerified"
+               \* or the left header is outside the trusting period, so no guarantees
+            \/ ~(API!InTrustingPeriodLocal(fetchedLightBlocks[lh].header))
+               \* or we can verify the right one using the left one
+            \/ "SUCCESS" = API!ValidAndVerified(fetchedLightBlocks[lh],
+                                                fetchedLightBlocks[rh], FALSE)
+
+(**
+ * When the light client terminates, there are no failed blocks. (Otherwise, someone lied to us.)
+ *)
+NoFailedBlocksOnSuccessInv ==
+    state = "finishedSuccess" =>
+        \A h \in DOMAIN fetchedLightBlocks:
+            lightBlockStatus[h] /= "StateFailed"
+
+\* This property states that whenever the light client finishes with a positive outcome,
+\* the trusted header is still within the trusting period.
+\* We expect this property to be violated. And Apalache shows us a counterexample.
+PositiveBeforeTrustedHeaderExpires ==
+    (state = "finishedSuccess") =>
+        BC!InTrustingPeriod(blockchain[TRUSTED_HEIGHT])
+
+\* If the primary is correct and the initial trusted block has not expired,
+\* then whenever the algorithm terminates, it reports "success".
+\* This property fails.
+CorrectPrimaryAndTimeliness ==
+    (BC!InTrustingPeriod(blockchain[TRUSTED_HEIGHT])
+        /\ state /= "working" /\ IS_PRIMARY_CORRECT) =>
+            state = "finishedSuccess"
+
+(**
+ If the primary is correct and there is a trusted block that has not expired,
+ then whenever the algorithm terminates, it reports "success".
+ This property holds true only if the local clock always grows monotonically.
+ If the local clock can go backwards in the envelope
+ [refClock - CLOCK_DRIFT, refClock + CLOCK_DRIFT], then the property fails.
+
+ [LCV-DIST-LIVE.1::SUCCESS-CORR-PRIMARY-CHAIN-OF-TRUST.1]
+ *)
+SuccessOnCorrectPrimaryAndChainOfTrustLocal ==
+    (\E h \in DOMAIN fetchedLightBlocks:
+        /\ lightBlockStatus[h] = "StateVerified"
+        /\ API!InTrustingPeriodLocal(blockchain[h])
+        /\ state /= "working" /\ IS_PRIMARY_CORRECT) =>
+            state = "finishedSuccess"
+
+(**
+ Similar to SuccessOnCorrectPrimaryAndChainOfTrustLocal, but using the blockchain clock.
+ It fails because the local clock of the client drifted away, so it rejects a block
+ that has not expired yet (according to the local clock).
+ *) +SuccessOnCorrectPrimaryAndChainOfTrustGlobal == + (\E h \in DOMAIN fetchedLightBlocks: + lightBlockStatus[h] = "StateVerified" /\ BC!InTrustingPeriod(blockchain[h]) + /\ state /= "working" /\ IS_PRIMARY_CORRECT) => + state = "finishedSuccess" + +\* Lite Client Completeness: If header h was correctly generated by an instance +\* of Tendermint consensus (and its age is less than the trusting period), +\* then the lite client should eventually set trust(h) to true. +\* +\* Note that Completeness assumes that the lite client communicates with a correct full node. +\* +\* We decompose completeness into Termination (liveness) and Precision (safety). +\* Once again, Precision is an inverse version of the safety property in Completeness, +\* as A => B is logically equivalent to ~B => ~A. +\* +\* This property holds only when CLOCK_DRIFT = 0 and REAL_CLOCK_DRIFT = 0. +PrecisionInv == + (state = "finishedFailure") + => \/ ~BC!InTrustingPeriod(blockchain[TRUSTED_HEIGHT]) \* outside of the trusting period + \/ \E h \in DOMAIN fetchedLightBlocks: + LET lightBlock == fetchedLightBlocks[h] IN + \* the full node lied to the lite client about the block header + \/ lightBlock.header /= blockchain[h] + \* the full node lied to the lite client about the commits + \/ lightBlock.Commits /= lightBlock.header.VS + +\* the old invariant that was found to be buggy by TLC +PrecisionBuggyInv == + (state = "finishedFailure") + => \/ ~BC!InTrustingPeriod(blockchain[TRUSTED_HEIGHT]) \* outside of the trusting period + \/ \E h \in DOMAIN fetchedLightBlocks: + LET lightBlock == fetchedLightBlocks[h] IN + \* the full node lied to the lite client about the block header + lightBlock.header /= blockchain[h] + +\* the worst complexity +Complexity == + LET N == TARGET_HEIGHT - TRUSTED_HEIGHT + 1 IN + state /= "working" => + (2 * nprobes <= N * (N - 1)) + +(** + If the light client has terminated, then the expected postcondition holds true. + *) +ApiPostInv == + state /= "working" => + API!VerifyToTargetPost(blockchain, IS_PRIMARY_CORRECT, + fetchedLightBlocks, lightBlockStatus, + TRUSTED_HEIGHT, TARGET_HEIGHT, state) + +(* + We omit termination, as the algorithm deadlocks in the end. + So termination can be demonstrated by finding a deadlock. + Of course, one has to analyze the deadlocked state and see that + the algorithm has indeed terminated there. +*) +============================================================================= +\* Modification History +\* Last modified Fri Jun 26 12:08:28 CEST 2020 by igor +\* Created Wed Oct 02 16:39:42 CEST 2019 by igor diff --git a/spec/light-client/verification/Lightclient_A_1.tla b/spec/light-client/verification/Lightclient_A_1.tla new file mode 100644 index 0000000000..70e6caf002 --- /dev/null +++ b/spec/light-client/verification/Lightclient_A_1.tla @@ -0,0 +1,440 @@ +-------------------------- MODULE Lightclient_A_1 ---------------------------- +(** + * A state-machine specification of the lite client, following the English spec: + * + * ./verification_001_published.md + *) + +EXTENDS Integers, FiniteSets + +\* the parameters of Light Client +CONSTANTS + TRUSTED_HEIGHT, + (* an index of the block header that the light client trusts by social consensus *) + TARGET_HEIGHT, + (* an index of the block header that the light client tries to verify *) + TRUSTING_PERIOD, + (* the period within which the validators are trusted *) + IS_PRIMARY_CORRECT + (* is primary correct? 
*)
+
+VARIABLES (* see TypeOK below for the variable types *)
+  state,      (* the current state of the light client *)
+  nextHeight, (* the next height to explore by the light client *)
+  nprobes     (* the lite client iteration, or the number of block tests *)
+
+(* the light store *)
+VARIABLES
+  fetchedLightBlocks, (* a function from heights to LightBlocks *)
+  lightBlockStatus,   (* a function from heights to block statuses *)
+  latestVerified      (* the latest verified block *)
+
+(* the variables of the lite client *)
+lcvars == <<state, nextHeight, fetchedLightBlocks, lightBlockStatus, latestVerified>>
+
+(******************* Blockchain instance ***********************************)
+
+\* the parameters that are propagated into Blockchain
+CONSTANTS
+  AllNodes
+    (* a set of all nodes that can act as validators (correct and faulty) *)
+
+\* the state variables of Blockchain, see Blockchain.tla for the details
+VARIABLES now, blockchain, Faulty
+
+\* All the variables of Blockchain. For some reason, BC!vars does not work
+bcvars == <<now, blockchain, Faulty>>
+
+(* Create an instance of Blockchain.
+   We could write EXTENDS Blockchain, but then all the constants and state variables
+   would be hidden inside the Blockchain module.
+ *)
+ULTIMATE_HEIGHT == TARGET_HEIGHT + 1
+
+BC == INSTANCE Blockchain_A_1 WITH
+  now <- now, blockchain <- blockchain, Faulty <- Faulty
+
+(************************** Lite client ************************************)
+
+(* the heights on which the light client is working *)
+HEIGHTS == TRUSTED_HEIGHT..TARGET_HEIGHT
+
+(* the control states of the lite client *)
+States == { "working", "finishedSuccess", "finishedFailure" }
+
+(**
+ Check the precondition of ValidAndVerified.
+
+ [LCV-FUNC-VALID.1::TLA-PRE.1]
+ *)
+ValidAndVerifiedPre(trusted, untrusted) ==
+  LET thdr == trusted.header
+      uhdr == untrusted.header
+  IN
+  /\ BC!InTrustingPeriod(thdr)
+  /\ thdr.height < uhdr.height
+     \* the trusted block has been created earlier (no drift here)
+  /\ thdr.time <= uhdr.time
+  /\ untrusted.Commits \subseteq uhdr.VS
+  /\ LET TP == Cardinality(uhdr.VS)
+         SP == Cardinality(untrusted.Commits)
+     IN
+     3 * SP > 2 * TP
+  /\ thdr.height + 1 = uhdr.height => thdr.NextVS = uhdr.VS
+  (* As we do not have explicit hashes we ignore these three checks of the English spec:
+
+     1. "trusted.Commit is a commit for the header trusted.Header,
+        i.e. it contains the correct hash of the header".
+     2. untrusted.Validators = hash(untrusted.Header.Validators)
+     3. untrusted.NextValidators = hash(untrusted.Header.NextValidators)
+   *)
+
+(**
+ * Check that the commits in an untrusted block form more than 1/3 of the next validators
+ * in a trusted header.
+ *)
+SignedByOneThirdOfTrusted(trusted, untrusted) ==
+  LET TP == Cardinality(trusted.header.NextVS)
+      SP == Cardinality(untrusted.Commits \intersect trusted.header.NextVS)
+  IN
+  3 * SP > TP
+
+(**
+ Check whether an untrusted block is valid and verifiable w.r.t. a trusted header.
+
+ [LCV-FUNC-VALID.1::TLA.1]
+ *)
+ValidAndVerified(trusted, untrusted) ==
+    IF ~ValidAndVerifiedPre(trusted, untrusted)
+    THEN "FAILED_VERIFICATION"
+    ELSE IF ~BC!InTrustingPeriod(untrusted.header)
+    (* We leave the following test for documentation purposes.
+       The implementation should do this test, as signature verification may be slow.
+       In the TLA+ specification, ValidAndVerified happens in no time.
+     *)
+    THEN "FAILED_TRUSTING_PERIOD"
+    ELSE IF untrusted.header.height = trusted.header.height + 1
+            \/ SignedByOneThirdOfTrusted(trusted, untrusted)
+         THEN "OK"
+         ELSE "CANNOT_VERIFY"
+
+(*
+ Initial states of the light client.
+ Initially, only the trusted light block is present.
+ *)
+LCInit ==
+    /\ state = "working"
+    /\ nextHeight = TARGET_HEIGHT
+    /\ nprobes = 0 \* no tests have been done so far
+    /\ LET trustedBlock == blockchain[TRUSTED_HEIGHT]
+           trustedLightBlock == [header |-> trustedBlock, Commits |-> AllNodes]
+       IN
+        \* initially, fetchedLightBlocks is a function of one element, i.e., TRUSTED_HEIGHT
+        /\ fetchedLightBlocks = [h \in {TRUSTED_HEIGHT} |-> trustedLightBlock]
+        \* initially, lightBlockStatus is a function of one element, i.e., TRUSTED_HEIGHT
+        /\ lightBlockStatus = [h \in {TRUSTED_HEIGHT} |-> "StateVerified"]
+        \* the latest verified block is the trusted block
+        /\ latestVerified = trustedLightBlock
+
+\* block should contain a copy of the block from the reference chain, with a matching commit
+CopyLightBlockFromChain(block, height) ==
+    LET ref == blockchain[height]
+        lastCommit ==
+          IF height < ULTIMATE_HEIGHT
+          THEN blockchain[height + 1].lastCommit
+          \* for the ultimate block, which we never use, as ULTIMATE_HEIGHT = TARGET_HEIGHT + 1
+          ELSE blockchain[height].VS
+    IN
+    block = [header |-> ref, Commits |-> lastCommit]
+
+\* Either the primary is correct and the block comes from the reference chain,
+\* or the block is produced by a faulty primary.
+\*
+\* [LCV-FUNC-FETCH.1::TLA.1]
+FetchLightBlockInto(block, height) ==
+    IF IS_PRIMARY_CORRECT
+    THEN CopyLightBlockFromChain(block, height)
+    ELSE BC!IsLightBlockAllowedByDigitalSignatures(height, block)
+
+\* add a block into the light store
+\*
+\* [LCV-FUNC-UPDATE.1::TLA.1]
+LightStoreUpdateBlocks(lightBlocks, block) ==
+    LET ht == block.header.height IN
+    [h \in DOMAIN lightBlocks \union {ht} |->
+        IF h = ht THEN block ELSE lightBlocks[h]]
+
+\* update the state of a light block
+\*
+\* [LCV-FUNC-UPDATE.1::TLA.1]
+LightStoreUpdateStates(statuses, ht, blockState) ==
+    [h \in DOMAIN statuses \union {ht} |->
+        IF h = ht THEN blockState ELSE statuses[h]]
+
+\* Check whether newHeight is a possible next height for the light client.
+\*
+\* [LCV-FUNC-SCHEDULE.1::TLA.1]
+CanScheduleTo(newHeight, pLatestVerified, pNextHeight, pTargetHeight) ==
+    LET ht == pLatestVerified.header.height IN
+    \/ /\ ht = pNextHeight
+       /\ ht < pTargetHeight
+       /\ pNextHeight < newHeight
+       /\ newHeight <= pTargetHeight
+    \/ /\ ht < pNextHeight
+       /\ ht < pTargetHeight
+       /\ ht < newHeight
+       /\ newHeight < pNextHeight
+    \/ /\ ht = pTargetHeight
+       /\ newHeight = pTargetHeight
+
+\* The loop of VerifyToTarget.
+\*
+\* [LCV-FUNC-MAIN.1::TLA-LOOP.1]
+VerifyToTargetLoop ==
+      \* the loop condition is true
+    /\ latestVerified.header.height < TARGET_HEIGHT
+      \* pick a light block, which will be constrained later
+    /\ \E current \in BC!LightBlocks:
+        \* Get next LightBlock for verification
+        /\ IF nextHeight \in DOMAIN fetchedLightBlocks
+           THEN \* copy the block from the light store
+                /\ current = fetchedLightBlocks[nextHeight]
+                /\ UNCHANGED fetchedLightBlocks
+           ELSE \* retrieve a light block and save it in the light store
+                /\ FetchLightBlockInto(current, nextHeight)
+                /\ fetchedLightBlocks' = LightStoreUpdateBlocks(fetchedLightBlocks, current)
+        \* Record that one more probe has been done (for complexity and model checking)
+        /\ nprobes' = nprobes + 1
+        \* Verify the current block
+        /\ LET verdict == ValidAndVerified(latestVerified, current) IN
+           \* Decide whether/how to continue
+           CASE verdict = "OK" ->
+              /\ lightBlockStatus' = LightStoreUpdateStates(lightBlockStatus, nextHeight, "StateVerified")
+              /\ latestVerified' = current
+              /\ state' =
+                    IF latestVerified'.header.height < TARGET_HEIGHT
+                    THEN "working"
+                    ELSE "finishedSuccess"
+              /\ \E newHeight \in HEIGHTS:
+                 /\ CanScheduleTo(newHeight, current, nextHeight, TARGET_HEIGHT)
+                 /\ nextHeight' = newHeight
+
+           [] verdict = "CANNOT_VERIFY" ->
+              (*
+                do nothing: the light block current passed validation, but the validator
+                set is too different to verify it. We keep the state of
+                current at StateUnverified. For a later iteration, Schedule
+                might decide to try verification of that light block again.
+                *)
+              /\ lightBlockStatus' = LightStoreUpdateStates(lightBlockStatus, nextHeight, "StateUnverified")
+              /\ \E newHeight \in HEIGHTS:
+                 /\ CanScheduleTo(newHeight, latestVerified, nextHeight, TARGET_HEIGHT)
+                 /\ nextHeight' = newHeight
+              /\ UNCHANGED <<latestVerified, state>>
+
+           [] OTHER ->
+              \* verdict is some error code
+              /\ lightBlockStatus' = LightStoreUpdateStates(lightBlockStatus, nextHeight, "StateFailed")
+              /\ state' = "finishedFailure"
+              /\ UNCHANGED <<nextHeight, latestVerified>>
+
+\* The terminating condition of VerifyToTarget.
+\*
+\* [LCV-FUNC-MAIN.1::TLA-LOOPCOND.1]
+VerifyToTargetDone ==
+    /\ latestVerified.header.height >= TARGET_HEIGHT
+    /\ state' = "finishedSuccess"
+    /\ UNCHANGED <<nextHeight, nprobes, fetchedLightBlocks, lightBlockStatus, latestVerified>>
+
+(********************* Lite client + Blockchain *******************)
+Init ==
+    \* the blockchain is initialized immediately to the ULTIMATE_HEIGHT
+    /\ BC!InitToHeight
+    \* the light client starts
+    /\ LCInit
+
+(*
+  The system step is very simple.
+  The light client is either executing VerifyToTarget, or it has terminated.
+  (In the latter case, a model checker reports a deadlock.)
+  Simultaneously, the global clock may advance.
+ *)
+Next ==
+    /\ state = "working"
+    /\ VerifyToTargetLoop \/ VerifyToTargetDone
+    /\ BC!AdvanceTime \* the global clock is advanced by zero or more time units
+
+(************************* Types ******************************************)
+TypeOK ==
+    /\ state \in States
+    /\ nextHeight \in HEIGHTS
+    /\ latestVerified \in BC!LightBlocks
+    /\ \E HS \in SUBSET HEIGHTS:
+        /\ fetchedLightBlocks \in [HS -> BC!LightBlocks]
+        /\ lightBlockStatus
+            \in [HS -> {"StateVerified", "StateUnverified", "StateFailed"}]
+
+(************************* Properties ******************************************)
+
+(* The properties to check *)
+\* this invariant candidate is false
+NeverFinish ==
+    state = "working"
+
+\* this invariant candidate is false
+NeverFinishNegative ==
+    state /= "finishedFailure"
+
+\* This invariant holds true when the primary is correct.
+\* This invariant candidate is false when the primary is faulty.
+NeverFinishNegativeWhenTrusted ==
+    (*(minTrustedHeight <= TRUSTED_HEIGHT)*)
+    BC!InTrustingPeriod(blockchain[TRUSTED_HEIGHT])
+        => state /= "finishedFailure"
+
+\* this invariant candidate is false
+NeverFinishPositive ==
+    state /= "finishedSuccess"
+
+(**
+ Correctness states that all the obtained headers are exactly like in the blockchain.
+
+ It is always the case that every verified header in LightStore was generated by
+ an instance of Tendermint consensus.
+
+ [LCV-DIST-SAFE.1::CORRECTNESS-INV.1]
+ *)
+CorrectnessInv ==
+    \A h \in DOMAIN fetchedLightBlocks:
+        lightBlockStatus[h] = "StateVerified" =>
+            fetchedLightBlocks[h].header = blockchain[h]
+
+(**
+ Check that the sequence of headers in fetchedLightBlocks satisfies ValidAndVerified = "OK" pairwise.
+ This property is easily violated whenever a header cannot be trusted anymore.
+ *)
+StoredHeadersAreVerifiedInv ==
+    state = "finishedSuccess"
+        =>
+        \A lh, rh \in DOMAIN fetchedLightBlocks: \* for every pair of different stored headers
+            \/ lh >= rh
+               \* either there is a header between them
+            \/ \E mh \in DOMAIN fetchedLightBlocks:
+                lh < mh /\ mh < rh
+               \* or we can verify the right one using the left one
+            \/ "OK" = ValidAndVerified(fetchedLightBlocks[lh], fetchedLightBlocks[rh])
+
+\* An improved version of StoredHeadersAreVerifiedInv, assuming that a header may not be trusted.
+\* This invariant candidate is also violated,
+\* as there may be some unverified blocks left in the middle.
+StoredHeadersAreVerifiedOrNotTrustedInv ==
+    state = "finishedSuccess"
+        =>
+        \A lh, rh \in DOMAIN fetchedLightBlocks: \* for every pair of different stored headers
+            \/ lh >= rh
+               \* either there is a header between them
+            \/ \E mh \in DOMAIN fetchedLightBlocks:
+                lh < mh /\ mh < rh
+               \* or we can verify the right one using the left one
+            \/ "OK" = ValidAndVerified(fetchedLightBlocks[lh], fetchedLightBlocks[rh])
+               \* or the left header is outside the trusting period, so no guarantees
+            \/ ~BC!InTrustingPeriod(fetchedLightBlocks[lh].header)
+
+(**
+ * An improved version of StoredHeadersAreVerifiedOrNotTrustedInv,
+ * checking the property only for the verified headers.
+ * This invariant holds true.
+ *)
+ProofOfChainOfTrustInv ==
+    state = "finishedSuccess"
+        =>
+        \A lh, rh \in DOMAIN fetchedLightBlocks:
+                \* for every pair of stored headers that have been verified
+            \/ lh >= rh
+            \/ lightBlockStatus[lh] = "StateUnverified"
+            \/ lightBlockStatus[rh] = "StateUnverified"
+               \* either there is a header between them
+            \/ \E mh \in DOMAIN fetchedLightBlocks:
+                lh < mh /\ mh < rh /\ lightBlockStatus[mh] = "StateVerified"
+               \* or the left header is outside the trusting period, so no guarantees
+            \/ ~(BC!InTrustingPeriod(fetchedLightBlocks[lh].header))
+               \* or we can verify the right one using the left one
+            \/ "OK" = ValidAndVerified(fetchedLightBlocks[lh], fetchedLightBlocks[rh])
+
+(**
+ * When the light client terminates, there are no failed blocks. (Otherwise, someone lied to us.)
+ *)
+NoFailedBlocksOnSuccessInv ==
+    state = "finishedSuccess" =>
+        \A h \in DOMAIN fetchedLightBlocks:
+            lightBlockStatus[h] /= "StateFailed"
+
+\* This property states that whenever the light client finishes with a positive outcome,
+\* the trusted header is still within the trusting period.
+\* We expect this property to be violated. And Apalache shows us a counterexample.
+PositiveBeforeTrustedHeaderExpires == + (state = "finishedSuccess") => BC!InTrustingPeriod(blockchain[TRUSTED_HEIGHT]) + +\* If the primary is correct and the initial trusted block has not expired, +\* then whenever the algorithm terminates, it reports "success" +CorrectPrimaryAndTimeliness == + (BC!InTrustingPeriod(blockchain[TRUSTED_HEIGHT]) + /\ state /= "working" /\ IS_PRIMARY_CORRECT) => + state = "finishedSuccess" + +(** + If the primary is correct and there is a trusted block that has not expired, + then whenever the algorithm terminates, it reports "success". + + [LCV-DIST-LIVE.1::SUCCESS-CORR-PRIMARY-CHAIN-OF-TRUST.1] + *) +SuccessOnCorrectPrimaryAndChainOfTrust == + (\E h \in DOMAIN fetchedLightBlocks: + lightBlockStatus[h] = "StateVerified" /\ BC!InTrustingPeriod(blockchain[h]) + /\ state /= "working" /\ IS_PRIMARY_CORRECT) => + state = "finishedSuccess" + +\* Lite Client Completeness: If header h was correctly generated by an instance +\* of Tendermint consensus (and its age is less than the trusting period), +\* then the lite client should eventually set trust(h) to true. +\* +\* Note that Completeness assumes that the lite client communicates with a correct full node. +\* +\* We decompose completeness into Termination (liveness) and Precision (safety). +\* Once again, Precision is an inverse version of the safety property in Completeness, +\* as A => B is logically equivalent to ~B => ~A. +PrecisionInv == + (state = "finishedFailure") + => \/ ~BC!InTrustingPeriod(blockchain[TRUSTED_HEIGHT]) \* outside of the trusting period + \/ \E h \in DOMAIN fetchedLightBlocks: + LET lightBlock == fetchedLightBlocks[h] IN + \* the full node lied to the lite client about the block header + \/ lightBlock.header /= blockchain[h] + \* the full node lied to the lite client about the commits + \/ lightBlock.Commits /= lightBlock.header.VS + +\* the old invariant that was found to be buggy by TLC +PrecisionBuggyInv == + (state = "finishedFailure") + => \/ ~BC!InTrustingPeriod(blockchain[TRUSTED_HEIGHT]) \* outside of the trusting period + \/ \E h \in DOMAIN fetchedLightBlocks: + LET lightBlock == fetchedLightBlocks[h] IN + \* the full node lied to the lite client about the block header + lightBlock.header /= blockchain[h] + +\* the worst complexity +Complexity == + LET N == TARGET_HEIGHT - TRUSTED_HEIGHT + 1 IN + state /= "working" => + (2 * nprobes <= N * (N - 1)) + +(* + We omit termination, as the algorithm deadlocks in the end. + So termination can be demonstrated by finding a deadlock. + Of course, one has to analyze the deadlocked state and see that + the algorithm has indeed terminated there. 
+*) +============================================================================= +\* Modification History +\* Last modified Fri Jun 26 12:08:28 CEST 2020 by igor +\* Created Wed Oct 02 16:39:42 CEST 2019 by igor diff --git a/spec/light-client/verification/MC4_3_correct.tla b/spec/light-client/verification/MC4_3_correct.tla new file mode 100644 index 0000000000..a27d8de05d --- /dev/null +++ b/spec/light-client/verification/MC4_3_correct.tla @@ -0,0 +1,26 @@ +---------------------------- MODULE MC4_3_correct --------------------------- + +AllNodes == {"n1", "n2", "n3", "n4"} +TRUSTED_HEIGHT == 1 +TARGET_HEIGHT == 3 +TRUSTING_PERIOD == 1400 \* two weeks, one day is 100 time units :-) +CLOCK_DRIFT == 10 \* how much we assume the local clock is drifting +REAL_CLOCK_DRIFT == 3 \* how much the local clock is actually drifting +IS_PRIMARY_CORRECT == TRUE +FAULTY_RATIO == <<1, 3>> \* < 1 / 3 faulty validators + +VARIABLES + state, nextHeight, fetchedLightBlocks, lightBlockStatus, latestVerified, + nprobes, + localClock, + refClock, blockchain, Faulty + +(* the light client previous state components, used for monitoring *) +VARIABLES + prevVerified, + prevCurrent, + prevLocalClock, + prevVerdict + +INSTANCE Lightclient_003_draft +============================================================================== diff --git a/spec/light-client/verification/MC4_3_faulty.tla b/spec/light-client/verification/MC4_3_faulty.tla new file mode 100644 index 0000000000..74b278900b --- /dev/null +++ b/spec/light-client/verification/MC4_3_faulty.tla @@ -0,0 +1,26 @@ +---------------------------- MODULE MC4_3_faulty --------------------------- + +AllNodes == {"n1", "n2", "n3", "n4"} +TRUSTED_HEIGHT == 1 +TARGET_HEIGHT == 3 +TRUSTING_PERIOD == 1400 \* two weeks, one day is 100 time units :-) +CLOCK_DRIFT == 10 \* how much we assume the local clock is drifting +REAL_CLOCK_DRIFT == 3 \* how much the local clock is actually drifting +IS_PRIMARY_CORRECT == FALSE +FAULTY_RATIO == <<1, 3>> \* < 1 / 3 faulty validators + +VARIABLES + state, nextHeight, fetchedLightBlocks, lightBlockStatus, latestVerified, + nprobes, + localClock, + refClock, blockchain, Faulty + +(* the light client previous state components, used for monitoring *) +VARIABLES + prevVerified, + prevCurrent, + prevLocalClock, + prevVerdict + +INSTANCE Lightclient_003_draft +============================================================================== diff --git a/spec/light-client/verification/MC4_4_correct.tla b/spec/light-client/verification/MC4_4_correct.tla new file mode 100644 index 0000000000..0a8d96b59c --- /dev/null +++ b/spec/light-client/verification/MC4_4_correct.tla @@ -0,0 +1,26 @@ +------------------------- MODULE MC4_4_correct --------------------------- + +AllNodes == {"n1", "n2", "n3", "n4"} +TRUSTED_HEIGHT == 1 +TARGET_HEIGHT == 4 +TRUSTING_PERIOD == 1400 \* two weeks, one day is 100 time units :-) +CLOCK_DRIFT == 10 \* how much we assume the local clock is drifting +REAL_CLOCK_DRIFT == 3 \* how much the local clock is actually drifting +IS_PRIMARY_CORRECT == TRUE +FAULTY_RATIO == <<1, 3>> \* < 1 / 3 faulty validators + +VARIABLES + state, nextHeight, fetchedLightBlocks, lightBlockStatus, latestVerified, + nprobes, + localClock, + refClock, blockchain, Faulty + +(* the light client previous state components, used for monitoring *) +VARIABLES + prevVerified, + prevCurrent, + prevLocalClock, + prevVerdict + +INSTANCE Lightclient_003_draft +============================================================================ diff --git 
a/spec/light-client/verification/MC4_4_correct_drifted.tla b/spec/light-client/verification/MC4_4_correct_drifted.tla new file mode 100644 index 0000000000..7fefe349ea --- /dev/null +++ b/spec/light-client/verification/MC4_4_correct_drifted.tla @@ -0,0 +1,26 @@ +---------------------- MODULE MC4_4_correct_drifted --------------------------- + +AllNodes == {"n1", "n2", "n3", "n4"} +TRUSTED_HEIGHT == 1 +TARGET_HEIGHT == 4 +TRUSTING_PERIOD == 1400 \* two weeks, one day is 100 time units :-) +CLOCK_DRIFT == 10 \* how much we assume the local clock is drifting +REAL_CLOCK_DRIFT == 30 \* how much the local clock is actually drifting +IS_PRIMARY_CORRECT == TRUE +FAULTY_RATIO == <<1, 3>> \* < 1 / 3 faulty validators + +VARIABLES + state, nextHeight, fetchedLightBlocks, lightBlockStatus, latestVerified, + nprobes, + localClock, + refClock, blockchain, Faulty + +(* the light client previous state components, used for monitoring *) +VARIABLES + prevVerified, + prevCurrent, + prevLocalClock, + prevVerdict + +INSTANCE Lightclient_003_draft +============================================================================== diff --git a/spec/light-client/verification/MC4_4_faulty.tla b/spec/light-client/verification/MC4_4_faulty.tla new file mode 100644 index 0000000000..167fa61fb1 --- /dev/null +++ b/spec/light-client/verification/MC4_4_faulty.tla @@ -0,0 +1,26 @@ +---------------------------- MODULE MC4_4_faulty --------------------------- + +AllNodes == {"n1", "n2", "n3", "n4"} +TRUSTED_HEIGHT == 1 +TARGET_HEIGHT == 4 +TRUSTING_PERIOD == 1400 \* two weeks, one day is 100 time units :-) +CLOCK_DRIFT == 10 \* how much we assume the local clock is drifting +REAL_CLOCK_DRIFT == 3 \* how much the local clock is actually drifting +IS_PRIMARY_CORRECT == FALSE +FAULTY_RATIO == <<1, 3>> \* < 1 / 3 faulty validators + +VARIABLES + state, nextHeight, fetchedLightBlocks, lightBlockStatus, latestVerified, + nprobes, + localClock, + refClock, blockchain, Faulty + +(* the light client previous state components, used for monitoring *) +VARIABLES + prevVerified, + prevCurrent, + prevLocalClock, + prevVerdict + +INSTANCE Lightclient_003_draft +============================================================================== diff --git a/spec/light-client/verification/MC4_4_faulty_drifted.tla b/spec/light-client/verification/MC4_4_faulty_drifted.tla new file mode 100644 index 0000000000..e37c3cb404 --- /dev/null +++ b/spec/light-client/verification/MC4_4_faulty_drifted.tla @@ -0,0 +1,26 @@ +---------------------- MODULE MC4_4_faulty_drifted --------------------------- + +AllNodes == {"n1", "n2", "n3", "n4"} +TRUSTED_HEIGHT == 1 +TARGET_HEIGHT == 4 +TRUSTING_PERIOD == 1400 \* two weeks, one day is 100 time units :-) +CLOCK_DRIFT == 10 \* how much we assume the local clock is drifting +REAL_CLOCK_DRIFT == 30 \* how much the local clock is actually drifting +IS_PRIMARY_CORRECT == FALSE +FAULTY_RATIO == <<1, 3>> \* < 1 / 3 faulty validators + +VARIABLES + state, nextHeight, fetchedLightBlocks, lightBlockStatus, latestVerified, + nprobes, + localClock, + refClock, blockchain, Faulty + +(* the light client previous state components, used for monitoring *) +VARIABLES + prevVerified, + prevCurrent, + prevLocalClock, + prevVerdict + +INSTANCE Lightclient_003_draft +============================================================================== diff --git a/spec/light-client/verification/MC4_5_correct.tla b/spec/light-client/verification/MC4_5_correct.tla new file mode 100644 index 0000000000..cffb22cc8f --- /dev/null +++ 
b/spec/light-client/verification/MC4_5_correct.tla
@@ -0,0 +1,26 @@
+------------------------- MODULE MC4_5_correct ---------------------------
+
+AllNodes == {"n1", "n2", "n3", "n4"}
+TRUSTED_HEIGHT == 1
+TARGET_HEIGHT == 5
+TRUSTING_PERIOD == 1400 \* two weeks, one day is 100 time units :-)
+CLOCK_DRIFT == 10 \* how much we assume the local clock is drifting
+REAL_CLOCK_DRIFT == 3 \* how much the local clock is actually drifting
+IS_PRIMARY_CORRECT == TRUE
+FAULTY_RATIO == <<1, 3>> \* < 1 / 3 faulty validators
+
+VARIABLES
+  state, nextHeight, fetchedLightBlocks, lightBlockStatus, latestVerified,
+  nprobes,
+  localClock,
+  refClock, blockchain, Faulty
+
+(* the light client previous state components, used for monitoring *)
+VARIABLES
+  prevVerified,
+  prevCurrent,
+  prevLocalClock,
+  prevVerdict
+
+INSTANCE Lightclient_003_draft
+============================================================================
diff --git a/spec/light-client/verification/MC4_5_faulty.tla b/spec/light-client/verification/MC4_5_faulty.tla
new file mode 100644
index 0000000000..3d3a002907
--- /dev/null
+++ b/spec/light-client/verification/MC4_5_faulty.tla
@@ -0,0 +1,26 @@
+------------------------- MODULE MC4_5_faulty ---------------------------
+
+AllNodes == {"n1", "n2", "n3", "n4"}
+TRUSTED_HEIGHT == 1
+TARGET_HEIGHT == 5
+TRUSTING_PERIOD == 1400 \* two weeks, one day is 100 time units :-)
+CLOCK_DRIFT == 10 \* how much we assume the local clock is drifting
+REAL_CLOCK_DRIFT == 3 \* how much the local clock is actually drifting
+IS_PRIMARY_CORRECT == FALSE
+FAULTY_RATIO == <<1, 3>> \* < 1 / 3 faulty validators
+
+VARIABLES
+  state, nextHeight, fetchedLightBlocks, lightBlockStatus, latestVerified,
+  nprobes,
+  localClock,
+  refClock, blockchain, Faulty
+
+(* the light client previous state components, used for monitoring *)
+VARIABLES
+  prevVerified,
+  prevCurrent,
+  prevLocalClock,
+  prevVerdict
+
+INSTANCE Lightclient_003_draft
+============================================================================
diff --git a/spec/light-client/verification/MC4_6_faulty.tla b/spec/light-client/verification/MC4_6_faulty.tla
new file mode 100644
index 0000000000..64f164854b
--- /dev/null
+++ b/spec/light-client/verification/MC4_6_faulty.tla
@@ -0,0 +1,26 @@
+------------------------- MODULE MC4_6_faulty ---------------------------
+
+AllNodes == {"n1", "n2", "n3", "n4"}
+TRUSTED_HEIGHT == 1
+TARGET_HEIGHT == 6
+TRUSTING_PERIOD == 1400 \* two weeks, one day is 100 time units :-)
+CLOCK_DRIFT == 10 \* how much we assume the local clock is drifting
+REAL_CLOCK_DRIFT == 3 \* how much the local clock is actually drifting
+IS_PRIMARY_CORRECT == FALSE
+FAULTY_RATIO == <<1, 3>> \* < 1 / 3 faulty validators
+
+VARIABLES
+  state, nextHeight, fetchedLightBlocks, lightBlockStatus, latestVerified,
+  nprobes,
+  localClock,
+  refClock, blockchain, Faulty
+
+(* the light client previous state components, used for monitoring *)
+VARIABLES
+  prevVerified,
+  prevCurrent,
+  prevLocalClock,
+  prevVerdict
+
+INSTANCE Lightclient_003_draft
+============================================================================
diff --git a/spec/light-client/verification/MC4_7_faulty.tla b/spec/light-client/verification/MC4_7_faulty.tla
new file mode 100644
index 0000000000..dc6a94eb1d
--- /dev/null
+++ b/spec/light-client/verification/MC4_7_faulty.tla
@@ -0,0 +1,26 @@
+------------------------- MODULE MC4_7_faulty ---------------------------
+
+AllNodes == {"n1", "n2", "n3", "n4"}
+TRUSTED_HEIGHT == 1
+TARGET_HEIGHT == 7
+TRUSTING_PERIOD == 1400 \* two
weeks, one day is 100 time units :-)
+CLOCK_DRIFT == 10 \* how much we assume the local clock is drifting
+REAL_CLOCK_DRIFT == 3 \* how much the local clock is actually drifting
+IS_PRIMARY_CORRECT == FALSE
+FAULTY_RATIO == <<1, 3>> \* < 1 / 3 faulty validators
+
+VARIABLES
+  state, nextHeight, fetchedLightBlocks, lightBlockStatus, latestVerified,
+  nprobes,
+  localClock,
+  refClock, blockchain, Faulty
+
+(* the light client previous state components, used for monitoring *)
+VARIABLES
+  prevVerified,
+  prevCurrent,
+  prevLocalClock,
+  prevVerdict
+
+INSTANCE Lightclient_003_draft
+============================================================================
diff --git a/spec/light-client/verification/MC5_5_correct.tla b/spec/light-client/verification/MC5_5_correct.tla
new file mode 100644
index 0000000000..00b4151f7c
--- /dev/null
+++ b/spec/light-client/verification/MC5_5_correct.tla
@@ -0,0 +1,26 @@
+------------------------- MODULE MC5_5_correct ---------------------------
+
+AllNodes == {"n1", "n2", "n3", "n4", "n5"}
+TRUSTED_HEIGHT == 1
+TARGET_HEIGHT == 5
+TRUSTING_PERIOD == 1400 \* two weeks, one day is 100 time units :-)
+CLOCK_DRIFT == 10 \* how much we assume the local clock is drifting
+REAL_CLOCK_DRIFT == 3 \* how much the local clock is actually drifting
+IS_PRIMARY_CORRECT == TRUE
+FAULTY_RATIO == <<1, 3>> \* < 1 / 3 faulty validators
+
+VARIABLES
+  state, nextHeight, fetchedLightBlocks, lightBlockStatus, latestVerified,
+  nprobes,
+  localClock,
+  refClock, blockchain, Faulty
+
+(* the light client previous state components, used for monitoring *)
+VARIABLES
+  prevVerified,
+  prevCurrent,
+  prevLocalClock,
+  prevVerdict
+
+INSTANCE Lightclient_003_draft
+============================================================================
diff --git a/spec/light-client/verification/MC5_5_correct_peer_two_thirds_faulty.tla b/spec/light-client/verification/MC5_5_correct_peer_two_thirds_faulty.tla
new file mode 100644
index 0000000000..d4212032fc
--- /dev/null
+++ b/spec/light-client/verification/MC5_5_correct_peer_two_thirds_faulty.tla
@@ -0,0 +1,26 @@
+------------------- MODULE MC5_5_correct_peer_two_thirds_faulty ----------------------
+
+AllNodes == {"n1", "n2", "n3", "n4", "n5"}
+TRUSTED_HEIGHT == 1
+TARGET_HEIGHT == 5
+TRUSTING_PERIOD == 1400 \* two weeks, one day is 100 time units :-)
+CLOCK_DRIFT == 10 \* how much we assume the local clock is drifting
+REAL_CLOCK_DRIFT == 3 \* how much the local clock is actually drifting
+IS_PRIMARY_CORRECT == TRUE
+FAULTY_RATIO == <<2, 3>> \* < 2 / 3 faulty validators
+
+VARIABLES
+  state, nextHeight, fetchedLightBlocks, lightBlockStatus, latestVerified,
+  nprobes,
+  localClock,
+  refClock, blockchain, Faulty
+
+(* the light client previous state components, used for monitoring *)
+VARIABLES
+  prevVerified,
+  prevCurrent,
+  prevLocalClock,
+  prevVerdict
+
+INSTANCE Lightclient_003_draft
+============================================================================
diff --git a/spec/light-client/verification/MC5_5_faulty.tla b/spec/light-client/verification/MC5_5_faulty.tla
new file mode 100644
index 0000000000..f63d175a17
--- /dev/null
+++ b/spec/light-client/verification/MC5_5_faulty.tla
@@ -0,0 +1,26 @@
+----------------- MODULE MC5_5_faulty ---------------------
+
+AllNodes == {"n1", "n2", "n3", "n4", "n5"}
+TRUSTED_HEIGHT == 1
+TARGET_HEIGHT == 5
+TRUSTING_PERIOD == 1400 \* two weeks, one day is 100 time units :-)
+CLOCK_DRIFT == 10 \* how much we assume the local clock is drifting
+REAL_CLOCK_DRIFT == 3 \* how much the local clock is
actually drifting
+IS_PRIMARY_CORRECT == FALSE
+FAULTY_RATIO == <<2, 3>> \* < 2 / 3 faulty validators
+
+VARIABLES
+  state, nextHeight, fetchedLightBlocks, lightBlockStatus, latestVerified,
+  nprobes,
+  localClock,
+  refClock, blockchain, Faulty
+
+(* the light client previous state components, used for monitoring *)
+VARIABLES
+  prevVerified,
+  prevCurrent,
+  prevLocalClock,
+  prevVerdict
+
+INSTANCE Lightclient_003_draft
+============================================================================
diff --git a/spec/light-client/verification/MC5_5_faulty_peer_two_thirds_faulty.tla b/spec/light-client/verification/MC5_5_faulty_peer_two_thirds_faulty.tla
new file mode 100644
index 0000000000..ef9974d062
--- /dev/null
+++ b/spec/light-client/verification/MC5_5_faulty_peer_two_thirds_faulty.tla
@@ -0,0 +1,26 @@
+----------------- MODULE MC5_5_faulty_peer_two_thirds_faulty ---------------------
+
+AllNodes == {"n1", "n2", "n3", "n4", "n5"}
+TRUSTED_HEIGHT == 1
+TARGET_HEIGHT == 5
+TRUSTING_PERIOD == 1400 \* two weeks, one day is 100 time units :-)
+CLOCK_DRIFT == 10 \* how much we assume the local clock is drifting
+REAL_CLOCK_DRIFT == 3 \* how much the local clock is actually drifting
+IS_PRIMARY_CORRECT == FALSE
+FAULTY_RATIO == <<2, 3>> \* < 2 / 3 faulty validators
+
+VARIABLES
+  state, nextHeight, fetchedLightBlocks, lightBlockStatus, latestVerified,
+  nprobes,
+  localClock,
+  refClock, blockchain, Faulty
+
+(* the light client previous state components, used for monitoring *)
+VARIABLES
+  prevVerified,
+  prevCurrent,
+  prevLocalClock,
+  prevVerdict
+
+INSTANCE Lightclient_003_draft
+============================================================================
diff --git a/spec/light-client/verification/MC5_7_faulty.tla b/spec/light-client/verification/MC5_7_faulty.tla
new file mode 100644
index 0000000000..63461b0c89
--- /dev/null
+++ b/spec/light-client/verification/MC5_7_faulty.tla
@@ -0,0 +1,26 @@
+------------------------- MODULE MC5_7_faulty ---------------------------
+
+AllNodes == {"n1", "n2", "n3", "n4", "n5"}
+TRUSTED_HEIGHT == 1
+TARGET_HEIGHT == 7
+TRUSTING_PERIOD == 1400 \* two weeks, one day is 100 time units :-)
+CLOCK_DRIFT == 10 \* how much we assume the local clock is drifting
+REAL_CLOCK_DRIFT == 3 \* how much the local clock is actually drifting
+IS_PRIMARY_CORRECT == FALSE
+FAULTY_RATIO == <<1, 3>> \* < 1 / 3 faulty validators
+
+VARIABLES
+  state, nextHeight, fetchedLightBlocks, lightBlockStatus, latestVerified,
+  nprobes,
+  localClock,
+  refClock, blockchain, Faulty
+
+(* the light client previous state components, used for monitoring *)
+VARIABLES
+  prevVerified,
+  prevCurrent,
+  prevLocalClock,
+  prevVerdict
+
+INSTANCE Lightclient_003_draft
+============================================================================
diff --git a/spec/light-client/verification/MC7_5_faulty.tla b/spec/light-client/verification/MC7_5_faulty.tla
new file mode 100644
index 0000000000..860f9c0aa8
--- /dev/null
+++ b/spec/light-client/verification/MC7_5_faulty.tla
@@ -0,0 +1,26 @@
+------------------------- MODULE MC7_5_faulty ---------------------------
+
+AllNodes == {"n1", "n2", "n3", "n4", "n5", "n6", "n7"}
+TRUSTED_HEIGHT == 1
+TARGET_HEIGHT == 5
+TRUSTING_PERIOD == 1400 \* two weeks, one day is 100 time units :-)
+CLOCK_DRIFT == 10 \* how much we assume the local clock is drifting
+REAL_CLOCK_DRIFT == 3 \* how much the local clock is actually drifting
+IS_PRIMARY_CORRECT == FALSE
+FAULTY_RATIO == <<1, 3>> \* < 1 / 3 faulty validators
+
+VARIABLES
+  state, nextHeight,
fetchedLightBlocks, lightBlockStatus, latestVerified, + nprobes, + localClock, + refClock, blockchain, Faulty + +(* the light client previous state components, used for monitoring *) +VARIABLES + prevVerified, + prevCurrent, + prevLocalClock, + prevVerdict + +INSTANCE Lightclient_003_draft +============================================================================ diff --git a/spec/light-client/verification/MC7_7_faulty.tla b/spec/light-client/verification/MC7_7_faulty.tla new file mode 100644 index 0000000000..79e328f141 --- /dev/null +++ b/spec/light-client/verification/MC7_7_faulty.tla @@ -0,0 +1,26 @@ +------------------------- MODULE MC7_7_faulty --------------------------- + +AllNodes == {"n1", "n2", "n3", "n4", "n5", "n6", "n7"} +TRUSTED_HEIGHT == 1 +TARGET_HEIGHT == 7 +TRUSTING_PERIOD == 1400 \* two weeks, one day is 100 time units :-) +CLOCK_DRIFT == 10 \* how much we assume the local clock is drifting +REAL_CLOCK_DRIFT == 3 \* how much the local clock is actually drifting +IS_PRIMARY_CORRECT == FALSE +FAULTY_RATIO == <<1, 3>> \* < 1 / 3 faulty validators + +VARIABLES + state, nextHeight, fetchedLightBlocks, lightBlockStatus, latestVerified, + nprobes, + localClock, + refClock, blockchain, Faulty + +(* the light client previous state components, used for monitoring *) +VARIABLES + prevVerified, + prevCurrent, + prevLocalClock, + prevVerdict + +INSTANCE Lightclient_003_draft +============================================================================ diff --git a/spec/light-client/verification/README.md b/spec/light-client/verification/README.md new file mode 100644 index 0000000000..8787d0725a --- /dev/null +++ b/spec/light-client/verification/README.md @@ -0,0 +1,577 @@ +--- +order: 1 +parent: + title: Verification + order: 2 +--- +# Core Verification + +## Problem statement + +We assume that the light client knows a (base) header `inithead` it trusts (by social consensus or because +the light client has decided to trust the header before). The goal is to check whether another header +`newhead` can be trusted based on the data in `inithead`. + +The correctness of the protocol is based on the assumption that `inithead` was generated by an instance of +Tendermint consensus. + +### Failure Model + +For the purpose of the following definitions we assume that there exists a function +`validators` that returns the corresponding validator set for the given hash. + +The light client protocol is defined with respect to the following failure model: + +Given a known bound `TRUSTED_PERIOD`, and a block `b` with header `h` generated at time `Time` +(i.e. `h.Time = Time`), a set of validators that hold more than 2/3 of the voting power +in `validators(b.Header.NextValidatorsHash)` is correct until time `b.Header.Time + TRUSTED_PERIOD`. + +*Assumption*: "correct" is defined w.r.t. realtime (some Newtonian global notion of time, i.e., wall time), +while `Header.Time` corresponds to the [BFT time](../../consensus/bft-time.md). In this note, we assume that clocks of correct processes +are synchronized (for example using NTP), and therefore there is bounded clock drift (`CLOCK_DRIFT`) between local clocks and +BFT time. More precisely, for every correct light client process and every `header.Time` (i.e. BFT Time, for a header correctly +generated by the Tendermint consensus), the following inequality holds: `Header.Time < now + CLOCK_DRIFT`, +where `now` corresponds to the system clock at the light client process. 
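+
+As an illustration only (this sketch is not part of the specification; the helper name is
+ours, and we assume Go's `time.Time` and `time.Duration`, omitting package and import
+boilerplate as the other snippets in this document do), a light client could enforce this
+inequality on a received header as follows:
+
+```go
+    // acceptHeaderTime is a hypothetical helper: it accepts a header only if
+    // its BFT time satisfies Header.Time < now + CLOCK_DRIFT, where now is
+    // read from the local system clock of the light client process.
+    func acceptHeaderTime(headerTime time.Time, now time.Time, clockDrift time.Duration) bool {
+        return headerTime.Before(now.Add(clockDrift))
+    }
+```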
+
+Furthermore, we assume that `TRUSTED_PERIOD` is (several) orders of magnitude bigger than `CLOCK_DRIFT`
+(`TRUSTED_PERIOD >> CLOCK_DRIFT`), as `CLOCK_DRIFT` (using NTP) is in the order of milliseconds and
+`TRUSTED_PERIOD` is in the order of weeks.
+
+We expect a light client process defined in this document to be used in a context in which there is some
+larger period during which misbehaving validators can be detected and punished (we normally refer to it as `UNBONDING_PERIOD`
+due to the "bonding" mechanism in modern proof of stake systems). Furthermore, we assume that
+`TRUSTED_PERIOD < UNBONDING_PERIOD` and that they are normally of the same order of magnitude, for example
+`TRUSTED_PERIOD = UNBONDING_PERIOD / 2`.
+
+The specification in this document considers an implementation of the light client under the Failure Model defined above.
+Mechanisms like `fork accountability` and `evidence submission` are defined in the context of `UNBONDING_PERIOD` and
+they incentivize validators to follow the protocol specification defined in this document. If they don't,
+and we have 1/3 (or more) faulty validators, safety may be violated. Our approach then is
+to *detect* these cases (after the fact), and take suitable repair actions (automatic and social).
+This is discussed in the document on [Fork accountability](../accountability/README.md).
+
+The term "trusted" above indicates that the correctness of the protocol depends on
+this assumption. It is the responsibility of the user who runs the light client to make sure that the risk
+of trusting a corrupted/forged `inithead` is negligible.
+
+*Remark*: This failure model might change to a hybrid version that takes heights into account in the future.
+
+### High Level Solution
+
+Upon initialization, the light client is given a header `inithead` it trusts (by
+social consensus). When a light client sees a new signed header `snh`, it has to decide whether to trust the new
+header. Trust can be obtained by (possibly a combination of) the following three methods.
+
+1. **Uninterrupted sequence of headers.** Given a trusted header `h` and an untrusted header `h1`,
+the light client trusts a header `h1` if it trusts all headers in between `h` and `h1`.
+
+2. **Trusted period.** Given a trusted header `h`, an untrusted header `h1 > h` and `TRUSTED_PERIOD` during which
+the failure model holds, we can check whether at least one validator, that has been continuously correct
+from `h.Time` until now, has signed `h1`. If this is the case, we can trust `h1`.
+
+3. **Bisection.** If a check according to 2. (trusted period) fails, the light client can try to
+obtain a header `hp` whose height lies between `h` and `h1` in order to check whether `h` can be used to
+get trust for `hp`, and `hp` can be used to get trust for `h1`. If this is the case we can trust `h1`;
+if not, we continue recursively until either we find a set of headers that (transitively) establishes a trust relation
+between `h` and `h1`, or we fail because two consecutive headers do not verify against each other.
+
+## Definitions
+
+### Data structures
+
+In the following, only the details of the data structures needed for this specification are given.
+
+ ```go
+ type Header struct {
+     Height int64
+     Time Time // the chain time when the header (block) was generated
+
+     LastBlockID BlockID // prev block info
+     ValidatorsHash []byte // hash of the validators for the current block
+     NextValidatorsHash []byte // hash of the validators for the next block
+ }
+
+ type SignedHeader struct {
+     Header Header
+     Commit Commit // commit for the given header
+ }
+
+ type ValidatorSet struct {
+     Validators []Validator
+     TotalVotingPower int64
+ }
+
+ type Validator struct {
+     Address Address // validator address (we assume validator's addresses are unique)
+     VotingPower int64 // validator's voting power
+ }
+
+ type TrustedState struct {
+     SignedHeader SignedHeader
+     ValidatorSet ValidatorSet
+ }
+ ```
+
+### Functions
+
+For the purpose of this light client specification, we assume that the Tendermint Full Node
+exposes the following functions over Tendermint RPC:
+
+```go
+  // returns signed header: Header with Commit, for the given height
+  func Commit(height int64) (SignedHeader, error)
+
+  // returns validator set for the given height
+  func Validators(height int64) (ValidatorSet, error)
+```
+
+Furthermore, we assume the following auxiliary functions:
+
+```go
+  // returns true if the commit is for the header, i.e., if it contains
+  // the correct hash of the header; otherwise false
+  func matchingCommit(header Header, commit Commit) bool
+
+  // returns the set of validators from the given validator set that
+  // committed the block (that correctly signed the block)
+  // it performs signature verification, so it can be computationally expensive
+  func signers(commit Commit, validatorSet ValidatorSet) []Validator
+
+  // returns the voting power the validators in v1 have according to their voting power in set v2
+  // it does not perform signature verification
+  func votingPowerIn(v1 []Validator, v2 ValidatorSet) int64
+
+  // returns the hash of the given validator set
+  func hash(v2 ValidatorSet) []byte
+```
+
+In the functions below we will be using `trustThreshold` as a parameter. For simplicity
+we assume that `trustThreshold` is a float between `1/3` and `2/3` and we will not be checking it
+in the pseudo-code.
+
+**VerifySingle.** The function `VerifySingle` attempts to validate a given untrusted header and the corresponding validator sets
+based on a given trusted state. It ensures that the trusted state is still within its trusted period,
+and that the untrusted header is within the assumed `clockDrift` bound of the passed time `now`.
+Note that this function does not make external (RPC) calls to the full node; the whole logic is
+based on the local (given) state. This function is supposed to be used by the IBC handlers.
+
+```go
+func VerifySingle(untrustedSh SignedHeader,
+                  untrustedVs ValidatorSet,
+                  untrustedNextVs ValidatorSet,
+                  trustedState TrustedState,
+                  trustThreshold float,
+                  trustingPeriod Duration,
+                  clockDrift Duration,
+                  now Time) (TrustedState, error) {
+
+    if untrustedSh.Header.Time > now + clockDrift {
+        return (trustedState, ErrInvalidHeaderTime)
+    }
+
+    trustedHeader = trustedState.SignedHeader.Header
+    if !isWithinTrustedPeriod(trustedHeader, trustingPeriod, now) {
+        return (trustedState, ErrHeaderNotWithinTrustedPeriod)
+    }
+
+    // we assume that the time it takes to execute the verifySingle function
+    // is several orders of magnitude smaller than trustingPeriod
+    error = verifySingle(
+        trustedState,
+        untrustedSh,
+        untrustedVs,
+        untrustedNextVs,
+        trustThreshold)
+
+    if error != nil return (trustedState, error)
+
+    // the untrusted header is now trusted
+    newTrustedState = TrustedState(untrustedSh, untrustedNextVs)
+    return (newTrustedState, nil)
+}
+
+// returns true if the header is within its light client trusted period; otherwise returns false
+func isWithinTrustedPeriod(header Header,
+                           trustingPeriod Duration,
+                           now Time) bool {
+
+    return header.Time + trustingPeriod > now
+}
+```
+
+Note that in case `VerifySingle` returns without an error (the untrusted header
+is successfully verified), we have a guarantee that the transition of the trust
+from `trustedState` to `newTrustedState` happened during the trusted period of
+`trustedState.SignedHeader.Header`.
+
+TODO: Explain what happens in case `VerifySingle` returns with an error.
+
+**verifySingle.** The function `verifySingle` verifies a single untrusted header
+against a given trusted state. It includes all validations and signature verification.
+It is not publicly exposed since it does not check for header expiry (time constraints)
+and hence it is possible to use it incorrectly.
+
+```go
+func verifySingle(trustedState TrustedState,
+                  untrustedSh SignedHeader,
+                  untrustedVs ValidatorSet,
+                  untrustedNextVs ValidatorSet,
+                  trustThreshold float) error {
+
+    untrustedHeader = untrustedSh.Header
+    untrustedCommit = untrustedSh.Commit
+
+    trustedHeader = trustedState.SignedHeader.Header
+    trustedVs = trustedState.ValidatorSet
+
+    if trustedHeader.Height >= untrustedHeader.Height return ErrNonIncreasingHeight
+    if trustedHeader.Time >= untrustedHeader.Time return ErrNonIncreasingTime
+
+    // validate the untrusted header against its commit, vals, and next_vals
+    error = validateSignedHeaderAndVals(untrustedSh, untrustedVs, untrustedNextVs)
+    if error != nil return error
+
+    // check for adjacent headers
+    if untrustedHeader.Height == trustedHeader.Height + 1 {
+        if trustedHeader.NextValidatorsHash != untrustedHeader.ValidatorsHash {
+            return ErrInvalidAdjacentHeaders
+        }
+    } else {
+        error = verifyCommitTrusting(trustedVs, untrustedCommit, untrustedVs, trustThreshold)
+        if error != nil return error
+    }
+
+    // verify the untrusted commit
+    return verifyCommitFull(untrustedVs, untrustedCommit)
+}
+
+// returns nil if the header and the validator sets are consistent; otherwise returns an error
+func validateSignedHeaderAndVals(signedHeader SignedHeader, vs ValidatorSet, nextVs ValidatorSet) error {
+    header = signedHeader.Header
+    if hash(vs) != header.ValidatorsHash return ErrInvalidValidatorSet
+    if hash(nextVs) != header.NextValidatorsHash return ErrInvalidNextValidatorSet
+    if !matchingCommit(header, signedHeader.Commit) return ErrInvalidCommitValue
+    return nil
+}
+
+// returns nil if at least a single correct signer signed the commit; otherwise returns an error
+func verifyCommitTrusting(trustedVs ValidatorSet,
+                          commit Commit,
+                          untrustedVs ValidatorSet,
+                          trustLevel float) error {
+
+    totalPower := trustedVs.TotalVotingPower
+    signedPower := votingPowerIn(signers(commit, untrustedVs), trustedVs)
+
+    // check that the signers account for more than max(1/3, trustLevel) of the voting power
+    // this ensures that there is at least a single correct validator in the set of signers
+    if signedPower < max(1/3, trustLevel) * totalPower return ErrInsufficientVotingPower
+    return nil
+}
+
+// returns nil if the commit is signed by more than 2/3 of the voting power of the given
+// validator set; returns an error otherwise
+func verifyCommitFull(vs ValidatorSet, commit Commit) error {
+    totalPower := vs.TotalVotingPower
+    signedPower := votingPowerIn(signers(commit, vs), vs)
+
+    // check that the signers account for +2/3 of the voting power
+    if signedPower * 3 <= totalPower * 2 return ErrInvalidCommit
+    return nil
+}
+```
+
+**VerifyHeaderAtHeight.** The function `VerifyHeaderAtHeight` captures the high-level
+logic, i.e., the application's call to the light client module to download and verify a header
+for some height.
+
+```go
+func VerifyHeaderAtHeight(untrustedHeight int64,
+                          trustedState TrustedState,
+                          trustThreshold float,
+                          trustingPeriod Duration,
+                          clockDrift Duration) (TrustedState, error) {
+
+    trustedHeader := trustedState.SignedHeader.Header
+
+    now := System.Time()
+    if !isWithinTrustedPeriod(trustedHeader, trustingPeriod, now) {
+        return (trustedState, ErrHeaderNotWithinTrustedPeriod)
+    }
+
+    newTrustedState, err := VerifyBisection(untrustedHeight,
+                                            trustedState,
+                                            trustThreshold,
+                                            trustingPeriod,
+                                            clockDrift,
+                                            now)
+
+    if err != nil return (trustedState, err)
+
+    now = System.Time()
+    if !isWithinTrustedPeriod(trustedHeader, trustingPeriod, now) {
+        return (trustedState, ErrHeaderNotWithinTrustedPeriod)
+    }
+
+    return (newTrustedState, err)
+}
+```
+
+Note that in case `VerifyHeaderAtHeight` returns without an error (the untrusted header
+is successfully verified), we have a guarantee that the transition of the trust
+from `trustedState` to `newTrustedState` happened during the trusted period of
+`trustedState.SignedHeader.Header`.
+
+In case `VerifyHeaderAtHeight` returns with an error, then either (i) the full node we are talking to is faulty
+or (ii) the trusted header has expired (it is outside its trusted period). In case (i) the full node is faulty, so the
+light client should disconnect and reinitialise with a new peer. In case (ii), as the trusted header has expired,
+we need to reinitialise the light client with a new trusted header (one that is within its trusted period),
+but we don't necessarily need to disconnect from the full node we are talking to (as we haven't observed full node misbehavior in this case).
+
+**VerifyBisection.** The function `VerifyBisection` implements the
+recursive logic for checking whether it is possible to build a trust
+relationship between `trustedState` and the untrusted header at the given height over a
+finite set of (downloaded and verified) headers.
+
+```go
+func VerifyBisection(untrustedHeight int64,
+                     trustedState TrustedState,
+                     trustThreshold float,
+                     trustingPeriod Duration,
+                     clockDrift Duration,
+                     now Time) (TrustedState, error) {
+
+    untrustedSh, error := Commit(untrustedHeight)
+    if error != nil return (trustedState, ErrRequestFailed)
+
+    untrustedHeader = untrustedSh.Header
+
+    // note that we pass now during the recursive calls. This is fine as
+    // all other untrusted headers we download during recursion will be
+    // for smaller heights, and therefore should have happened before.
+    if untrustedHeader.Time > now + clockDrift {
+        return (trustedState, ErrInvalidHeaderTime)
+    }
+
+    untrustedVs, error := Validators(untrustedHeight)
+    if error != nil return (trustedState, ErrRequestFailed)
+
+    untrustedNextVs, error := Validators(untrustedHeight + 1)
+    if error != nil return (trustedState, ErrRequestFailed)
+
+    error = verifySingle(
+        trustedState,
+        untrustedSh,
+        untrustedVs,
+        untrustedNextVs,
+        trustThreshold)
+
+    if fatalError(error) return (trustedState, error)
+
+    if error == nil {
+        // the untrusted header is now trusted.
newTrustedState = TrustedState(untrustedSh, untrustedNextVs)
+        return (newTrustedState, nil)
+    }
+
+    // at this point in time we need to do bisection
+    pivotHeight := ceil((trustedState.SignedHeader.Header.Height + untrustedHeight) / 2)
+
+    newTrustedState, error = VerifyBisection(pivotHeight,
+                                             trustedState,
+                                             trustThreshold,
+                                             trustingPeriod,
+                                             clockDrift,
+                                             now)
+    if error != nil return (newTrustedState, error)
+
+    return VerifyBisection(untrustedHeight,
+                           newTrustedState,
+                           trustThreshold,
+                           trustingPeriod,
+                           clockDrift,
+                           now)
+}
+
+func fatalError(err) bool {
+    return err == ErrHeaderNotWithinTrustedPeriod OR
+           err == ErrInvalidAdjacentHeaders OR
+           err == ErrNonIncreasingHeight OR
+           err == ErrNonIncreasingTime OR
+           err == ErrInvalidValidatorSet OR
+           err == ErrInvalidNextValidatorSet OR
+           err == ErrInvalidCommitValue OR
+           err == ErrInvalidCommit
+}
+```
+
+### The case `untrustedHeader.Height < trustedHeader.Height`
+
+In the use case where someone tells the light client that application data that is relevant for it
+can be read in the block of height `k` and the light client trusts a more recent header, we can use the
+hashes to verify headers "down the chain." That is, we iterate down the heights and check the hashes in each step.
+
+*Remark.* For the case where the light client trusts two headers `i` and `j` with `i < k < j`, we should
+discuss/experiment whether the forward or the backward method is more effective.
+
+```go
+func VerifyHeaderBackwards(trustedHeader Header,
+                           untrustedHeader Header,
+                           trustingPeriod Duration,
+                           clockDrift Duration) error {
+
+    if untrustedHeader.Height >= trustedHeader.Height return ErrNonDecreasingHeight
+    if untrustedHeader.Time >= trustedHeader.Time return ErrNonDecreasingTime
+
+    now := System.Time()
+    if !isWithinTrustedPeriod(trustedHeader, trustingPeriod, now) {
+        return ErrHeaderNotWithinTrustedPeriod
+    }
+
+    old := trustedHeader
+    for i := trustedHeader.Height - 1; i > untrustedHeader.Height; i-- {
+        untrustedSh, error := Commit(i)
+        if error != nil return ErrRequestFailed
+
+        if hash(untrustedSh.Header) != old.LastBlockID.Hash {
+            return ErrInvalidAdjacentHeaders
+        }
+
+        old = untrustedSh.Header
+    }
+
+    if hash(untrustedHeader) != old.LastBlockID.Hash {
+        return ErrInvalidAdjacentHeaders
+    }
+
+    now = System.Time()
+    if !isWithinTrustedPeriod(trustedHeader, trustingPeriod, now) {
+        return ErrHeaderNotWithinTrustedPeriod
+    }
+
+    return nil
+}
+```
+
+*Assumption*: In the following, we assume that *untrusted_h.Header.height > trusted_h.Header.height*. We will quickly discuss the other case in the next section.
+
+We consider the following set-up:
+
+- the light client communicates with one full node
+- the light client locally stores all the headers that have passed basic verification and that are within the light client trust period. In the pseudo code below we
+write *Store.Add(header)* for this. If a header failed to verify, then
+the full node we are talking to is faulty and we should disconnect from it and reinitialise with a new peer.
+- If `CanTrust` returns *error*, then the light client has seen a forged header or the trusted header has expired (it is outside its trusted period).
+  - In the case of a forged header, the full node is faulty, so the light client should disconnect and reinitialise with a new peer.
If the trusted header has expired,
+    we need to reinitialise the light client with a new trusted header (one that is within its trusted period), but we don't necessarily need to disconnect from the full node
+    we are talking to (as we haven't observed full node misbehavior in this case).
+
+## Correctness of the Light Client Protocols
+
+### Definitions
+
+- `TRUSTED_PERIOD`: trusted period
+- for realtime `t`, the predicate `correct(v,t)` is true if the validator `v`
+  follows the protocol until time `t` (we will see about recovery later).
+- Validator fields. We will write a validator as a tuple `(v,p)` such that
+  - `v` is the identifier (i.e., validator address; we assume identifiers are unique in each validator set)
+  - `p` is its voting power
+- For each header `h`, we write `trust(h) = true` if the light client trusts `h`.
+
+### Failure Model
+
+If a block `b` with a header `h` is generated at time `Time` (i.e. `h.Time = Time`), then a set of validators that
+hold more than `2/3` of the voting power in `validators(h.NextValidatorsHash)` is correct until time
+`h.Time + TRUSTED_PERIOD`.
+
+Formally,
+\[
+\sum_{(v,p) \in validators(h.NextValidatorsHash) \wedge correct(v,h.Time + TRUSTED_PERIOD)} p >
+2/3 \sum_{(v,p) \in validators(h.NextValidatorsHash)} p
+\]
+
+The light client communicates with a full node and learns new headers. The goal is to locally decide whether to trust a header. Our implementation needs to ensure the following two properties:
+
+- *Light Client Completeness*: If a header `h` was correctly generated by an instance of Tendermint consensus (and its age is less than the trusted period),
+then the light client should eventually set `trust(h)` to `true`.
+
+- *Light Client Accuracy*: If a header `h` was *not generated* by an instance of Tendermint consensus, then the light client should never set `trust(h)` to `true`.
+
+*Remark*: If in the course of the computation, the light client obtains certainty that some headers were forged by adversaries
+(that is, were not generated by an instance of Tendermint consensus), it may submit (a subset of) the headers it has seen as evidence of misbehavior.
+
+*Remark*: In Completeness we use "eventually", while in practice `trust(h)` should be set to `true` before `h.Time + TRUSTED_PERIOD`. If not, the header
+cannot be trusted because it is too old.
+
+*Remark*: If a header `h` is marked with `trust(h)`, but it is too old at some point in time we denote with `now` (`h.Time + TRUSTED_PERIOD < now`),
+then the light client should set `trust(h)` to `false` again at time `now`.
+
+*Assumption*: Initially, the light client has a header `inithead` that it trusts, that is, `inithead` was correctly generated by the Tendermint consensus.
+
+To reason about the correctness, we may prove the following invariant.
+
+*Verification Condition: Light Client Invariant.*
+For each light client `l` and each header `h`:
+if `l` has set `trust(h) = true`,
+then validators that are correct until time `h.Time + TRUSTED_PERIOD` have more than two thirds of the voting power in `validators(h.NextValidatorsHash)`.
+
+Formally,
+\[
+\sum_{(v,p) \in validators(h.NextValidatorsHash) \wedge correct(v,h.Time + TRUSTED_PERIOD)} p >
+2/3 \sum_{(v,p) \in validators(h.NextValidatorsHash)} p
+\]
+
+*Remark.* To prove the invariant, we will have to prove that the light client only trusts headers that were correctly generated by Tendermint consensus.
+Then the formula above follows from the failure model.
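+
+*Example*: As an illustrative instance of the invariant (the numbers are made up), suppose `validators(h.NextValidatorsHash)` consists of four validators with voting power `25` each, so the total voting power is `100`. The invariant then requires that the validators that are correct until `h.Time + TRUSTED_PERIOD` hold more than `2/3 * 100`, i.e., at least three of the four validators (with combined power `75 > 66.7`) must remain correct during that period.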
+
+## Details
+
+**Observation 1.** If `h.Time + TRUSTED_PERIOD > now`, we trust the validator set `validators(h.NextValidatorsHash)`.
+
+When we say we trust `validators(h.NextValidatorsHash)` we do *not* trust that each individual validator in `validators(h.NextValidatorsHash)`
+is correct, but we only trust the fact that less than `1/3` of them are faulty (more precisely, the faulty ones have less than `1/3` of the total voting power).
+
+*`VerifySingle` correctness arguments*
+
+Light Client Accuracy:
+
+- Assume by contradiction that `untrustedHeader` was not generated correctly and the light client sets trust to true because `verifySingle` returns without error.
+- `trustedState` is trusted and sufficiently new.
+- By the Failure Model, less than `1/3` of the voting power is held by faulty validators => at least one correct validator `v` has signed `untrustedHeader`.
+- As `v` is correct up to now, it followed the Tendermint consensus protocol at least up to signing `untrustedHeader` => `untrustedHeader` was correctly generated.
+We arrive at the required contradiction.
+
+Light Client Completeness:
+
+- The check is successful if sufficiently many validators of `trustedState` are still validators at the height `untrustedHeader.Height` and signed `untrustedHeader`.
+- If `untrustedHeader.Height = trustedHeader.Height + 1`, and both headers were generated correctly, the test passes.
+
+*Verification Condition:* We may need a Tendermint invariant stating that if `untrustedSignedHeader.Header.Height = trustedHeader.Height + 1` then
+`signers(untrustedSignedHeader.Commit) \subseteq validators(trustedHeader.NextValidatorsHash)`.
+
+*Remark*: The variable `trustThreshold` can be used if the user believes that relying on one correct validator is not sufficient.
+However, in case of (frequent) changes in the validator set, the higher the `trustThreshold` is chosen, the more likely it becomes that
+`verifySingle` returns with an error for non-adjacent headers.
+
+*`VerifyBisection` correctness arguments (sketch)*
+
+Light Client Accuracy:
+
+- Assume by contradiction that the header at `untrustedHeight` obtained from the full node was not generated correctly and
+the light client sets trust to true because `VerifyBisection` returns without an error.
+- `VerifyBisection` returns without error only if all calls to `verifySingle` in the recursion return without error (return `nil`).
+- Thus we have a sequence of headers that all satisfied the `verifySingle` checks.
+- We again arrive at a contradiction.
+
+Light Client Completeness:
+
+This is only ensured if upon `Commit(pivot)` the light client is always provided with a correctly generated header.
+
+*Stalling*
+
+With `VerifyBisection`, a faulty full node could stall a light client by creating a long sequence of headers that are queried one-by-one by the light client and look OK,
+before the light client eventually detects a problem. There are several ways to address this:
+
+- Each call to `Commit` could be issued to a different full node
+- Instead of querying header by header, the light client tells a full node which header it trusts, and the height of the header it needs. The full node responds with
+the header along with a proof consisting of intermediate headers that the light client can use to verify. Roughly, `VerifyBisection` would then be executed at the full node.
+- We may set a timeout on how long `VerifyBisection` may take (see the sketch below).
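+
+For illustration, the last mitigation could look as follows. This is a sketch in the pseudo-code style used above, not part of the specification; the wrapper name, the `Result` pair, and `ErrTimeout` are made up here.
+
+```go
+// Sketch: bound the wall-clock time a bisection run may take, so that a
+// stalling full node cannot block the light client indefinitely. On timeout
+// we keep the old trusted state; the caller may then switch to another peer.
+func VerifyBisectionWithTimeout(untrustedHeight int64,
+                                trustedState TrustedState,
+                                trustThreshold float,
+                                trustingPeriod Duration,
+                                clockDrift Duration,
+                                now Time,
+                                timeout Duration) (TrustedState, error) {
+
+    resultCh := make(chan Result, 1) // Result pairs a TrustedState with an error
+    go func() {
+        newState, err := VerifyBisection(untrustedHeight, trustedState,
+            trustThreshold, trustingPeriod, clockDrift, now)
+        resultCh <- Result{state: newState, err: err}
+    }()
+
+    select {
+    case r := <-resultCh:
+        return (r.state, r.err)
+    case <-time.After(timeout):
+        return (trustedState, ErrTimeout)
+    }
+}
+```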
diff --git a/spec/light-client/verification/verification_001_published.md b/spec/light-client/verification/verification_001_published.md
new file mode 100644
index 0000000000..493098a824
--- /dev/null
+++ b/spec/light-client/verification/verification_001_published.md
@@ -0,0 +1,1180 @@
+
+
+# Light Client Verification
+
+The light client implements a read operation of a
+[header][TMBC-HEADER-link] from the [blockchain][TMBC-SEQ-link], by
+communicating with full nodes. As some full nodes may be faulty, this
+functionality must be implemented in a fault-tolerant way.
+
+In the Tendermint blockchain, the validator set may change with every
+new block. The staking and unbonding mechanism induces a [security
+model][TMBC-FM-2THIRDS-link]: starting at time *Time* of the
+[header][TMBC-HEADER-link],
+more than two-thirds of the next validators of a new block are correct
+for the duration of *TrustedPeriod*. The fault-tolerant read
+operation is designed for this security model.
+
+The challenge addressed here is that the light client might have a
+block of height *h1* and needs to read the block of height *h2*
+greater than *h1*. Checking all headers of heights from *h1* to *h2*
+might be too costly (e.g., in terms of energy for mobile devices).
+This specification tries to reduce the number of intermediate blocks
+that need to be checked, by exploiting the guarantees provided by the
+[security model][TMBC-FM-2THIRDS-link].
+
+# Status
+
+This document is thoroughly reviewed, and the protocol has been
+formalized in TLA+ and model checked.
+
+## Issues that need to be addressed
+
+As it is part of the larger light node, its data structures and
+functions interact with the fork detection functionality of the light
+client. As a result of the work on
+[Pull Request 479](https://github.com/informalsystems/tendermint-rs/pull/479) we
+established the need for an update in the data structures in [Issue 499](https://github.com/informalsystems/tendermint-rs/issues/499). This
+will not change the verification logic, but it will record information
+about verification that can be used in fork detection (in particular
+in computing more efficiently the proof of fork).
+
+# Outline
+
+- [Part I](#part-i---tendermint-blockchain): Introduction of
+  relevant terms of the Tendermint
+blockchain.
+
+- [Part II](#part-ii---sequential-definition-of-the-verification-problem): Introduction
+of the problem addressed by the Lightclient Verification protocol.
+    - [Verification Informal Problem
+      statement](#Verification-Informal-Problem-statement): For the general
+      audience, that is, engineers who want to get an overview of what
+      the component is doing from a bird's eye view.
+    - [Sequential Problem statement](#Sequential-Problem-statement):
+      Provides a mathematical definition of the problem statement in
+      its sequential form, that is, ignoring the distributed aspect of
+      the implementation of the blockchain.
+
+- [Part III](#part-iii---light-client-as-distributed-system): Distributed
+  aspects of the light client, system assumptions and temporal
+  logic specifications.
+
+    - [Incentives](#incentives): how faulty full nodes may benefit from
+      misbehaving and how correct full nodes benefit from cooperating.
+
+    - [Computational Model](#Computational-Model):
+      timing and correctness assumptions.
+
+    - [Distributed Problem Statement](#Distributed-Problem-Statement):
+      temporal properties that formalize safety and liveness
+      properties in the distributed setting.
+
+- [Part IV](#part-iv---light-client-verification-protocol):
+  Specification of the protocols.
+
+    - [Definitions](#Definitions): Describes inputs, outputs,
+      variables used by the protocol, auxiliary functions
+
+    - [Core Verification](#core-verification): gives an outline of the solution,
+      and details of the functions used (with preconditions,
+      postconditions, error conditions).
+
+    - [Liveness Scenarios](#liveness-scenarios): whether the light
+      client makes progress depends heavily on the changes in the
+      validator sets of the blockchain. We discuss some typical scenarios.
+
+- [Part V](#part-v---supporting-the-ibc-relayer): The above parts
+  focus on a common case where the last verified block has height *h1*
+  and the
+  requested height *h2* satisfies *h2 > h1*. For IBC, there are
+  scenarios where this might not be the case. In this part, we provide
+  some preliminaries for supporting this. As not all details of the
+  IBC requirements are clear by now, we do not provide a complete
+  specification at this point. We mark with "Open Question" points
+  that need to be addressed in order to finalize this specification.
+  It should be noted that the technically
+  most challenging case is the one specified in Part IV.
+
+In this document we quite extensively use tags in order to be able to
+reference assumptions, invariants, etc. in future communication. In
+these tags we frequently use the following short forms:
+
+- TMBC: Tendermint blockchain
+- SEQ: for sequential specifications
+- LCV: Lightclient Verification
+- LIVE: liveness
+- SAFE: safety
+- FUNC: function
+- INV: invariant
+- A: assumption
+
+# Part I - Tendermint Blockchain
+
+## Header Fields necessary for the Light Client
+
+#### **[TMBC-HEADER.1]**
+
+A set of blockchain transactions is stored in a data structure called
+*block*, which contains a field called *header*. (The data structure
+*block* is defined [here][block]). As the header contains hashes to
+the relevant fields of the block, for the purpose of this
+specification, we will assume that the blockchain is a list of
+headers, rather than a list of blocks.
+
+#### **[TMBC-HASH-UNIQUENESS.1]**
+
+We assume that every hash in the header identifies the data it hashes.
+Therefore, in this specification, we do not distinguish between hashes and the
+data they represent.
+
+#### **[TMBC-HEADER-FIELDS.1]**
+
+A header contains the following fields:
+
+- `Height`: non-negative integer
+- `Time`: time (integer)
+- `LastBlockID`: Hashvalue
+- `LastCommit`: DomainCommit
+- `Validators`: DomainVal
+- `NextValidators`: DomainVal
+- `Data`: DomainTX
+- `AppState`: DomainApp
+- `LastResults`: DomainRes
+
+#### **[TMBC-SEQ.1]**
+
+The Tendermint blockchain is a list *chain* of headers.
+
+#### **[TMBC-VALIDATOR-PAIR.1]**
+
+Given a full node, a
+*validator pair* is a pair *(peerID, voting_power)*, where
+
+- *peerID* is the PeerID (public key) of a full node,
+- *voting_power* is an integer (representing the full node's
+  voting power in a certain consensus instance).
+
+> In the Golang implementation the data type for *validator
+pair* is called `Validator`
+
+#### **[TMBC-VALIDATOR-SET.1]**
+
+A *validator set* is a set of validator pairs. For a validator set
+*vs*, we write *TotalVotingPower(vs)* for the sum of the voting powers
+of its validator pairs.
+
+#### **[TMBC-VOTE.1]**
+
+A *vote* contains a `prevote` or `precommit` message sent and signed by
+a validator node during the execution of [consensus][arXiv].
Each
+message contains the following fields
+
+- `Type`: prevote or precommit
+- `Height`: positive integer
+- `Round`: a positive integer
+- `BlockID`: a Hashvalue of a block (not necessarily a block of the chain)
+
+#### **[TMBC-COMMIT.1]**
+
+A commit is a set of `precommit` messages.
+
+## Tendermint Failure Model
+
+#### **[TMBC-AUTH-BYZ.1]**
+
+We assume the authenticated Byzantine fault model in which no node (faulty or
+correct) may break digital signatures, but otherwise, no additional
+assumption is made about the internal behavior of faulty
+nodes. That is, faulty nodes are only limited in that they cannot forge
+messages.
+
+#### **[TMBC-TIME-PARAMS.1]**
+
+A Tendermint blockchain has the following configuration parameters:
+
+- *unbondingPeriod*: a time duration.
+- *trustingPeriod*: a time duration smaller than *unbondingPeriod*.
+
+#### **[TMBC-CORRECT.1]**
+
+We define a predicate *correctUntil(n, t)*, where *n* is a node and *t* is a
+time point.
+The predicate *correctUntil(n, t)* is true if and only if the node *n*
+follows all the protocols (at least) until time *t*.
+
+#### **[TMBC-FM-2THIRDS.1]**
+
+If a block *h* is in the chain,
+then there exists a subset *CorrV*
+of *h.NextValidators*, such that:
+
+- *TotalVotingPower(CorrV) > 2/3
+  TotalVotingPower(h.NextValidators)*; cf. [TMBC-VALIDATOR-SET.1]
+- For every validator pair *(n,p)* in *CorrV*, it holds *correctUntil(n,
+  h.Time + trustingPeriod)*; cf. [TMBC-CORRECT.1]
+
+> The definition of correct
+> [**[TMBC-CORRECT.1]**][TMBC-CORRECT-link] refers to realtime, while it
+> is used here with *Time* and *trustingPeriod*, which are "hardware
+> times". We do not make a distinction here.
+
+#### **[TMBC-CORR-FULL.1]**
+
+Every correct full node locally stores a prefix of the
+current list of headers from [**[TMBC-SEQ.1]**][TMBC-SEQ-link].
+
+## What the Light Client Checks
+
+> From [TMBC-FM-2THIRDS.1] we directly derive the following observation:
+
+#### **[TMBC-VAL-CONTAINS-CORR.1]**
+
+Given a (trusted) block *tb* of the blockchain, a given set of full nodes
+*N* contains a correct node at a real-time *t*, if
+
+- *t - trustingPeriod < tb.Time < t*
+- the voting power in *tb.NextValidators* of nodes in *N* is more
+  than 1/3 of *TotalVotingPower(tb.NextValidators)*
+
+> The following describes how a commit for a given block *b* must look
+> like.
+
+#### **[TMBC-SOUND-DISTR-POSS-COMMIT.1]**
+
+For a block *b*, each element *pc* of *PossibleCommit(b)* satisfies:
+
+- *pc* contains only votes (cf. [TMBC-VOTE.1])
+  by validators from *b.Validators*
+- the sum of the voting powers in *pc* is greater than 2/3
+  *TotalVotingPower(b.Validators)*
+- and there is an *r* such that each vote *v* in *pc* satisfies
+  - v.Type = precommit
+  - v.Height = b.Height
+  - v.Round = r
+  - v.blockID = hash(b)
+
+> The following property comes from the validity of the [consensus][arXiv]: A
+> correct validator node only sends `prevote` or `precommit`, if
+> `BlockID` of the new (to-be-decided) block is equal to the hash of
+> the last block.
+
+#### **[TMBC-VAL-COMMIT.1]**
+
+If for a block *b*, a commit *c*
+
+- contains at least one validator pair *(v,p)* such that *v* is a
+  **correct** validator node, and
+- is contained in *PossibleCommit(b)*
+
+then the block *b* is on the blockchain.
+
+## Context of this document
+
+In this document we specify the light client verification component,
+called *Core Verification*. The *Core Verification* communicates with
+a full node.
As full nodes may be faulty, it cannot trust the
+received information, but the light client has to check whether the
+header it receives coincides with the one generated by Tendermint
+consensus.
+
+The two
+properties [[TMBC-VAL-CONTAINS-CORR.1]][TMBC-VAL-CONTAINS-CORR-link] and
+[[TMBC-VAL-COMMIT.1]][TMBC-VAL-COMMIT-link] formalize the checks done
+by this specification:
+Given a trusted block *tb* and an untrusted block *ub* with a commit *cub*,
+one has to check that *cub* is in *PossibleCommit(ub)*, and that *cub*
+contains a correct node using *tb*.
+
+# Part II - Sequential Definition of the Verification Problem
+
+## Verification Informal Problem statement
+
+Given a height *targetHeight* as an input, the *Verifier* eventually
+stores a header *h* of height *targetHeight* locally. This header *h*
+is generated by the Tendermint [blockchain][block]. In
+particular, a header that was not generated by the blockchain should
+never be stored.
+
+## Sequential Problem statement
+
+#### **[LCV-SEQ-LIVE.1]**
+
+The *Verifier* gets as input a height *targetHeight*, and eventually stores the
+header of height *targetHeight* of the blockchain.
+
+#### **[LCV-SEQ-SAFE.1]**
+
+The *Verifier* never stores a header which is not in the blockchain.
+
+# Part III - Light Client as Distributed System
+
+## Incentives
+
+Faulty full nodes may benefit from lying to the light client, by making the
+light client accept a block that deviates (e.g., contains additional
+transactions) from the one generated by Tendermint consensus.
+Users of the light client might be harmed by accepting a forged header.
+
+The [fork detector][fork-detector] of the light client may help the
+correct full nodes to understand whether their header is a good one.
+Hence, in combination with the light client detector, the correct full
+nodes have the incentive to respond. We can thus base liveness
+arguments on the assumption that correct full nodes reliably talk to
+the light client.
+
+## Computational Model
+
+#### **[LCV-A-PEER.1]**
+
+The verifier communicates with a full node called *primary*. No assumption is made about the full node (it may be correct or faulty).
+
+#### **[LCV-A-COMM.1]**
+
+Communication between the light client and a correct full node is
+reliable and bounded in time. Reliable communication means that
+messages are not lost, not duplicated, and eventually delivered. There
+is a (known) end-to-end delay *Delta*, such that if a message is sent
+at time *t* then it is received and processed by time *t + Delta*.
+This implies that we need a timeout of at least *2 Delta* for remote
+procedure calls to ensure that the response of a correct peer arrives
+before the timeout expires.
+
+#### **[LCV-A-TFM.1]**
+
+The Tendermint blockchain satisfies the Tendermint failure model [**[TMBC-FM-2THIRDS.1]**][TMBC-FM-2THIRDS-link].
+
+#### **[LCV-A-VAL.1]**
+
+The system satisfies [**[TMBC-AUTH-BYZ.1]**][TMBC-Auth-Byz-link] and
+[**[TMBC-FM-2THIRDS.1]**][TMBC-FM-2THIRDS-link]. Thus, there is a
+blockchain that satisfies the soundness requirements (that is, the
+validation rules in [[block]]).
+
+## Distributed Problem Statement
+
+### Two Kinds of Termination
+
+We do not assume that *primary* is correct. Under this assumption no
+protocol can guarantee the combination of the sequential
+properties.
Thus, in the (unreliable) distributed setting, we consider
+two kinds of termination (successful and failure) and we will specify
+below under what (favorable) conditions *Core Verification* is guaranteed to
+terminate successfully and to satisfy the requirements of the sequential
+problem statement:
+
+#### **[LCV-DIST-TERM.1]**
+
+*Core Verification* either *terminates
+successfully* or it *terminates with failure*.
+
+### Design choices
+
+#### **[LCV-DIST-STORE.1]**
+
+*Core Verification* has a local data structure called *LightStore* that
+contains light blocks (that contain a header). For each light block we
+record whether it is verified.
+
+#### **[LCV-DIST-PRIMARY.1]**
+
+*Core Verification* has a local variable *primary* that contains the PeerID of a full node.
+
+#### **[LCV-DIST-INIT.1]**
+
+*LightStore* is initialized with a header *trustedHeader* that was correctly
+generated by the Tendermint consensus. We say *trustedHeader* is verified.
+
+### Temporal Properties
+
+#### **[LCV-DIST-SAFE.1]**
+
+It is always the case that every verified header in *LightStore* was
+generated by an instance of Tendermint consensus.
+
+#### **[LCV-DIST-LIVE.1]**
+
+From time to time, a new instance of *Core Verification* is called with a
+height *targetHeight* greater than the height of any header in *LightStore*.
+Each instance must eventually terminate.
+
+- If
+    - the *primary* is correct (and locally has the block of
+      *targetHeight*), and
+    - *LightStore* always contains a verified header whose age is less than the
+      trusting period,
+  then *Core Verification* adds a verified header *hd* with height
+  *targetHeight* to *LightStore* and it **terminates successfully**
+
+> These definitions imply that if the primary is faulty, a header may or
+> may not be added to *LightStore*. In any case,
+> [**[LCV-DIST-SAFE.1]**](#lcv-vc-inv) must hold.
+> The invariant [**[LCV-DIST-SAFE.1]**](#lcv-dist-safe) and the liveness
+> requirement [**[LCV-DIST-LIVE.1]**](#lcv-dist-life)
+> allow that verified headers are added to *LightStore* whose
+> height was not passed
+> to the verifier (e.g., intermediate headers used in bisection; see below).
+> Note that for liveness, initially having a *trustedHeader* within
+> the *trustingPeriod* is not sufficient. However, as this
+> specification will leave some freedom with respect to the strategy
+> in which order to download intermediate headers, we do not give a
+> more precise liveness specification here. After giving the
+> specification of the protocol, we will discuss some liveness
+> scenarios [below](#liveness-scenarios).
+
+### Solving the sequential specification
+
+This specification provides a partial solution to the sequential specification.
+The *Verifier* solves the invariant of the sequential part
+
+[**[LCV-DIST-SAFE.1]**](#lcv-vc-inv) => [**[LCV-SEQ-SAFE.1]**](#lcv-seq-inv)
+
+In the case the primary is correct, and there is a recent header in *LightStore*, the verifier satisfies the liveness requirements.
+
+⋀ *primary is correct*
+⋀ always ∃ verified header in LightStore. *header.Time* > *now* - *trustingPeriod*
+⋀ [**[LCV-A-Comm.1]**](#lcv-a-comm) ⋀ (
+    ( [**[TMBC-CorrFull.1]**][TMBC-CorrFull-link] ⋀
+      [**[LCV-DIST-LIVE.1]**](#lcv-vc-live) )
+    ⟹ [**[LCV-SEQ-LIVE.1]**](#lcv-seq-live)
+)
+
+# Part IV - Light Client Verification Protocol
+
+We provide a specification for Light Client Verification. The local
+code for verification is presented by a sequential function
+`VerifyToTarget` to highlight the control flow of this functionality.
+We note that if a different concurrency model is considered for
+an implementation, the sequential flow of the function may be
+implemented with mutexes, etc. However, the light client verification
+is partitioned into three blocks that can be implemented and tested
+independently:
+
+- `FetchLightBlock` is called to download a light block (header) of a
+  given height from a peer.
+- `ValidAndVerified` is a local code that checks the header.
+- `Schedule` decides which height to try to verify next. We keep this
+  underspecified as different implementations (currently in Golang and
+  Rust) may implement different optimizations here. We just provide
+  necessary conditions on how the height may evolve.
+
+## Definitions
+
+### Data Types
+
+The core data structure of the protocol is the LightBlock.
+
+#### **[LCV-DATA-LIGHTBLOCK.1]**
+
+```go
+type LightBlock struct {
+    Header Header
+    Commit Commit
+    Validators ValidatorSet
+}
+```
+
+#### **[LCV-DATA-LIGHTSTORE.1]**
+
+LightBlocks are stored in a structure which stores all LightBlocks from
+initialization or received from peers.
+
+```go
+type LightStore struct {
+    ...
+}
+
+```
+
+Each LightBlock is in one of the following states:
+
+```go
+type VerifiedState int
+
+const (
+    StateUnverified = iota + 1
+    StateVerified
+    StateFailed
+    StateTrusted
+)
+```
+
+> Only the detector module sets a lightBlock state to `StateTrusted`
+> and only if it was `StateVerified` before.
+
+The LightStore exposes the following functions to query stored LightBlocks.
+
+#### **[LCV-FUNC-GET.1]**
+
+```go
+func (ls LightStore) Get(height Height) (LightBlock, bool)
+```
+
+- Expected postcondition
+    - returns a LightBlock at a given height or false in the second argument if
+      the LightStore does not contain the specified LightBlock.
+
+#### **[LCV-FUNC-LATEST-VERIF.1]**
+
+```go
+func (ls LightStore) LatestVerified() LightBlock
+```
+
+- Expected postcondition
+    - returns the highest light block whose state is `StateVerified`
+      or `StateTrusted`
+
+#### **[LCV-FUNC-UPDATE.2]**
+
+```go
+func (ls LightStore) Update(lightBlock LightBlock,
+                            verifiedState VerifiedState,
+                            verifiedBy Height)
+```
+
+- Expected postcondition
+    - The state of the LightBlock is set to *verifiedState*.
+    - The *verifiedBy* field of the LightBlock is set to the given height.
+
+> The following function is used only in the detector specification;
+> it is listed here for completeness.
+
+#### **[LCV-FUNC-LATEST-TRUSTED.1]**
+
+```go
+func (ls LightStore) LatestTrusted() LightBlock
+```
+
+- Expected postcondition
+    - returns the highest light block that has been verified and
+      checked by the detector.
+
+#### **[LCV-FUNC-FILTER.1]**
+
+```go
+func (ls LightStore) FilterVerified() LightStore
+```
+
+- Expected postcondition
+    - returns only the LightBlocks with state verified.
+
+### Inputs
+
+- *lightStore*: stores light blocks that have been downloaded and that
+  passed verification. Initially it contains a light block with
+  *trustedHeader*.
+- *primary*: peerID
+- *targetHeight*: the height of the needed header
+
+### Configuration Parameters
+
+- *trustThreshold*: a float. Can be used if correctness should not be based on more voting power than 1/3.
+- *trustingPeriod*: a time duration [**[TMBC-TIME-PARAMS.1]**][TMBC-TIME_PARAMS-link].
+- *clockDrift*: a time duration. Correction parameter dealing with only approximately synchronized clocks.
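+
+As an illustration of the LightStore interface above, a minimal map-backed sketch might look as follows (illustrative only; the `...` in [LCV-DATA-LIGHTSTORE.1] deliberately leaves the representation open, and the type and field names here are made up):
+
+```go
+// Sketch of one possible LightStore representation: light blocks and their
+// verification states indexed by height.
+type lightStoreImpl struct {
+    blocks map[Height]LightBlock
+    states map[Height]VerifiedState
+}
+
+func (ls lightStoreImpl) Get(height Height) (LightBlock, bool) {
+    lb, ok := ls.blocks[height]
+    return lb, ok
+}
+
+// LatestVerified scans for the highest block whose state is StateVerified
+// or StateTrusted, matching [LCV-FUNC-LATEST-VERIF.1].
+func (ls lightStoreImpl) LatestVerified() LightBlock {
+    var best LightBlock
+    for h, lb := range ls.blocks {
+        s := ls.states[h]
+        if (s == StateVerified || s == StateTrusted) && h > best.Header.Height {
+            best = lb
+        }
+    }
+    return best
+}
+```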
+
+### Variables
+
+- *nextHeight*: initially *targetHeight*
+  > *nextHeight* should be thought of as the "height of the next header we need
+  > to download and verify"
+
+### Assumptions
+
+#### **[LCV-A-INIT.1]**
+
+- *trustedHeader* is from the blockchain
+
+- *targetHeight > LightStore.LatestVerified.Header.Height*
+
+### Invariants
+
+#### **[LCV-INV-TP.1]**
+
+It is always the case that *LightStore.LatestTrusted.Header.Time > now - trustingPeriod*.
+
+> If the invariant is violated, the light client does not have a
+> header it can trust. A trusted header must be obtained externally;
+> its trust can only be based on social consensus.
+
+### Used Remote Functions
+
+We use the functions `commit` and `validators` that are provided
+by the [RPC client for Tendermint][RPC].
+
+```go
+func Commit(height int64) (SignedHeader, error)
+```
+
+- Implementation remark
+    - RPC to full node *n*
+    - JSON sent:
+
+```javascript
+// POST /commit
+{
+    "jsonrpc": "2.0",
+    "id": "ccc84631-dfdb-4adc-b88c-5291ea3c2cfb", // UUID v4, unique per request
+    "method": "commit",
+    "params": {
+        "height": 1234
+    }
+}
+```
+
+- Expected precondition
+    - header of `height` exists on blockchain
+- Expected postcondition
+    - if *n* is correct: Returns the signed header of height `height`
+      from the blockchain if communication is timely (no timeout)
+    - if *n* is faulty: Returns a signed header with arbitrary content
+- Error condition
+    - if *n* is correct: precondition violated or timeout
+    - if *n* is faulty: arbitrary error
+
+----
+
+```go
+func Validators(height int64) (ValidatorSet, error)
+```
+
+- Implementation remark
+    - RPC to full node *n*
+    - JSON sent:
+
+```javascript
+// POST /validators
+{
+    "jsonrpc": "2.0",
+    "id": "ccc84631-dfdb-4adc-b88c-5291ea3c2cfb", // UUID v4, unique per request
+    "method": "validators",
+    "params": {
+        "height": 1234
+    }
+}
+```
+
+- Expected precondition
+    - header of `height` exists on blockchain
+- Expected postcondition
+    - if *n* is correct: Returns the validator set of height `height`
+      from the blockchain if communication is timely (no timeout)
+    - if *n* is faulty: Returns an arbitrary validator set
+- Error condition
+    - if *n* is correct: precondition violated or timeout
+    - if *n* is faulty: arbitrary error
+
+----
+
+### Communicating Function
+
+#### **[LCV-FUNC-FETCH.1]**
+
+```go
+func FetchLightBlock(peer PeerID, height Height) LightBlock
+```
+
+- Implementation remark
+    - RPC to peer at *PeerID*
+    - calls `Commit` for *height* and `Validators` for *height* and *height+1*
+- Expected precondition
+    - `height` is less than or equal to the height of the peer **[LCV-IO-PRE-HEIGHT.1]**
+- Expected postcondition:
+    - if *peer* is correct:
+        - Returns the LightBlock *lb* of height `height`
+          that is consistent with the blockchain
+        - *lb.provider = peer* **[LCV-IO-POST-PROVIDER.1]**
+        - *lb.Header* is a header consistent with the blockchain
+        - *lb.Validators* is the validator set of the blockchain at height *height*
+        - *lb.NextValidators* is the validator set of the blockchain at height *height + 1*
+    - if *peer* is faulty: Returns a LightBlock with arbitrary content
+      [**[TMBC-AUTH-BYZ.1]**][TMBC-Auth-Byz-link]
+- Error condition
+    - if *peer* is correct: precondition violated
+    - if *peer* is faulty: arbitrary error
+    - if *lb.provider != peer*
+    - times out after 2 Delta (by assumption *peer* is faulty)
+
+----
+
+## Core Verification
+
+### Outline
+
+`VerifyToTarget` is the main function and uses the following functions.
+
+- `FetchLightBlock` is called to download the next light block. It is
+  the only function that communicates with other nodes
+- `ValidAndVerified` checks whether a header is valid and checks if a
+  new lightBlock should be trusted
+  based on a previously verified lightBlock.
+- `Schedule` decides which height to try to verify next
+
+In the following description of `VerifyToTarget` we do not deal with error
+handling. If any of the above functions returns an error, VerifyToTarget just
+passes the error on.
+
+#### **[LCV-FUNC-MAIN.1]**
+
+```go
+func VerifyToTarget(primary PeerID, lightStore LightStore,
+                    targetHeight Height) (LightStore, Result) {
+
+    nextHeight := targetHeight
+
+    for lightStore.LatestVerified.height < targetHeight {
+
+        // Get next LightBlock for verification
+        current, found := lightStore.Get(nextHeight)
+        if !found {
+            current = FetchLightBlock(primary, nextHeight)
+            lightStore.Update(current, StateUnverified)
+        }
+
+        // Verify
+        verdict = ValidAndVerified(lightStore.LatestVerified, current)
+
+        // Decide whether/how to continue
+        if verdict == SUCCESS {
+            lightStore.Update(current, StateVerified)
+        }
+        else if verdict == NOT_ENOUGH_TRUST {
+            // do nothing
+            // the light block current passed validation, but the validator
+            // set is too different to verify it. We keep the state of
+            // current at StateUnverified. For a later iteration, Schedule
+            // might decide to try verification of that light block again.
+        }
+        else {
+            // verdict is some error code
+            lightStore.Update(current, StateFailed)
+            // possibly remove all LightBlocks from primary
+            return (lightStore, ResultFailure)
+        }
+        nextHeight = Schedule(lightStore, nextHeight, targetHeight)
+    }
+    return (lightStore, ResultSuccess)
+}
+```
+
+- Expected precondition
+    - *lightStore* contains a LightBlock within the *trustingPeriod* **[LCV-PRE-TP.1]**
+    - *targetHeight* is greater than the height of all the LightBlocks in *lightStore*
+- Expected postcondition:
+    - returns *lightStore* that contains a LightBlock that corresponds to a block
+      of the blockchain of height *targetHeight*
+      (that is, the LightBlock has been added to *lightStore*) **[LCV-POST-LS.1]**
+- Error conditions
+    - if the precondition is violated
+    - if `ValidAndVerified` or `FetchLightBlock` report an error
+    - if [**[LCV-INV-TP.1]**](#LCV-INV-TP.1) is violated
+
+### Details of the Functions
+
+#### **[LCV-FUNC-VALID.1]**
+
+```go
+func ValidAndVerified(trusted LightBlock, untrusted LightBlock) Result
+```
+
+- Expected precondition:
+    - *untrusted* is valid, that is, satisfies the soundness [checks][block]
+    - *untrusted* is **well-formed**, that is,
+        - *untrusted.Header.Time < now + clockDrift*
+        - *untrusted.Validators = hash(untrusted.Header.Validators)*
+        - *untrusted.NextValidators = hash(untrusted.Header.NextValidators)*
+    - *trusted.Header.Time > now - trustingPeriod*
+    - *trusted.Commit* is a commit for the header
+      *trusted.Header*, i.e., it contains
+      the correct hash of the header, and +2/3 of signatures
+    - the `Height` and `Time` of `trusted` are smaller than the `Height` and
+      `Time` of `untrusted`, respectively
+    - the *untrusted.Header* is well-formed (passes the tests from
+      [[block]]), and in particular
+        - if the untrusted header `untrusted.Header` is the immediate
+          successor of `trusted.Header`, then it holds that
+            - *trusted.Header.NextValidators =
+              untrusted.Header.Validators*, and
+              moreover,
+            - *untrusted.Header.Commit*
+                - contains signatures by more than two-thirds of the validators
+                - contains no signature from
nodes that are not in *trusted.Header.NextValidators*
+- Expected postcondition:
+    - Returns `SUCCESS`:
+        - if *untrusted* is the immediate successor of *trusted*, or otherwise,
+        - if the signatures of a set of validators that have more than
+          *max(1/3,trustThreshold)* of voting power in
+          *trusted.NextValidators* is contained in
+          *untrusted.Commit* (that is, the header passes the tests
+          [**[TMBC-VAL-CONTAINS-CORR.1]**][TMBC-VAL-CONTAINS-CORR-link]
+          and [**[TMBC-VAL-COMMIT.1]**][TMBC-VAL-COMMIT-link])
+    - Returns `NOT_ENOUGH_TRUST` if:
+        - *untrusted* is *not* the immediate successor of
+          *trusted*
+          and the *max(1/3,trustThreshold)* threshold is not reached
+          (that is, if
+          [**[TMBC-VAL-CONTAINS-CORR.1]**][TMBC-VAL-CONTAINS-CORR-link]
+          fails and the header does not violate the soundness
+          checks [[block]]).
+- Error condition:
+    - if precondition violated
+
+----
+
+#### **[LCV-FUNC-SCHEDULE.1]**
+
+```go
+func Schedule(lightStore, nextHeight, targetHeight) Height
+```
+
+- Implementation remark: It picks the next height to be verified.
+  We keep the precise choice of the next header under-specified. It is
+  subject to performance optimizations that do not influence the correctness
+- Expected postcondition: **[LCV-SCHEDULE-POST.1]**
+  Return *H* s.t.
+    1. if *lightStore.LatestVerified.Height = nextHeight* and
+       *lightStore.LatestVerified < targetHeight* then
+       *nextHeight < H <= targetHeight*
+    2. if *lightStore.LatestVerified.Height < nextHeight* and
+       *lightStore.LatestVerified.Height < targetHeight* then
+       *lightStore.LatestVerified.Height < H < nextHeight*
+    3. if *lightStore.LatestVerified.Height = targetHeight* then
+       *H = targetHeight*
+
+> Case 1. captures the case where the light block at height *nextHeight*
+> has been verified, and we can choose a height closer to the *targetHeight*.
+> As we get the *lightStore* as parameter, the choice of the next height can
+> depend on the *lightStore*, e.g., we can pick a height for which we have
+> already downloaded a light block.
+> In Case 2. the header of *nextHeight* could not be verified, and we need to pick a smaller height.
+> Case 3. is the special case in which we have already verified the *targetHeight*.
+
+### Solving the distributed specification
+
+*trustedStore* is implemented by the light blocks in lightStore that
+have the state *StateVerified*.
+
+#### Argument for [**[LCV-DIST-SAFE.1]**](#lcv-dist-safe)
+
+- `ValidAndVerified` implements the soundness checks and the checks
+  [**[TMBC-VAL-CONTAINS-CORR.1]**][TMBC-VAL-CONTAINS-CORR-link] and
+  [**[TMBC-VAL-COMMIT.1]**][TMBC-VAL-COMMIT-link] under
+  the assumption [**[TMBC-FM-2THIRDS.1]**][TMBC-FM-2THIRDS-link]
+- Only if `ValidAndVerified` returns with `SUCCESS` is the state of a light block
+  set to *StateVerified*.
+
+#### Argument for [**[LCV-DIST-LIVE.1]**](#lcv-dist-life)
+
+- If *primary* is correct,
+    - `FetchLightBlock` will always return a light block consistent
+      with the blockchain
+    - `ValidAndVerified` either verifies the header using the trusting
+      period or falls back to sequential
+      verification
+    - If [**[LCV-INV-TP.1]**](#LCV-INV-TP.1) holds, eventually every
+      header will be verified and core verification **terminates successfully**.
+    - successful termination depends on the age of *lightStore.LatestVerified*
+      (for instance, initially on the age of *trustedHeader*) and the
+      changes of the validator sets on the blockchain.
+      We will give some examples [below](#liveness-scenarios).
+- If *primary* is faulty,
+    - it either provides headers that pass all the tests, and we
+      return with the header,
+    - or it provides one header that fails a test, and core verification
+      **terminates with failure**,
+    - or it times out and core verification
+      **terminates with failure**.
+
+## Liveness Scenarios
+
+The liveness argument above assumes [**[LCV-INV-TP.1]**](#LCV-INV-TP.1),
+which requires that there is a header that does not expire before the
+target height is reached. Here we discuss scenarios to ensure this.
+
+Let *startHeader* be *LightStore.LatestVerified* when core
+verification is called (*trustedHeader*) and *startTime* be the time
+core verification is invoked.
+
+In order to ensure liveness, *LightStore* always needs to contain a
+verified (or initially trusted) header whose time is within the
+trusting period. To ensure this, core verification needs to add new
+headers to *LightStore* and verify them, before all headers in
+*LightStore* expire.
+
+#### Many changes in validator set
+
+Let us assume that `Schedule` implements bisection, that is, it halves the distance.
+Assume the case where the validator set changes completely in each
+block. Then the method in this specification needs to
+sequentially verify all headers. That is, for
+
+- *W = log_2 (targetHeight - startHeader.Height)*,
+
+*W* headers need to be downloaded and checked before the
+header of height *startHeader.Height + 1* is added to *LightStore*.
+
+- Let *Comp*
+  be the local computation time needed to check headers and signatures
+  for one header.
+- Then we need in the worst case *Comp + 2 Delta* to download and
+  check one header.
+- Then the first time a verified header could be added to *LightStore* is
+  startTime + W * (Comp + 2 Delta)
+- [TP.1] However, it can only be added if we still have a header in
+  *LightStore* which has not expired; that is only the case if
+    - startHeader.Time > startTime + W * (Comp + 2 Delta) -
+      trustingPeriod,
+    - that is, if core verification is started at
+      startTime < startHeader.Time + trustingPeriod - W * (Comp + 2 Delta)
+
+- One may then do an inductive argument from this point on, depending
+  on the implementation of `Schedule`. We may have to account for the
+  headers that are already
+  downloaded, but they are checked against the new *LightStore.LatestVerified*.
+
+> We observe that
+> the worst case time it needs to verify the header of height
+> *targetHeight* depends mainly on how frequently the validator set on the
+> blockchain changes. That core verification terminates successfully
+> crucially depends on the check [TP.1], that is, that the headers in
+> *LightStore* do not expire in the time needed to download more
+> headers, which depends on the creation time of the headers in
+> *LightStore*. That is, termination of core verification depends heavily
+> on the data stored in the blockchain.
+> The current light client core verification protocol exploits the fact that, in
+> practice, changes in the validator set are rare. For instance,
+> consider the following scenario.
+
+#### No change in validator set
+
+If on the blockchain the validator set of the block at height
+*targetHeight* is equal to *startHeader.NextValidators*:
+
+- there is one round trip in `FetchLightBlock` to download the light
+  block of height *targetHeight*, and *Comp* to check it.
+- as the validator sets are equal, `Verify` returns `SUCCESS`, if
+  *startHeader.Time > now - trustingPeriod*.
+
+# Part V - Supporting the IBC Relayer
+
+The above specification focuses on the most common case, which also
+constitutes the most challenging task: using the Tendermint [security
+model][TMBC-FM-2THIRDS-link] to verify light blocks without
+downloading all intermediate blocks. To focus on this challenge, above
+we have restricted ourselves to the case where *targetHeight* is
+greater than the height of any trusted header. This simplified the
+presentation of the algorithm: initially,
+`lightStore.LatestVerified()` is less than *targetHeight*, and in the
+process of verification `lightStore.LatestVerified()` increases until
+*targetHeight* is reached.
+
+For [IBC][ibc-rs] it might be that some "older" header is
+needed, that is, *targetHeight < lightStore.LatestVerified()*. In this
+section we present a preliminary design, and we mark some
+remaining open questions.
+If *targetHeight < lightStore.LatestVerified()* our design separates
+the following cases:
+
+- A previous instance of `VerifyToTarget` has already downloaded the
+  light block of *targetHeight*. There are two cases:
+    - the light block has been verified
+    - the light block has not been verified yet
+- No light block of *targetHeight* had been downloaded before. There
+  are two cases:
+    - there exists a verified light block of height less than *targetHeight*
+    - otherwise. In this case we need to do "backwards verification"
+      using the hash of the previous block in the `LastBlockID` field
+      of a header.
+
+**Open Question:** What are the security assumptions for backwards
+verification? Should we check that the light block we verify from
+(and/or the checked light block) is within the trusting period?
+
+The design just presents the above case
+distinction as a function, and defines some auxiliary functions in the
+same way the protocol was presented in
+[Part IV](#part-iv---light-client-verification-protocol).
+
+```go
+func (ls LightStore) LatestPrevious(height Height) (LightBlock, bool)
+```
+
+- Expected postcondition
+    - returns a light block *lb* that satisfies:
+        - *lb* is in lightStore
+        - *lb* is verified and not expired
+        - *lb.Header.Height < height*
+        - for all *b* in lightStore s.t. *b* is verified and not expired it
+          holds *lb.Header.Height >= b.Header.Height*
+    - *false* in the second argument if
+      the LightStore does not contain such an *lb*.
+
+```go
+func (ls LightStore) MinVerified() (LightBlock, bool)
+```
+
+- Expected postcondition
+    - returns a light block *lb* that satisfies:
+        - *lb* is in lightStore
+        - *lb* is verified. **Open Question:** replace by trusted?
+        - *lb.Header.Height* is minimal in the lightStore
+        - **Open Question:** according to this, it might be expired (outside the
+          trusting period). This approach appears safe. Are there reasons we
+          should not do that?
+    - *false* in the second argument if
+      the LightStore does not contain such an *lb*.
+
+If a height that is smaller than the smallest height in the lightstore
+is required, we check the hashes backwards.
+This is done with the following function:
+
+#### **[LCV-FUNC-BACKWARDS.1]**
+
+```go
+func Backwards (primary PeerID, lightStore LightStore, targetHeight Height)
+               (LightStore, Result) {
+
+    lb, res = lightStore.MinVerified()
+    if res = false {
+        return (lightStore, ResultFailure)
+    }
+
+    latest := lb
+    for i := lb.Header.Height - 1; i >= targetHeight; i-- {
+        // here we download height-by-height. We might first download all
+        // headers down to targetHeight and then check them.
+        current := FetchLightBlock(primary, i)
+        if hash(current.Header) != latest.Header.LastBlockID {
+            return (lightStore, ResultFailure)
+        } else {
+            lightStore.Update(current, StateVerified)
+            // **Open Question:** Do we need a new state type for
+            // backwards verified light blocks?
+        }
+        latest = current
+    }
+    return (lightStore, ResultSuccess)
+}
+```
+
+The following function just decides, based on the required height, which
+method should be used.
+
+#### **[LCV-FUNC-IBCMAIN.1]**
+
+```go
+func Main (primary PeerID, lightStore LightStore, targetHeight Height)
+          (LightStore, Result) {
+
+    b1, r1 = lightStore.Get(targetHeight)
+    if r1 = true and b1.State = StateVerified {
+        // block already there
+        return (lightStore, ResultSuccess)
+    }
+
+    if targetHeight > lightStore.LatestVerified.Height {
+        // case of Part IV
+        return VerifyToTarget(primary, lightStore, targetHeight)
+    } else {
+        b2, r2 = lightStore.LatestPrevious(targetHeight);
+        if r2 = true {
+            // make auxiliary lightStore auxLS to call VerifyToTarget.
+            // VerifyToTarget uses LatestVerified of the given lightStore
+            // For that we need:
+            // auxLS.LatestVerified = lightStore.LatestPrevious(targetHeight)
+            auxLS.Init;
+            auxLS.Update(b2, StateVerified);
+            if r1 = true {
+                // we need to verify a previously downloaded light block.
+                // we add it to the auxiliary store so that VerifyToTarget
+                // does not download it again
+                auxLS.Update(b1, b1.State);
+            }
+            auxLS, res2 = VerifyToTarget(primary, auxLS, targetHeight)
+            // move all lightblocks from auxLS to lightStore,
+            // maintain state
+            // we do that whether VerifyToTarget was successful or not
+            for _, s := range auxLS {
+                lightStore.Update(s, s.State)
+            }
+            return (lightStore, res2)
+        } else {
+            return Backwards(primary, lightStore, targetHeight)
+        }
+    }
+}
+```
+
+# References
+
+[[block]] Specification of the block data structure.
+
+[[RPC]] RPC client for Tendermint
+
+[[fork-detector]] The specification of the light client fork detector.
+
+[[fullnode]] Specification of the full node API
+
+[[ibc-rs]] Rust implementation of IBC modules and relayer.
+
+[[lightclient]] The light client ADR [77d2651 on Dec 27, 2019].
+
+[RPC]: https://docs.tendermint.com/master/rpc/
+
+[block]: https://github.com/tendermint/spec/blob/d46cd7f573a2c6a2399fcab2cde981330aa63f37/spec/core/data_structures.md
+
+[TMBC-HEADER-link]: #tmbc-header1
+[TMBC-SEQ-link]: #tmbc-seq1
+[TMBC-CorrFull-link]: #tmbc-corr-full1
+[TMBC-Auth-Byz-link]: #tmbc-auth-byz1
+[TMBC-TIME_PARAMS-link]: #tmbc-time-params1
+[TMBC-FM-2THIRDS-link]: #tmbc-fm-2thirds1
+[TMBC-VAL-CONTAINS-CORR-link]: #tmbc-val-contains-corr1
+[TMBC-VAL-COMMIT-link]: #tmbc-val-commit1
+[TMBC-SOUND-DISTR-POSS-COMMIT-link]: #tmbc-sound-distr-poss-commit1
+
+[lightclient]: https://github.com/interchainio/tendermint-rs/blob/e2cb9aca0b95430fca2eac154edddc9588038982/docs/architecture/adr-002-lite-client.md
+[fork-detector]: https://github.com/informalsystems/tendermint-rs/blob/master/docs/spec/lightclient/detection.md
+[fullnode]: https://github.com/tendermint/spec/blob/master/spec/blockchain/fullnode.md
+
+[ibc-rs]:https://github.com/informalsystems/ibc-rs
+
+[FN-LuckyCase-link]: https://github.com/tendermint/spec/blob/master/spec/blockchain/fullnode.md#fn-luckycase
+
+[blockchain-validator-set]: https://github.com/tendermint/spec/blob/master/spec/blockchain/blockchain.md#data-structures
+[fullnode-data-structures]: https://github.com/tendermint/spec/blob/master/spec/blockchain/fullnode.md#data-structures
+
+[FN-ManifestFaulty-link]: https://github.com/tendermint/spec/blob/master/spec/blockchain/fullnode.md#fn-manifestfaulty
+
+[arXiv]: https://arxiv.org/abs/1807.04938
diff --git a/spec/light-client/verification/verification_002_draft.md b/spec/light-client/verification/verification_002_draft.md
new file mode 100644
index 0000000000..70cdfdd94a
--- /dev/null
+++ b/spec/light-client/verification/verification_002_draft.md
@@ -0,0 +1,1063 @@
+
+
+# Light Client Verification
+
+The light client implements a read operation of a
+[header][TMBC-HEADER-link] from the [blockchain][TMBC-SEQ-link], by
+communicating with full nodes. As some full nodes may be faulty, this
+functionality must be implemented in a fault-tolerant way.
+
+In the Tendermint blockchain, the validator set may change with every
+new block. The staking and unbonding mechanism induces a [security
+model][TMBC-FM-2THIRDS-link]: starting at time *Time* of the
+[header][TMBC-HEADER-link],
+more than two-thirds of the next validators of a new block are correct
+for the duration of *TrustingPeriod*. The fault-tolerant read
+operation is designed for this security model.
+
+The challenge addressed here is that the light client might have a
+block of height *h1* and needs to read the block of height *h2*
+greater than *h1*. Checking all headers of heights from *h1* to *h2*
+might be too costly (e.g., in terms of energy for mobile devices).
+This specification tries to reduce the number of intermediate blocks
+that need to be checked, by exploiting the guarantees provided by the
+[security model][TMBC-FM-2THIRDS-link].
+
+# Status
+
+## Previous Versions
+
+- [[001_published]](./verification_001_published.md)
+  is thoroughly reviewed, and the protocol has been
+  formalized in TLA+ and model checked.
+
+## Issues that are addressed in this revision
+
+As it is part of the larger light node, its data structures and
+functions interact with the attack detection functionality of the light
+client.
+
+As a result of the work on
+
+- [attack detection](https://github.com/tendermint/spec/pull/164) for light nodes
+
+- attack detection for IBC and [relayer requirements](https://github.com/informalsystems/tendermint-rs/issues/497)
+
+- light client
+  [supervisor](https://github.com/tendermint/spec/pull/159) (also in
+  [Rust proposal](https://github.com/informalsystems/tendermint-rs/pull/509))
+
+adaptations to the semantics and functions exposed by the LightStore
+needed to be made. In contrast to [version
+001](./verification_001_published.md) we specify the following:
+
+- `VerifyToTarget` and `Backwards` are called with a single lightblock
+  as root of trust, in contrast to passing the complete lightstore.
+
+- During verification, we record for each lightblock which other
+  lightblock can be used to verify it in one step. This is needed to
+  generate verification traces that are needed for IBC.
+
+# Outline
+
+- [Part I](#part-i---tendermint-blockchain): Introduction of
+  relevant terms of the Tendermint blockchain.
+
+- [Part II](#part-ii---sequential-definition-of-the-verification-problem): Introduction
+  of the problem addressed by the Lightclient Verification protocol.
+    - [Verification Informal Problem
+      statement](#Verification-Informal-Problem-statement): For the general
+      audience, that is, engineers who want to get an overview of what
+      the component is doing from a bird's eye view.
+    - [Sequential Problem statement](#Sequential-Problem-statement):
+      Provides a mathematical definition of the problem statement in
+      its sequential form, that is, ignoring the distributed aspect of
+      the implementation of the blockchain.
+
+- [Part III](#part-iii---light-client-as-distributed-system): Distributed
+  aspects of the light client, system assumptions and temporal
+  logic specifications.
+
+    - [Incentives](#incentives): how faulty full nodes may benefit from
+      misbehaving and how correct full nodes benefit from cooperating.
+
+    - [Computational Model](#Computational-Model):
+      timing and correctness assumptions.
+
+    - [Distributed Problem Statement](#Distributed-Problem-Statement):
+      temporal properties that formalize safety and liveness
+      properties in the distributed setting.
+
+- [Part IV](#part-iv---light-client-verification-protocol):
+  Specification of the protocols.
+
+    - [Definitions](#Definitions): Describes inputs, outputs,
+      variables used by the protocol, auxiliary functions
+
+    - [Core Verification](#core-verification): gives an outline of the solution,
+      and details of the functions used (with preconditions,
+      postconditions, error conditions).
+
+    - [Liveness Scenarios](#liveness-scenarios): when the light
+      client makes progress depends heavily on the changes in the
+      validator sets of the blockchain. We discuss some typical scenarios.
+
+- [Part V](#part-v---supporting-the-ibc-relayer): The above parts
+  focus on a common case where the last verified block has height *h1*
+  and the requested height *h2* satisfies *h2 > h1*. For IBC, there are
+  scenarios where this might not be the case. In this part, we provide
+  some preliminaries for supporting this. As not all details of the
+  IBC requirements are clear yet, we do not provide a complete
+  specification at this point. We mark with "Open Question" points
+  that need to be addressed in order to finalize this specification.
+  It should be noted that the technically
+  most challenging case is the one specified in Part IV.
+
+In this document we quite extensively use tags in order to be able to
+reference assumptions, invariants, etc. in future communication. In
+these tags we frequently use the following short forms:
+
+- TMBC: Tendermint blockchain
+- SEQ: for sequential specifications
+- LCV: Lightclient Verification
+- LIVE: liveness
+- SAFE: safety
+- FUNC: function
+- INV: invariant
+- A: assumption
+
+# Part I - Tendermint Blockchain
+
+## Header Fields necessary for the Light Client
+
+#### **[TMBC-HEADER.1]**
+
+A set of blockchain transactions is stored in a data structure called
+*block*, which contains a field called *header*. (The data structure
+*block* is defined [here][block]). As the header contains hashes to
+the relevant fields of the block, for the purpose of this
+specification, we will assume that the blockchain is a list of
+headers, rather than a list of blocks.
+
+#### **[TMBC-HASH-UNIQUENESS.1]**
+
+We assume that every hash in the header identifies the data it hashes.
+Therefore, in this specification, we do not distinguish between hashes and the
+data they represent.
+
+#### **[TMBC-HEADER-FIELDS.2]**
+
+A header contains the following fields:
+
+- `Height`: non-negative integer
+- `Time`: time (non-negative integer)
+- `LastBlockID`: Hashvalue
+- `LastCommit`: DomainCommit
+- `Validators`: DomainVal
+- `NextValidators`: DomainVal
+- `Data`: DomainTX
+- `AppState`: DomainApp
+- `LastResults`: DomainRes
+
+#### **[TMBC-SEQ.1]**
+
+The Tendermint blockchain is a list *chain* of headers.
+
+#### **[TMBC-VALIDATOR-PAIR.1]**
+
+Given a full node, a
+*validator pair* is a pair *(peerID, voting_power)*, where
+
+- *peerID* is the PeerID (public key) of a full node,
+- *voting_power* is an integer (representing the full node's
+  voting power in a certain consensus instance).
+
+> In the Golang implementation the data type for *validator
+> pair* is called `Validator`
+
+#### **[TMBC-VALIDATOR-SET.1]**
+
+A *validator set* is a set of validator pairs. For a validator set
+*vs*, we write *TotalVotingPower(vs)* for the sum of the voting powers
+of its validator pairs.
+
+#### **[TMBC-VOTE.1]**
+
+A *vote* contains a `prevote` or `precommit` message sent and signed by
+a validator node during the execution of [consensus][arXiv]. Each
+message contains the following fields
+
+- `Type`: prevote or precommit
+- `Height`: positive integer
+- `Round`: a positive integer
+- `BlockID`: a Hashvalue of a block (not necessarily a block of the chain)
+
+#### **[TMBC-COMMIT.1]**
+
+A commit is a set of `precommit` messages.
+
+## Tendermint Failure Model
+
+#### **[TMBC-AUTH-BYZ.1]**
+
+We assume the authenticated Byzantine fault model in which no node (faulty or
+correct) may break digital signatures, but otherwise, no additional
+assumption is made about the internal behavior of faulty
+nodes. That is, faulty nodes are only limited in that they cannot forge
+messages.
+
+#### **[TMBC-TIME-PARAMS.1]**
+
+A Tendermint blockchain has the following configuration parameters:
+
+- *unbondingPeriod*: a time duration.
+- *trustingPeriod*: a time duration smaller than *unbondingPeriod*.
+
+#### **[TMBC-CORRECT.1]**
+
+We define a predicate *correctUntil(n, t)*, where *n* is a node and *t* is a
+time point.
+The predicate *correctUntil(n, t)* is true if and only if the node *n*
+follows all the protocols (at least) until time *t*.
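+
+> To make the definitions above concrete, the following Go sketch shows the
+> data types behind [TMBC-VALIDATOR-PAIR.1] and [TMBC-VALIDATOR-SET.1],
+> together with *TotalVotingPower*, on which the failure model below relies.
+> The names are illustrative; they are not prescribed by this specification.
+
+```go
+// Validator is a validator pair (peerID, voting_power),
+// cf. [TMBC-VALIDATOR-PAIR.1].
+type Validator struct {
+    PeerID      PeerID // public key of the full node
+    VotingPower int64  // voting power in a certain consensus instance
+}
+
+// ValidatorSet is a set of validator pairs, cf. [TMBC-VALIDATOR-SET.1].
+type ValidatorSet []Validator
+
+// TotalVotingPower returns the sum of the voting powers of the pairs in vs.
+func TotalVotingPower(vs ValidatorSet) int64 {
+    var sum int64
+    for _, v := range vs {
+        sum += v.VotingPower
+    }
+    return sum
+}
+```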
+
+#### **[TMBC-FM-2THIRDS.1]**
+
+If a block *h* is in the chain,
+then there exists a subset *CorrV*
+of *h.NextValidators*, such that:
+
+- *TotalVotingPower(CorrV) > 2/3
+  TotalVotingPower(h.NextValidators)*; cf. [TMBC-VALIDATOR-SET.1]
+- For every validator pair *(n,p)* in *CorrV*, it holds *correctUntil(n,
+  h.Time + trustingPeriod)*; cf. [TMBC-CORRECT.1]
+
+> The definition of correct
+> [**[TMBC-CORRECT.1]**][TMBC-CORRECT-link] refers to realtime, while it
+> is used here with *Time* and *trustingPeriod*, which are "hardware
+> times". We do not make a distinction here.
+
+#### **[TMBC-CORR-FULL.1]**
+
+Every correct full node locally stores a prefix of the
+current list of headers from [**[TMBC-SEQ.1]**][TMBC-SEQ-link].
+
+## What the Light Client Checks
+
+> From [TMBC-FM-2THIRDS.1] we directly derive the following observation:
+
+#### **[TMBC-VAL-CONTAINS-CORR.1]**
+
+Given a (trusted) block *tb* of the blockchain, a given set of full nodes
+*N* contains a correct node at a real time *t*, if
+
+- *t - trustingPeriod < tb.Time < t*
+- the voting power in tb.NextValidators of nodes in *N* is more
+  than 1/3 of *TotalVotingPower(tb.NextValidators)*
+
+> The following describes how a commit for a given block *b* must look
+> like.
+
+#### **[TMBC-SOUND-DISTR-POSS-COMMIT.1]**
+
+For a block *b*, each element *pc* of *PossibleCommit(b)* satisfies:
+
+- *pc* contains only votes (cf. [TMBC-VOTE.1])
+  by validators from *b.Validators*
+- the sum of the voting powers in *pc* is greater than 2/3 of
+  *TotalVotingPower(b.Validators)*
+- and there is an *r* such that each vote *v* in *pc* satisfies
+    - v.Type = precommit
+    - v.Height = b.Height
+    - v.Round = r
+    - v.blockID = hash(b)
+
+> The following property comes from the validity of the [consensus][arXiv]: A
+> correct validator node only sends `prevote` or `precommit`, if
+> `BlockID` of the new (to-be-decided) block is equal to the hash of
+> the last block.
+
+#### **[TMBC-VAL-COMMIT.1]**
+
+If for a block *b*, a commit *c*
+
+- contains at least one validator pair *(v,p)* such that *v* is a
+  **correct** validator node, and
+- is contained in *PossibleCommit(b)*
+
+then the block *b* is on the blockchain.
+
+## Context of this document
+
+In this document we specify the light client verification component,
+called *Core Verification*. The *Core Verification* communicates with
+a full node. As full nodes may be faulty, it cannot trust the
+received information, but the light client has to check whether the
+header it receives coincides with the one generated by Tendermint
+consensus.
+
+The two
+properties [[TMBC-VAL-CONTAINS-CORR.1]][TMBC-VAL-CONTAINS-CORR-link] and
+[[TMBC-VAL-COMMIT.1]][TMBC-VAL-COMMIT-link] formalize the checks done
+by this specification:
+Given a trusted block *tb* and an untrusted block *ub* with a commit *cub*,
+one has to check that *cub* is in *PossibleCommit(ub)*, and that *cub*
+contains a correct node using *tb*.
+
+# Part II - Sequential Definition of the Verification Problem
+
+## Verification Informal Problem statement
+
+Given a height *targetHeight* as an input, the *Verifier* eventually
+stores a header *h* of height *targetHeight* locally. This header *h*
+is generated by the Tendermint [blockchain][block]. In
+particular, a header that was not generated by the blockchain should
+never be stored.
+
+## Sequential Problem statement
+
+#### **[LCV-SEQ-LIVE.1]**
+
+The *Verifier* gets as input a height *targetHeight*, and eventually stores the
+header of height *targetHeight* of the blockchain.
+
+#### **[LCV-SEQ-SAFE.1]**
+
+The *Verifier* never stores a header which is not in the blockchain.
+
+# Part III - Light Client as Distributed System
+
+## Incentives
+
+Faulty full nodes may benefit from lying to the light client, by making the
+light client accept a block that deviates (e.g., contains additional
+transactions) from the one generated by Tendermint consensus.
+Users of the light client might be harmed by accepting a forged header.
+
+The [attack detector][attack-detector] of the light client may help the
+correct full nodes to understand whether their header is a good one.
+Hence, in combination with the light client detector, the correct full
+nodes have the incentive to respond. We can thus base liveness
+arguments on the assumption that correct full nodes reliably talk to
+the light client.
+
+## Computational Model
+
+#### **[LCV-A-PEER.1]**
+
+The verifier communicates with a full node called *primary*. No assumption is made about the full node (it may be correct or faulty).
+
+#### **[LCV-A-COMM.1]**
+
+Communication between the light client and a correct full node is
+reliable and bounded in time. Reliable communication means that
+messages are not lost, not duplicated, and eventually delivered. There
+is a (known) end-to-end delay *Delta*, such that if a message is sent
+at time *t* then it is received and processed by time *t + Delta*.
+This implies that we need a timeout of at least *2 Delta* for remote
+procedure calls to ensure that the response of a correct peer arrives
+before the timeout expires.
+
+#### **[LCV-A-TFM.1]**
+
+The Tendermint blockchain satisfies the Tendermint failure model [**[TMBC-FM-2THIRDS.1]**][TMBC-FM-2THIRDS-link].
+
+#### **[LCV-A-VAL.1]**
+
+The system satisfies [**[TMBC-AUTH-BYZ.1]**][TMBC-Auth-Byz-link] and
+[**[TMBC-FM-2THIRDS.1]**][TMBC-FM-2THIRDS-link]. Thus, there is a
+blockchain that satisfies the soundness requirements (that is, the
+validation rules in [[block]]).
+
+## Distributed Problem Statement
+
+### Two Kinds of Termination
+
+We do not assume that *primary* is correct. Under this assumption no
+protocol can guarantee the combination of the sequential
+properties. Thus, in the (unreliable) distributed setting, we consider
+two kinds of termination (successful and failure), and we will specify
+below under what (favorable) conditions *Core Verification* terminates
+successfully and satisfies the requirements of the sequential
+problem statement:
+
+#### **[LCV-DIST-TERM.1]**
+
+*Core Verification* either *terminates
+successfully* or it *terminates with failure*.
+
+### Design choices
+
+#### **[LCV-DIST-STORE.2]**
+
+*Core Verification* returns a data structure called *LightStore* that
+contains light blocks (that contain a header).
+
+#### **[LCV-DIST-INIT.2]**
+
+*Core Verification* is called with
+
+- *primary*: the PeerID of a full node (with which verification communicates)
+- *root*: a light block (the root of trust)
+- *targetHeight*: a height (the height of a header that should be obtained)
+
+### Temporal Properties
+
+#### **[LCV-DIST-SAFE.2]**
+
+It is always the case that every header in *LightStore* was
+generated by an instance of Tendermint consensus.
+
+#### **[LCV-DIST-LIVE.2]**
+
+If a new instance of *Core Verification* is called with a
+height *targetHeight* greater than root.Header.Height, it must
+eventually terminate.
+
+- If
+    - the *primary* is correct (and locally has the block of
+      *targetHeight*), and
+    - the age of root is always less than the trusting period,
+  then *Core Verification* adds a verified header *hd* with height
+  *targetHeight* to *LightStore* and it **terminates successfully**
+
+> These definitions imply that if the primary is faulty, a header may or
+> may not be added to *LightStore*. In any case,
+> [**[LCV-DIST-SAFE.2]**](#lcv-dist-safe2) must hold.
+> The invariant [**[LCV-DIST-SAFE.2]**](#lcv-dist-safe2) and the liveness
+> requirement [**[LCV-DIST-LIVE.2]**](#lcv-dist-life)
+> allow that verified headers are added to *LightStore* whose
+> height was not passed
+> to the verifier (e.g., intermediate headers used in bisection; see below).
+> Note that for liveness, initially having a *root* within
+> the *trustingPeriod* is not sufficient. However, as this
+> specification will leave some freedom with respect to the strategy
+> in which order to download intermediate headers, we do not give a
+> more precise liveness specification here. After giving the
+> specification of the protocol, we will discuss some liveness
+> scenarios [below](#liveness-scenarios).
+
+### Solving the sequential specification
+
+This specification provides a partial solution to the sequential specification.
+The *Verifier* solves the invariant of the sequential part
+
+[**[LCV-DIST-SAFE.2]**](#lcv-dist-safe2) => [**[LCV-SEQ-SAFE.1]**](#lcv-seq-safe1)
+
+In the case the primary is correct, and *root* is a recent header in *LightStore*, the verifier satisfies the liveness requirements.
+
+⋀ *primary is correct*
+⋀ *root.header.Time* > *now* - *trustingPeriod*
+⋀ [**[LCV-A-Comm.1]**](#lcv-a-comm) ⋀ (
+     ( [**[TMBC-CorrFull.1]**][TMBC-CorrFull-link] ⋀
+       [**[LCV-DIST-LIVE.2]**](#lcv-dist-live2) )
+     ⟹ [**[LCV-SEQ-LIVE.1]**](#lcv-seq-live1)
+)
+
+# Part IV - Light Client Verification Protocol
+
+We provide a specification for Light Client Verification. The local
+code for verification is presented by a sequential function
+`VerifyToTarget` to highlight the control flow of this functionality.
+We note that if a different concurrency model is considered for
+an implementation, the sequential flow of the function may be
+implemented with mutexes, etc. However, the light client verification
+is partitioned into three blocks that can be implemented and tested
+independently:
+
+- `FetchLightBlock` is called to download a light block (header) of a
+  given height from a peer.
+- `ValidAndVerified` is local code that checks the header.
+- `Schedule` decides which height to try to verify next. We keep this
+  underspecified as different implementations (currently in Go and
+  Rust) may implement different optimizations here. We just provide
+  necessary conditions on how the height may evolve.
+
+## Definitions
+
+### Data Types
+
+The core data structure of the protocol is the LightBlock.
+
+#### **[LCV-DATA-LIGHTBLOCK.1]**
+
+```go
+type LightBlock struct {
+    Header     Header
+    Commit     Commit
+    Validators ValidatorSet
+}
+```
+
+#### **[LCV-DATA-LIGHTSTORE.2]**
+
+LightBlocks are stored in a structure which stores all LightBlocks from
+initialization or received from peers.
+
+```go
+type LightStore struct {
+    ...
+}
+```
+
+#### **[LCV-DATA-LS-ROOT.2]**
+
+For each lightblock in a lightstore we record in a field `verification-root` of
+type Height.
+
+> `verification-root` records the height of a lightblock that can be used to verify
+> the lightblock in one step
+
+#### **[LCV-INV-LS-ROOT.2]**
+
+At all times, if a lightblock *b* in a lightstore has *b.verification-root = h*,
+then
+
+- the lightstore contains a lightblock with height *h*, or
+- *b* has the minimal height of all lightblocks in the lightstore; then
+  *b.verification-root* should be nil.
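+
+> The specification deliberately leaves the internals of the LightStore open.
+> Purely as an illustration of the fields discussed above, a minimal
+> in-memory sketch could look as follows (a map keyed by height; the
+> `VerifiedState` type is defined just below, and real implementations would
+> use persistent storage):
+
+```go
+// lightStoreEntry pairs a LightBlock with its verification state and its
+// verification-root, cf. [LCV-DATA-LS-ROOT.2].
+type lightStoreEntry struct {
+    Block LightBlock
+    State VerifiedState
+    // Root is the height of a lightblock that can be used to verify Block
+    // in one step; nil for the lightblock of minimal height,
+    // cf. [LCV-INV-LS-ROOT.2].
+    Root *Height
+}
+
+// LightStore, sketched as a map from heights to entries.
+type LightStore struct {
+    entries map[Height]lightStoreEntry
+}
+```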
+
+The LightStore exposes the following functions to query stored LightBlocks.
+
+#### **[LCV-DATA-LS-STATE.1]**
+
+Each LightBlock is in one of the following states:
+
+```go
+type VerifiedState int
+
+const (
+    StateUnverified = iota + 1
+    StateVerified
+    StateFailed
+    StateTrusted
+)
+```
+
+#### **[LCV-FUNC-GET.1]**
+
+```go
+func (ls LightStore) Get(height Height) (LightBlock, bool)
+```
+
+- Expected postcondition
+    - returns a LightBlock at a given height or false in the second argument if
+      the LightStore does not contain the specified LightBlock.
+
+#### **[LCV-FUNC-LATEST.1]**
+
+```go
+func (ls LightStore) Latest() LightBlock
+```
+
+- Expected postcondition
+    - returns the highest light block
+
+#### **[LCV-FUNC-ADD.1]**
+
+```go
+func (ls LightStore) Add(newBlock)
+```
+
+- Expected precondition
+    - the lightstore is empty
+- Expected postcondition
+    - adds newBlock into light store
+
+#### **[LCV-FUNC-STORE.1]**
+
+```go
+func (ls LightStore) store_chain(newLS LightStore)
+```
+
+- Expected postcondition
+    - adds `newLS` to the lightStore.
+
+#### **[LCV-FUNC-LATEST-VERIF.2]**
+
+```go
+func (ls LightStore) LatestVerified() LightBlock
+```
+
+- Expected postcondition
+    - returns the highest light block whose state is `StateVerified`
+
+#### **[LCV-FUNC-FILTER.1]**
+
+```go
+func (ls LightStore) FilterVerified() LightStore
+```
+
+- Expected postcondition
+    - returns all the lightblocks of the lightstore with state `StateVerified`
+
+#### **[LCV-FUNC-UPDATE.2]**
+
+```go
+func (ls LightStore) Update(lightBlock LightBlock, verifiedState VerifiedState,
+root-height Height)
+```
+
+- Expected postcondition
+    - the lightblock is part of the lightstore
+    - the state of the LightBlock is set to *verifiedState*
+    - the verification-root of the LightBlock is set to *root-height*
+
+```go
+func (ls LightStore) TraceTo(lightBlock LightBlock) (LightBlock, LightStore)
+```
+
+- Expected postcondition
+    - returns a **trusted** lightblock `root` from the lightstore with a height
+      less than `lightBlock`
+    - returns a lightstore that contains lightblocks that constitute a
+      [verification trace](TODOlinkToDetectorSpecOnceThere) from
+      `root` to `lightBlock` (including `lightBlock`)
+
+### Inputs
+
+- *root*: A light block that is trusted
+- *primary*: peerID
+- *targetHeight*: the height of the needed header
+
+### Configuration Parameters
+
+- *trustThreshold*: a float. Can be used if correctness should be based on
+  more voting power than 1/3.
+- *trustingPeriod*: a time duration [**[TMBC-TIME_PARAMS.1]**][TMBC-TIME_PARAMS-link].
+- *clockDrift*: a time duration. Correction parameter dealing with only
+  approximately synchronized clocks.
+
+### Variables
+
+- *nextHeight*: initially *targetHeight*
+  > *nextHeight* should be thought of as the "height of the next header we need
+  > to download and verify"
+
+### Assumptions
+
+#### **[LCV-A-INIT.2]**
+
+- *root* is from the blockchain
+
+- *targetHeight > root.Header.Height*
+
+### Invariants
+
+#### **[LCV-INV-TP.1]**
+
+It is always the case that *LightStore.LatestTrusted.Header.Time > now - trustingPeriod*.
+
+> If the invariant is violated, the light client does not have a
+> header it can trust. A trusted header must be obtained externally;
+> its trust can only be based on social consensus.
+> We use the convention that root is assumed to be verified.
+
+### Used Remote Functions
+
+We use the functions `commit` and `validators` that are provided
+by the [RPC client for Tendermint][RPC].
+
+```go
+func Commit(height int64) (SignedHeader, error)
+```
+
+- Implementation remark
+    - RPC to full node *n*
+    - JSON sent:
+
+```javascript
+// POST /commit
+{
+    "jsonrpc": "2.0",
+    "id": "ccc84631-dfdb-4adc-b88c-5291ea3c2cfb", // UUID v4, unique per request
+    "method": "commit",
+    "params": {
+        "height": 1234
+    }
+}
+```
+
+- Expected precondition
+    - header of `height` exists on blockchain
+- Expected postcondition
+    - if *n* is correct: Returns the signed header of height `height`
+      from the blockchain if communication is timely (no timeout)
+    - if *n* is faulty: Returns a signed header with arbitrary content
+- Error condition
+    - if *n* is correct: precondition violated or timeout
+    - if *n* is faulty: arbitrary error
+
+----
+
+```go
+func Validators(height int64) (ValidatorSet, error)
+```
+
+- Implementation remark
+    - RPC to full node *n*
+    - JSON sent:
+
+```javascript
+// POST /validators
+{
+    "jsonrpc": "2.0",
+    "id": "ccc84631-dfdb-4adc-b88c-5291ea3c2cfb", // UUID v4, unique per request
+    "method": "validators",
+    "params": {
+        "height": 1234
+    }
+}
+```
+
+- Expected precondition
+    - header of `height` exists on blockchain
+- Expected postcondition
+    - if *n* is correct: Returns the validator set of height `height`
+      from the blockchain if communication is timely (no timeout)
+    - if *n* is faulty: Returns arbitrary validator set
+- Error condition
+    - if *n* is correct: precondition violated or timeout
+    - if *n* is faulty: arbitrary error
+
+----
+
+### Communicating Function
+
+#### **[LCV-FUNC-FETCH.1]**
+
+```go
+func FetchLightBlock(peer PeerID, height Height) LightBlock
+```
+
+- Implementation remark
+    - RPC to peer at *PeerID*
+    - calls `Commit` for *height* and `Validators` for *height* and *height+1*
+- Expected precondition
+    - `height` is less than or equal to height of the peer **[LCV-IO-PRE-HEIGHT.1]**
+- Expected postcondition:
+    - if *peer* is correct:
+        - Returns the LightBlock *lb* of height `height`
+          that is consistent with the blockchain
+        - *lb.provider = peer* **[LCV-IO-POST-PROVIDER.1]**
+        - *lb.Header* is a header consistent with the blockchain
+        - *lb.Validators* is the validator set of the blockchain at height *height*
+        - *lb.NextValidators* is the validator set of the blockchain at height *height + 1*
+    - if *peer* is faulty: Returns a LightBlock with arbitrary content
+      [**[TMBC-AUTH-BYZ.1]**][TMBC-Auth-Byz-link]
+- Error condition
+    - if *peer* is correct: precondition violated
+    - if *peer* is faulty: arbitrary error
+    - if *lb.provider != peer*
+    - times out after 2 Delta (by assumption *peer* is faulty)
+
+----
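+
+> The implementation remark above determines `FetchLightBlock` up to error
+> handling: one `Commit` query and two `Validators` queries. The following
+> sketch shows this composition. It assumes `SignedHeader` bundles a header
+> with its commit; error handling is elided, and the *provider* and
+> *NextValidators* data mentioned in the postconditions is assumed to be
+> carried in implementation-specific fields.
+
+```go
+func FetchLightBlock(peer PeerID, height Height) LightBlock {
+    // one RPC for the signed header, cf. Commit above
+    signedHeader, _ := Commit(int64(height))
+    // two RPCs for the validator sets of height and height+1,
+    // cf. Validators above
+    vals, _ := Validators(int64(height))
+    nextVals, _ := Validators(int64(height) + 1)
+    // nextVals (and the provider peer) would be attached to the light
+    // block in implementation-specific fields, see lead-in
+    _ = nextVals
+    return LightBlock{
+        Header:     signedHeader.Header,
+        Commit:     signedHeader.Commit,
+        Validators: vals,
+    }
+}
+```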
+
+## Core Verification
+
+### Outline
+
+`VerifyToTarget` is the main function and uses the following functions.
+
+- `FetchLightBlock` is called to download the next light block. It is
+  the only function that communicates with other nodes
+- `ValidAndVerified` checks whether a header is valid and checks if a
+  new lightBlock should be trusted
+  based on a previously verified lightBlock.
+- `Schedule` decides which height to try to verify next
+
+In the following description of `VerifyToTarget` we do not deal with error
+handling. If any of the above functions returns an error, VerifyToTarget just
+passes the error on.
+
+#### **[LCV-FUNC-MAIN.2]**
+
+```go
+func VerifyToTarget(primary PeerID, root LightBlock,
+                    targetHeight Height) (LightStore, Result) {
+
+    lightStore = new LightStore;
+    lightStore.Update(root, StateVerified, root.verifiedBy);
+    nextHeight := targetHeight;
+
+    for lightStore.LatestVerified.Height < targetHeight {
+
+        // Get next LightBlock for verification
+        current, found := lightStore.Get(nextHeight)
+        if !found {
+            current = FetchLightBlock(primary, nextHeight)
+            lightStore.Update(current, StateUnverified, nil)
+        }
+
+        // Verify
+        verdict = ValidAndVerified(lightStore.LatestVerified, current)
+
+        // Decide whether/how to continue
+        if verdict == SUCCESS {
+            lightStore.Update(current, StateVerified, lightStore.LatestVerified.Height)
+        }
+        else if verdict == NOT_ENOUGH_TRUST {
+            // do nothing
+            // the light block current passed validation, but the validator
+            // set is too different to verify it. We keep the state of
+            // current at StateUnverified. For a later iteration, Schedule
+            // might decide to try verification of that light block again.
+        }
+        else {
+            // verdict is some error code
+            lightStore.Update(current, StateFailed, nil)
+            return (nil, ResultFailure)
+        }
+        nextHeight = Schedule(lightStore, nextHeight, targetHeight)
+    }
+    return (lightStore.FilterVerified, ResultSuccess)
+}
+```
+
+- Expected precondition
+    - *root* is within the *trustingPeriod* **[LCV-PRE-TP.1]**
+    - *targetHeight* is greater than the height of *root*
+- Expected postcondition:
+    - returns *lightStore* that contains a LightBlock that corresponds to a block
+      of the blockchain of height *targetHeight*
+      (that is, the LightBlock has been added to *lightStore*) **[LCV-POST-LS.1]**
+- Error conditions
+    - if the precondition is violated
+    - if `ValidAndVerified` or `FetchLightBlock` report an error
+    - if [**[LCV-INV-TP.1]**](#LCV-INV-TP.1) is violated
+
+### Details of the Functions
+
+#### **[LCV-FUNC-VALID.2]**
+
+```go
+func ValidAndVerified(trusted LightBlock, untrusted LightBlock) Result
+```
+
+- Expected precondition:
+    - *untrusted* is valid, that is, satisfies the soundness [checks][block]
+    - *untrusted* is **well-formed**, that is,
+        - *untrusted.Header.Time < now + clockDrift*
+        - *untrusted.Validators = hash(untrusted.Header.Validators)*
+        - *untrusted.NextValidators = hash(untrusted.Header.NextValidators)*
+    - *trusted.Header.Time > now - trustingPeriod*
+    - the `Height` and `Time` of `trusted` are smaller than the `Height` and
+      `Time` of `untrusted`, respectively
+    - the *untrusted.Header* is well-formed (passes the tests from
+      [[block]]), and in particular
+        - if the untrusted header `untrusted.Header` is the immediate
+          successor of `trusted.Header`, then it holds that
+            - *trusted.Header.NextValidators =
+              untrusted.Header.Validators*, and
+              moreover,
+            - *untrusted.Header.Commit*
+                - contains signatures by more than two-thirds of the validators
+                - contains no signature from nodes that are not in
+                  *trusted.Header.NextValidators*
+- Expected postcondition:
+    - Returns `SUCCESS`:
+        - if *untrusted* is the immediate successor of *trusted*, or otherwise,
+        - if the signatures of a set of validators that have more than
+          *max(1/3,trustThreshold)* of the voting power in
+          *trusted.Header.NextValidators* are contained in
+          *untrusted.Commit* (that is, the header passes the tests
+          [**[TMBC-VAL-CONTAINS-CORR.1]**][TMBC-VAL-CONTAINS-CORR-link]
+          and [**[TMBC-VAL-COMMIT.1]**][TMBC-VAL-COMMIT-link])
+    - Returns `NOT_ENOUGH_TRUST` if:
+        - *untrusted* is *not* the immediate successor of *trusted*
+          and the *max(1/3,trustThreshold)* threshold is not reached
+          (that is, if
+          [**[TMBC-VAL-CONTAINS-CORR.1]**][TMBC-VAL-CONTAINS-CORR-link]
+          fails and the header does not violate the soundness
+          checks [[block]]).
+- Error condition:
+    - if precondition violated
+
+----
+
+#### **[LCV-FUNC-SCHEDULE.1]**
+
+```go
+func Schedule(lightStore, nextHeight, targetHeight) Height
+```
+
+- Implementation remark: It picks the next height to be verified.
+  We keep the precise choice of the next header under-specified. It is
+  subject to performance optimizations that do not influence the correctness.
+- Expected postcondition: **[LCV-SCHEDULE-POST.1]**
+  Return *H* s.t.
+   1. if *lightStore.LatestVerified.Height = nextHeight* and
+      *lightStore.LatestVerified.Height < targetHeight* then
+      *nextHeight < H <= targetHeight*
+   2. if *lightStore.LatestVerified.Height < nextHeight* and
+      *lightStore.LatestVerified.Height < targetHeight* then
+      *lightStore.LatestVerified.Height < H < nextHeight*
+   3. if *lightStore.LatestVerified.Height = targetHeight* then
+      *H = targetHeight*
+
+> Case i. captures the case where the light block at height *nextHeight*
+> has been verified, and we can choose a height closer to the *targetHeight*.
+> As we get the *lightStore* as parameter, the choice of the next height can
+> depend on the *lightStore*, e.g., we can pick a height for which we have
+> already downloaded a light block.
+> In Case ii. the header of *nextHeight* could not be verified, and we need to pick a smaller height.
+> Case iii. is the special case where we have already verified the *targetHeight*.
+
+### Solving the distributed specification
+
+Analogous to [[001_published]](./verification_001_published.md#solving-the-distributed-specification)
+
+## Liveness Scenarios
+
+Analogous to [[001_published]](./verification_001_published.md#liveness-scenarios)
+
+# Part V - Supporting the IBC Relayer
+
+The above specification focuses on the most common case, which also
+constitutes the most challenging task: using the Tendermint [security
+model][TMBC-FM-2THIRDS-link] to verify light blocks without
+downloading all intermediate blocks. To focus on this challenge, above
+we have restricted ourselves to the case where *targetHeight* is
+greater than the height of any trusted header. This simplified the
+presentation of the algorithm: initially,
+`lightStore.LatestVerified()` is less than *targetHeight*, and in the
+process of verification `lightStore.LatestVerified()` increases until
+*targetHeight* is reached.
+
+For [IBC][ibc-rs] there are two additional challenges:
+
+1. it might be that some "older" header is needed, that is,
+   *targetHeight < lightStore.LatestVerified()*. The
+   [supervisor](../supervisor/supervisor.md) checks whether it is in this
+   case by calling `LatestPrevious` and `MinVerified` and if so it calls
+   `Backwards`. All these functions are specified below.
+
+2. In order to submit proof of a light client attack, a relayer may
+   need to submit a verification trace. Thus, it is important to
+   compute such a trace efficiently. That this can be done is based on
+   the invariant [[LCV-INV-LS-ROOT.2]](#LCV-INV-LS-ROOT2) that needs
+   to be maintained by the light client. In particular
+   `VerifyToTarget` and `Backwards` need to take care of setting
+   `verification-root`; see the sketch after this list.
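+
+> To illustrate challenge 2., the following sketch reads a verification
+> trace off the `verification-root` links, in the spirit of `TraceTo` from
+> Part IV. It assumes the map-based LightStore sketch given there; it is an
+> illustration, not part of this specification.
+
+```go
+// trace returns the heights of a verification trace ending in target:
+// the first element is the root of trust, and each height can be verified
+// in one step from its predecessor in the trace, cf. [LCV-INV-LS-ROOT.2].
+func (ls LightStore) trace(target Height) []Height {
+    var trace []Height
+    h := target
+    for {
+        trace = append([]Height{h}, trace...) // prepend: root comes first
+        entry := ls.entries[h]
+        if entry.Root == nil {
+            // minimal height reached: this is the root of trust
+            return trace
+        }
+        h = *entry.Root
+    }
+}
+```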
+
+#### **[LCV-FUNC-LATEST-PREV.2]**
+
+```go
+func (ls LightStore) LatestPrevious(height Height) (LightBlock, bool)
+```
+
+- Expected postcondition
+    - returns a light block *lb* that satisfies:
+        - *lb* is in lightStore
+        - *lb* is in StateTrusted
+        - *lb* is not expired
+        - *lb.Header.Height < height*
+        - for all *b* in lightStore s.t. *b* is trusted and not expired it
+          holds *lb.Header.Height >= b.Header.Height*
+    - *false* in the second argument if
+      the LightStore does not contain such an *lb*.
+
+----
+
+#### **[LCV-FUNC-LOWEST.2]**
+
+```go
+func (ls LightStore) Lowest() (LightBlock)
+```
+
+- Expected postcondition
+    - returns the lowest trusted light block within the trusting period
+
+----
+
+#### **[LCV-FUNC-MIN.2]**
+
+```go
+func (ls LightStore) MinVerified() (LightBlock, bool)
+```
+
+- Expected postcondition
+    - returns a light block *lb* that satisfies:
+        - *lb* is in lightStore
+        - *lb.Header.Height* is minimal in the lightStore
+    - *false* in the second argument if
+      the LightStore does not contain such an *lb*.
+
+If a height that is smaller than the smallest height in the lightstore
+is required, we check the hashes backwards. This is done with the
+following function:
+
+#### **[LCV-FUNC-BACKWARDS.2]**
+
+```go
+func Backwards (primary PeerID, root LightBlock, targetHeight Height)
+               (LightStore, Result) {
+
+    lb := root;
+    lightStore := new LightStore;
+    lightStore.Update(lb, StateTrusted, lb.verifiedBy)
+
+    latest := lb
+    for i := lb.Header.Height - 1; i >= targetHeight; i-- {
+        // here we download height-by-height. We might first download all
+        // headers down to targetHeight and then check them.
+        current := FetchLightBlock(primary, i)
+        if hash(current.Header) != latest.Header.LastBlockID {
+            return (nil, ResultFailure)
+        } else {
+            // latest and current are linked together by LastBlockID;
+            // therefore it is not relevant which we verified first.
+            // For consistency, we store that latest was verified using
+            // current, so that verifiedBy always points down the chain.
+            lightStore.Update(current, StateTrusted, nil)
+            lightStore.Update(latest, StateTrusted, current.Header.Height)
+        }
+        latest = current
+    }
+    return (lightStore, ResultSuccess)
+}
+```
+
+# References
+
+[[block]] Specification of the block data structure.
+
+[[RPC]] RPC client for Tendermint
+
+[[attack-detector]] The specification of the light client attack detector.
+
+[[fullnode]] Specification of the full node API
+
+[[ibc-rs]] Rust implementation of IBC modules and relayer.
+
+[[lightclient]] The light client ADR [77d2651 on Dec 27, 2019].
+
+[RPC]: https://docs.tendermint.com/master/rpc/
+
+[block]: https://github.com/tendermint/spec/blob/d46cd7f573a2c6a2399fcab2cde981330aa63f37/spec/core/data_structures.md
+
+[TMBC-HEADER-link]: #tmbc-header1
+[TMBC-SEQ-link]: #tmbc-seq1
+[TMBC-CorrFull-link]: #tmbc-corr-full1
+[TMBC-Auth-Byz-link]: #tmbc-auth-byz1
+[TMBC-TIME_PARAMS-link]: #tmbc-time-params1
+[TMBC-FM-2THIRDS-link]: #tmbc-fm-2thirds1
+[TMBC-VAL-CONTAINS-CORR-link]: #tmbc-val-contains-corr1
+[TMBC-VAL-COMMIT-link]: #tmbc-val-commit1
+[TMBC-SOUND-DISTR-POSS-COMMIT-link]: #tmbc-sound-distr-poss-commit1
+
+[lightclient]: https://github.com/interchainio/tendermint-rs/blob/e2cb9aca0b95430fca2eac154edddc9588038982/docs/architecture/adr-002-lite-client.md
+[attack-detector]: https://github.com/tendermint/spec/blob/master/rust-spec/lightclient/detection/detection_001_reviewed.md
+[fullnode]: https://github.com/tendermint/spec/blob/master/spec/blockchain/fullnode.md
+
+[ibc-rs]:https://github.com/informalsystems/ibc-rs
+
+[blockchain-validator-set]: https://github.com/tendermint/spec/blob/master/spec/blockchain/blockchain.md#data-structures
+[fullnode-data-structures]: https://github.com/tendermint/spec/blob/master/spec/blockchain/fullnode.md#data-structures
+
+[FN-ManifestFaulty-link]: https://github.com/tendermint/spec/blob/master/spec/blockchain/fullnode.md#fn-manifestfaulty
+
+[arXiv]: https://arxiv.org/abs/1807.04938
diff --git a/spec/light-client/verification/verification_003_draft.md b/spec/light-client/verification/verification_003_draft.md
new file mode 100644
index 0000000000..cd38e7e967
--- /dev/null
+++ b/spec/light-client/verification/verification_003_draft.md
@@ -0,0 +1,76 @@
+# Light Client Verification
+
+#### **[LCV-FUNC-VERIFYCOMMITLIGHT.1]**
+
+VerifyCommitLight verifies that 2/3+ of the signatures for a validator set were for
+a given blockID. The function will finish early and thus may not check all signatures.
+
+```go
+func VerifyCommitLight(chainID string, vals *ValidatorSet, blockID BlockID,
+    height int64, commit *Commit) error {
+    // run a basic validation of the arguments
+    if err := verifyBasicValsAndCommit(vals, commit, height, blockID); err != nil {
+        return err
+    }
+
+    // calculate voting power needed
+    votingPowerNeeded := vals.TotalVotingPower() * 2 / 3
+
+    // check whether the signatures and the validator set are in a 1-to-1
+    // correspondence, so that validators can be retrieved by index
+    // (note: this definition is an assumption added here; the original
+    // snippet used lookUpByIndex without defining it)
+    lookUpByIndex := len(commit.Signatures) == len(vals.Validators)
+
+    var (
+        val                *Validator
+        valIdx             int32
+        seenVals                 = make(map[int32]int, len(commit.Signatures))
+        talliedVotingPower int64 = 0
+        voteSignBytes      []byte
+    )
+    for idx, commitSig := range commit.Signatures {
+        // ignore all commit signatures that are not for the block
+        if !commitSig.ForBlock() {
+            continue
+        }
+
+        // If the vals and commit have a 1-to-1 correspondence we can retrieve
+        // them by index else we need to retrieve them by address
+        if lookUpByIndex {
+            val = vals.Validators[idx]
+        } else {
+            valIdx, val = vals.GetByAddress(commitSig.ValidatorAddress)
+
+            // if the signature doesn't belong to anyone in the validator set
+            // then we just skip over it
+            if val == nil {
+                continue
+            }
+
+            // because we are getting validators by address we need to make sure
+            // that the same validator doesn't commit twice
+            if firstIndex, ok := seenVals[valIdx]; ok {
+                secondIndex := idx
+                return fmt.Errorf("double vote from %v (%d and %d)", val, firstIndex, secondIndex)
+            }
+            seenVals[valIdx] = idx
+        }
+
+        voteSignBytes = commit.VoteSignBytes(chainID, int32(idx))
+
+        if !val.PubKey.VerifySignature(voteSignBytes, commitSig.Signature) {
+            return fmt.Errorf("wrong signature (#%d): %X", idx, commitSig.Signature)
+        }
+
+        // Add the voting power of the validator
+        // to the tally
+        talliedVotingPower += val.VotingPower
+
+        // check if we have enough signatures and can thus exit early
+        if talliedVotingPower > votingPowerNeeded {
+            return nil
+        }
+    }
+
+    if got, needed := talliedVotingPower, votingPowerNeeded; got <= needed {
+        return ErrNotEnoughVotingPowerSigned{Got: got, Needed: needed}
+    }
+
+    return nil
+}
+```
\ No newline at end of file
diff --git a/spec/p2p/config.md b/spec/p2p/config.md
new file mode 100644
index 0000000000..b63c04f28d
--- /dev/null
+++ b/spec/p2p/config.md
@@ -0,0 +1,49 @@
+# P2P Config
+
+Here we describe configuration options around the Peer Exchange.
+These can be set using flags or via the `$TMHOME/config/config.toml` file.
+
+## Seed Mode
+
+`--p2p.seed_mode`
+
+The node operates in seed mode. In seed mode, a node continuously crawls the network for peers,
+and upon incoming connection shares some peers and disconnects.
+
+## Seeds
+
+`--p2p.seeds “id100000000000000000000000000000000@1.2.3.4:26656,id200000000000000000000000000000000@2.3.4.5:4444”`
+
+Dials these seeds when we need more peers. They should return a list of peers and then disconnect.
+If we already have enough peers in the address book, we may never need to dial them.
+
+## Persistent Peers
+
+`--p2p.persistent_peers “id100000000000000000000000000000000@1.2.3.4:26656,id200000000000000000000000000000000@2.3.4.5:26656”`
+
+Dial these peers and auto-redial them if the connection fails.
+These are intended to be trusted persistent peers that can help
+anchor us in the p2p network. The auto-redial uses exponential
+backoff and will give up after a day of trying to connect.
+
+However, if `persistent_peers_max_dial_period` is set to a value greater than zero,
+the pause between dials to each persistent peer will not exceed `persistent_peers_max_dial_period`
+during exponential backoff, and we keep retrying without giving up.
+
+**Note:** If `seeds` and `persistent_peers` intersect,
+the user will be warned that seeds may auto-close connections
+and that the node may not be able to keep the connection persistent.
+
+## Private Peers
+
+`--p2p.private_peer_ids “id100000000000000000000000000000000,id200000000000000000000000000000000”`
+
+These are IDs of the peers that we do not add to the address book or gossip to
+other peers. They stay private to us.
+
+## Unconditional Peers
+
+`--p2p.unconditional_peer_ids “id100000000000000000000000000000000,id200000000000000000000000000000000”`
+
+These are IDs of the peers that are allowed to connect (both inbound and outbound)
+regardless of whether the node's `max_num_inbound_peers` or
+`max_num_outbound_peers` limits have been reached.
diff --git a/spec/p2p/connection.md b/spec/p2p/connection.md
new file mode 100644
index 0000000000..33178f4794
--- /dev/null
+++ b/spec/p2p/connection.md
@@ -0,0 +1,111 @@
+# P2P Multiplex Connection
+
+## MConnection
+
+`MConnection` is a multiplex connection that supports multiple independent streams
+with distinct quality of service guarantees atop a single TCP connection.
+Each stream is known as a `Channel` and each `Channel` has a globally unique _byte id_.
+Each `Channel` also has a relative priority that determines the quality of service
+of the `Channel` compared to other `Channel`s.
+The _byte id_ and the relative priorities of each `Channel` are configured upon
+initialization of the connection.
+
+The `MConnection` supports three packet types:
+
+- Ping
+- Pong
+- Msg
+
+### Ping and Pong
+
+The ping and pong messages consist of writing a single byte to the connection: 0x1 and 0x2, respectively.
+
+When we haven't received any messages on an `MConnection` in time `pingTimeout`, we send a ping message.
+When a ping is received on the `MConnection`, a pong is sent in response only if there are no other messages
+to send and the peer has not sent us too many pings (TODO).
+
+If a pong or message is not received in sufficient time after a ping, the peer is disconnected.
+
+### Msg
+
+Messages in channels are chopped into smaller `msgPacket`s for multiplexing.
+
+```go
+type msgPacket struct {
+    ChannelID byte
+    EOF       byte // 1 means message ends here.
+    Bytes     []byte
+}
+```
+
+The `msgPacket` is serialized using [Proto3](https://developers.google.com/protocol-buffers/docs/proto3).
+The received `Bytes` of a sequential set of packets are appended together
+until a packet with `EOF=1` is received, then the complete serialized message
+is returned for processing by the `onReceive` function of the corresponding channel.
+
+### Multiplexing
+
+Messages are sent from a single `sendRoutine`, which loops over a select statement and results in the sending
+of a ping, a pong, or a batch of data messages. The batch of data messages may include messages from multiple channels.
+Message bytes are queued for sending in their respective channel, with each channel holding one unsent message at a time.
+Messages are chosen for a batch one at a time from the channel with the lowest ratio of recently sent bytes to channel priority.
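+
+> To illustrate the chopping described above, the following sketch splits
+> the serialized bytes of one message into `msgPacket`s. The packet struct
+> is the one from the `Msg` section; `maxPacketPayload` and the function
+> name are illustrative only.
+
+```go
+// chop splits msg into packets of at most maxPacketPayload payload bytes;
+// only the final packet carries EOF=1, which tells the receiver that the
+// message is complete and can be handed to the channel's onReceive.
+func chop(chID byte, msg []byte, maxPacketPayload int) []msgPacket {
+    var packets []msgPacket
+    for len(msg) > maxPacketPayload {
+        packets = append(packets, msgPacket{
+            ChannelID: chID,
+            EOF:       0,
+            Bytes:     msg[:maxPacketPayload],
+        })
+        msg = msg[maxPacketPayload:]
+    }
+    packets = append(packets, msgPacket{ChannelID: chID, EOF: 1, Bytes: msg})
+    return packets
+}
+```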
+
+## Sending Messages
+
+There are two methods for sending messages:
+
+```go
+func (m MConnection) Send(chID byte, msg interface{}) bool {}
+func (m MConnection) TrySend(chID byte, msg interface{}) bool {}
+```
+
+`Send(chID, msg)` is a blocking call that waits until `msg` is successfully queued
+for the channel with the given id byte `chID`. The message `msg` is serialized
+using the `tendermint/go-amino` submodule's `WriteBinary()` reflection routine.
+
+`TrySend(chID, msg)` is a nonblocking call that queues the message `msg` in the channel
+with the given id byte `chID` if the queue is not full; otherwise it returns false immediately.
+
+`Send()` and `TrySend()` are also exposed for each `Peer`.
+
+## Peer
+
+Each peer has one `MConnection` instance, and includes other information such as whether the connection
+was outbound, whether the connection should be recreated if it closes, various identity information about the node,
+and other higher level thread-safe data used by the reactors.
+
+## Switch/Reactor
+
+The `Switch` handles peer connections and exposes an API to receive incoming messages
+on `Reactors`. Each `Reactor` is responsible for handling incoming messages of one
+or more `Channels`. So while sending outgoing messages is typically performed on the peer,
+incoming messages are received on the reactor.
+
+```go
+// Declare a MyReactor reactor that handles messages on MyChannelID.
+type MyReactor struct{}
+
+func (reactor MyReactor) GetChannels() []*ChannelDescriptor {
+    return []*ChannelDescriptor{&ChannelDescriptor{ID: MyChannelID, Priority: 1}}
+}
+
+func (reactor MyReactor) Receive(chID byte, peer *Peer, msgBytes []byte) {
+    r, n, err := bytes.NewBuffer(msgBytes), new(int64), new(error)
+    msgString := ReadString(r, n, err)
+    fmt.Println(msgString)
+}
+
+// Other Reactor methods omitted for brevity
+...
+
+// sw rather than switch, since switch is a reserved word in Go
+sw := NewSwitch([]Reactor{MyReactor{}})
+
+...
+
+// Send a random message to all outbound connections
+for _, peer := range sw.Peers().List() {
+    if peer.IsOutbound() {
+        peer.Send(MyChannelID, "Here's a random message")
+    }
+}
+```
diff --git a/spec/p2p/messages/README.md b/spec/p2p/messages/README.md
new file mode 100644
index 0000000000..1b5f5c60dd
--- /dev/null
+++ b/spec/p2p/messages/README.md
@@ -0,0 +1,19 @@
+---
+order: 1
+parent:
+  title: Messages
+  order: 1
+---
+
+# Messages
+
+An implementation of the spec consists of many components. While many parts of these components are implementation specific, the p2p messages are not. In this section we will be covering all the p2p messages of the components.
+
+There are two parts to the P2P messages: the message and the channel. The channel is message specific, and messages are specific to components of Tendermint. When a node connects to a peer, it tells the other node which channels are available. This informs the peer what services the connecting node offers. You can read more on channels in [connection.md](../connection.md#mconnection)
+
+- [Block Sync](./block-sync.md)
+- [Mempool](./mempool.md)
+- [Evidence](./evidence.md)
+- [State Sync](./state-sync.md)
+- [Pex](./pex.md)
+- [Consensus](./consensus.md)
diff --git a/spec/p2p/messages/block-sync.md b/spec/p2p/messages/block-sync.md
new file mode 100644
index 0000000000..48aa6155fd
--- /dev/null
+++ b/spec/p2p/messages/block-sync.md
@@ -0,0 +1,68 @@
+---
+order: 2
+---
+
+# Block Sync
+
+## Channel
+
+Block sync has one channel.
+
+| Name              | Number |
+|-------------------|--------|
+| BlockchainChannel | 64     |
+
+## Message Types
+
+There are multiple message types for Block Sync.
+
+### BlockRequest
+
+BlockRequest asks a peer for a block at the height specified.
+
+| Name   | Type  | Description               | Field Number |
+|--------|-------|---------------------------|--------------|
+| Height | int64 | Height of requested block | 1            |
+
+### NoBlockResponse
+
+NoBlockResponse notifies the peer requesting a block that the node does not contain it.
+
+| Name   | Type  | Description               | Field Number |
+|--------|-------|---------------------------|--------------|
+| Height | int64 | Height of requested block | 1            |
+
+### BlockResponse
+
+BlockResponse contains the block requested.
+
+| Name  | Type                                         | Description     | Field Number |
+|-------|----------------------------------------------|-----------------|--------------|
+| Block | [Block](../../core/data_structures.md#block) | Requested Block | 1            |
+
+### StatusRequest
+
+StatusRequest is an empty message that notifies the peer to respond with the highest and lowest blocks it has stored.
+
+> Empty message.
+
+### StatusResponse
+
+StatusResponse responds to a peer with the highest and lowest block stored.
+
+| Name   | Type  | Description                                                        | Field Number |
+|--------|-------|--------------------------------------------------------------------|--------------|
+| Height | int64 | Current Height of a node                                           | 1            |
+| base   | int64 | First known block; if pruning is enabled it will be higher than 1  | 2            |
+
+### Message
+
+Message is a [`oneof` protobuf type](https://developers.google.com/protocol-buffers/docs/proto#oneof). The `oneof` consists of five messages.
+
+| Name              | Type                                | Description                                                             | Field Number |
+|-------------------|-------------------------------------|-------------------------------------------------------------------------|--------------|
+| block_request     | [BlockRequest](#blockrequest)       | Request a block from a peer                                             | 1            |
+| no_block_response | [NoBlockResponse](#noblockresponse) | Response saying it does not have the requested block                    | 2            |
+| block_response    | [BlockResponse](#blockresponse)     | Response with requested block                                           | 3            |
+| status_request    | [StatusRequest](#statusrequest)     | Request the highest and lowest block numbers from a peer                | 4            |
+| status_response   | [StatusResponse](#statusresponse)   | Response with the highest and lowest block numbers the node has stored  | 5            |
diff --git a/spec/p2p/messages/consensus.md b/spec/p2p/messages/consensus.md
new file mode 100644
index 0000000000..7a56231e6d
--- /dev/null
+++ b/spec/p2p/messages/consensus.md
@@ -0,0 +1,149 @@
+---
+order: 7
+---
+
+# Consensus
+
+## Channel
+
+Consensus has four separate channels. The channel identifiers are listed below.
+
+| Name               | Number |
+|--------------------|--------|
+| StateChannel       | 32     |
+| DataChannel        | 33     |
+| VoteChannel        | 34     |
+| VoteSetBitsChannel | 35     |
+
+## Message Types
+
+### Proposal
+
+Proposal is sent when a new block is proposed. It is a suggestion of what the
+next block in the blockchain should be.
+
+| Name     | Type                                                | Description                            | Field Number |
+|----------|-----------------------------------------------------|----------------------------------------|--------------|
+| proposal | [Proposal](../../core/data_structures.md#proposal) | Proposed Block to come to consensus on | 1            |
+
+### Vote
+
+Vote is sent to vote for some block (or to inform others that a process does not vote in the
+current round).
diff --git a/spec/p2p/messages/consensus.md b/spec/p2p/messages/consensus.md
new file mode 100644
index 0000000000..7a56231e6d
--- /dev/null
+++ b/spec/p2p/messages/consensus.md
@@ -0,0 +1,149 @@
+---
+order: 7
+---
+
+# Consensus
+
+## Channel
+
+Consensus has four separate channels. The channel identifiers are listed below.
+
+| Name               | Number |
+|--------------------|--------|
+| StateChannel       | 32     |
+| DataChannel        | 33     |
+| VoteChannel        | 34     |
+| VoteSetBitsChannel | 35     |
+
+## Message Types
+
+### Proposal
+
+Proposal is sent when a new block is proposed. It is a suggestion of what the
+next block in the blockchain should be.
+
+| Name     | Type                                               | Description                            | Field Number |
+|----------|----------------------------------------------------|----------------------------------------|--------------|
+| proposal | [Proposal](../../core/data_structures.md#proposal) | Proposed Block to come to consensus on | 1            |
+
+### Vote
+
+Vote is sent to vote for some block (or to inform others that a process does not vote in the
+current round). Vote is defined in the
+[core data structures](https://github.com/tendermint/tendermint/blob/master/spec/core/data_structures.md#vote)
+section and contains the validator's
+information (validator address and index), the height and round for which the vote is sent, the vote type,
+the blockID if the process votes for some block (`nil` otherwise), and a timestamp when the vote was sent. The
+message is signed by the validator's private key.
+
+| Name | Type                                       | Description               | Field Number |
+|------|--------------------------------------------|---------------------------|--------------|
+| vote | [Vote](../../core/data_structures.md#vote) | Vote for a proposed Block | 1            |
+
+### BlockPart
+
+BlockPart is sent when gossiping a piece of the proposed block. It contains the height, round
+and the block part.
+
+| Name   | Type                                       | Description                            | Field Number |
+|--------|--------------------------------------------|----------------------------------------|--------------|
+| height | int64                                      | Height of corresponding block.         | 1            |
+| round  | int32                                      | Round of voting to finalize the block. | 2            |
+| part   | [Part](../../core/data_structures.md#part) | A part of the block.                   | 3            |
+
+### NewRoundStep
+
+NewRoundStep is sent for every step transition during the core consensus algorithm execution.
+It is used in the gossip part of the Tendermint protocol to inform peers about the current
+height/round/step a process is in.
+
+| Name                     | Type   | Description                            | Field Number |
+|--------------------------|--------|----------------------------------------|--------------|
+| height                   | int64  | Height of corresponding block          | 1            |
+| round                    | int32  | Round of voting to finalize the block. | 2            |
+| step                     | uint32 | Current step within the round          | 3            |
+| seconds_since_start_time | int64  | Seconds since the current height began | 4            |
+| last_commit_round        | int32  | Round of the last commit               | 5            |
+
+### NewValidBlock
+
+NewValidBlock is sent when a validator observes a valid block B in some round r,
+i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in the round r.
+It contains the height and round in which the valid block was observed, the block part set header that
+describes the valid block and is used to obtain all
+block parts, and a bit array of the block parts the process currently has, so its peers know which
+parts it is missing and can send them.
+If the block has also been committed, the `is_commit` flag is set to true.
+
+| Name                  | Type                                                          | Description                            | Field Number |
+|-----------------------|---------------------------------------------------------------|----------------------------------------|--------------|
+| height                | int64                                                         | Height of corresponding block          | 1            |
+| round                 | int32                                                         | Round of voting to finalize the block. | 2            |
+| block_part_set_header | [PartSetHeader](../../core/data_structures.md#partsetheader) | Describes the valid block's part set   | 3            |
+| block_parts           | bitarray                                                      | Block parts the process currently has  | 4            |
+| is_commit             | bool                                                          | True if the block was also committed   | 5            |
+
+### ProposalPOL
+
+ProposalPOL is sent when a previous block is re-proposed.
+It is used to inform peers in which round the process learned of this block (ProposalPOLRound),
+and which prevotes for the re-proposed block the process has.
+
+| Name               | Type     | Description                            | Field Number |
+|--------------------|----------|----------------------------------------|--------------|
+| height             | int64    | Height of corresponding block          | 1            |
+| proposal_pol_round | int32    | Round in which the block was learned   | 2            |
+| proposal_pol       | bitarray | Prevotes the process has for the block | 3            |
+
+### ReceivedVote
+
+ReceivedVote is sent to indicate that a particular vote has been received. It contains the height,
+round, vote type and the index of the validator that is the originator of the corresponding vote.
+
+| Name   | Type                                                              | Description                            | Field Number |
+|--------|-------------------------------------------------------------------|----------------------------------------|--------------|
+| height | int64                                                             | Height of corresponding block          | 1            |
+| round  | int32                                                             | Round of voting to finalize the block. | 2            |
+| type   | [SignedMessageType](../../core/data_structures.md#signedmsgtype) | Type of the received vote              | 3            |
+| index  | int32                                                             | Index of the originating validator     | 4            |
+
+### VoteSetMaj23
+
+VoteSetMaj23 is sent to indicate that a process has seen +2/3 votes for some BlockID.
+It contains the height, round, vote type and the BlockID.
+
+| Name   | Type                                                              | Description                            | Field Number |
+|--------|-------------------------------------------------------------------|----------------------------------------|--------------|
+| height | int64                                                             | Height of corresponding block          | 1            |
+| round  | int32                                                             | Round of voting to finalize the block. | 2            |
+| type   | [SignedMessageType](../../core/data_structures.md#signedmsgtype) | Type of the votes seen                 | 3            |
+
+### VoteSetBits
+
+VoteSetBits is sent to communicate the bit-array of votes a process has seen for a given
+BlockID. It contains the height, round, vote type, BlockID and a bit array of
+the votes the process has.
+
+| Name     | Type                                                              | Description                            | Field Number |
+|----------|-------------------------------------------------------------------|----------------------------------------|--------------|
+| height   | int64                                                             | Height of corresponding block          | 1            |
+| round    | int32                                                             | Round of voting to finalize the block. | 2            |
+| type     | [SignedMessageType](../../core/data_structures.md#signedmsgtype) | Type of the votes                      | 3            |
+| block_id | [BlockID](../../core/data_structures.md#blockid)                 | BlockID the votes are for              | 4            |
+| votes    | BitArray                                                          | Bit array of the votes the process has | 5            |
+
+### Message
+
+Message is a [`oneof` protobuf type](https://developers.google.com/protocol-buffers/docs/proto#oneof).
+
+| Name            | Type                            | Description                         | Field Number |
+|-----------------|---------------------------------|--------------------------------------|--------------|
+| new_round_step  | [NewRoundStep](#newroundstep)   | Sent on every step transition        | 1            |
+| new_valid_block | [NewValidBlock](#newvalidblock) | Sent when a valid block is observed  | 2            |
+| proposal        | [Proposal](#proposal)           |                                      | 3            |
+| proposal_pol    | [ProposalPOL](#proposalpol)     |                                      | 4            |
+| block_part      | [BlockPart](#blockpart)         |                                      | 5            |
+| vote            | [Vote](#vote)                   |                                      | 6            |
+| received_vote   | [ReceivedVote](#receivedvote)   |                                      | 7            |
+| vote_set_maj23  | [VoteSetMaj23](#votesetmaj23)   |                                      | 8            |
+| vote_set_bits   | [VoteSetBits](#votesetbits)     |                                      | 9            |
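+
+As a small illustration of how the part bit-array in `NewValidBlock` is used (a sketch with a stand-in `BitArray`, not the real implementation), a peer can compute which block parts its counterpart is missing and send only those:
+
+```go
+package main
+
+import "fmt"
+
+// BitArray is a stand-in for the real bit-array type.
+type BitArray struct{ Bits []bool }
+
+func (b *BitArray) GetIndex(i int) bool { return i < len(b.Bits) && b.Bits[i] }
+
+// missingParts returns the indices of block parts the peer does not have,
+// given the total number of parts from the proposal's PartSetHeader.
+func missingParts(peerParts *BitArray, total int) []int {
+	var missing []int
+	for i := 0; i < total; i++ {
+		if !peerParts.GetIndex(i) {
+			missing = append(missing, i)
+		}
+	}
+	return missing
+}
+
+func main() {
+	peer := &BitArray{Bits: []bool{true, false, true}}
+	fmt.Println(missingParts(peer, 4)) // [1 3]
+}
+```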
diff --git a/spec/p2p/messages/evidence.md b/spec/p2p/messages/evidence.md
new file mode 100644
index 0000000000..84dfe258a1
--- /dev/null
+++ b/spec/p2p/messages/evidence.md
@@ -0,0 +1,23 @@
+---
+order: 3
+---
+
+# Evidence
+
+## Channel
+
+Evidence has one channel. The channel identifier is listed below.
+
+| Name            | Number |
+|-----------------|--------|
+| EvidenceChannel | 56     |
+
+## Message Types
+
+### Evidence
+
+Verified evidence that has already been propagated throughout the network. This evidence also appears within the EvidenceList struct of a [block](../../core/data_structures.md#block).
+
+| Name     | Type                                                | Description    | Field Number |
+|----------|-----------------------------------------------------|----------------|--------------|
+| evidence | [Evidence](../../core/data_structures.md#evidence) | Valid evidence | 1            |
diff --git a/spec/p2p/messages/mempool.md b/spec/p2p/messages/mempool.md
new file mode 100644
index 0000000000..8f3925cad5
--- /dev/null
+++ b/spec/p2p/messages/mempool.md
@@ -0,0 +1,33 @@
+---
+order: 4
+---
+# Mempool
+
+## Channel
+
+Mempool has one channel. The channel identifier is listed below.
+
+| Name           | Number |
+|----------------|--------|
+| MempoolChannel | 48     |
+
+## Message Types
+
+There is currently only one message that Mempool broadcasts and receives over
+the p2p gossip network (via the reactor): `TxsMessage`.
+
+### Txs
+
+A list of transactions. These transactions have been checked against the application via `CheckTx`, but that check alone does not guarantee that the transactions are valid; final validity is determined by the application.
+
+| Name | Type           | Description          | Field Number |
+|------|----------------|----------------------|--------------|
+| txs  | repeated bytes | List of transactions | 1            |
+
+### Message
+
+Message is a [`oneof` protobuf type](https://developers.google.com/protocol-buffers/docs/proto#oneof). The `oneof` consists of one message, [`Txs`](#txs).
+
+| Name | Type        | Description          | Field Number |
+|------|-------------|----------------------|--------------|
+| txs  | [Txs](#txs) | List of transactions | 1            |
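+
+A minimal sketch of building the `Txs` message (a stand-in struct mirroring the table above; the real type is generated from protobuf):
+
+```go
+package main
+
+import "fmt"
+
+// Txs mirrors the mempool message: field 1 is a list of raw transactions.
+type Txs struct {
+	Txs [][]byte
+}
+
+func main() {
+	// Transactions that passed CheckTx locally are batched for gossip.
+	msg := Txs{Txs: [][]byte{[]byte("k1=v1"), []byte("k2=v2")}}
+	fmt.Printf("gossiping %d transactions\n", len(msg.Txs))
+}
+```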
diff --git a/spec/p2p/messages/pex.md b/spec/p2p/messages/pex.md
new file mode 100644
index 0000000000..e02393d52d
--- /dev/null
+++ b/spec/p2p/messages/pex.md
@@ -0,0 +1,47 @@
+---
+order: 6
+---
+
+# Peer Exchange
+
+## Channels
+
+Pex has one channel. The channel identifier is listed below.
+
+| Name       | Number |
+|------------|--------|
+| PexChannel | 0      |
+
+## Message Types
+
+### PexRequest
+
+PexRequest is an empty message requesting a list of peers.
+
+> Empty message.
+
+### PexResponse
+
+PexResponse is a list of net addresses provided to a peer to dial.
+
+| Name      | Type                               | Description                               | Field Number |
+|-----------|------------------------------------|-------------------------------------------|--------------|
+| addresses | repeated [PexAddress](#pexaddress) | List of peer addresses available to dial  | 1            |
+
+### PexAddress
+
+PexAddress provides the information a node needs to dial a peer. It takes the form of a `URL` that gets parsed
+into a `NodeAddress`. See [ParseNodeAddress](https://github.com/tendermint/tendermint/blob/f2a8f5e054cf99ebe246818bb6d71f41f9a30faa/internal/p2p/address.go#L43) for more details.
+
+| Name | Type   | Description                                           | Field Number |
+|------|--------|--------------------------------------------------------|--------------|
+| url  | string | See [golang url](https://golang.org/pkg/net/url/#URL) | 1            |
+
+### Message
+
+Message is a [`oneof` protobuf type](https://developers.google.com/protocol-buffers/docs/proto#oneof). The `oneof` consists of two messages.
+
+| Name         | Type                        | Description                                           | Field Number |
+|--------------|-----------------------------|--------------------------------------------------------|--------------|
+| pex_request  | [PexRequest](#pexrequest)   | Empty request asking for a list of addresses to dial  | 3            |
+| pex_response | [PexResponse](#pexresponse) | List of addresses to dial                              | 4            |
diff --git a/spec/p2p/messages/state-sync.md b/spec/p2p/messages/state-sync.md
new file mode 100644
index 0000000000..2aa5618bc2
--- /dev/null
+++ b/spec/p2p/messages/state-sync.md
@@ -0,0 +1,134 @@
+---
+order: 5
+---
+
+# State Sync
+
+## Channels
+
+State sync has four distinct channels. The channel identifiers are listed below.
+
+| Name              | Number |
+|-------------------|--------|
+| SnapshotChannel   | 96     |
+| ChunkChannel      | 97     |
+| LightBlockChannel | 98     |
+| ParamsChannel     | 99     |
+
+## Message Types
+
+### SnapshotRequest
+
+When a new node begins state syncing, it asks each peer it encounters whether it has any
+snapshots available:
+
+> Empty message.
+
+### SnapshotResponse
+
+The receiver will query the local ABCI application via `ListSnapshots`, and send a message
+containing snapshot metadata (limited to 4 MB) for each of the 10 most recent snapshots stored
+at the application layer:
+
+| Name     | Type   | Description                                                | Field Number |
+|----------|--------|-------------------------------------------------------------|--------------|
+| height   | uint64 | Height at which the snapshot was taken                      | 1            |
+| format   | uint32 | Format of the snapshot.                                     | 2            |
+| chunks   | uint32 | How many chunks make up the snapshot                        | 3            |
+| hash     | bytes  | Arbitrary snapshot hash                                     | 4            |
+| metadata | bytes  | Arbitrary application data. **May be non-deterministic.**   | 5            |
+
+### ChunkRequest
+
+The node running state sync will offer these snapshots to the local ABCI application via
+`OfferSnapshot` ABCI calls, and keep track of which peers contain which snapshots. Once a snapshot
+is accepted, the state syncer will request snapshot chunks from appropriate peers:
+
+| Name   | Type   | Description                                                  | Field Number |
+|--------|--------|---------------------------------------------------------------|--------------|
+| height | uint64 | Height at which the chunk was created                         | 1            |
+| format | uint32 | Format chosen for the chunk. **May be non-deterministic.**    | 2            |
+| index  | uint32 | Index of the chunk within the snapshot.                       | 3            |
+
+### ChunkResponse
+
+The receiver will load the requested chunk from its local application via `LoadSnapshotChunk`,
+and respond with it (limited to 16 MB):
+
+| Name    | Type   | Description                                                  | Field Number |
+|---------|--------|---------------------------------------------------------------|--------------|
+| height  | uint64 | Height at which the chunk was created                         | 1            |
+| format  | uint32 | Format chosen for the chunk. **May be non-deterministic.**    | 2            |
+| index   | uint32 | Index of the chunk within the snapshot.                       | 3            |
+| chunk   | bytes  | The serialized chunk data                                     | 4            |
+| missing | bool   | True if the peer does not have the chunk                      | 5            |
+
+Here, `Missing` is used to signify that the chunk was not found on the peer, since an empty
+chunk is a valid (although unlikely) response.
+
+The returned chunk is given to the ABCI application via `ApplySnapshotChunk` until the snapshot
+is restored. If a chunk response is not returned within some time, it will be re-requested,
+possibly from a different peer.
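+
+The retry behaviour can be pictured with a minimal sketch (hypothetical stand-ins for the wire types and the peer handle, not the real state sync reactor):
+
+```go
+package statesync
+
+import (
+	"errors"
+	"time"
+)
+
+type ChunkRequest struct {
+	Height        uint64
+	Format, Index uint32
+}
+
+type ChunkResponse struct {
+	Missing bool
+	Chunk   []byte
+}
+
+type Peer interface {
+	Send(ChunkRequest)
+	Chunks() <-chan ChunkResponse
+}
+
+// fetchChunk asks peers for a chunk in turn, moving on whenever a peer
+// reports the chunk missing or fails to answer within the timeout.
+func fetchChunk(peers []Peer, req ChunkRequest, timeout time.Duration) (*ChunkResponse, error) {
+	for _, p := range peers {
+		p.Send(req)
+		select {
+		case resp := <-p.Chunks():
+			if resp.Missing {
+				continue // peer does not have the chunk; try the next one
+			}
+			return &resp, nil
+		case <-time.After(timeout):
+			continue // no answer in time; re-request from a different peer
+		}
+	}
+	return nil, errors.New("chunk unavailable from all known peers")
+}
+```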
+
+The ABCI application is able to request peer bans and chunk refetching as part of the ABCI protocol.
+
+### LightBlockRequest
+
+To verify the state and to provide state-relevant information for consensus, the node will ask peers for
+light blocks at specified heights.
+
+| Name   | Type   | Description               | Field Number |
+|--------|--------|----------------------------|--------------|
+| height | uint64 | Height of the light block | 1            |
+
+### LightBlockResponse
+
+The receiver will retrieve and construct the light block from both the block and state stores. The
+requesting node will verify the data by comparing the hashes and store the header, commit and validator set
+if necessary. The light block at the height of the snapshot will be used to verify the `AppHash`.
+
+| Name        | Type                                                    | Description                          | Field Number |
+|-------------|---------------------------------------------------------|---------------------------------------|--------------|
+| light_block | [LightBlock](../../core/data_structures.md#lightblock) | Light block at the height requested  | 1            |
+
+State sync will use [light client
+verification](https://github.com/tendermint/tendermint/blob/master/docs/tendermint-core/light-client.md)
+to verify the light blocks.
+
+If no state sync is in progress (i.e. during normal operation), any unsolicited response messages
+are discarded.
+
+### ParamsRequest
+
+In order to build the Tendermint state, the state provider will request the consensus params at the height of the snapshot and use the header to verify them.
+
+| Name   | Type   | Description                     | Field Number |
+|--------|--------|----------------------------------|--------------|
+| height | uint64 | Height of the consensus params  | 1            |
+
+### ParamsResponse
+
+The receiver of the request will use the state store to fetch the consensus params at that height and return them to the sender.
+
+| Name             | Type                                                              | Description                               | Field Number |
+|------------------|-------------------------------------------------------------------|--------------------------------------------|--------------|
+| height           | uint64                                                            | Height of the consensus params            | 1            |
+| consensus_params | [ConsensusParams](../../core/data_structures.md#ConsensusParams) | Consensus params at the height requested  | 2            |
+
+### Message
+
+Message is a [`oneof` protobuf type](https://developers.google.com/protocol-buffers/docs/proto#oneof). The `oneof` consists of eight messages.
+
+| Name                 | Type                                      | Description                                   | Field Number |
+|----------------------|-------------------------------------------|------------------------------------------------|--------------|
+| snapshots_request    | [SnapshotRequest](#snapshotrequest)       | Request a recent snapshot from a peer         | 1            |
+| snapshots_response   | [SnapshotResponse](#snapshotresponse)     | Respond with the most recent snapshot stored  | 2            |
+| chunk_request        | [ChunkRequest](#chunkrequest)             | Request chunks of the snapshot.               | 3            |
+| chunk_response       | [ChunkResponse](#chunkresponse)           | Response of chunks used to recreate state.    | 4            |
+| light_block_request  | [LightBlockRequest](#lightblockrequest)   | Request a light block.                        | 5            |
+| light_block_response | [LightBlockResponse](#lightblockresponse) | Respond with a light block                    | 6            |
+| params_request       | [ParamsRequest](#paramsrequest)           | Request the consensus params at a height.     | 7            |
+| params_response      | [ParamsResponse](#paramsresponse)         | Respond with the consensus params             | 8            |
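+
+The final `AppHash` check can be sketched as follows (stand-in types; this glosses over which header carries the hash for a given snapshot height):
+
+```go
+package statesync
+
+import (
+	"bytes"
+	"fmt"
+)
+
+type Header struct{ AppHash []byte }
+type LightBlock struct{ Header Header }
+
+// verifyRestoredAppHash performs the check described above: the app hash
+// reported by the application after restoring the snapshot must match the
+// AppHash in the verified light block at the snapshot height.
+func verifyRestoredAppHash(restored []byte, lb LightBlock) error {
+	if !bytes.Equal(restored, lb.Header.AppHash) {
+		return fmt.Errorf("app hash mismatch: got %X, light block has %X",
+			restored, lb.Header.AppHash)
+	}
+	return nil
+}
+```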
diff --git a/spec/p2p/node.md b/spec/p2p/node.md
new file mode 100644
index 0000000000..e056c14af4
--- /dev/null
+++ b/spec/p2p/node.md
@@ -0,0 +1,65 @@
+# Peer Discovery
+
+A Tendermint P2P network has different kinds of nodes with different requirements for connectivity to one another.
+This document describes what kinds of nodes Tendermint should enable and how they should work.
+
+## Seeds
+
+Seeds are the first point of contact for a new node.
+They return a list of known active peers and then disconnect.
+
+Seeds should operate full nodes with the PEX reactor in a "crawler" mode
+that continuously explores to validate the availability of peers.
+
+Seeds should only respond with some top percentile of the best peers they know about.
+
+## New Full Node
+
+A new node needs a few things to connect to the network:
+
+- a list of seeds, which can be provided to Tendermint via config file or flags,
+  or hardcoded into the software by in-process apps
+- a `ChainID`, also called `Network` at the p2p layer
+- a recent block height `H` and hash `HASH` for the blockchain
+
+The values `H` and `HASH` must be received and corroborated by means external to Tendermint, and specific to the user - i.e., via the user's trusted social consensus.
+This requirement to validate `H` and `HASH` out-of-band and via social consensus
+is the essential difference in security models between Proof-of-Work and Proof-of-Stake blockchains.
+
+With the above, the node then queries some seeds for peers for its chain,
+dials those peers, and runs the Tendermint protocols with those it successfully connects to.
+
+When the node catches up to height `H`, it ensures the block hash matches `HASH`.
+If not, Tendermint will exit, and the user must try again - either they are connected
+to bad peers or their social consensus is invalid.
+
+## Restarted Full Node
+
+A node checks its address book on startup and attempts to connect to peers from there.
+If it can't connect to any peers after some time, it falls back to the seeds to find more.
+
+Restarted full nodes can run the `blockchain` or `consensus` reactor protocols to sync up
+to the latest state of the blockchain from wherever they were last.
+In a Proof-of-Stake context, if they are sufficiently far behind (greater than the length
+of the unbonding period), they will need to validate a recent `H` and `HASH` out-of-band again
+so they know they have synced the correct chain.
+
+## Validator Node
+
+A validator node is a node that interfaces with a validator signing key.
+These nodes require the highest security, and should not accept incoming connections.
+They should maintain outgoing connections to a controlled set of "Sentry Nodes" that serve
+as their proxy shield to the rest of the network.
+
+Validators that know and trust each other can accept incoming connections from one another and maintain direct private connectivity via VPN.
+
+## Sentry Node
+
+Sentry nodes are guardians of a validator node and provide it access to the rest of the network.
+They should be well connected to other full nodes on the network.
+Sentry nodes may be dynamic, but should maintain persistent connections to some evolving random subset of each other.
+They should always expect to have direct incoming connections from the validator node and its backup(s).
+They do not report the validator node's address in the PEX and
+they may be more strict about the quality of peers they keep.
+
+Sentry nodes belonging to validators that trust each other may wish to maintain persistent connections via VPN with one another, but only report each other sparingly in the PEX.
diff --git a/spec/p2p/peer.md b/spec/p2p/peer.md
new file mode 100644
index 0000000000..292cbebfa5
--- /dev/null
+++ b/spec/p2p/peer.md
@@ -0,0 +1,130 @@
+# Peers
+
+This document explains how Tendermint Peers are identified and how they connect to one another.
+
+For details on peer discovery, see the [peer exchange (PEX) doc](https://github.com/tendermint/tendermint/blob/master/docs/tendermint-core/pex/README.md).
+
+## Peer Identity
+
+Tendermint peers are expected to maintain long-term persistent identities in the form of a public key.
+Each peer has an ID defined as `peer.ID == peer.PubKey.Address()`, where `Address` uses the scheme defined in the `crypto` package.
+
+A single peer ID can have multiple IP addresses associated with it, but a node
+will only ever connect to one at a time.
+
+When attempting to connect to a peer, we use the PeerURL: `<ID>@<IP>:<PORT>`.
+We will attempt to connect to the peer at `<IP>:<PORT>`, and verify,
+via authenticated encryption, that it is in possession of the private key
+corresponding to `<ID>`. This prevents man-in-the-middle attacks on the peer layer.
+
+## Connections
+
+All p2p connections use TCP.
+Upon establishing a successful TCP connection with a peer,
+two handshakes are performed: one for authenticated encryption, and one for Tendermint versioning.
+Both handshakes have configurable timeouts (they should complete quickly).
+
+### Authenticated Encryption Handshake
+
+Tendermint implements the Station-to-Station protocol
+using X25519 keys for Diffie-Hellman key-exchange and chacha20poly1305 for encryption.
+
+Previous versions of this protocol (0.32 and below) suffered from malleability attacks, whereby an active man-in-the-middle
+attacker could compromise confidentiality as described in [Prime, Order Please!
+Revisiting Small Subgroup and Invalid Curve Attacks on
+Protocols using Diffie-Hellman](https://eprint.iacr.org/2019/526.pdf).
+
+We have added a dependency on Merlin, a keccak-based transcript hashing protocol, to ensure non-malleability.
+
+It goes as follows:
+
+- generate an ephemeral X25519 keypair
+- send the ephemeral public key to the peer
+- wait to receive the peer's ephemeral public key
+- create a new Merlin Transcript with the string "TENDERMINT_SECRET_CONNECTION_TRANSCRIPT_HASH"
+- sort the two ephemeral public keys and add them to the Merlin transcript: the higher one labeled "EPHEMERAL_UPPER_PUBLIC_KEY" and the lower one labeled "EPHEMERAL_LOWER_PUBLIC_KEY"
+- compute the Diffie-Hellman shared secret using the peer's ephemeral public key and our ephemeral private key
+- add the DH secret to the transcript labeled "DH_SECRET"
+- generate two keys to use for encryption (sending and receiving) and a challenge for authentication as follows:
+    - create an HKDF-SHA256 instance keyed with the Diffie-Hellman shared secret, with the info parameter set to
+      `TENDERMINT_SECRET_CONNECTION_KEY_AND_CHALLENGE_GEN`
+    - get 64 bytes of output from HKDF-SHA256
+    - if we had the smaller ephemeral pubkey, use the first 32 bytes for the key for receiving, the second 32 bytes for sending; else the opposite
+- use a separate nonce for receiving and sending. Both nonces start at 0, and should support the full 96-bit nonce range
+- all communications from now on are encrypted in 1400 byte frames (plus encoding overhead),
+  using the respective secret and nonce. Each nonce is incremented by one after each use.
+- we now have an encrypted channel, but still need to authenticate
+- extract a 32-byte challenge from the Merlin transcript with the label "SECRET_CONNECTION_MAC"
+- sign the challenge with our persistent private key
+- send the amino-encoded persistent pubkey and signature to the peer
+- wait to receive the persistent public key and signature from the peer
+- verify the signature on the challenge using the peer's persistent public key
+
+If this is an outgoing connection (we dialed the peer) and we used a peer ID,
+then finally verify that the peer's persistent public key corresponds to the peer ID we dialed,
+i.e. `peer.PubKey.Address() == <ID>`.
+
+The connection has now been authenticated. All traffic is encrypted.
+
+Note: only the dialer can authenticate the identity of the peer,
+but this is what we care about since when we join the network we wish to
+ensure we have reached the intended peer (and are not being MITMd).
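+
+The key-split step above can be sketched with `golang.org/x/crypto/hkdf`; the Merlin transcript and challenge steps are omitted here:
+
+```go
+package main
+
+import (
+	"crypto/sha256"
+	"fmt"
+	"io"
+
+	"golang.org/x/crypto/hkdf"
+)
+
+// deriveKeys expands the DH shared secret into the two symmetric keys.
+// Which half becomes the receive key depends on whose ephemeral public
+// key sorted lower during the handshake.
+func deriveKeys(dhSecret []byte, hadSmallerPubKey bool) (recvKey, sendKey [32]byte) {
+	r := hkdf.New(sha256.New, dhSecret, nil,
+		[]byte("TENDERMINT_SECRET_CONNECTION_KEY_AND_CHALLENGE_GEN"))
+	var out [64]byte
+	if _, err := io.ReadFull(r, out[:]); err != nil {
+		panic(err)
+	}
+	if hadSmallerPubKey { // first 32 bytes for receiving, second 32 for sending
+		copy(recvKey[:], out[:32])
+		copy(sendKey[:], out[32:])
+	} else { // the opposite
+		copy(recvKey[:], out[32:])
+		copy(sendKey[:], out[:32])
+	}
+	return recvKey, sendKey
+}
+
+func main() {
+	recv, send := deriveKeys([]byte("example shared secret"), true)
+	fmt.Printf("recv %x...\nsend %x...\n", recv[:4], send[:4])
+}
+```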
+
+### Peer Filter
+
+Before continuing, we check if the new peer has the same ID as ourselves or
+an existing peer. If so, we disconnect.
+
+We also check the peer's address and public key against
+an optional whitelist which can be managed through the ABCI app;
+if the whitelist is enabled and the peer does not qualify, the connection is
+terminated.
+
+### Tendermint Version Handshake
+
+The Tendermint Version Handshake allows the peers to exchange their NodeInfo:
+
+```go
+type NodeInfo struct {
+  Version p2p.Version
+  ID         p2p.ID
+  ListenAddr string
+
+  Network string
+  SoftwareVersion string
+  Channels []int8
+
+  Moniker string
+  Other   NodeInfoOther
+}
+
+type Version struct {
+  P2P   uint64
+  Block uint64
+  App   uint64
+}
+
+type NodeInfoOther struct {
+  TxIndex    string
+  RPCAddress string
+}
+```
+
+The connection is disconnected if:
+
+- `peer.NodeInfo.ID` is not equal to `peerConn.ID`
+- `peer.NodeInfo.Version.Block` does not match ours
+- `peer.NodeInfo.Network` is not the same as ours
+- `peer.Channels` does not intersect with our known Channels
+- `peer.NodeInfo.ListenAddr` is malformed or is a DNS host that cannot be
+  resolved
+
+At this point, if we have not disconnected, the peer is valid.
+It is added to the switch and hence all reactors via the `AddPeer` method.
+Note that each reactor may handle multiple channels.
+
+## Connection Activity
+
+Once a peer is added, incoming messages for a given reactor are handled through
+that reactor's `Receive` method, and output messages are sent directly by the Reactors
+on each peer. A typical reactor maintains per-peer go-routine(s) that handle this.
diff --git a/spec/p2p/readme.md b/spec/p2p/readme.md
new file mode 100644
index 0000000000..96867aad05
--- /dev/null
+++ b/spec/p2p/readme.md
@@ -0,0 +1,6 @@
+---
+order: 1
+parent:
+  title: P2P
+  order: 6
+---
diff --git a/spec/rpc/README.md b/spec/rpc/README.md
new file mode 100644
index 0000000000..68c200f0de
--- /dev/null
+++ b/spec/rpc/README.md
@@ -0,0 +1,1382 @@
+---
+order: 1
+parent:
+  title: RPC
+  order: 6
+---
+
+# RPC spec
+
+This file defines Tendermint's JSON-RPC spec, which all clients are expected to implement.
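+
+For orientation, a minimal client call looks like this (a sketch in Go posting a JSON-RPC 2.0 request to a node's default RPC address; the `health` method is documented below):
+
+```go
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"net/http"
+)
+
+func main() {
+	// JSON-RPC 2.0 envelope for the `health` method.
+	body := []byte(`{"jsonrpc":"2.0","id":1,"method":"health"}`)
+	resp, err := http.Post("http://127.0.0.1:26657", "application/json", bytes.NewReader(body))
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+	out, _ := io.ReadAll(resp.Body)
+	fmt.Println(string(out)) // e.g. {"jsonrpc":"2.0","id":1,"result":{}}
+}
+```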
+
+## Support
+
+|              | [Tendermint-Go](https://github.com/tendermint/tendermint/) | [Tendermint-Rs](https://github.com/informalsystems/tendermint-rs) |
+|--------------|:-----------------------------------------------------------:|:-----------------------------------------------------------------:|
+| JSON-RPC 2.0 | ✅ | ✅ |
+| HTTP         | ✅ | ✅ |
+| HTTPS        | ✅ | ❌ |
+| WS           | ✅ | ✅ |
+
+| Routes                                  | [Tendermint-Go](https://github.com/tendermint/tendermint/) | [Tendermint-Rs](https://github.com/informalsystems/tendermint-rs) |
+|-----------------------------------------|:----------------------------------------------------------:|:-----------------------------------------------------------------:|
+| [Health](#health)                       | ✅ | ✅ |
+| [Status](#status)                       | ✅ | ✅ |
+| [NetInfo](#netinfo)                     | ✅ | ✅ |
+| [Blockchain](#blockchain)               | ✅ | ✅ |
+| [Header](#header)                       | ✅ | ❌ |
+| [HeaderByHash](#headerbyhash)           | ✅ | ❌ |
+| [Block](#block)                         | ✅ | ✅ |
+| [BlockByHash](#blockbyhash)             | ✅ | ❌ |
+| [BlockResults](#blockresults)           | ✅ | ✅ |
+| [Commit](#commit)                       | ✅ | ✅ |
+| [Validators](#validators)               | ✅ | ✅ |
+| [Genesis](#genesis)                     | ✅ | ✅ |
+| [GenesisChunked](#genesischunked)       | ✅ | ❌ |
+| [ConsensusParams](#consensusparams)     | ✅ | ❌ |
+| [UnconfirmedTxs](#unconfirmedtxs)       | ✅ | ❌ |
+| [NumUnconfirmedTxs](#numunconfirmedtxs) | ✅ | ❌ |
+| [Tx](#tx)                               | ✅ | ❌ |
+| [BroadCastTxSync](#broadcasttxsync)     | ✅ | ✅ |
+| [BroadCastTxAsync](#broadcasttxasync)   | ✅ | ✅ |
+| [ABCIInfo](#abciinfo)                   | ✅ | ✅ |
+| [ABCIQuery](#abciquery)                 | ✅ | ✅ |
+| [BroadcastEvidence](#broadcastevidence) | ✅ | ✅ |
+
+## Timestamps
+
+Timestamps in the RPC layer of Tendermint follow RFC3339Nano. The RFC3339Nano format removes trailing zeros from the seconds field.
+
+This means if a block has a timestamp like `1985-04-12T23:20:50.5200000Z`, the value returned in the RPC will be `1985-04-12T23:20:50.52Z`.
+
+## Info Routes
+
+### Health
+
+Node heartbeat
+
+#### Parameters
+
+None
+
+#### Request
+
+##### HTTP
+
+```sh
+curl http://127.0.0.1:26657/health
+```
+
+##### JSONRPC
+
+```sh
+curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"health\"}"
+```
+
+#### Response
+
+```json
+{
+  "jsonrpc": "2.0",
+  "id": -1,
+  "result": {}
+}
+```
+
+### Status
+
+Get Tendermint status including node info, pubkey, latest block hash, app hash, block height and time.
+ +#### Parameters + +None + +#### Request + +##### HTTP + +```sh +curl http://127.0.0.1:26657/status +``` + +##### JSONRPC + +```sh +curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"status\"}" +``` + +#### Response + +```json +{ + "jsonrpc": "2.0", + "id": -1, + "result": { + "node_info": { + "protocol_version": { + "p2p": "8", + "block": "11", + "app": "0" + }, + "id": "b93270b358a72a2db30089f3856475bb1f918d6d", + "listen_addr": "tcp://0.0.0.0:26656", + "network": "cosmoshub-4", + "version": "v0.34.8", + "channels": "40202122233038606100", + "moniker": "aib-hub-node", + "other": { + "tx_index": "on", + "rpc_address": "tcp://0.0.0.0:26657" + } + }, + "sync_info": { + "latest_block_hash": "50F03C0EAACA8BCA7F9C14189ACE9C05A9A1BBB5268DB63DC6A3C848D1ECFD27", + "latest_app_hash": "2316CFF7644219F4F15BEE456435F280E2B38955EEA6D4617CCB6D7ABF781C22", + "latest_block_height": "5622165", + "latest_block_time": "2021-03-25T14:00:43.356134226Z", + "earliest_block_hash": "1455A0C15AC49BB506992EC85A3CD4D32367E53A087689815E01A524231C3ADF", + "earliest_app_hash": "E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855", + "earliest_block_height": "5200791", + "earliest_block_time": "2019-12-11T16:11:34Z", + "catching_up": false + }, + "validator_info": { + "address": "38FB765D0092470989360ECA1C89CD06C2C1583C", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "Z+8kntVegi1sQiWLYwFSVLNWqdAUGEy7lskL78gxLZI=" + }, + "voting_power": "0" + } + } +} +``` + +### NetInfo + +Network information + +#### Parameters + +None + +#### Request + +##### HTTP + +```sh +curl http://127.0.0.1:26657/net_info +``` + +##### JSONRPC + +```sh +curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"net_info\"}" +``` + +#### Response + +```json +{ + "id": 0, + "jsonrpc": "2.0", + "result": { + "listening": true, + "listeners": [ + "Listener(@)" + ], + "n_peers": "1", + "peers": [ + { + "node_id": "5576458aef205977e18fd50b274e9b5d9014525a", + "url": "tcp://5576458aef205977e18fd50b274e9b5d9014525a@95.179.155.35:26656" + } + ] + } +} +``` + +### Blockchain + +Get block headers. Returned in descending order. May be limited in quantity. 
+ +#### Parameters + +- `minHeight (integer)`: The lowest block to be returned in the response +- `maxHeight (integer)`: The highest block to be returned in the response + +#### Request + +##### HTTP + +```sh +curl http://127.0.0.1:26657/blockchain + +curl http://127.0.0.1:26657/blockchain?minHeight=1&maxHeight=2 +``` + +##### JSONRPC + +```sh +curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"blockchain\",\"params\":{\"minHeight\":\"1\", \"maxHeight\":\"2\"}}" +``` + +#### Response + +```json +{ + "id": 0, + "jsonrpc": "2.0", + "result": { + "last_height": "1276718", + "block_metas": [ + { + "block_id": { + "hash": "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7", + "parts": { + "total": 1, + "hash": "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD" + } + }, + "block_size": 1000000, + "header": { + "version": { + "block": "10", + "app": "0" + }, + "chain_id": "cosmoshub-2", + "height": "12", + "time": "2019-04-22T17:01:51.701356223Z", + "last_block_id": { + "hash": "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7", + "parts": { + "total": 1, + "hash": "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD" + } + }, + "last_commit_hash": "21B9BC845AD2CB2C4193CDD17BFC506F1EBE5A7402E84AD96E64171287A34812", + "data_hash": "970886F99E77ED0D60DA8FCE0447C2676E59F2F77302B0C4AA10E1D02F18EF73", + "validators_hash": "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0", + "next_validators_hash": "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0", + "consensus_hash": "0F2908883A105C793B74495EB7D6DF2EEA479ED7FC9349206A65CB0F9987A0B8", + "app_hash": "223BF64D4A01074DC523A80E76B9BBC786C791FB0A1893AC5B14866356FCFD6C", + "last_results_hash": "", + "evidence_hash": "", + "proposer_address": "D540AB022088612AC74B287D076DBFBC4A377A2E" + }, + "num_txs": "54" + } + ] + } +} +``` + +### Header + +Get a header at a specified height. + +#### Parameters + +- `height (integer)`: height of the requested header. If no height is specified the latest height will be used. 
+ +#### Request + +##### HTTP + +```sh +curl http://127.0.0.1:26657/header + +curl http://127.0.0.1:26657/header?height=1 +``` + +##### JSONRPC + +```sh +curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"header\",\"params\":{\"height\":\"1\"}}" +``` + +#### Response + +```json +{ + "id": 0, + "jsonrpc": "2.0", + "result": { + "header": { + "version": { + "block": "10", + "app": "0" + }, + "chain_id": "cosmoshub-2", + "height": "12", + "time": "2019-04-22T17:01:51.701356223Z", + "last_block_id": { + "hash": "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7", + "parts": { + "total": 1, + "hash": "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD" + } + }, + "last_commit_hash": "21B9BC845AD2CB2C4193CDD17BFC506F1EBE5A7402E84AD96E64171287A34812", + "data_hash": "970886F99E77ED0D60DA8FCE0447C2676E59F2F77302B0C4AA10E1D02F18EF73", + "validators_hash": "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0", + "next_validators_hash": "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0", + "consensus_hash": "0F2908883A105C793B74495EB7D6DF2EEA479ED7FC9349206A65CB0F9987A0B8", + "app_hash": "223BF64D4A01074DC523A80E76B9BBC786C791FB0A1893AC5B14866356FCFD6C", + "last_results_hash": "", + "evidence_hash": "", + "proposer_address": "D540AB022088612AC74B287D076DBFBC4A377A2E" + } + } +} +``` + +### HeaderByHash + +#### Parameters + +- `hash (string)`: Hash of the header to query for. + +#### Request + +##### HTTP + +```sh +curl http://127.0.0.1:26657/header_by_hash?hash=0xD70952032620CC4E2737EB8AC379806359D8E0B17B0488F627997A0B043ABDED +``` + +##### JSONRPC + +```sh +curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"header_by_hash\",\"params\":{\"hash\":\"0xD70952032620CC4E2737EB8AC379806359D8E0B17B0488F627997A0B043ABDED\"}}" +``` + +#### Response + +```json +{ + "id": 0, + "jsonrpc": "2.0", + "result": { + "header": { + "version": { + "block": "10", + "app": "0" + }, + "chain_id": "cosmoshub-2", + "height": "12", + "time": "2019-04-22T17:01:51.701356223Z", + "last_block_id": { + "hash": "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7", + "parts": { + "total": 1, + "hash": "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD" + } + }, + "last_commit_hash": "21B9BC845AD2CB2C4193CDD17BFC506F1EBE5A7402E84AD96E64171287A34812", + "data_hash": "970886F99E77ED0D60DA8FCE0447C2676E59F2F77302B0C4AA10E1D02F18EF73", + "validators_hash": "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0", + "next_validators_hash": "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0", + "consensus_hash": "0F2908883A105C793B74495EB7D6DF2EEA479ED7FC9349206A65CB0F9987A0B8", + "app_hash": "223BF64D4A01074DC523A80E76B9BBC786C791FB0A1893AC5B14866356FCFD6C", + "last_results_hash": "", + "evidence_hash": "", + "proposer_address": "D540AB022088612AC74B287D076DBFBC4A377A2E" + } + } + } +} +``` + +### Block + +Get block at a specified height. + +#### Parameters + +- `height (integer)`: height of the requested block. If no height is specified the latest height will be used. 
+ +#### Request + +##### HTTP + +```sh +curl http://127.0.0.1:26657/block + +curl http://127.0.0.1:26657/block?height=1 +``` + +##### JSONRPC + +```sh +curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"block\",\"params\":{\"height\":\"1\"}}" +``` + +#### Response + +```json +{ + "id": 0, + "jsonrpc": "2.0", + "result": { + "block_id": { + "hash": "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7", + "parts": { + "total": 1, + "hash": "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD" + } + }, + "block": { + "header": { + "version": { + "block": "10", + "app": "0" + }, + "chain_id": "cosmoshub-2", + "height": "12", + "time": "2019-04-22T17:01:51.701356223Z", + "last_block_id": { + "hash": "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7", + "parts": { + "total": 1, + "hash": "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD" + } + }, + "last_commit_hash": "21B9BC845AD2CB2C4193CDD17BFC506F1EBE5A7402E84AD96E64171287A34812", + "data_hash": "970886F99E77ED0D60DA8FCE0447C2676E59F2F77302B0C4AA10E1D02F18EF73", + "validators_hash": "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0", + "next_validators_hash": "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0", + "consensus_hash": "0F2908883A105C793B74495EB7D6DF2EEA479ED7FC9349206A65CB0F9987A0B8", + "app_hash": "223BF64D4A01074DC523A80E76B9BBC786C791FB0A1893AC5B14866356FCFD6C", + "last_results_hash": "", + "evidence_hash": "", + "proposer_address": "D540AB022088612AC74B287D076DBFBC4A377A2E" + }, + "data": [ + "yQHwYl3uCkKoo2GaChRnd+THLQ2RM87nEZrE19910Z28ABIUWW/t8AtIMwcyU0sT32RcMDI9GF0aEAoFdWF0b20SBzEwMDAwMDASEwoNCgV1YXRvbRIEMzEwMRCd8gEaagom61rphyEDoJPxlcjRoNDtZ9xMdvs+lRzFaHe2dl2P5R2yVCWrsHISQKkqX5H1zXAIJuC57yw0Yb03Fwy75VRip0ZBtLiYsUqkOsPUoQZAhDNP+6LY+RUwz/nVzedkF0S29NZ32QXdGv0=" + ], + "evidence": [ + { + "type": "string", + "height": 0, + "time": 0, + "total_voting_power": 0, + "validator": { + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "A6DoBUypNtUAyEHWtQ9bFjfNg8Bo9CrnkUGl6k6OHN4=" + }, + "voting_power": 0, + "address": "string" + } + } + ], + "last_commit": { + "height": 0, + "round": 0, + "block_id": { + "hash": "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7", + "parts": { + "total": 1, + "hash": "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD" + } + }, + "signatures": [ + { + "type": 2, + "height": "1262085", + "round": 0, + "block_id": { + "hash": "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7", + "parts": { + "total": 1, + "hash": "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD" + } + }, + "timestamp": "2019-08-01T11:39:38.867269833Z", + "validator_address": "000001E443FD237E4B616E2FA69DF4EE3D49A94F", + "validator_index": 0, + "signature": "DBchvucTzAUEJnGYpNvMdqLhBAHG4Px8BsOBB3J3mAFCLGeuG7uJqy+nVngKzZdPhPi8RhmE/xcw/M9DOJjEDg==" + } + ] + } + } + } +} +``` + +### BlockByHash + +#### Parameters + +- `hash (string)`: Hash of the block to query for. 
+ +#### Request + +##### HTTP + +```sh +curl http://127.0.0.1:26657/block_by_hash?hash=0xD70952032620CC4E2737EB8AC379806359D8E0B17B0488F627997A0B043ABDED +``` + +##### JSONRPC + +```sh +curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"block_by_hash\",\"params\":{\"hash\":\"0xD70952032620CC4E2737EB8AC379806359D8E0B17B0488F627997A0B043ABDED\"}}" +``` + +#### Response + +```json +{ + "id": 0, + "jsonrpc": "2.0", + "result": { + "block_id": { + "hash": "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7", + "parts": { + "total": 1, + "hash": "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD" + } + }, + "block": { + "header": { + "version": { + "block": "10", + "app": "0" + }, + "chain_id": "cosmoshub-2", + "height": "12", + "time": "2019-04-22T17:01:51.701356223Z", + "last_block_id": { + "hash": "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7", + "parts": { + "total": 1, + "hash": "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD" + } + }, + "last_commit_hash": "21B9BC845AD2CB2C4193CDD17BFC506F1EBE5A7402E84AD96E64171287A34812", + "data_hash": "970886F99E77ED0D60DA8FCE0447C2676E59F2F77302B0C4AA10E1D02F18EF73", + "validators_hash": "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0", + "next_validators_hash": "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0", + "consensus_hash": "0F2908883A105C793B74495EB7D6DF2EEA479ED7FC9349206A65CB0F9987A0B8", + "app_hash": "223BF64D4A01074DC523A80E76B9BBC786C791FB0A1893AC5B14866356FCFD6C", + "last_results_hash": "", + "evidence_hash": "", + "proposer_address": "D540AB022088612AC74B287D076DBFBC4A377A2E" + }, + "data": [ + "yQHwYl3uCkKoo2GaChRnd+THLQ2RM87nEZrE19910Z28ABIUWW/t8AtIMwcyU0sT32RcMDI9GF0aEAoFdWF0b20SBzEwMDAwMDASEwoNCgV1YXRvbRIEMzEwMRCd8gEaagom61rphyEDoJPxlcjRoNDtZ9xMdvs+lRzFaHe2dl2P5R2yVCWrsHISQKkqX5H1zXAIJuC57yw0Yb03Fwy75VRip0ZBtLiYsUqkOsPUoQZAhDNP+6LY+RUwz/nVzedkF0S29NZ32QXdGv0=" + ], + "evidence": [ + { + "type": "string", + "height": 0, + "time": 0, + "total_voting_power": 0, + "validator": { + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "A6DoBUypNtUAyEHWtQ9bFjfNg8Bo9CrnkUGl6k6OHN4=" + }, + "voting_power": 0, + "address": "string" + } + } + ], + "last_commit": { + "height": 0, + "round": 0, + "block_id": { + "hash": "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7", + "parts": { + "total": 1, + "hash": "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD" + } + }, + "signatures": [ + { + "type": 2, + "height": "1262085", + "round": 0, + "block_id": { + "hash": "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7", + "parts": { + "total": 1, + "hash": "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD" + } + }, + "timestamp": "2019-08-01T11:39:38.867269833Z", + "validator_address": "000001E443FD237E4B616E2FA69DF4EE3D49A94F", + "validator_index": 0, + "signature": "DBchvucTzAUEJnGYpNvMdqLhBAHG4Px8BsOBB3J3mAFCLGeuG7uJqy+nVngKzZdPhPi8RhmE/xcw/M9DOJjEDg==" + } + ] + } + } + } +} +``` + +### BlockResults + +### Parameters + +- `height (integer)`: Height of the block which contains the results. 
If no height is specified, the latest block height will be used + +#### Request + +##### HTTP + +```sh +curl http://127.0.0.1:26657/block_results + + +curl http://127.0.0.1:26657/block_results?height=1 +``` + +##### JSONRPC + +```sh +curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"block_results\",\"params\":{\"height\":\"1\"}}" +``` + +#### Response + +```json +{ + "jsonrpc": "2.0", + "id": 0, + "result": { + "height": "12", + "total_gas_used": "100", + "txs_results": [ + { + "code": "0", + "data": "", + "log": "not enough gas", + "info": "", + "gas_wanted": "100", + "gas_used": "100", + "events": [ + { + "type": "app", + "attributes": [ + { + "key": "YWN0aW9u", + "value": "c2VuZA==", + "index": false + } + ] + } + ], + "codespace": "ibc" + } + ], + "begin_block_events": [ + { + "type": "app", + "attributes": [ + { + "key": "YWN0aW9u", + "value": "c2VuZA==", + "index": false + } + ] + } + ], + "end_block": [ + { + "type": "app", + "attributes": [ + { + "key": "YWN0aW9u", + "value": "c2VuZA==", + "index": false + } + ] + } + ], + "validator_updates": [ + { + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "9tK9IT+FPdf2qm+5c2qaxi10sWP+3erWTKgftn2PaQM=" + }, + "power": "300" + } + ], + "consensus_params_updates": { + "block": { + "max_bytes": "22020096", + "max_gas": "1000", + "time_iota_ms": "1000" + }, + "evidence": { + "max_age": "100000" + }, + "validator": { + "pub_key_types": [ + "ed25519" + ] + } + } + } +} +``` + +### Commit + +#### Parameters + +- `height (integer)`: Height of the block the requested commit pertains to. If no height is set the latest commit will be returned. + +#### Request + +##### HTTP + +```sh +curl http://127.0.0.1:26657/commit + + +curl http://127.0.0.1:26657/commit?height=1 +``` + +##### JSONRPC + +```sh +curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"commit\",\"params\":{\"height\":\"1\"}}" +``` + +#### Response + +```json +{ + "jsonrpc": "2.0", + "id": 0, + "result": { + "signed_header": { + "header": { + "version": { + "block": "10", + "app": "0" + }, + "chain_id": "cosmoshub-2", + "height": "12", + "time": "2019-04-22T17:01:51.701356223Z", + "last_block_id": { + "hash": "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7", + "parts": { + "total": 1, + "hash": "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD" + } + }, + "last_commit_hash": "21B9BC845AD2CB2C4193CDD17BFC506F1EBE5A7402E84AD96E64171287A34812", + "data_hash": "970886F99E77ED0D60DA8FCE0447C2676E59F2F77302B0C4AA10E1D02F18EF73", + "validators_hash": "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0", + "next_validators_hash": "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0", + "consensus_hash": "0F2908883A105C793B74495EB7D6DF2EEA479ED7FC9349206A65CB0F9987A0B8", + "app_hash": "223BF64D4A01074DC523A80E76B9BBC786C791FB0A1893AC5B14866356FCFD6C", + "last_results_hash": "", + "evidence_hash": "", + "proposer_address": "D540AB022088612AC74B287D076DBFBC4A377A2E" + }, + "commit": { + "height": "1311801", + "round": 0, + "block_id": { + "hash": "112BC173FD838FB68EB43476816CD7B4C6661B6884A9E357B417EE957E1CF8F7", + "parts": { + "total": 1, + "hash": "38D4B26B5B725C4F13571EFE022C030390E4C33C8CF6F88EDD142EA769642DBD" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "000001E443FD237E4B616E2FA69DF4EE3D49A94F", + "timestamp": "2019-04-22T17:01:58.376629719Z", + "signature": 
"14jaTQXYRt8kbLKEhdHq7AXycrFImiLuZx50uOjs2+Zv+2i7RTG/jnObD07Jo2ubZ8xd7bNBJMqkgtkd0oQHAw==" + } + ] + } + }, + "canonical": true + } +} +``` + +### Validators + +#### Parameters + +- `height (integer)`: Block height at which the validators were present on. If no height is set the latest commit will be returned. +- `page (integer)`: +- `per_page (integer)`: + +#### Request + +##### HTTP + +```sh +curl http://127.0.0.1:26657/validators +``` + +##### JSONRPC + +```sh +curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"validators\",\"params\":{\"height\":\"1\", \"page\":\"1\", \"per_page\":\"20\"}}" +``` + +#### Response + +```json +{ + "jsonrpc": "2.0", + "id": 0, + "result": { + "block_height": "55", + "validators": [ + { + "address": "000001E443FD237E4B616E2FA69DF4EE3D49A94F", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "9tK9IT+FPdf2qm+5c2qaxi10sWP+3erWTKgftn2PaQM=" + }, + "voting_power": "239727", + "proposer_priority": "-11896414" + } + ], + "count": "1", + "total": "25" + } +} +``` + +### Genesis + +Get Genesis of the chain. If the response is large, this operation +will return an error: use `genesis_chunked` instead. + +#### Request + +##### HTTP + +```sh +curl http://127.0.0.1:26657/genesis +``` + +##### JSONRPC + +```sh +curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"genesis\"}" +``` + +#### Response + +```json +{ + "jsonrpc": "2.0", + "id": 0, + "result": { + "genesis": { + "genesis_time": "2019-04-22T17:00:00Z", + "chain_id": "cosmoshub-2", + "initial_height": "2", + "consensus_params": { + "block": { + "max_bytes": "22020096", + "max_gas": "1000", + "time_iota_ms": "1000" + }, + "evidence": { + "max_age": "100000" + }, + "validator": { + "pub_key_types": [ + "ed25519" + ] + } + }, + "validators": [ + { + "address": "B00A6323737F321EB0B8D59C6FD497A14B60938A", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "cOQZvh/h9ZioSeUMZB/1Vy1Xo5x2sjrVjlE/qHnYifM=" + }, + "power": "9328525", + "name": "Certus One" + } + ], + "app_hash": "", + "app_state": {} + } + } +} +``` + +### GenesisChunked + +Get the genesis document in a chunks to support easily transfering larger documents. + +#### Parameters + +- `chunk` (integer): the index number of the chunk that you wish to + fetch. These IDs are 0 indexed. + +#### Request + +##### HTTP + +```sh +curl http://127.0.0.1:26657/genesis_chunked?chunk=0 +``` + +##### JSONRPC + +```sh +curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"genesis_chunked\",\"params\":{\"chunk\":0}}" +``` + +#### Response + +```json +{ + "jsonrpc": "2.0", + "id": 0, + "result": { + "chunk": 0, + "total": 10, + "data": "dGVuZGVybWludAo=" + } +} +``` + +### ConsensusParams + +Get the consensus parameters. + +#### Parameters + +- `height (integer)`: Block height at which the consensus params would like to be fetched for. + +#### Request + +##### HTTP + +```sh +curl http://127.0.0.1:26657/consensus_params +``` + +##### JSONRPC + +```sh +curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"consensus_params\"}" +``` + +#### Response + +```json +{ + "jsonrpc": "2.0", + "id": 0, + "result": { + "block_height": "1", + "consensus_params": { + "block": { + "max_bytes": "22020096", + "max_gas": "1000", + "time_iota_ms": "1000" + }, + "evidence": { + "max_age": "100000" + }, + "validator": { + "pub_key_types": [ + "ed25519" + ] + } + } + } +} +``` + +### UnconfirmedTxs + +Get a list of unconfirmed transactions. 
+
+#### Parameters
+
+- `limit (integer)`: Maximum number of unconfirmed transactions to return.
+
+#### Request
+
+##### HTTP
+
+```sh
+curl http://127.0.0.1:26657/unconfirmed_txs
+```
+
+##### JSONRPC
+
+```sh
+curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"unconfirmed_txs\",\"params\":{\"limit\":\"20\"}}"
+```
+
+#### Response
+
+```json
+{
+  "jsonrpc": "2.0",
+  "id": 0,
+  "result": {
+    "n_txs": "82",
+    "total": "82",
+    "total_bytes": "19974",
+    "txs": [
+      "gAPwYl3uCjCMTXENChSMnIkb5ZpYHBKIZqecFEV2tuZr7xIUA75/FmYq9WymsOBJ0XSJ8yV8zmQKMIxNcQ0KFIyciRvlmlgcEohmp5wURXa25mvvEhQbrvwbvlNiT+Yjr86G+YQNx7kRVgowjE1xDQoUjJyJG+WaWBwSiGannBRFdrbma+8SFK2m+1oxgILuQLO55n8mWfnbIzyPCjCMTXENChSMnIkb5ZpYHBKIZqecFEV2tuZr7xIUQNGfkmhTNMis4j+dyMDIWXdIPiYKMIxNcQ0KFIyciRvlmlgcEohmp5wURXa25mvvEhS8sL0D0wwgGCItQwVowak5YB38KRIUCg4KBXVhdG9tEgUxMDA1NBDoxRgaagom61rphyECn8x7emhhKdRCB2io7aS/6Cpuq5NbVqbODmqOT3jWw6kSQKUresk+d+Gw0BhjiggTsu8+1voW+VlDCQ1GRYnMaFOHXhyFv7BCLhFWxLxHSAYT8a5XqoMayosZf9mANKdXArA="
+    ]
+  }
+}
+```
+
+### NumUnconfirmedTxs
+
+Get data about unconfirmed transactions.
+
+#### Parameters
+
+None
+
+#### Request
+
+##### HTTP
+
+```sh
+curl http://127.0.0.1:26657/num_unconfirmed_txs
+```
+
+##### JSONRPC
+
+```sh
+curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"num_unconfirmed_txs\"}"
+```
+
+#### Response
+
+```json
+{
+  "jsonrpc": "2.0",
+  "id": 0,
+  "result": {
+    "n_txs": "31",
+    "total": "82",
+    "total_bytes": "19974"
+  }
+}
+```
+
+### Tx
+
+#### Parameters
+
+- `hash (string)`: The hash of the transaction
+- `prove (bool)`: If the response should include proof the transaction was included in a block.
+
+#### Request
+
+##### HTTP
+
+```sh
+curl "http://127.0.0.1:26657/tx?hash=0xD70952032620CC4E2737EB8AC379806359D8E0B17B0488F627997A0B043ABDED"
+```
+
+##### JSONRPC
+
+```sh
+curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"tx\",\"params\":{\"hash\":\"0xD70952032620CC4E2737EB8AC379806359D8E0B17B0488F627997A0B043ABDED\"}}"
+```
+
+#### Response
+
+```json
+{
+  "jsonrpc": "2.0",
+  "id": 0,
+  "result": {
+    "hash": "D70952032620CC4E2737EB8AC379806359D8E0B17B0488F627997A0B043ABDED",
+    "height": "1000",
+    "index": 0,
+    "tx_result": {
+      "log": "[{\"msg_index\":\"0\",\"success\":true,\"log\":\"\"}]",
+      "gas_wanted": "200000",
+      "gas_used": "28596",
+      "tags": [
+        {
+          "key": "YWN0aW9u",
+          "value": "c2VuZA==",
+          "index": false
+        }
+      ]
+    },
+    "tx": "5wHwYl3uCkaoo2GaChQmSIu8hxpJxLcCuIi8fiHN4TMwrRIU/Af1cEG7Rcs/6LjTl7YjRSymJfYaFAoFdWF0b20SCzE0OTk5OTk1MDAwEhMKDQoFdWF0b20SBDUwMDAQwJoMGmoKJuta6YchAwswBShaB1wkZBctLIhYqBC3JrAI28XGzxP+rVEticGEEkAc+khTkKL9CDE47aDvjEHvUNt+izJfT4KVF2v2JkC+bmlH9K08q3PqHeMI9Z5up+XMusnTqlP985KF+SI5J3ZOIhhNYWRlIGJ5IENpcmNsZSB3aXRoIGxvdmU="
+  }
+}
+```
+
+## Transaction Routes
+
+### BroadCastTxSync
+
+Returns with the response from CheckTx. Does not wait for DeliverTx result.
+
+#### Parameters
+
+- `tx (string)`: The encoded transaction
+
+#### Request
+
+##### HTTP
+
+```sh
+curl http://127.0.0.1:26657/broadcast_tx_sync?tx=encoded_tx
+```
+
+##### JSONRPC
+
+```sh
+curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"broadcast_tx_sync\",\"params\":{\"tx\":\"a/encoded_tx/c\"}}"
+```
+
+#### Response
+
+```json
+{
+  "jsonrpc": "2.0",
+  "id": 0,
+  "result": {
+    "code": "0",
+    "data": "",
+    "log": "",
+    "codespace": "ibc",
+    "hash": "0D33F2F03A5234F38706E43004489E061AC40A2E"
+  },
+  "error": ""
+}
+```
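+
+A minimal sketch of calling this route over HTTP from Go (the transaction bytes are hypothetical, kvstore-style):
+
+```go
+package main
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+)
+
+func main() {
+	// A kvstore-style transaction; string txs are quoted in URI params.
+	tx := url.QueryEscape(`"mykey=myvalue"`)
+	resp, err := http.Get("http://127.0.0.1:26657/broadcast_tx_sync?tx=" + tx)
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+	out, _ := io.ReadAll(resp.Body)
+	fmt.Println(string(out)) // contains the CheckTx code and the tx hash
+}
+```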
+
+### BroadCastTxAsync
+
+Returns right away, with no response. Does not wait for CheckTx nor DeliverTx results.
+
+#### Parameters
+
+- `tx (string)`: The encoded transaction
+
+#### Request
+
+##### HTTP
+
+```sh
+curl http://127.0.0.1:26657/broadcast_tx_async?tx=encoded_tx
+```
+
+##### JSONRPC
+
+```sh
+curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"broadcast_tx_async\",\"params\":{\"tx\":\"a/encoded_tx/c\"}}"
+```
+
+#### Response
+
+```json
+{
+  "jsonrpc": "2.0",
+  "id": 0,
+  "result": {
+    "code": "0",
+    "data": "",
+    "log": "",
+    "codespace": "ibc",
+    "hash": "0D33F2F03A5234F38706E43004489E061AC40A2E"
+  },
+  "error": ""
+}
+```
+
+### CheckTx
+
+Checks the transaction without executing it.
+
+#### Parameters
+
+- `tx (string)`: String of the encoded transaction
+
+#### Request
+
+##### HTTP
+
+```sh
+curl http://127.0.0.1:26657/check_tx?tx=encoded_tx
+```
+
+##### JSONRPC
+
+```sh
+curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"check_tx\",\"params\":{\"tx\":\"a/encoded_tx/c\"}}"
+```
+
+#### Response
+
+```json
+{
+  "id": 0,
+  "jsonrpc": "2.0",
+  "error": "",
+  "result": {
+    "code": "0",
+    "data": "",
+    "log": "",
+    "info": "",
+    "gas_wanted": "1",
+    "gas_used": "0",
+    "events": [
+      {
+        "type": "app",
+        "attributes": [
+          {
+            "key": "YWN0aW9u",
+            "value": "c2VuZA==",
+            "index": false
+          }
+        ]
+      }
+    ],
+    "codespace": "bank"
+  }
+}
+```
+
+## ABCI Routes
+
+### ABCIInfo
+
+Get some info about the application.
+
+#### Parameters
+
+None
+
+#### Request
+
+##### HTTP
+
+```sh
+curl http://127.0.0.1:26657/abci_info
+```
+
+##### JSONRPC
+
+```sh
+curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"abci_info\"}"
+```
+
+#### Response
+
+```json
+{
+  "jsonrpc": "2.0",
+  "id": 0,
+  "result": {
+    "response": {
+      "data": "{\"size\":0}",
+      "version": "0.16.1",
+      "app_version": "1314126"
+    }
+  }
+}
+```
+
+### ABCIQuery
+
+Query the application for some information.
+
+#### Parameters
+
+- `path (string)`: Path to the data. This is defined by the application.
+- `data (string)`: The data requested
+- `height (integer)`: Height at which the data is requested.
+- `prove (bool)`: Include a proof of the data's inclusion
+
+#### Request
+
+##### HTTP
+
+```sh
+curl 'http://127.0.0.1:26657/abci_query?path="a/b/c"&data=IHAVENOIDEA&height=1&prove=true'
+```
+
+##### JSONRPC
+
+```sh
+curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"abci_query\",\"params\":{\"path\":\"a/b/c\", \"data\":\"IHAVENOIDEA\", \"height\":\"1\", \"prove\":true}}"
+```
+
+#### Response
+
+```json
+{
+  "error": "",
+  "result": {
+    "response": {
+      "log": "exists",
+      "height": "0",
+      "proof": "010114FED0DAD959F36091AD761C922ABA3CBF1D8349990101020103011406AA2262E2F448242DF2C2607C3CDC705313EE3B0001149D16177BC71E445476174622EA559715C293740C",
+      "value": "61626364",
+      "key": "61626364",
+      "index": "-1",
+      "code": "0"
+    }
+  },
+  "id": 0,
+  "jsonrpc": "2.0"
+}
+```
+
+## Evidence Routes
+
+### BroadcastEvidence
+
+Broadcast evidence of misbehavior.
+ +#### Parameters + +- `evidence (string)`: + +#### Request + +##### HTTP + +```sh +curl http://localhost:26657/broadcast_evidence?evidence=JSON_EVIDENCE_encoded +``` + +#### JSONRPC + +```sh +curl -X POST https://localhost:26657 -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"broadcast_evidence\",\"params\":{\"evidence\":\"JSON_EVIDENCE_encoded\"}}" +``` + +#### Response + +```json +{ + "error": "", + "result": "", + "id": 0, + "jsonrpc": "2.0" +} +``` diff --git a/test/Makefile b/test/Makefile index 3622cb4652..fcb6877230 100644 --- a/test/Makefile +++ b/test/Makefile @@ -3,6 +3,8 @@ ######################################## ### Testing +PACKAGES=$(shell go list ./...) + BINDIR ?= $(GOPATH)/bin ## required to be run first by most tests diff --git a/test/app/grpc_client.go b/test/app/grpc_client.go deleted file mode 100644 index 73022aaf83..0000000000 --- a/test/app/grpc_client.go +++ /dev/null @@ -1,42 +0,0 @@ -package main - -import ( - "encoding/hex" - "fmt" - "os" - - "context" - - tmjson "github.com/tendermint/tendermint/libs/json" - coregrpc "github.com/tendermint/tendermint/rpc/grpc" -) - -var grpcAddr = "tcp://localhost:36656" - -func main() { - args := os.Args - if len(args) == 1 { - fmt.Println("Must enter a transaction to send (hex)") - os.Exit(1) - } - tx := args[1] - txBytes, err := hex.DecodeString(tx) - if err != nil { - fmt.Println("Invalid hex", err) - os.Exit(1) - } - - clientGRPC := coregrpc.StartGRPCClient(grpcAddr) - res, err := clientGRPC.BroadcastTx(context.Background(), &coregrpc.RequestBroadcastTx{Tx: txBytes}) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - - bz, err := tmjson.Marshal(res) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - fmt.Println(string(bz)) -} diff --git a/test/app/kvstore_test.sh b/test/app/kvstore_test.sh index 034e28878d..305a2926c6 100755 --- a/test/app/kvstore_test.sh +++ b/test/app/kvstore_test.sh @@ -57,7 +57,7 @@ echo "... testing query with /abci_query 2" # we should be able to look up the key RESPONSE=`curl -s "127.0.0.1:26657/abci_query?path=\"\"&data=$(toHex $KEY)&prove=false"` -RESPONSE=`echo $RESPONSE | jq .result.response.log` +RESPONSE=`echo $RESPONSE | jq .response.log` set +e A=`echo $RESPONSE | grep 'exists'` @@ -70,7 +70,7 @@ set -e # we should not be able to look up the value RESPONSE=`curl -s "127.0.0.1:26657/abci_query?path=\"\"&data=$(toHex $VALUE)&prove=false"` -RESPONSE=`echo $RESPONSE | jq .result.response.log` +RESPONSE=`echo $RESPONSE | jq .response.log` set +e A=`echo $RESPONSE | grep 'exists'` if [[ $? == 0 ]]; then diff --git a/test/app/test.sh b/test/app/test.sh index bce49c4993..c06227dd46 100755 --- a/test/app/test.sh +++ b/test/app/test.sh @@ -1,17 +1,26 @@ -#! /bin/bash -set -ex +#!/bin/bash +set -exo pipefail #- kvstore over socket, curl # TODO: install everything -GOPATH=$(go env GOPATH) export PATH="$GOBIN:$PATH" export TMHOME=$HOME/.tenderdash_app -function kvstore_over_socket(){ - rm -rf $TMHOME +function init_validator() { + rm -rf -- "$TMHOME" tenderdash init validator + + # The default configuration sets a null indexer, but these tests require + # indexing to be enabled. Rewrite the config file to set the "kv" indexer + # before starting up the node. + sed -i'' -e '/indexer = \["null"\]/c\ +indexer = ["kv"]' "$TMHOME/config/config.toml" +} + +function kvstore_over_socket() { + init_validator echo "Starting kvstore_over_socket" abci-cli kvstore > /dev/null & pid_kvstore=$! 
@@ -25,13 +34,12 @@ function kvstore_over_socket(){
     kill -9 $pid_kvstore $pid_tenderdash
 }
 
 # start tenderdash first
-function kvstore_over_socket_reorder(){
-    rm -rf $TMHOME
-    tenderdash init validator
+function kvstore_over_socket_reorder() {
+    init_validator
     echo "Starting kvstore_over_socket_reorder (ie. start tenderdash first)"
     tenderdash start --mode validator > tenderdash.log &
     pid_tenderdash=$!
     sleep 2
     abci-cli kvstore > /dev/null &
     pid_kvstore=$!
diff --git a/test/docker/Dockerfile b/test/docker/Dockerfile
index e9c775199b..28494ce6f0 100644
--- a/test/docker/Dockerfile
+++ b/test/docker/Dockerfile
@@ -1,4 +1,4 @@
-FROM golang:1.16
+FROM golang:1.17
 
 # Grab deps (jq, hexdump, xxd, killall)
 RUN apt-get update && \
diff --git a/test/docker/config-template.toml b/test/docker/config-template.toml
index a90eb7bd5f..6ce39c9f86 100644
--- a/test/docker/config-template.toml
+++ b/test/docker/config-template.toml
@@ -1,2 +1,5 @@
 [rpc]
 laddr = "tcp://0.0.0.0:26657"
+
+[tx-index]
+indexer = ["kv"]
diff --git a/test/e2e/README.md b/test/e2e/README.md
index e5c21df43e..3ffb0f67f9 100644
--- a/test/e2e/README.md
+++ b/test/e2e/README.md
@@ -11,7 +11,7 @@ This creates and runs a testnet named `ci` under `networks/ci/`.
 
 ## Conceptual Overview
 
-End-to-end testnets are used to test Tendermint functionality as a user would use it, by spinning up a set of nodes with various configurations and making sure the nodes and network behave correctly. The background for the E2E test suite is outlined in [RFC-001](https://github.com/tendermint/tendermint/blob/master/docs/rfc/rfc-001-end-to-end-testing.md).
+End-to-end testnets are used to test Tendermint functionality as a user would use it, by spinning up a set of nodes with various configurations and making sure the nodes and network behave correctly. The background for the E2E test suite is outlined in [ADR-066](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-066-e2e-testing.md).
 
 The end-to-end tests can be thought of in this manner:
diff --git a/test/e2e/app/app.go b/test/e2e/app/app.go
index aa4b83fefa..f0e9e612d7 100644
--- a/test/e2e/app/app.go
+++ b/test/e2e/app/app.go
@@ -2,20 +2,23 @@ package app
 
 import (
 	"bytes"
+	"context"
 	"encoding/base64"
+	"encoding/binary"
 	"encoding/hex"
 	"errors"
 	"fmt"
+	"math/rand"
 	"path/filepath"
 	"sort"
 	"strconv"
+	"strings"
+	"sync"
 
 	"github.com/gogo/protobuf/proto"
 	"github.com/tendermint/tendermint/crypto"
-	"github.com/tendermint/tendermint/types"
-
 	"github.com/tendermint/tendermint/crypto/bls12381"
 	"github.com/tendermint/tendermint/crypto/encoding"
@@ -23,14 +26,21 @@ import (
 	abci "github.com/tendermint/tendermint/abci/types"
 	"github.com/tendermint/tendermint/libs/log"
 	types1 "github.com/tendermint/tendermint/proto/tendermint/types"
+	"github.com/tendermint/tendermint/types"
 	"github.com/tendermint/tendermint/version"
 )
 
+const (
+	voteExtensionKey    string = "extensionSum"
+	voteExtensionMaxVal int64  = 128
+)
+
 // Application is an ABCI application for use by end-to-end tests. It is a
 // simple key/value store for strings, storing data in memory and persisting
 // to disk as JSON, taking state sync snapshots if requested.
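+// All ABCI handlers below serialize access to the application state through
+// the mu mutex added in this change, since requests may arrive concurrently.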
 type Application struct {
 	abci.BaseApplication
+	mu        sync.Mutex
 	logger    log.Logger
 	state     *State
 	snapshots *SnapshotStore
@@ -105,8 +115,13 @@ func NewApplication(cfg *Config) (*Application, error) {
 	if err != nil {
 		return nil, err
 	}
+	logger, err := log.NewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo)
+	if err != nil {
+		return nil, err
+	}
+
 	return &Application{
-		logger:    log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false),
+		logger:    logger,
 		state:     state,
 		snapshots: snapshots,
 		cfg:       cfg,
@@ -114,20 +129,23 @@ func NewApplication(cfg *Config) (*Application, error) {
 }
 
 // Info implements ABCI.
-func (app *Application) Info(req abci.RequestInfo) abci.ResponseInfo {
-	app.state.RLock()
-	defer app.state.RUnlock()
+func (app *Application) Info(_ context.Context, req *abci.RequestInfo) (*abci.ResponseInfo, error) {
+	app.mu.Lock()
+	defer app.mu.Unlock()
 
-	return abci.ResponseInfo{
+	return &abci.ResponseInfo{
 		Version:          version.ABCIVersion,
 		AppVersion:       1,
 		LastBlockHeight:  int64(app.state.Height),
 		LastBlockAppHash: app.state.Hash,
-	}
+	}, nil
 }
 
 // InitChain implements ABCI.
-func (app *Application) InitChain(req abci.RequestInitChain) abci.ResponseInitChain {
+func (app *Application) InitChain(_ context.Context, req *abci.RequestInitChain) (*abci.ResponseInitChain, error) {
+	app.mu.Lock()
+	defer app.mu.Unlock()
+
 	var err error
 	app.state.initialHeight = uint64(req.InitialHeight)
 	if len(req.AppStateBytes) > 0 {
@@ -136,7 +154,7 @@ func (app *Application) InitChain(req abci.RequestInitChain) abci.ResponseInitCh
 			panic(err)
 		}
 	}
-	resp := abci.ResponseInitChain{
+	resp := &abci.ResponseInitChain{
 		AppHash: app.state.Hash,
 		ConsensusParams: &types1.ConsensusParams{
 			Version: &types1.VersionParams{
@@ -154,35 +172,45 @@ func (app *Application) InitChain(req abci.RequestInitChain) abci.ResponseInitCh
 	if resp.NextCoreChainLockUpdate, err = app.chainLockUpdate(0); err != nil {
 		panic(err)
 	}
-	return resp
+	return resp, nil
 }
 
 // CheckTx implements ABCI.
-func (app *Application) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
+func (app *Application) CheckTx(_ context.Context, req *abci.RequestCheckTx) (*abci.ResponseCheckTx, error) {
+	app.mu.Lock()
+	defer app.mu.Unlock()
+
 	_, _, err := parseTx(req.Tx)
 	if err != nil {
-		return abci.ResponseCheckTx{
+		return &abci.ResponseCheckTx{
 			Code: code.CodeTypeEncodingError,
 			Log:  err.Error(),
-		}
+		}, nil
 	}
-	return abci.ResponseCheckTx{Code: code.CodeTypeOK, GasWanted: 1}
+	return &abci.ResponseCheckTx{Code: code.CodeTypeOK, GasWanted: 1}, nil
 }
 
-// DeliverTx implements ABCI.
-func (app *Application) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx {
-	key, value, err := parseTx(req.Tx)
-	if err != nil {
-		panic(err) // shouldn't happen since we verified it in CheckTx
+// FinalizeBlock implements ABCI.
+func (app *Application) FinalizeBlock(_ context.Context, req *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) {
+	var txs = make([]*abci.ExecTxResult, len(req.Txs))
+
+	app.mu.Lock()
+	defer app.mu.Unlock()
+
+	for i, tx := range req.Txs {
+		key, value, err := parseTx(tx)
+		if err != nil {
+			panic(err) // shouldn't happen since we verified it in CheckTx
+		}
+		app.state.Set(key, value)
+
+		txs[i] = &abci.ExecTxResult{Code: code.CodeTypeOK}
 	}
-	app.state.Set(key, value)
-	return abci.ResponseDeliverTx{Code: code.CodeTypeOK}
-}
 
-// EndBlock implements ABCI.
-func (app *Application) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock { var err error - resp := abci.ResponseEndBlock{} + resp := abci.ResponseFinalizeBlock{ + TxResults: txs, + } resp.ValidatorSetUpdate, err = app.validatorSetUpdates(uint64(req.Height)) if err != nil { panic(err) @@ -206,11 +234,15 @@ func (app *Application) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock }, }, } - return resp + + return &resp, nil } // Commit implements ABCI. -func (app *Application) Commit() abci.ResponseCommit { +func (app *Application) Commit(_ context.Context) (*abci.ResponseCommit, error) { + app.mu.Lock() + defer app.mu.Unlock() + height, hash, err := app.state.Commit() if err != nil { panic(err) @@ -220,61 +252,76 @@ func (app *Application) Commit() abci.ResponseCommit { if err != nil { panic(err) } - app.logger.Info("Created state sync snapshot", "height", snapshot.Height) + app.logger.Info("created state sync snapshot", "height", snapshot.Height) err = app.snapshots.Prune(maxSnapshotCount) if err != nil { - app.logger.Error("Failed to prune snapshots", "err", err) + app.logger.Error("failed to prune snapshots", "err", err) } } retainHeight := int64(0) if app.cfg.RetainBlocks > 0 { retainHeight = int64(height - app.cfg.RetainBlocks + 1) } - return abci.ResponseCommit{ + return &abci.ResponseCommit{ Data: hash, RetainHeight: retainHeight, - } + }, nil } // Query implements ABCI. -func (app *Application) Query(req abci.RequestQuery) abci.ResponseQuery { - return abci.ResponseQuery{ +func (app *Application) Query(_ context.Context, req *abci.RequestQuery) (*abci.ResponseQuery, error) { + app.mu.Lock() + defer app.mu.Unlock() + + return &abci.ResponseQuery{ Height: int64(app.state.Height), Key: req.Data, Value: []byte(app.state.Get(string(req.Data))), - } + }, nil } // ListSnapshots implements ABCI. -func (app *Application) ListSnapshots(req abci.RequestListSnapshots) abci.ResponseListSnapshots { +func (app *Application) ListSnapshots(_ context.Context, req *abci.RequestListSnapshots) (*abci.ResponseListSnapshots, error) { + app.mu.Lock() + defer app.mu.Unlock() + snapshots, err := app.snapshots.List() if err != nil { panic(err) } - return abci.ResponseListSnapshots{Snapshots: snapshots} + return &abci.ResponseListSnapshots{Snapshots: snapshots}, nil } // LoadSnapshotChunk implements ABCI. -func (app *Application) LoadSnapshotChunk(req abci.RequestLoadSnapshotChunk) abci.ResponseLoadSnapshotChunk { +func (app *Application) LoadSnapshotChunk(_ context.Context, req *abci.RequestLoadSnapshotChunk) (*abci.ResponseLoadSnapshotChunk, error) { + app.mu.Lock() + defer app.mu.Unlock() + chunk, err := app.snapshots.LoadChunk(req.Height, req.Format, req.Chunk) if err != nil { panic(err) } - return abci.ResponseLoadSnapshotChunk{Chunk: chunk} + return &abci.ResponseLoadSnapshotChunk{Chunk: chunk}, nil } // OfferSnapshot implements ABCI. -func (app *Application) OfferSnapshot(req abci.RequestOfferSnapshot) abci.ResponseOfferSnapshot { +func (app *Application) OfferSnapshot(_ context.Context, req *abci.RequestOfferSnapshot) (*abci.ResponseOfferSnapshot, error) { + app.mu.Lock() + defer app.mu.Unlock() + if app.restoreSnapshot != nil { panic("A snapshot is already being restored") } app.restoreSnapshot = req.Snapshot app.restoreChunks = [][]byte{} - return abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ACCEPT} + return &abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ACCEPT}, nil } // ApplySnapshotChunk implements ABCI. 
-func (app *Application) ApplySnapshotChunk(req abci.RequestApplySnapshotChunk) abci.ResponseApplySnapshotChunk { +func (app *Application) ApplySnapshotChunk(_ context.Context, req *abci.RequestApplySnapshotChunk) (*abci.ResponseApplySnapshotChunk, error) { + app.mu.Lock() + defer app.mu.Unlock() + if app.restoreSnapshot == nil { panic("No restore in progress") } @@ -291,10 +338,182 @@ func (app *Application) ApplySnapshotChunk(req abci.RequestApplySnapshotChunk) a app.restoreSnapshot = nil app.restoreChunks = nil } - return abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT} + return &abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil +} + +// PrepareProposal will take the given transactions and attempt to prepare a +// proposal from them when it's our turn to do so. In the process, vote +// extensions from the previous round of consensus, if present, will be used to +// construct a special transaction whose value is the sum of all of the vote +// extensions from the previous round. +// +// NB: Assumes that the supplied transactions do not exceed `req.MaxTxBytes`. +// If adding a special vote extension-generated transaction would cause the +// total number of transaction bytes to exceed `req.MaxTxBytes`, we will not +// append our special vote extension transaction. +func (app *Application) PrepareProposal(_ context.Context, req *abci.RequestPrepareProposal) (*abci.ResponsePrepareProposal, error) { + var sum int64 + var extCount int + for _, vote := range req.LocalLastCommit.Votes { + if len(vote.VoteExtension) == 0 { + continue + } + extValue, err := parseVoteExtension(vote.VoteExtension) + // This should have been verified in VerifyVoteExtension + if err != nil { + panic(fmt.Errorf("failed to parse vote extension in PrepareProposal: %w", err)) + } + proTxHash := crypto.ProTxHash(vote.Validator.ProTxHash) + app.logger.Info("got vote extension value in PrepareProposal", "proTxHash", proTxHash, "value", extValue) + sum += extValue + extCount++ + } + // We only generate our special transaction if we have vote extensions + if extCount > 0 { + var totalBytes int64 + extTxPrefix := fmt.Sprintf("%s=", voteExtensionKey) + extTx := []byte(fmt.Sprintf("%s%d", extTxPrefix, sum)) + app.logger.Info("preparing proposal with custom transaction from vote extensions", "tx", extTx) + // Our generated transaction takes precedence over any supplied + // transaction that attempts to modify the "extensionSum" value. + txRecords := make([]*abci.TxRecord, len(req.Txs)+1) + for i, tx := range req.Txs { + if strings.HasPrefix(string(tx), extTxPrefix) { + txRecords[i] = &abci.TxRecord{ + Action: abci.TxRecord_REMOVED, + Tx: tx, + } + } else { + txRecords[i] = &abci.TxRecord{ + Action: abci.TxRecord_UNMODIFIED, + Tx: tx, + } + totalBytes += int64(len(tx)) + } + } + if totalBytes+int64(len(extTx)) < req.MaxTxBytes { + txRecords[len(req.Txs)] = &abci.TxRecord{ + Action: abci.TxRecord_ADDED, + Tx: extTx, + } + } else { + app.logger.Info( + "too many txs to include special vote extension-generated tx", + "totalBytes", totalBytes, + "MaxTxBytes", req.MaxTxBytes, + "extTx", extTx, + "extTxLen", len(extTx), + ) + } + return &abci.ResponsePrepareProposal{ + TxRecords: txRecords, + }, nil + } + // None of the transactions are modified by this application. 
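+	// (The pass-through below still respects req.MaxTxBytes: once the running
+	// total would exceed it, that transaction and the rest are dropped.)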
+ trs := make([]*abci.TxRecord, 0, len(req.Txs)) + var totalBytes int64 + for _, tx := range req.Txs { + totalBytes += int64(len(tx)) + if totalBytes > req.MaxTxBytes { + break + } + trs = append(trs, &abci.TxRecord{ + Action: abci.TxRecord_UNMODIFIED, + Tx: tx, + }) + } + return &abci.ResponsePrepareProposal{TxRecords: trs}, nil +} + +// ProcessProposal implements part of the Application interface. +// It accepts any proposal that does not contain a malformed transaction. +func (app *Application) ProcessProposal(_ context.Context, req *abci.RequestProcessProposal) (*abci.ResponseProcessProposal, error) { + for _, tx := range req.Txs { + k, v, err := parseTx(tx) + if err != nil { + app.logger.Error("malformed transaction in ProcessProposal", "tx", tx, "err", err) + return &abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_REJECT}, nil + } + // Additional check for vote extension-related txs + if k == voteExtensionKey { + _, err := strconv.Atoi(v) + if err != nil { + app.logger.Error("malformed vote extension transaction", k, v, "err", err) + return &abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_REJECT}, nil + } + } + } + return &abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_ACCEPT}, nil +} + +// ExtendVote will produce vote extensions in the form of random numbers to +// demonstrate vote extension nondeterminism. +// +// In the next block, if there are any vote extensions from the previous block, +// a new transaction will be proposed that updates a special value in the +// key/value store ("extensionSum") with the sum of all of the numbers collected +// from the vote extensions. +func (app *Application) ExtendVote(_ context.Context, req *abci.RequestExtendVote) (*abci.ResponseExtendVote, error) { + // We ignore any requests for vote extensions that don't match our expected + // next height. + if req.Height != int64(app.state.Height)+1 { + app.logger.Error( + "got unexpected height in ExtendVote request", + "expectedHeight", app.state.Height+1, + "requestHeight", req.Height, + ) + return &abci.ResponseExtendVote{}, nil + } + ext := make([]byte, binary.MaxVarintLen64) + // We don't care that these values are generated by a weak random number + // generator. It's just for test purposes. + // nolint:gosec // G404: Use of weak random number generator + num := rand.Int63n(voteExtensionMaxVal) + extLen := binary.PutVarint(ext, num) + app.logger.Info("generated vote extension", "num", num, "ext", fmt.Sprintf("%x", ext[:extLen]), "state.Height", app.state.Height) + return &abci.ResponseExtendVote{ + VoteExtension: ext[:extLen], + }, nil +} + +// VerifyVoteExtension simply validates vote extensions from other validators +// without doing anything about them. In this case, it just makes sure that the +// vote extension is a well-formed integer value. 
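+// Empty extensions are accepted outright; non-empty extensions must target
+// height state.Height+1 and decode to a varint below voteExtensionMaxVal.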
+func (app *Application) VerifyVoteExtension(_ context.Context, req *abci.RequestVerifyVoteExtension) (*abci.ResponseVerifyVoteExtension, error) {
+	// We allow vote extensions to be optional
+	if len(req.VoteExtension) == 0 {
+		return &abci.ResponseVerifyVoteExtension{
+			Status: abci.ResponseVerifyVoteExtension_ACCEPT,
+		}, nil
+	}
+	if req.Height != int64(app.state.Height)+1 {
+		app.logger.Error(
+			"got unexpected height in VerifyVoteExtension request",
+			"expectedHeight", app.state.Height+1,
+			"requestHeight", req.Height,
+		)
+		return &abci.ResponseVerifyVoteExtension{
+			Status: abci.ResponseVerifyVoteExtension_REJECT,
+		}, nil
+	}
+
+	num, err := parseVoteExtension(req.VoteExtension)
+	if err != nil {
+		app.logger.Error("failed to verify vote extension", "req", req, "err", err)
+		return &abci.ResponseVerifyVoteExtension{
+			Status: abci.ResponseVerifyVoteExtension_REJECT,
+		}, nil
+	}
+	app.logger.Info("verified vote extension value", "req", req, "num", num)
+	return &abci.ResponseVerifyVoteExtension{
+		Status: abci.ResponseVerifyVoteExtension_ACCEPT,
+	}, nil
 }
 
 func (app *Application) Rollback() error {
+	app.mu.Lock()
+	defer app.mu.Unlock()
+
 	return app.state.Rollback()
 }
@@ -406,3 +625,19 @@ func parseTx(tx []byte) (string, string, error) {
 	}
 	return string(parts[0]), string(parts[1]), nil
 }
+
+// parseVoteExtension attempts to parse the given extension data into a positive
+// integer value.
+func parseVoteExtension(ext []byte) (int64, error) {
+	num, errVal := binary.Varint(ext)
+	if errVal == 0 {
+		return 0, errors.New("vote extension is too small to parse")
+	}
+	if errVal < 0 {
+		return 0, errors.New("vote extension value is too large")
+	}
+	if num >= voteExtensionMaxVal {
+		return 0, fmt.Errorf("vote extension value must be smaller than %d (was %d)", voteExtensionMaxVal, num)
+	}
+	return num, nil
+}
diff --git a/test/e2e/app/snapshots.go b/test/e2e/app/snapshots.go
index 6a9c0e0dcf..61e34bd076 100644
--- a/test/e2e/app/snapshots.go
+++ b/test/e2e/app/snapshots.go
@@ -5,7 +5,6 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
-	"io/ioutil"
 	"math"
 	"os"
 	"path/filepath"
@@ -48,7 +47,7 @@ func (s *SnapshotStore) loadMetadata() error {
 	file := filepath.Join(s.dir, "metadata.json")
 	metadata := []abci.Snapshot{}
 
-	bz, err := ioutil.ReadFile(file)
+	bz, err := os.ReadFile(file)
 	switch {
 	case errors.Is(err, os.ErrNotExist):
 	case err != nil:
@@ -75,7 +74,7 @@ func (s *SnapshotStore) saveMetadata() error {
 	// save the file to a new file and move it to make saving atomic.
newFile := filepath.Join(s.dir, "metadata.json.new") file := filepath.Join(s.dir, "metadata.json") - err = ioutil.WriteFile(newFile, bz, 0644) // nolint: gosec + err = os.WriteFile(newFile, bz, 0644) // nolint: gosec if err != nil { return err } @@ -93,10 +92,10 @@ func (s *SnapshotStore) Create(state *State) (abci.Snapshot, error) { snapshot := abci.Snapshot{ Height: state.Height, Format: 1, - Hash: hashItems(state.Values), + Hash: hashItems(state.Values, state.Height), Chunks: byteChunks(bz), } - err = ioutil.WriteFile(filepath.Join(s.dir, fmt.Sprintf("%v.json", state.Height)), bz, 0644) + err = os.WriteFile(filepath.Join(s.dir, fmt.Sprintf("%v.json", state.Height)), bz, 0644) if err != nil { return abci.Snapshot{}, err } @@ -146,7 +145,7 @@ func (s *SnapshotStore) LoadChunk(height uint64, format uint32, chunk uint32) ([ defer s.RUnlock() for _, snapshot := range s.metadata { if snapshot.Height == height && snapshot.Format == format { - bz, err := ioutil.ReadFile(filepath.Join(s.dir, fmt.Sprintf("%v.json", height))) + bz, err := os.ReadFile(filepath.Join(s.dir, fmt.Sprintf("%v.json", height))) if err != nil { return nil, err } diff --git a/test/e2e/app/state.go b/test/e2e/app/state.go index ee86c9e299..3e79e7a295 100644 --- a/test/e2e/app/state.go +++ b/test/e2e/app/state.go @@ -3,6 +3,7 @@ package app import ( "crypto/sha256" + "encoding/binary" "encoding/json" "errors" "fmt" @@ -39,7 +40,7 @@ func NewState(dir string, persistInterval uint64) (*State, error) { previousFile: filepath.Join(dir, prevStateFileName), persistInterval: persistInterval, } - state.Hash = hashItems(state.Values) + state.Hash = hashItems(state.Values, state.Height) err := state.load() switch { case errors.Is(err, os.ErrNotExist): @@ -115,7 +116,7 @@ func (s *State) Import(height uint64, jsonBytes []byte) error { } s.Height = height s.Values = values - s.Hash = hashItems(values) + s.Hash = hashItems(values, height) return s.save() } @@ -141,7 +142,6 @@ func (s *State) Set(key, value string) { func (s *State) Commit() (uint64, []byte, error) { s.Lock() defer s.Unlock() - s.Hash = hashItems(s.Values) switch { case s.Height > 0: s.Height++ @@ -150,6 +150,7 @@ func (s *State) Commit() (uint64, []byte, error) { default: s.Height = 1 } + s.Hash = hashItems(s.Values, s.Height) if s.persistInterval > 0 && s.Height%s.persistInterval == 0 { err := s.save() if err != nil { @@ -172,7 +173,7 @@ func (s *State) Rollback() error { } // hashItems hashes a set of key/value items. -func hashItems(items map[string]string) []byte { +func hashItems(items map[string]string, height uint64) []byte { keys := make([]string, 0, len(items)) for key := range items { keys = append(keys, key) @@ -180,6 +181,9 @@ func hashItems(items map[string]string) []byte { sort.Strings(keys) hasher := sha256.New() + var b [8]byte + binary.BigEndian.PutUint64(b[:], height) + _, _ = hasher.Write(b[:]) for _, key := range keys { _, _ = hasher.Write([]byte(key)) _, _ = hasher.Write([]byte{0}) diff --git a/test/e2e/generator/generate.go b/test/e2e/generator/generate.go index ef0f4d38e7..f634fc5f6d 100644 --- a/test/e2e/generator/generate.go +++ b/test/e2e/generator/generate.go @@ -16,8 +16,7 @@ var ( // separate testnet for each combination (Cartesian product) of options. 
testnetCombinations = map[string][]interface{}{ "topology": {"single", "quad", "large"}, - "p2p": {NewP2PMode, LegacyP2PMode, HybridP2PMode}, - "queueType": {"priority"}, // "fifo", "wdrr" + "queueType": {"priority"}, // "fifo" "initialHeight": {0, 1000}, "initialState": { map[string]string{}, @@ -49,9 +48,6 @@ var ( "tcp": 20, "unix": 10, } - // FIXME: v2 disabled due to flake - nodeBlockSyncs = uniformChoice{"v0"} // "v2" - nodeMempools = uniformChoice{"v0", "v1"} nodeStateSyncs = weightedChoice{ e2e.StateSyncDisabled: 10, e2e.StateSyncP2P: 45, @@ -59,8 +55,12 @@ var ( } nodePersistIntervals = uniformChoice{0, 1, 5} nodeSnapshotIntervals = uniformChoice{0, 5} - nodeRetainBlocks = uniformChoice{0, 2 * int(e2e.EvidenceAgeHeight), 4 * int(e2e.EvidenceAgeHeight)} - nodePerturbations = probSetChoice{ + nodeRetainBlocks = uniformChoice{ + 0, + 2 * int(e2e.EvidenceAgeHeight), + 4 * int(e2e.EvidenceAgeHeight), + } + nodePerturbations = probSetChoice{ "disconnect": 0.1, "pause": 0.1, "kill": 0.1, @@ -75,19 +75,6 @@ var ( // Generate generates random testnets using the given RNG. func Generate(r *rand.Rand, opts Options) ([]e2e.Manifest, error) { manifests := []e2e.Manifest{} - switch opts.P2P { - case NewP2PMode, LegacyP2PMode, HybridP2PMode: - defer func() { - // avoid modifying the global state. - original := make([]interface{}, len(testnetCombinations["p2p"])) - copy(original, testnetCombinations["p2p"]) - testnetCombinations["p2p"] = original - }() - - testnetCombinations["p2p"] = []interface{}{opts.P2P} - case MixedP2PMode: - testnetCombinations["p2p"] = []interface{}{NewP2PMode, LegacyP2PMode, HybridP2PMode} - } for _, opt := range combinations(testnetCombinations) { manifest, err := generateTestnet(r, opt) @@ -99,12 +86,6 @@ func Generate(r *rand.Rand, opts Options) ([]e2e.Manifest, error) { continue } - if len(manifest.Nodes) == 1 { - if opt["p2p"] == HybridP2PMode { - continue - } - } - if opts.MaxNetworkSize > 0 && len(manifest.Nodes) >= opts.MaxNetworkSize { continue } @@ -120,20 +101,9 @@ type Options struct { MaxNetworkSize int NumGroups int Directory string - P2P P2PMode Reverse bool } -type P2PMode string - -const ( - NewP2PMode P2PMode = "new" - LegacyP2PMode P2PMode = "legacy" - HybridP2PMode P2PMode = "hybrid" - // mixed means that all combination are generated - MixedP2PMode P2PMode = "mixed" -) - // generateTestnet generates a single testnet with the given options. 
func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, error) { manifest := e2e.Manifest{ @@ -155,13 +125,6 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er ChainLockUpdates: map[string]int64{}, } - p2pMode := opt["p2p"].(P2PMode) - switch p2pMode { - case NewP2PMode, LegacyP2PMode, HybridP2PMode: - default: - return manifest, fmt.Errorf("unknown p2p mode %s", p2pMode) - } - topology, ok := topologies[opt["topology"].(string)] if !ok { return manifest, fmt.Errorf("unknown topology %q", opt["topology"]) @@ -220,6 +183,7 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er nextStartAt += 5 } node := generateNode(r, manifest, e2e.ModeFull, startAt, false) + manifest.Nodes[fmt.Sprintf("full%02d", i)] = node } @@ -281,13 +245,9 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er // choose one of the seeds manifest.Nodes[name].Seeds = uniformSetChoice(seedNames).Choose(r) - } else if i > 0 { + } else if i > 1 && r.Float64() >= 0.5 { peers := uniformSetChoice(peerNames[:i]) - if manifest.Nodes[name].StateSync == e2e.StateSyncP2P { - manifest.Nodes[name].PersistentPeers = peers.ChooseAtLeast(r, 2) - } else { - manifest.Nodes[name].PersistentPeers = peers.Choose(r) - } + manifest.Nodes[name].PersistentPeers = peers.ChooseAtLeast(r, 2) } } @@ -319,8 +279,6 @@ func generateNode( StartAt: startAt, Database: nodeDatabases.Choose(r), PrivvalProtocol: nodePrivvalProtocols.Choose(r), - BlockSync: nodeBlockSyncs.Choose(r).(string), - Mempool: nodeMempools.Choose(r).(string), StateSync: e2e.StateSyncDisabled, PersistInterval: ptrUint64(uint64(nodePersistIntervals.Choose(r).(int))), SnapshotInterval: uint64(nodeSnapshotIntervals.Choose(r).(int)), @@ -367,10 +325,6 @@ func generateNode( } } - if node.StateSync != e2e.StateSyncDisabled { - node.BlockSync = "v0" - } - return &node } diff --git a/test/e2e/generator/generate_test.go b/test/e2e/generator/generate_test.go index fabe387a02..1edfa5fba3 100644 --- a/test/e2e/generator/generate_test.go +++ b/test/e2e/generator/generate_test.go @@ -5,15 +5,15 @@ import ( "math/rand" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + e2e "github.com/tendermint/tendermint/test/e2e/pkg" ) func TestGenerator(t *testing.T) { - manifests, err := Generate(rand.New(rand.NewSource(randomSeed)), Options{P2P: MixedP2PMode}) + manifests, err := Generate(rand.New(rand.NewSource(randomSeed)), Options{}) require.NoError(t, err) - require.True(t, len(manifests) >= 64, "insufficient combinations") + require.True(t, len(manifests) >= 24, "insufficient combinations %d", len(manifests)) // this just means that the numbers reported by the test // failures map to the test cases that you'd see locally. 
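The minimum-combination assertion above follows from how the generator works: it emits one manifest per element of the Cartesian product of the `testnetCombinations` option lists. A rough sketch of that expansion (an illustrative helper, not the repository's implementation):

```go
package main

import (
	"fmt"
	"sort"
)

// combinations expands a map of option lists into the Cartesian product of
// all choices; the generator builds one testnet manifest per combination.
func combinations(items map[string][]interface{}) []map[string]interface{} {
	keys := make([]string, 0, len(items))
	for k := range items {
		keys = append(keys, k)
	}
	sort.Strings(keys) // deterministic expansion order

	result := []map[string]interface{}{{}}
	for _, k := range keys {
		var next []map[string]interface{}
		for _, base := range result {
			for _, v := range items[k] {
				combo := make(map[string]interface{}, len(base)+1)
				for bk, bv := range base {
					combo[bk] = bv
				}
				combo[k] = v
				next = append(next, combo)
			}
		}
		result = next
	}
	return result
}

func main() {
	opts := map[string][]interface{}{
		"topology":      {"single", "quad", "large"},
		"initialHeight": {0, 1000},
	}
	fmt.Println(len(combinations(opts))) // 3 * 2 = 6
}
```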
@@ -23,44 +23,23 @@ func TestGenerator(t *testing.T) { t.Run(fmt.Sprintf("Case%04d", idx), func(t *testing.T) { for name, node := range m.Nodes { t.Run(name, func(t *testing.T) { - if node.StartAt > m.InitialHeight+5 && !node.Stateless() { - require.NotEqual(t, node.StateSync, e2e.StateSyncDisabled) - } - if node.StateSync != e2e.StateSyncDisabled { - require.Zero(t, node.Seeds, node.StateSync) - require.True(t, len(node.PersistentPeers) >= 2) - require.Equal(t, "v0", node.BlockSync) + t.Run("StateSync", func(t *testing.T) { + if node.StartAt > m.InitialHeight+5 && !node.Stateless() { + require.NotEqual(t, node.StateSync, e2e.StateSyncDisabled) + } + if node.StateSync != e2e.StateSyncDisabled { + require.Zero(t, node.Seeds, node.StateSync) + require.True(t, len(node.PersistentPeers) >= 2 || len(node.PersistentPeers) == 0, + "peers: %v", node.PersistentPeers) + } + }) + if e2e.Mode(node.Mode) != e2e.ModeLight { + t.Run("PrivvalProtocol", func(t *testing.T) { + require.NotZero(t, node.PrivvalProtocol) + }) } - }) } }) } - - t.Run("Hybrid", func(t *testing.T) { - manifests, err := Generate(rand.New(rand.NewSource(randomSeed)), Options{P2P: HybridP2PMode}) - require.NoError(t, err) - require.True(t, len(manifests) >= 16, "insufficient combinations: %d", len(manifests)) - - // failures map to the test cases that you'd see locally. - e2e.SortManifests(manifests, false /* ascending */) - - for idx, m := range manifests { - t.Run(fmt.Sprintf("Case%04d", idx), func(t *testing.T) { - require.True(t, len(m.Nodes) > 1) - - var numLegacy, numNew int - for _, node := range m.Nodes { - if node.UseLegacyP2P { - numLegacy++ - } else { - numNew++ - } - } - - assert.True(t, numNew >= 1, "not enough new nodes [%d/%d]", - numNew, len(m.Nodes)) - }) - } - }) } diff --git a/test/e2e/generator/main.go b/test/e2e/generator/main.go index 68a089ed77..210a5e12d1 100644 --- a/test/e2e/generator/main.go +++ b/test/e2e/generator/main.go @@ -2,7 +2,9 @@ package main import ( + "context" "fmt" + stdlog "log" "math/rand" "os" "path/filepath" @@ -17,41 +19,41 @@ const ( randomSeed int64 = 4827085738 ) -var logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false) - func main() { - NewCLI().Run() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cli, err := NewCLI() + if err != nil { + stdlog.Fatal(err) + } + + cli.Run(ctx) } // CLI is the Cobra-based command-line interface. type CLI struct { - root *cobra.Command - opts Options + root *cobra.Command + opts Options + logger log.Logger } // NewCLI sets up the CLI. 
-func NewCLI() *CLI { - cli := &CLI{} +func NewCLI() (*CLI, error) { + logger, err := log.NewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo) + if err != nil { + return nil, err + } + + cli := &CLI{ + logger: logger, + } cli.root = &cobra.Command{ Use: "generator", Short: "End-to-end testnet generator", SilenceUsage: true, SilenceErrors: true, // we'll output them ourselves in Run() RunE: func(cmd *cobra.Command, args []string) error { - var err error - - p2pMode, err := cmd.Flags().GetString("p2p") - if err != nil { - return err - } - - switch mode := P2PMode(p2pMode); mode { - case NewP2PMode, LegacyP2PMode, HybridP2PMode, MixedP2PMode: - cli.opts.P2P = mode - default: - return fmt.Errorf("p2p mode must be either new, legacy, hybrid or mixed got %s", p2pMode) - } - configPreset, err := cmd.Flags().GetString("preset") if err != nil { return err @@ -60,7 +62,6 @@ func NewCLI() *CLI { if err != nil { return err } - return cli.generate() }, } @@ -69,8 +70,6 @@ func NewCLI() *CLI { _ = cli.root.MarkPersistentFlagRequired("dir") cli.root.Flags().BoolVarP(&cli.opts.Reverse, "reverse", "r", false, "Reverse sort order") cli.root.PersistentFlags().IntVarP(&cli.opts.NumGroups, "groups", "g", 0, "Number of groups") - cli.root.PersistentFlags().StringP("p2p", "p", string(MixedP2PMode), - "P2P typology to be generated [\"new\", \"legacy\", \"hybrid\" or \"mixed\" ]") cli.root.PersistentFlags().IntVarP(&cli.opts.MinNetworkSize, "min-size", "", 1, "Minimum network size (nodes)") cli.root.PersistentFlags().IntVarP(&cli.opts.MaxNetworkSize, "max-size", "", 0, @@ -78,7 +77,7 @@ func NewCLI() *CLI { cli.root.PersistentFlags().StringP("preset", "", "default", "Config preset, by default is used \"default\"") - return cli + return cli, nil } // generate generates manifests in a directory. @@ -117,9 +116,9 @@ func (cli *CLI) generate() error { } // Run runs the CLI. -func (cli *CLI) Run() { - if err := cli.root.Execute(); err != nil { - logger.Error(err.Error()) +func (cli *CLI) Run(ctx context.Context) { + if err := cli.root.ExecuteContext(ctx); err != nil { + cli.logger.Error(err.Error()) os.Exit(1) } } diff --git a/test/e2e/networks/ci.toml b/test/e2e/networks/ci.toml index cf5691f22b..38ada84b2b 100644 --- a/test/e2e/networks/ci.toml +++ b/test/e2e/networks/ci.toml @@ -1,7 +1,6 @@ # This testnet is run by CI, and attempts to cover a broad range of # functionality with a single network. 
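+# The per-node settings below deliberately vary databases, ABCI protocols,
+# sync modes, and perturbations so a single network exercises many code paths.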
-disable_legacy_p2p = false evidence = 5 initial_height = 1000 initial_state = { initial01 = "a", initial02 = "b", initial03 = "c" } @@ -37,11 +36,12 @@ seeds = ["seed01"] [node.validator01] seeds = ["seed01"] snapshot_interval = 5 +block_sync = "v0" perturb = ["disconnect"] [node.validator02] abci_protocol = "tcp" -database = "boltdb" +database = "cleveldb" persist_interval = 0 perturb = ["restart"] privval_protocol = "tcp" @@ -65,21 +65,21 @@ database = "rocksdb" persistent_peers = ["validator01"] perturb = ["pause"] block_sync = "v0" +privval_protocol = "tcp" [node.validator05] -database = "cleveldb" block_sync = "v0" +database = "badgerdb" state_sync = "p2p" -seeds = ["seed01"] start_at = 1005 # Becomes part of the validator set at 1010 -abci_protocol = "grpc" +abci_protocol = "builtin" perturb = ["pause", "disconnect", "restart"] -privval_protocol = "tcp" [node.full01] mode = "full" start_at = 1010 block_sync = "v0" +database = "boltdb" persistent_peers = ["validator01", "validator02", "validator03", "validator04"] perturb = ["restart"] retain_blocks = 10 diff --git a/test/e2e/node/main.go b/test/e2e/node/main.go index 5fb1fc4748..96cdc414c6 100644 --- a/test/e2e/node/main.go +++ b/test/e2e/node/main.go @@ -11,16 +11,15 @@ import ( "strings" "time" + "github.com/dashevo/dashd-go/btcjson" "github.com/spf13/viper" "google.golang.org/grpc" - "github.com/dashevo/dashd-go/btcjson" - abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/server" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto/ed25519" - dashcore "github.com/tendermint/tendermint/dashcore/rpc" + dashcore "github.com/tendermint/tendermint/dash/core" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" @@ -38,8 +37,6 @@ import ( "github.com/tendermint/tendermint/test/e2e/pkg/mockcoreserver" ) -var logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false) - var ( tmhome string tmcfg *config.Config @@ -60,6 +57,9 @@ func init() { // main is the binary entrypoint. func main() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + if len(os.Args) != 2 { fmt.Printf("Usage: %v ", os.Args[0]) return @@ -69,88 +69,116 @@ func main() { configFile = os.Args[1] } - if err := run(configFile); err != nil { - logger.Error(err.Error()) + if err := run(ctx, configFile); err != nil { + _, _ = fmt.Fprintln(os.Stderr, "ERROR:", err) os.Exit(1) } } // run runs the application - basically like main() with error handling. -func run(configFile string) error { +func run(ctx context.Context, configFile string) error { cfg, err := LoadConfig(configFile) if err != nil { return err } + logger, err := log.NewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo) + if err != nil { + return err + } + // Start remote signer (must start before node if running builtin). 
if cfg.PrivValServer != "" { - if cfg.PrivValServerType == "dashcore" { - fmt.Printf("Starting mock core server at address %v\n", cfg.PrivValServer) - // Start mock core-server - coreSrv, err := setupCoreServer(cfg) - if err != nil { - return fmt.Errorf("unable to setup mock core server: %w", err) - } - go func() { - coreSrv.Start() - }() - - dashCoreRPCClient, err = dashcore.NewRPCClient( - cfg.PrivValServer, - tmcfg.PrivValidator.CoreRPCUsername, - tmcfg.PrivValidator.CoreRPCPassword, - logger.With("module", dashcore.ModuleName), - ) - if err != nil { - return fmt.Errorf("connection to Dash Core RPC failed: %w", err) - } - } else { - if err = startSigner(cfg); err != nil { - return err - } - if cfg.Protocol == "builtin" { - time.Sleep(1 * time.Second) - } + err := startRemoteSigner(ctx, cfg, logger) + if err != nil { + return err } } // Start app server. + err = startAppServer(ctx, cfg, logger) + if err != nil { + logger.Error("starting node", + "protocol", cfg.Protocol, + "mode", cfg.Mode, + "err", err) + return err + } + + // Apparently there's no way to wait for the server, so we just sleep + for { + time.Sleep(1 * time.Hour) + } +} + +func startAppServer(ctx context.Context, cfg *Config, logger log.Logger) error { switch cfg.Protocol { case "socket", "grpc": - err = startApp(cfg) + return startApp(ctx, logger, cfg) case "builtin": switch cfg.Mode { case string(e2e.ModeLight): - err = startLightNode(cfg) + return startLightNode(ctx, logger, cfg) case string(e2e.ModeSeed): - err = startSeedNode(cfg) + return startSeedNode(ctx) default: - err = startNode(cfg) + return startNode(ctx, cfg) } - default: - err = fmt.Errorf("invalid protocol %q", cfg.Protocol) } + return fmt.Errorf("invalid protocol %q", cfg.Protocol) +} + +func startMockCoreSrv(cfg *Config, logger log.Logger) error { + fmt.Printf("Starting mock core server at address %v\n", cfg.PrivValServer) + // Start mock core-server + coreSrv, err := setupCoreServer(cfg) if err != nil { - return err + return fmt.Errorf("unable to setup mock core server: %w", err) } + go func() { + coreSrv.Start() + }() - // Apparently there's no way to wait for the server, so we just sleep - for { - time.Sleep(1 * time.Hour) + dashCoreRPCClient, err = dashcore.NewRPCClient( + cfg.PrivValServer, + tmcfg.PrivValidator.CoreRPCUsername, + tmcfg.PrivValidator.CoreRPCPassword, + logger.With("module", dashcore.ModuleName), + ) + if err != nil { + return fmt.Errorf("connection to Dash Core RPC failed: %w", err) } + return nil +} + +func startRemoteSigner(ctx context.Context, cfg *Config, logger log.Logger) error { + if cfg.PrivValServerType == "dashcore" { + return startMockCoreSrv(cfg, logger) + } + err := startSigner(ctx, logger, cfg) + if err != nil { + logger.Error("starting signer", + "server", cfg.PrivValServer, + "err", err) + return err + } + if cfg.Protocol == "builtin" { + time.Sleep(1 * time.Second) + } + return nil } // startApp starts the application server, listening for connections from Tendermint. -func startApp(cfg *Config) error { +func startApp(ctx context.Context, logger log.Logger, cfg *Config) error { app, err := app.NewApplication(cfg.App()) if err != nil { return err } - server, err := server.NewServer(cfg.Listen, cfg.Protocol, app) + srv, err := server.NewServer(logger, cfg.Listen, cfg.Protocol, app) if err != nil { return err } - err = server.Start() + err = srv.Start(ctx) if err != nil { return err } @@ -162,7 +190,7 @@ func startApp(cfg *Config) error { // configuration is in $TMHOME/config/tenderdash.toml. 
// // FIXME There is no way to simply load the configuration from a file, so we need to pull in Viper. -func startNode(cfg *Config) error { +func startNode(ctx context.Context, cfg *Config) error { app, err := app.NewApplication(cfg.App()) if err != nil { return err @@ -173,19 +201,20 @@ func startNode(cfg *Config) error { return fmt.Errorf("failed to setup config: %w", err) } - n, err := node.New(tmcfg, + n, err := node.New( + ctx, + tmcfg, nodeLogger, - abciclient.NewLocalCreator(app), + abciclient.NewLocalClient(nodeLogger, app), nil, - dashCoreRPCClient, ) if err != nil { return err } - return n.Start() + return n.Start(ctx) } -func startSeedNode(cfg *Config) error { +func startSeedNode(ctx context.Context) error { tmcfg, nodeLogger, err := setupNode() if err != nil { return fmt.Errorf("failed to setup config: %w", err) @@ -193,14 +222,14 @@ func startSeedNode(cfg *Config) error { tmcfg.Mode = config.ModeSeed - n, err := node.New(tmcfg, nodeLogger, nil, nil, dashCoreRPCClient) + n, err := node.New(ctx, tmcfg, nodeLogger, nil, nil) if err != nil { return err } - return n.Start() + return n.Start(ctx) } -func startLightNode(cfg *Config) error { +func startLightNode(ctx context.Context, logger log.Logger, cfg *Config) error { tmcfg, nodeLogger, err := setupNode() if err != nil { return err @@ -215,7 +244,7 @@ func startLightNode(cfg *Config) error { providers := rpcEndpoints(tmcfg.P2P.PersistentPeers) c, err := light.NewHTTPClient( - context.Background(), + ctx, cfg.ChainID, providers[0], providers[1:], @@ -245,7 +274,7 @@ func startLightNode(cfg *Config) error { } logger.Info("Starting proxy...", "laddr", tmcfg.RPC.ListenAddress) - if err := p.ListenAndServe(); err != http.ErrServerClosed { + if err := p.ListenAndServe(ctx); err != http.ErrServerClosed { // Error starting or closing listener: logger.Error("proxy ListenAndServe", "err", err) } @@ -254,7 +283,7 @@ func startLightNode(cfg *Config) error { } // startSigner starts a signer server connecting to the given endpoint. 
-func startSigner(cfg *Config) error {
+func startSigner(ctx context.Context, logger log.Logger, cfg *Config) error {
 	filePV, err := privval.LoadFilePV(cfg.PrivValKey, cfg.PrivValState)
 	if err != nil {
 		return err
@@ -272,7 +301,7 @@
 		if err != nil {
 			return err
 		}
-		ss := grpcprivval.NewSignerServer(cfg.ChainID, filePV, logger)
+		ss := grpcprivval.NewSignerServer(logger, cfg.ChainID, filePV)
 
 		s := grpc.NewServer()
 
@@ -282,6 +311,10 @@
-		if err := s.Serve(lis); err != nil {
-			panic(err)
-		}
+		go func() {
+			<-ctx.Done()
+			s.GracefulStop()
+		}()
+		if err := s.Serve(lis); err != nil {
+			panic(err)
+		}
 	}()
 
 	return nil
@@ -292,7 +325,8 @@
 	endpoint := privval.NewSignerDialerEndpoint(logger, dialFn,
 		privval.SignerDialerEndpointRetryWaitInterval(1*time.Second),
 		privval.SignerDialerEndpointConnRetries(100))
-	err = privval.NewSignerServer(endpoint, cfg.ChainID, filePV).Start()
+
+	err = privval.NewSignerServer(endpoint, cfg.ChainID, filePV).Start(ctx)
 	if err != nil {
 		return err
 	}
@@ -328,7 +362,7 @@ func setupNode() (*config.Config, log.Logger, error) {
 		return nil, nil, fmt.Errorf("error in config file: %w", err)
 	}
 
-	nodeLogger, err := log.NewDefaultLogger(tmcfg.LogFormat, tmcfg.LogLevel, false)
+	nodeLogger, err := log.NewDefaultLogger(tmcfg.LogFormat, tmcfg.LogLevel)
 	if err != nil {
 		return nil, nil, err
 	}
diff --git a/test/e2e/pkg/manifest.go b/test/e2e/pkg/manifest.go
index cdf5a261cb..c44108edc3 100644
--- a/test/e2e/pkg/manifest.go
+++ b/test/e2e/pkg/manifest.go
@@ -126,10 +126,6 @@ type ManifestNode struct {
 	// runner will wait for the network to reach at least this block height.
 	StartAt int64 `toml:"start_at"`
 
-	// BlockSync specifies the block sync mode: "" (disable), "v0" or "v2".
-	// Defaults to disabled.
-	BlockSync string `toml:"block_sync"`
-
 	// Mempool specifies which version of mempool to use. Either "v0" or "v1"
 	Mempool string `toml:"mempool_version"`
 
@@ -167,9 +163,6 @@
 	// This is helpful when debugging a specific problem. This overrides the network
 	// level.
 	LogLevel string `toml:"log_level"`
-
-	// UseLegacyP2P enables use of the legacy p2p layer for this node.
-	UseLegacyP2P bool `toml:"use_legacy_p2p"`
 }
 
 // Stateless reports whether m is a node that does not own state, including light and seed nodes.
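The gRPC signer above ties its lifetime to the runner's context: a watcher goroutine calls `GracefulStop` once the context is canceled, and (as reordered above) it must start before the blocking `Serve` call or it would never run. A self-contained sketch of the pattern:

```go
package main

import (
	"context"
	"net"
	"time"

	"google.golang.org/grpc"
)

func main() {
	// Simulate the runner's context being canceled after one second.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	lis, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	s := grpc.NewServer()

	// The watcher must start before Serve: Serve blocks until the server
	// stops, so a goroutine launched after it would never be reached.
	go func() {
		<-ctx.Done()
		s.GracefulStop()
	}()

	// Serve returns nil once GracefulStop completes.
	if err := s.Serve(lis); err != nil {
		panic(err)
	}
}
```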
diff --git a/test/e2e/pkg/mockcoreserver/server_test.go b/test/e2e/pkg/mockcoreserver/server_test.go index 517b3b313e..2ce57208cd 100644 --- a/test/e2e/pkg/mockcoreserver/server_test.go +++ b/test/e2e/pkg/mockcoreserver/server_test.go @@ -14,7 +14,7 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/bls12381" - dashcore "github.com/tendermint/tendermint/dashcore/rpc" + dashcore "github.com/tendermint/tendermint/dash/core" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/privval" ) @@ -22,7 +22,7 @@ import ( func TestServer(t *testing.T) { ctx := context.Background() srv := NewHTTPServer(":9981") - logger := log.TestingLogger() + logger := log.NewTestingLogger(t) go func() { srv.Start() }() @@ -80,7 +80,7 @@ func TestDashCoreSignerPingMethod(t *testing.T) { go func() { srv.Start() }() - logger := log.TestingLogger() + logger := log.NewTestingLogger(t) dashCoreRPCClient, err := dashcore.NewRPCClient(addr, "root", "root", logger) assert.NoError(t, err) client, err := privval.NewDashCoreSignerClient(dashCoreRPCClient, btcjson.LLMQType_5_60) @@ -127,7 +127,7 @@ func TestGetPubKey(t *testing.T) { srv.Start() }() - logger := log.TestingLogger() + logger := log.NewTestingLogger(t) dashCoreRPCClient, err := dashcore.NewRPCClient(addr, "root", "root", logger) assert.NoError(t, err) client, err := privval.NewDashCoreSignerClient(dashCoreRPCClient, btcjson.LLMQType_5_60) diff --git a/test/e2e/pkg/testnet.go b/test/e2e/pkg/testnet.go index 2dd290b502..e873e50ffa 100644 --- a/test/e2e/pkg/testnet.go +++ b/test/e2e/pkg/testnet.go @@ -110,7 +110,6 @@ type Node struct { IP net.IP ProxyPort uint32 StartAt int64 - BlockSync string Mempool string StateSync string Database string @@ -123,7 +122,6 @@ type Node struct { PersistentPeers []*Node Perturbations []Perturbation LogLevel string - UseLegacyP2P bool QueueType string HasStarted bool } @@ -244,7 +242,6 @@ func LoadTestnet(file string) (*Testnet, error) { ABCIProtocol: Protocol(testnet.ABCIProtocol), PrivvalProtocol: ProtocolFile, StartAt: nodeManifest.StartAt, - BlockSync: nodeManifest.BlockSync, Mempool: nodeManifest.Mempool, StateSync: nodeManifest.StateSync, PersistInterval: 1, @@ -253,7 +250,6 @@ func LoadTestnet(file string) (*Testnet, error) { Perturbations: []Perturbation{}, LogLevel: manifest.LogLevel, QueueType: manifest.QueueType, - UseLegacyP2P: nodeManifest.UseLegacyP2P, } if node.StartAt == testnet.InitialHeight { node.StartAt = 0 // normalize to 0 for initial nodes, since code expects this @@ -481,11 +477,6 @@ func (n Node) Validate(testnet Testnet) error { return fmt.Errorf("validator %s must have a proTxHash of size 32 (%d)", n.Name, len(n.ProTxHash)) } } - switch n.BlockSync { - case "", "v0", "v2": - default: - return fmt.Errorf("invalid block sync setting %q", n.BlockSync) - } switch n.StateSync { case StateSyncDisabled, StateSyncP2P, StateSyncRPC: default: @@ -497,7 +488,7 @@ func (n Node) Validate(testnet Testnet) error { return fmt.Errorf("invalid mempool version %q", n.Mempool) } switch n.QueueType { - case "", "priority", "wdrr", "fifo": + case "", "priority", "fifo": default: return fmt.Errorf("unsupported p2p queue type: %s", n.QueueType) } diff --git a/test/e2e/runner/benchmark.go b/test/e2e/runner/benchmark.go index 50a2c33f93..91c748ff73 100644 --- a/test/e2e/runner/benchmark.go +++ b/test/e2e/runner/benchmark.go @@ -8,6 +8,7 @@ import ( "path/filepath" "time" + "github.com/tendermint/tendermint/libs/log" e2e "github.com/tendermint/tendermint/test/e2e/pkg" 
"github.com/tendermint/tendermint/types" ) @@ -21,7 +22,7 @@ import ( // // Metrics are based of the `benchmarkLength`, the amount of consecutive blocks // sampled from in the testnet -func Benchmark(ctx context.Context, testnet *e2e.Testnet, benchmarkLength int64) error { +func Benchmark(ctx context.Context, logger log.Logger, testnet *e2e.Testnet, benchmarkLength int64) error { block, err := getLatestBlock(ctx, testnet) if err != nil { return err @@ -43,7 +44,7 @@ func Benchmark(ctx context.Context, testnet *e2e.Testnet, benchmarkLength int64) logger.Info("Ending benchmark period", "height", block.Height) // fetch a sample of blocks - blocks, err := fetchBlockChainSample(testnet, benchmarkLength) + blocks, err := fetchBlockChainSample(ctx, testnet, benchmarkLength) if err != nil { return err } @@ -128,7 +129,7 @@ func (t *testnetStats) String() string { // fetchBlockChainSample waits for `benchmarkLength` amount of blocks to pass, fetching // all of the headers for these blocks from an archive node and returning it. -func fetchBlockChainSample(testnet *e2e.Testnet, benchmarkLength int64) ([]*types.BlockMeta, error) { +func fetchBlockChainSample(ctx context.Context, testnet *e2e.Testnet, benchmarkLength int64) ([]*types.BlockMeta, error) { var blocks []*types.BlockMeta // Find the first archive node @@ -139,7 +140,6 @@ func fetchBlockChainSample(testnet *e2e.Testnet, benchmarkLength int64) ([]*type } // find the latest height - ctx := context.Background() s, err := c.Status(ctx) if err != nil { return nil, err diff --git a/test/e2e/runner/cleanup.go b/test/e2e/runner/cleanup.go index 98ce618b2a..1c23e3fef6 100644 --- a/test/e2e/runner/cleanup.go +++ b/test/e2e/runner/cleanup.go @@ -6,21 +6,22 @@ import ( "os" "path/filepath" + "github.com/tendermint/tendermint/libs/log" e2e "github.com/tendermint/tendermint/test/e2e/pkg" ) // Cleanup removes the Docker Compose containers and testnet directory. -func Cleanup(testnet *e2e.Testnet) error { - err := cleanupDocker() +func Cleanup(logger log.Logger, testnet *e2e.Testnet) error { + err := cleanupDocker(logger) if err != nil { return err } - return cleanupDir(testnet.Dir) + return cleanupDir(logger, testnet.Dir) } // cleanupDocker removes all E2E resources (with label e2e=True), regardless // of testnet. 
-func cleanupDocker() error { +func cleanupDocker(logger log.Logger) error { logger.Info("Removing Docker containers and networks") // GNU xargs requires the -r flag to not run when input is empty, macOS @@ -38,7 +39,7 @@ func cleanupDocker() error { } // cleanupDir cleans up a testnet directory -func cleanupDir(dir string) error { +func cleanupDir(logger log.Logger, dir string) error { if dir == "" { return errors.New("no directory set") } diff --git a/test/e2e/runner/evidence.go b/test/e2e/runner/evidence.go index 02a7af0de3..afe18d8e6e 100644 --- a/test/e2e/runner/evidence.go +++ b/test/e2e/runner/evidence.go @@ -2,17 +2,17 @@ package main import ( "context" + "encoding/json" "errors" "fmt" - "io/ioutil" "math/rand" + "os" "path/filepath" "time" "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/crypto/tmhash" "github.com/tendermint/tendermint/internal/test/factory" - tmjson "github.com/tendermint/tendermint/libs/json" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/privval" e2e "github.com/tendermint/tendermint/test/e2e/pkg" "github.com/tendermint/tendermint/types" @@ -21,19 +21,15 @@ import ( // InjectEvidence takes a running testnet and generates an amount of valid // evidence and broadcasts it to a random node through the rpc endpoint `/broadcast_evidence`. // Evidence is random and can be a mixture of DuplicateVoteEvidence. -func InjectEvidence(ctx context.Context, r *rand.Rand, testnet *e2e.Testnet, amount int) error { +func InjectEvidence(ctx context.Context, logger log.Logger, r *rand.Rand, testnet *e2e.Testnet, amount int) error { // select a random node var targetNode *e2e.Node for _, idx := range r.Perm(len(testnet.Nodes)) { - targetNode = testnet.Nodes[idx] - - if targetNode.Mode == e2e.ModeSeed || targetNode.Mode == e2e.ModeLight { - targetNode = nil - continue + if !testnet.Nodes[idx].Stateless() { + targetNode = testnet.Nodes[idx] + break } - - break } if targetNode == nil { @@ -48,16 +44,15 @@ func InjectEvidence(ctx context.Context, r *rand.Rand, testnet *e2e.Testnet, amo } // request the latest block and validator set from the node - blockRes, err := client.Block(context.Background(), nil) + blockRes, err := client.Block(ctx, nil) if err != nil { return err } - evidenceHeight := blockRes.Block.Height - waitHeight := blockRes.Block.Height + 3 + evidenceHeight := blockRes.Block.Height - 3 nValidators := 100 requestQuorumInfo := true - valRes, err := client.Validators(context.Background(), &evidenceHeight, nil, &nValidators, &requestQuorumInfo) + valRes, err := client.Validators(ctx, &evidenceHeight, nil, &nValidators, &requestQuorumInfo) if err != nil { return err } @@ -81,43 +76,43 @@ func InjectEvidence(ctx context.Context, r *rand.Rand, testnet *e2e.Testnet, amo return err } - wctx, cancel := context.WithTimeout(ctx, time.Minute) - defer cancel() - - // wait for the node to reach the height above the forged height so that - // it is able to validate the evidence - _, err = waitForNode(wctx, targetNode, waitHeight) + // request the latest block and validator set from the node + blockRes, err = client.Block(ctx, &evidenceHeight) if err != nil { return err } var ev types.Evidence for i := 1; i <= amount; i++ { - ev, err = generateDuplicateVoteEvidence( + ev, err = generateDuplicateVoteEvidence(ctx, privVals, evidenceHeight, valSet, testnet.Name, blockRes.Block.Time, ) if err != nil { return err } - _, err := client.BroadcastEvidence(context.Background(), ev) + _, err := client.BroadcastEvidence(ctx, ev) if err != 
nil { return err } } - wctx, cancel = context.WithTimeout(ctx, 30*time.Second) + logger.Info("Finished sending evidence", + "node", testnet.Name, + "amount", amount, + "height", evidenceHeight, + ) + + wctx, cancel := context.WithTimeout(ctx, time.Minute) defer cancel() - // wait for the node to reach the height above the forged height so that - // it is able to validate the evidence - _, err = waitForNode(wctx, targetNode, blockRes.Block.Height+2) + // wait for the node to make progress after submitting + // evidence (3 (forged height) + 1 (progress)) + _, err = waitForNode(wctx, logger, targetNode, evidenceHeight+4) if err != nil { return err } - logger.Info(fmt.Sprintf("Finished sending evidence (height %d)", blockRes.Block.Height+2)) - return nil } @@ -153,6 +148,7 @@ func getPrivateValidatorKeys(testnet *e2e.Testnet, thresholdPublicKey crypto.Pub // generateDuplicateVoteEvidence picks a random validator from the val set and // returns duplicate vote evidence against the validator func generateDuplicateVoteEvidence( + ctx context.Context, privVals []*types.MockPV, height int64, vals *types.ValidatorSet, @@ -164,11 +160,11 @@ func generateDuplicateVoteEvidence( return nil, err } stateID := types.RandStateID() - voteA, err := factory.MakeVote(privVal, vals, chainID, valIdx, height, 0, 2, makeRandomBlockID(), stateID) + voteA, err := factory.MakeVote(ctx, privVal, vals, chainID, valIdx, height, 0, 2, makeRandomBlockID(), stateID) if err != nil { return nil, err } - voteB, err := factory.MakeVote(privVal, vals, chainID, valIdx, height, 0, 2, makeRandomBlockID(), stateID) + voteB, err := factory.MakeVote(ctx, privVal, vals, chainID, valIdx, height, 0, 2, makeRandomBlockID(), stateID) if err != nil { return nil, err } @@ -194,12 +190,12 @@ func getRandomValidatorIndex(privVals []*types.MockPV, vals *types.ValidatorSet) } func readPrivKey(keyFilePath string, quorumHash crypto.QuorumHash) (crypto.PrivKey, error) { - keyJSONBytes, err := ioutil.ReadFile(keyFilePath) + keyJSONBytes, err := os.ReadFile(keyFilePath) if err != nil { return nil, err } pvKey := privval.FilePVKey{} - err = tmjson.Unmarshal(keyJSONBytes, &pvKey) + err = json.Unmarshal(keyJSONBytes, &pvKey) if err != nil { return nil, fmt.Errorf("error reading PrivValidator key from %v: %w", keyFilePath, err) } @@ -207,13 +203,13 @@ func readPrivKey(keyFilePath string, quorumHash crypto.QuorumHash) (crypto.PrivK } func makeRandomBlockID() types.BlockID { - return makeBlockID(crypto.CRandBytes(tmhash.Size), 100, crypto.CRandBytes(tmhash.Size)) + return makeBlockID(crypto.CRandBytes(crypto.HashSize), 100, crypto.CRandBytes(crypto.HashSize)) } func makeBlockID(hash []byte, partSetSize uint32, partSetHash []byte) types.BlockID { var ( - h = make([]byte, tmhash.Size) - psH = make([]byte, tmhash.Size) + h = make([]byte, crypto.HashSize) + psH = make([]byte, crypto.HashSize) ) copy(h, hash) copy(psH, partSetHash) diff --git a/test/e2e/runner/load.go b/test/e2e/runner/load.go index 674972d545..eac2d682af 100644 --- a/test/e2e/runner/load.go +++ b/test/e2e/runner/load.go @@ -7,6 +7,7 @@ import ( "math/rand" "time" + "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" rpchttp "github.com/tendermint/tendermint/rpc/client/http" e2e "github.com/tendermint/tendermint/test/e2e/pkg" @@ -15,7 +16,7 @@ import ( // Load generates transactions against the network until the given context is // canceled. 
-func Load(ctx context.Context, r *rand.Rand, testnet *e2e.Testnet) error { +func Load(ctx context.Context, logger log.Logger, r *rand.Rand, testnet *e2e.Testnet) error { // Since transactions are executed across all nodes in the network, we need // to reduce transaction load for larger networks to avoid using too much // CPU. This gives high-throughput small networks and low-throughput large ones. diff --git a/test/e2e/runner/main.go b/test/e2e/runner/main.go index fb6ce4a8cb..c4a73d33f9 100644 --- a/test/e2e/runner/main.go +++ b/test/e2e/runner/main.go @@ -3,6 +3,7 @@ package main import ( "context" "fmt" + stdlog "log" "math/rand" "os" "strconv" @@ -16,10 +17,16 @@ import ( const randomSeed = 2308084734268 -var logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false) - func main() { - NewCLI().Run() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger, err := log.NewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo) + if err != nil { + stdlog.Fatal(err) + } + + NewCLI(logger).Run(ctx, logger) } // CLI is the Cobra-based command-line interface. @@ -30,7 +37,7 @@ type CLI struct { } // NewCLI sets up the CLI. -func NewCLI() *CLI { +func NewCLI(logger log.Logger) *CLI { cli := &CLI{} cli.root = &cobra.Command{ Use: "runner", @@ -51,7 +58,7 @@ func NewCLI() *CLI { return nil }, RunE: func(cmd *cobra.Command, args []string) (err error) { - if err = Cleanup(cli.testnet); err != nil { + if err = Cleanup(logger, cli.testnet); err != nil { return err } defer func() { @@ -60,11 +67,11 @@ func NewCLI() *CLI { } else if err != nil { logger.Info("Preserving testnet that encountered error", "err", err) - } else if err := Cleanup(cli.testnet); err != nil { - logger.Error("Error cleaning up testnet contents", "err", err) + } else if err := Cleanup(logger, cli.testnet); err != nil { + logger.Error("error cleaning up testnet contents", "err", err) } }() - if err = Setup(cli.testnet); err != nil { + if err = Setup(logger, cli.testnet); err != nil { return err } @@ -77,31 +84,31 @@ func NewCLI() *CLI { lctx, loadCancel := context.WithCancel(ctx) defer loadCancel() go func() { - chLoadResult <- Load(lctx, r, cli.testnet) + chLoadResult <- Load(lctx, logger, r, cli.testnet) }() startAt := time.Now() - if err = Start(ctx, cli.testnet); err != nil { + if err = Start(ctx, logger, cli.testnet); err != nil { return err } - if err = Wait(ctx, cli.testnet, 5); err != nil { // allow some txs to go through + if err = Wait(ctx, logger, cli.testnet, 5); err != nil { // allow some txs to go through return err } if cli.testnet.HasPerturbations() { - if err = Perturb(ctx, cli.testnet); err != nil { + if err = Perturb(ctx, logger, cli.testnet); err != nil { return err } - if err = Wait(ctx, cli.testnet, 5); err != nil { // allow some txs to go through + if err = Wait(ctx, logger, cli.testnet, 5); err != nil { // allow some txs to go through return err } } if cli.testnet.Evidence > 0 { - if err = InjectEvidence(ctx, r, cli.testnet, cli.testnet.Evidence); err != nil { + if err = InjectEvidence(ctx, logger, r, cli.testnet, cli.testnet.Evidence); err != nil { return err } - if err = Wait(ctx, cli.testnet, 5); err != nil { // ensure chain progress + if err = Wait(ctx, logger, cli.testnet, 5); err != nil { // ensure chain progress return err } } @@ -124,7 +131,7 @@ func NewCLI() *CLI { if err = <-chLoadResult; err != nil { return fmt.Errorf("transaction load failed: %w", err) } - if err = Wait(ctx, cli.testnet, 5); err != nil { // wait for network to settle before tests + 
if err = Wait(ctx, logger, cli.testnet, 5); err != nil { // wait for network to settle before tests return err } if err := Test(cli.testnet); err != nil { @@ -149,7 +156,7 @@ func NewCLI() *CLI { Use: "setup", Short: "Generates the testnet directory and configuration", RunE: func(cmd *cobra.Command, args []string) error { - return Setup(cli.testnet) + return Setup(logger, cli.testnet) }, }) @@ -159,12 +166,12 @@ func NewCLI() *CLI { RunE: func(cmd *cobra.Command, args []string) error { _, err := os.Stat(cli.testnet.Dir) if os.IsNotExist(err) { - err = Setup(cli.testnet) + err = Setup(logger, cli.testnet) } if err != nil { return err } - return Start(cmd.Context(), cli.testnet) + return Start(cmd.Context(), logger, cli.testnet) }, }) @@ -172,7 +179,7 @@ func NewCLI() *CLI { Use: "perturb", Short: "Perturbs the Docker testnet, e.g. by restarting or disconnecting nodes", RunE: func(cmd *cobra.Command, args []string) error { - return Perturb(cmd.Context(), cli.testnet) + return Perturb(cmd.Context(), logger, cli.testnet) }, }) @@ -180,7 +187,7 @@ func NewCLI() *CLI { Use: "wait", Short: "Waits for a few blocks to be produced and all nodes to catch up", RunE: func(cmd *cobra.Command, args []string) error { - return Wait(cmd.Context(), cli.testnet, 5) + return Wait(cmd.Context(), logger, cli.testnet, 5) }, }) @@ -217,6 +224,7 @@ func NewCLI() *CLI { RunE: func(cmd *cobra.Command, args []string) (err error) { return Load( cmd.Context(), + logger, rand.New(rand.NewSource(randomSeed)), // nolint: gosec cli.testnet, ) @@ -239,6 +247,7 @@ func NewCLI() *CLI { return InjectEvidence( cmd.Context(), + logger, rand.New(rand.NewSource(randomSeed)), // nolint: gosec cli.testnet, amount, @@ -258,7 +267,7 @@ func NewCLI() *CLI { Use: "cleanup", Short: "Removes the testnet directory", RunE: func(cmd *cobra.Command, args []string) error { - return Cleanup(cli.testnet) + return Cleanup(logger, cli.testnet) }, }) @@ -297,16 +306,16 @@ over a 100 block sampling period. Does not run any perturbations. `, RunE: func(cmd *cobra.Command, args []string) error { - if err := Cleanup(cli.testnet); err != nil { + if err := Cleanup(logger, cli.testnet); err != nil { return err } defer func() { - if err := Cleanup(cli.testnet); err != nil { - logger.Error("Error cleaning up testnet contents", "err", err) + if err := Cleanup(logger, cli.testnet); err != nil { + logger.Error("error cleaning up testnet contents", "err", err) } }() - if err := Setup(cli.testnet); err != nil { + if err := Setup(logger, cli.testnet); err != nil { return err } @@ -319,20 +328,19 @@ Does not run any perturbations. lctx, loadCancel := context.WithCancel(ctx) defer loadCancel() go func() { - err := Load(lctx, r, cli.testnet) - chLoadResult <- err + chLoadResult <- Load(lctx, logger, r, cli.testnet) }() - if err := Start(ctx, cli.testnet); err != nil { + if err := Start(ctx, logger, cli.testnet); err != nil { return err } - if err := Wait(ctx, cli.testnet, 5); err != nil { // allow some txs to go through + if err := Wait(ctx, logger, cli.testnet, 5); err != nil { // allow some txs to go through return err } // we benchmark performance over the next 100 blocks - if err := Benchmark(ctx, cli.testnet, 100); err != nil { + if err := Benchmark(ctx, logger, cli.testnet, 100); err != nil { return err } @@ -349,8 +357,8 @@ Does not run any perturbations. } // Run runs the CLI.
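The command wiring above repeats one pattern: the logger built in main is closed over by every RunE, and cancellation flows through cobra's ExecuteContext so cmd.Context() reaches long-running subcommands. A condensed sketch of that pattern, assuming only the cobra package and the libs/log constructor shown in this diff; the command name and RunE body are illustrative:

```go
package main

import (
	"context"
	stdlog "log"
	"os"

	"github.com/spf13/cobra"
	"github.com/tendermint/tendermint/libs/log"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	logger, err := log.NewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo)
	if err != nil {
		stdlog.Fatal(err)
	}

	root := &cobra.Command{
		Use: "runner",
		RunE: func(cmd *cobra.Command, args []string) error {
			// cmd.Context() is the ctx handed to ExecuteContext below, so
			// cancelling in main propagates into every subcommand.
			logger.Info("invoked", "args", len(args))
			return nil
		},
	}

	if err := root.ExecuteContext(ctx); err != nil {
		logger.Error(err.Error())
		os.Exit(1)
	}
}
```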
-func (cli *CLI) Run() { - if err := cli.root.Execute(); err != nil { +func (cli *CLI) Run(ctx context.Context, logger log.Logger) { + if err := cli.root.ExecuteContext(ctx); err != nil { logger.Error(err.Error()) os.Exit(1) } diff --git a/test/e2e/runner/perturb.go b/test/e2e/runner/perturb.go index ccb3f6c510..acabf7f342 100644 --- a/test/e2e/runner/perturb.go +++ b/test/e2e/runner/perturb.go @@ -5,12 +5,13 @@ import ( "fmt" "time" + "github.com/tendermint/tendermint/libs/log" rpctypes "github.com/tendermint/tendermint/rpc/coretypes" e2e "github.com/tendermint/tendermint/test/e2e/pkg" ) // Perturbs a running testnet. -func Perturb(ctx context.Context, testnet *e2e.Testnet) error { +func Perturb(ctx context.Context, logger log.Logger, testnet *e2e.Testnet) error { timer := time.NewTimer(0) // first tick fires immediately; reset below defer timer.Stop() @@ -20,7 +21,7 @@ func Perturb(ctx context.Context, testnet *e2e.Testnet) error { case <-ctx.Done(): return ctx.Err() case <-timer.C: - _, err := PerturbNode(ctx, node, perturbation) + _, err := PerturbNode(ctx, logger, node, perturbation) if err != nil { return err } @@ -35,7 +36,7 @@ func Perturb(ctx context.Context, testnet *e2e.Testnet) error { // PerturbNode perturbs a node with a given perturbation, returning its status // after recovering. -func PerturbNode(ctx context.Context, node *e2e.Node, perturbation e2e.Perturbation) (*rpctypes.ResultStatus, error) { +func PerturbNode(ctx context.Context, logger log.Logger, node *e2e.Node, perturbation e2e.Perturbation) (*rpctypes.ResultStatus, error) { testnet := node.Testnet switch perturbation { case e2e.PerturbationDisconnect: @@ -90,7 +91,7 @@ func PerturbNode(ctx context.Context, node *e2e.Node, perturbation e2e.Perturbat ctx, cancel := context.WithTimeout(ctx, 5*time.Minute) defer cancel() - status, err := waitForNode(ctx, node, 0) + status, err := waitForNode(ctx, logger, node, 0) if err != nil { return nil, err } diff --git a/test/e2e/runner/rpc.go b/test/e2e/runner/rpc.go index 98c786b43f..b7a69168f7 100644 --- a/test/e2e/runner/rpc.go +++ b/test/e2e/runner/rpc.go @@ -6,6 +6,7 @@ import ( "fmt" "time" + "github.com/tendermint/tendermint/libs/log" rpchttp "github.com/tendermint/tendermint/rpc/client/http" rpctypes "github.com/tendermint/tendermint/rpc/coretypes" e2e "github.com/tendermint/tendermint/test/e2e/pkg" @@ -74,9 +75,7 @@ func waitForHeight(ctx context.Context, testnet *e2e.Testnet, height int64) (*ty clients[node.Name] = client } - wctx, cancel := context.WithTimeout(ctx, 2*time.Second) - defer cancel() - result, err := client.Status(wctx) + result, err := client.Status(ctx) if err != nil { continue } @@ -133,7 +132,9 @@ func waitForHeight(ctx context.Context, testnet *e2e.Testnet, height int64) (*ty } // waitForNode waits for a node to become available and catch up to the given block height. -func waitForNode(ctx context.Context, node *e2e.Node, height int64) (*rpctypes.ResultStatus, error) { +func waitForNode(ctx context.Context, logger log.Logger, node *e2e.Node, height int64) (*rpctypes.ResultStatus, error) { + // If the node is the light client or seed node, we do not check for the last height.
+ // The light client and seed node can be behind the full node and validator if node.Mode == e2e.ModeSeed { return nil, nil } @@ -173,7 +174,10 @@ func waitForNode(ctx context.Context, node *e2e.Node, height int64) (*rpctypes.R return nil, fmt.Errorf("timed out waiting for %v to reach height %v", node.Name, height) case errors.Is(err, context.Canceled): return nil, err - case err == nil && status.SyncInfo.LatestBlockHeight >= height: + // If the node is the light client, it is not essential to wait for it to catch up, but we must return status info + case err == nil && node.Mode == e2e.ModeLight: + return status, nil + case err == nil && node.Mode != e2e.ModeLight && status.SyncInfo.LatestBlockHeight >= height: return status, nil case counter%500 == 0: switch { diff --git a/test/e2e/runner/setup.go b/test/e2e/runner/setup.go index 0e48d8f883..241908b8af 100644 --- a/test/e2e/runner/setup.go +++ b/test/e2e/runner/setup.go @@ -8,7 +8,6 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" "os" "path/filepath" "regexp" @@ -25,6 +24,7 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/bls12381" cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/privval" e2e "github.com/tendermint/tendermint/test/e2e/pkg" "github.com/tendermint/tendermint/types" @@ -47,7 +47,7 @@ const ( ) // Setup sets up the testnet configuration. -func Setup(testnet *e2e.Testnet) error { +func Setup(logger log.Logger, testnet *e2e.Testnet) error { logger.Info(fmt.Sprintf("Generating testnet files in %q", testnet.Dir)) err := os.MkdirAll(testnet.Dir, os.ModePerm) @@ -59,7 +59,7 @@ func Setup(testnet *e2e.Testnet) error { if err != nil { return err } - err = ioutil.WriteFile(filepath.Join(testnet.Dir, "docker-compose.yml"), compose, 0644) + err = os.WriteFile(filepath.Join(testnet.Dir, "docker-compose.yml"), compose, 0644) if err != nil { return err } @@ -106,7 +106,7 @@ func Setup(testnet *e2e.Testnet) error { if err != nil { return err } - err = ioutil.WriteFile(filepath.Join(nodeDir, "config", "app.toml"), appCfg, 0644) + err = os.WriteFile(filepath.Join(nodeDir, "config", "app.toml"), appCfg, 0644) if err != nil { return err } @@ -118,7 +118,10 @@ func Setup(testnet *e2e.Testnet) error { if err != nil { return err } - pv.Save() + err = pv.Save() + if err != nil { + return err + } continue } @@ -137,7 +140,10 @@ func Setup(testnet *e2e.Testnet) error { if err != nil { return err } - pv.Save() + err = pv.Save() + if err != nil { + return err + } } // Set up a dummy validator. Tenderdash requires a file PV even when not used, so we // give it a dummy such that it will fail if it actually tries to use it.
@@ -145,7 +151,10 @@ func Setup(testnet *e2e.Testnet) error { if err != nil { return err } - pv.Save() + err = pv.Save() + if err != nil { + return err + } } return nil @@ -289,6 +298,7 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { cfg := config.DefaultConfig() cfg.Moniker = node.Name cfg.ProxyApp = AppAddressTCP + cfg.TxIndex = config.TestTxIndexConfig() if node.LogLevel != "" { cfg.LogLevel = node.LogLevel @@ -297,9 +307,7 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { cfg.RPC.ListenAddress = "tcp://0.0.0.0:26657" cfg.RPC.PprofListenAddress = ":6060" cfg.P2P.ExternalAddress = fmt.Sprintf("tcp://%v", node.AddressP2P(false)) - cfg.P2P.AddrBookStrict = false cfg.Consensus.AppHashSize = crypto.DefaultAppHashSize - cfg.P2P.UseLegacy = node.UseLegacyP2P cfg.P2P.QueueType = node.QueueType cfg.DBBackend = node.Database cfg.StateSync.DiscoveryTime = 5 * time.Second @@ -360,16 +368,6 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { return nil, fmt.Errorf("unexpected mode %q", node.Mode) } - if node.Mempool != "" { - cfg.Mempool.Version = node.Mempool - } - - if node.BlockSync == "" { - cfg.BlockSync.Enable = false - } else { - cfg.BlockSync.Version = node.BlockSync - } - switch node.StateSync { case e2e.StateSyncP2P: cfg.StateSync.Enable = true @@ -418,14 +416,13 @@ func MakeAppConfig(node *e2e.Node) ([]byte, error) { "listen": AppAddressUNIX, "mode": node.Mode, "proxy_port": node.ProxyPort, - "privval_server_type": "dashcore", - "privval_server": PrivvalAddressDashCore, "protocol": "socket", "persist_interval": node.PersistInterval, "snapshot_interval": node.SnapshotInterval, "retain_blocks": node.RetainBlocks, "key_type": bls12381.KeyType, - "use_legacy_p2p": node.UseLegacyP2P, + "privval_server_type": "dashcore", + "privval_server": PrivvalAddressDashCore, } switch node.ABCIProtocol { case e2e.ProtocolUNIX: @@ -528,13 +525,13 @@ func UpdateConfigStateSync(node *e2e.Node, height int64, hash []byte) error { // FIXME Apparently there's no function to simply load a config file without // involving the entire Viper apparatus, so we'll just resort to regexps. 
- bz, err := ioutil.ReadFile(cfgPath) + bz, err := os.ReadFile(cfgPath) if err != nil { return err } bz = regexp.MustCompile(`(?m)^trust-height =.*`).ReplaceAll(bz, []byte(fmt.Sprintf(`trust-height = %v`, height))) bz = regexp.MustCompile(`(?m)^trust-hash =.*`).ReplaceAll(bz, []byte(fmt.Sprintf(`trust-hash = "%X"`, hash))) - return ioutil.WriteFile(cfgPath, bz, 0644) + return os.WriteFile(cfgPath, bz, 0644) } func newDefaultFilePV(node *e2e.Node, nodeDir string) (*privval.FilePV, error) { diff --git a/test/e2e/runner/start.go b/test/e2e/runner/start.go index 967d2519cf..be9661df3a 100644 --- a/test/e2e/runner/start.go +++ b/test/e2e/runner/start.go @@ -6,10 +6,11 @@ import ( "sort" "time" + "github.com/tendermint/tendermint/libs/log" e2e "github.com/tendermint/tendermint/test/e2e/pkg" ) -func Start(ctx context.Context, testnet *e2e.Testnet) error { +func Start(ctx context.Context, logger log.Logger, testnet *e2e.Testnet) error { if len(testnet.Nodes) == 0 { return fmt.Errorf("no nodes in testnet") } @@ -51,7 +52,7 @@ func Start(ctx context.Context, testnet *e2e.Testnet) error { ctx, cancel := context.WithTimeout(ctx, time.Minute) defer cancel() - _, err := waitForNode(ctx, node, 0) + _, err := waitForNode(ctx, logger, node, 0) return err }(); err != nil { return err @@ -110,7 +111,7 @@ func Start(ctx context.Context, testnet *e2e.Testnet) error { } wctx, wcancel := context.WithTimeout(ctx, 8*time.Minute) - status, err := waitForNode(wctx, node, node.StartAt) + status, err := waitForNode(wctx, logger, node, node.StartAt) if err != nil { wcancel() return err @@ -118,8 +119,17 @@ func Start(ctx context.Context, testnet *e2e.Testnet) error { wcancel() node.HasStarted = true + + var lastNodeHeight int64 + + // If the node is a light client, we fetch its current height + if node.Mode == e2e.ModeLight { + lastNodeHeight = status.LightClientInfo.LastTrustedHeight + } else { + lastNodeHeight = status.SyncInfo.LatestBlockHeight + } logger.Info(fmt.Sprintf("Node %v up on http://127.0.0.1:%v at height %v", - node.Name, node.ProxyPort, status.SyncInfo.LatestBlockHeight)) + node.Name, node.ProxyPort, lastNodeHeight)) } return nil diff --git a/test/e2e/runner/test.go b/test/e2e/runner/test.go index 0748797aa2..2237588a11 100644 --- a/test/e2e/runner/test.go +++ b/test/e2e/runner/test.go @@ -8,8 +8,6 @@ import ( // Test runs test cases under tests/ func Test(testnet *e2e.Testnet) error { - logger.Info("Running tests in ./tests/...") - err := os.Setenv("E2E_MANIFEST", testnet.File) if err != nil { return err diff --git a/test/e2e/runner/wait.go b/test/e2e/runner/wait.go index e3f9550711..b8e8d0d4de 100644 --- a/test/e2e/runner/wait.go +++ b/test/e2e/runner/wait.go @@ -4,21 +4,22 @@ import ( "context" "fmt" + "github.com/tendermint/tendermint/libs/log" e2e "github.com/tendermint/tendermint/test/e2e/pkg" ) // Wait waits for a number of blocks to be produced, and for all nodes to catch // up with it. -func Wait(ctx context.Context, testnet *e2e.Testnet, blocks int64) error { +func Wait(ctx context.Context, logger log.Logger, testnet *e2e.Testnet, blocks int64) error { block, err := getLatestBlock(ctx, testnet) if err != nil { return err } - return WaitUntil(ctx, testnet, block.Height+blocks) + return WaitUntil(ctx, logger, testnet, block.Height+blocks) } // WaitUntil waits until a given height has been reached. 
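The UpdateConfigStateSync hunk above keeps the regexp-based approach the FIXME describes: individual TOML keys are rewritten in place rather than round-tripping the file through a parser. A standalone sketch of that technique, assuming only the standard library; the path and key names are illustrative:

```go
package runner

import (
	"fmt"
	"os"
	"regexp"
)

// patchTOMLKey rewrites a single top-level `key = ...` line in a TOML file.
// (?m) makes ^ match at every line start, so only whole-line entries change.
func patchTOMLKey(path, key, value string) error {
	bz, err := os.ReadFile(path)
	if err != nil {
		return err
	}
	re := regexp.MustCompile(`(?m)^` + regexp.QuoteMeta(key) + ` =.*`)
	bz = re.ReplaceAll(bz, []byte(fmt.Sprintf("%s = %s", key, value)))
	return os.WriteFile(path, bz, 0644)
}
```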
-func WaitUntil(ctx context.Context, testnet *e2e.Testnet, height int64) error { +func WaitUntil(ctx context.Context, logger log.Logger, testnet *e2e.Testnet, height int64) error { logger.Info(fmt.Sprintf("Waiting for all nodes to reach height %v...", height)) _, _, err := waitForHeight(ctx, testnet, height) diff --git a/test/e2e/tests/app_test.go b/test/e2e/tests/app_test.go index ba82e0e894..ed041e1861 100644 --- a/test/e2e/tests/app_test.go +++ b/test/e2e/tests/app_test.go @@ -5,6 +5,7 @@ import ( "context" "fmt" "math/rand" + "strconv" "testing" "time" @@ -23,7 +24,7 @@ const ( // Tests that any initial state given in genesis has made it into the app. func TestApp_InitialState(t *testing.T) { - testNode(t, func(t *testing.T, node e2e.Node) { + testNode(t, func(ctx context.Context, t *testing.T, node e2e.Node) { if len(node.Testnet.InitialState) == 0 { return } @@ -42,35 +43,37 @@ func TestApp_InitialState(t *testing.T) { // Tests that the app hash (as reported by the app) matches the last // block and the node sync status. func TestApp_Hash(t *testing.T) { - testNode(t, func(t *testing.T, node e2e.Node) { + testNode(t, func(ctx context.Context, t *testing.T, node e2e.Node) { client, err := node.Client() require.NoError(t, err) + info, err := client.ABCIInfo(ctx) require.NoError(t, err) require.NotEmpty(t, info.Response.LastBlockAppHash, "expected app to return app hash") - status, err := client.Status(ctx) - require.NoError(t, err) - require.NotZero(t, status.SyncInfo.LatestBlockHeight) + // In next-block execution, the app hash is stored in the next block + blockHeight := info.Response.LastBlockHeight + 1 - block, err := client.Block(ctx, &info.Response.LastBlockHeight) - require.NoError(t, err) - - if info.Response.LastBlockHeight == block.Block.Height { - require.Equal(t, - fmt.Sprintf("%x", info.Response.LastBlockAppHash), - fmt.Sprintf("%x", block.Block.AppHash.Bytes()), - "app hash does not match last block's app hash") - } + require.Eventually(t, func() bool { + status, err := client.Status(ctx) + require.NoError(t, err) + require.NotZero(t, status.SyncInfo.LatestBlockHeight) + return status.SyncInfo.LatestBlockHeight >= blockHeight + }, 60*time.Second, 500*time.Millisecond) - require.True(t, status.SyncInfo.LatestBlockHeight >= info.Response.LastBlockHeight, - "status out of sync with application") + block, err := client.Block(ctx, &blockHeight) + require.NoError(t, err) + require.Equal(t, blockHeight, block.Block.Height) + require.Equal(t, + fmt.Sprintf("%x", info.Response.LastBlockAppHash), + fmt.Sprintf("%x", block.Block.AppHash.Bytes()), + "app hash does not match last block's app hash") }) } // Tests that the app and blockstore have and report the same height. 
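The rewritten TestApp_Hash encodes the next-block-execution rule: the app hash reported for height H is only committed in the header of block H+1, so the test polls until that block exists before fetching it. A sketch of that wait-then-fetch shape, assuming the rpchttp client and testify imports these tests already use; the helper name is illustrative:

```go
package e2e_test

import (
	"context"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
	"github.com/tendermint/tendermint/types"
)

// fetchBlockCarryingAppHash waits for block H+1 (which carries the app hash
// reported at height H) and returns it.
func fetchBlockCarryingAppHash(ctx context.Context, t *testing.T, client *rpchttp.HTTP, lastBlockHeight int64) *types.Block {
	t.Helper()
	blockHeight := lastBlockHeight + 1

	require.Eventually(t, func() bool {
		status, err := client.Status(ctx)
		return err == nil && status.SyncInfo.LatestBlockHeight >= blockHeight
	}, 60*time.Second, 500*time.Millisecond)

	res, err := client.Block(ctx, &blockHeight)
	require.NoError(t, err)
	require.Equal(t, blockHeight, res.Block.Height)
	return res.Block
}
```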
func TestApp_Height(t *testing.T) { - testNode(t, func(t *testing.T, node e2e.Node) { + testNode(t, func(ctx context.Context, t *testing.T, node e2e.Node) { client, err := node.Client() require.NoError(t, err) info, err := client.ABCIInfo(ctx) @@ -151,7 +154,7 @@ func TestApp_Tx(t *testing.T) { } t.Run(test.Name, func(t *testing.T) { test := testCases[idx] - testNode(t, func(t *testing.T, node e2e.Node) { + testNode(t, func(ctx context.Context, t *testing.T, node e2e.Node) { client, err := node.Client() require.NoError(t, err) @@ -184,3 +187,18 @@ func TestApp_Tx(t *testing.T) { } } + +func TestApp_VoteExtensions(t *testing.T) { + testNode(t, func(ctx context.Context, t *testing.T, node e2e.Node) { + client, err := node.Client() + require.NoError(t, err) + + // This special value should have been created by way of vote extensions + resp, err := client.ABCIQuery(ctx, "", []byte("extensionSum")) + require.NoError(t, err) + + extSum, err := strconv.Atoi(string(resp.Response.Value)) + require.NoError(t, err) + require.GreaterOrEqual(t, extSum, 0) + }) +} diff --git a/test/e2e/tests/block_test.go b/test/e2e/tests/block_test.go index c02bc7c54b..2d9a77efa5 100644 --- a/test/e2e/tests/block_test.go +++ b/test/e2e/tests/block_test.go @@ -1,6 +1,7 @@ package e2e_test import ( + "context" "testing" "github.com/stretchr/testify/assert" @@ -12,8 +13,11 @@ import ( // Tests that block headers are identical across nodes where present. func TestBlock_Header(t *testing.T) { - blocks := fetchBlockChain(t) - testNode(t, func(t *testing.T, node e2e.Node) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + blocks := fetchBlockChain(ctx, t) + testNode(t, func(ctx context.Context, t *testing.T, node e2e.Node) { client, err := node.Client() require.NoError(t, err) status, err := client.Status(ctx) @@ -60,7 +64,7 @@ func TestBlock_Header(t *testing.T) { // Tests that the node contains the expected block range. func TestBlock_Range(t *testing.T) { - testNode(t, func(t *testing.T, node e2e.Node) { + testNode(t, func(ctx context.Context, t *testing.T, node e2e.Node) { client, err := node.Client() require.NoError(t, err) status, err := client.Status(ctx) diff --git a/test/e2e/tests/e2e_test.go b/test/e2e/tests/e2e_test.go index b1a51d5b81..7b3d829d1c 100644 --- a/test/e2e/tests/e2e_test.go +++ b/test/e2e/tests/e2e_test.go @@ -23,7 +23,6 @@ func init() { } var ( - ctx = context.Background() testnetCache = map[string]e2e.Testnet{} testnetCacheMtx = sync.Mutex{} blocksCache = map[string][]*types.Block{} @@ -38,7 +37,7 @@ var ( // these tests are skipped so that they're not picked up during normal unit // test runs. If E2E_NODE is also set, only the specified node is tested, // otherwise all nodes are tested. -func testNode(t *testing.T, testFunc func(*testing.T, e2e.Node)) { +func testNode(t *testing.T, testFunc func(context.Context, *testing.T, e2e.Node)) { t.Helper() testnet := loadTestnet(t) @@ -62,7 +61,10 @@ func testNode(t *testing.T, testFunc func(*testing.T, e2e.Node)) { } t.Run(node.Name, func(t *testing.T) { - testFunc(t, node) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testFunc(ctx, t, node) }) } } @@ -90,7 +92,7 @@ func loadTestnet(t *testing.T) e2e.Testnet { // fetchBlockChain fetches a complete, up-to-date block history from // the freshest testnet archive node. 
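The e2e_test.go change above drops the shared package-level ctx in favor of a context scoped to each subtest, so a hung RPC call in one node's test cannot bleed into the next. A generic, self-contained sketch of that harness pattern; the Node type here is a placeholder, not the e2e package's:

```go
package tests

import (
	"context"
	"testing"
)

type Node struct{ Name string }

// testEachNode runs fn once per node, giving every subtest its own
// cancellable context that is cleaned up when the subtest returns.
func testEachNode(t *testing.T, nodes []Node, fn func(context.Context, *testing.T, Node)) {
	t.Helper()
	for _, node := range nodes {
		node := node // capture the loop variable for the closure
		t.Run(node.Name, func(t *testing.T) {
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()
			fn(ctx, t, node)
		})
	}
}
```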
-func fetchBlockChain(t *testing.T) []*types.Block { +func fetchBlockChain(ctx context.Context, t *testing.T) []*types.Block { t.Helper() testnet := loadTestnet(t) diff --git a/test/e2e/tests/evidence_test.go b/test/e2e/tests/evidence_test.go index f7f2ede790..a757c85503 100644 --- a/test/e2e/tests/evidence_test.go +++ b/test/e2e/tests/evidence_test.go @@ -1,6 +1,7 @@ package e2e_test import ( + "context" "testing" "github.com/stretchr/testify/require" @@ -9,12 +10,15 @@ import ( // assert that all nodes that have blocks at the height of a misbehavior have evidence // for that misbehavior func TestEvidence_Misbehavior(t *testing.T) { - blocks := fetchBlockChain(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + blocks := fetchBlockChain(ctx, t) testnet := loadTestnet(t) seenEvidence := 0 for _, block := range blocks { - if len(block.Evidence.Evidence) != 0 { - seenEvidence += len(block.Evidence.Evidence) + if len(block.Evidence) != 0 { + seenEvidence += len(block.Evidence) } } require.Equal(t, testnet.Evidence, seenEvidence, diff --git a/test/e2e/tests/net_test.go b/test/e2e/tests/net_test.go index e6ff27a0e3..71a9584122 100644 --- a/test/e2e/tests/net_test.go +++ b/test/e2e/tests/net_test.go @@ -1,6 +1,7 @@ package e2e_test import ( + "context" "testing" "github.com/stretchr/testify/require" @@ -13,7 +14,7 @@ func TestNet_Peers(t *testing.T) { // FIXME Skip test since nodes aren't always able to fully mesh t.SkipNow() - testNode(t, func(t *testing.T, node e2e.Node) { + testNode(t, func(ctx context.Context, t *testing.T, node e2e.Node) { client, err := node.Client() require.NoError(t, err) netInfo, err := client.NetInfo(ctx) diff --git a/test/e2e/tests/validator_test.go b/test/e2e/tests/validator_test.go index 9ed71ca668..8b07eec1ae 100644 --- a/test/e2e/tests/validator_test.go +++ b/test/e2e/tests/validator_test.go @@ -2,6 +2,7 @@ package e2e_test import ( "bytes" + "context" "testing" "github.com/dashevo/dashd-go/btcjson" @@ -16,7 +17,7 @@ import ( // Tests that validator sets are available and correct according to // scheduled validator updates. func TestValidator_Sets(t *testing.T) { - testNode(t, func(t *testing.T, node e2e.Node) { + testNode(t, func(ctx context.Context, t *testing.T, node e2e.Node) { client, err := node.Client() require.NoError(t, err) status, err := client.Status(ctx) @@ -39,7 +40,7 @@ func TestValidator_Sets(t *testing.T) { } valSchedule := newValidatorSchedule(*node.Testnet) - valSchedule.Increment(first - node.Testnet.InitialHeight) + require.NoError(t, valSchedule.Increment(first-node.Testnet.InitialHeight)) for h := first; h <= last; h++ { validators := []*types.Validator{} @@ -75,7 +76,7 @@ func TestValidator_Sets(t *testing.T) { "incorrect validator set at height %v", h) require.Equal(t, valSchedule.Set.ThresholdPublicKey, thresholdPublicKey, "incorrect thresholdPublicKey at height %v", h) - valSchedule.Increment(1) + require.NoError(t, valSchedule.Increment(1)) } }) } @@ -83,8 +84,11 @@ func TestValidator_Sets(t *testing.T) { // Tests that a validator proposes blocks when it's supposed to. It tolerates some // missed blocks, e.g. due to testnet perturbations.
func TestValidator_Propose(t *testing.T) { - blocks := fetchBlockChain(t) - testNode(t, func(t *testing.T, node e2e.Node) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + blocks := fetchBlockChain(ctx, t) + testNode(t, func(ctx context.Context, t *testing.T, node e2e.Node) { if node.Mode != e2e.ModeValidator { return } @@ -100,7 +104,7 @@ func TestValidator_Propose(t *testing.T) { proposeCount++ } } - valSchedule.Increment(1) + require.NoError(t, valSchedule.Increment(1)) } require.False(t, proposeCount == 0 && expectCount > 0, @@ -131,14 +135,12 @@ func newValidatorSchedule(testnet e2e.Testnet) *validatorSchedule { } if v, ok := testnet.ValidatorUpdates[0]; ok { // InitChain validators valMap = v - if t, ok := testnet.ThresholdPublicKeyUpdates[0]; ok { // InitChain threshold public key - thresholdPublicKey = t - } else { + thresholdPublicKey, ok = testnet.ThresholdPublicKeyUpdates[0] + if !ok { // InitChain threshold public key panic("threshold public key must be set for height 0 if validator changes") } - if q, ok := testnet.QuorumHashUpdates[0]; ok { // InitChain threshold public key - quorumHash = q - } else { + quorumHash, ok = testnet.QuorumHashUpdates[0] + if !ok { // InitChain quorum hash panic("quorum hash key must be set for height 0 if validator changes") } } @@ -152,7 +154,7 @@ func newValidatorSchedule(testnet e2e.Testnet) *validatorSchedule { } } -func (s *validatorSchedule) Increment(heights int64) { +func (s *validatorSchedule) Increment(heights int64) error { for i := int64(0); i < heights; i++ { s.height++ if s.height > 2 { @@ -163,7 +165,7 @@ if quorumHashUpdate, ok := s.quorumHashUpdates[s.height-2]; ok { if bytes.Equal(quorumHashUpdate, s.Set.QuorumHash) { if err := s.Set.UpdateWithChangeSet(makeVals(update), thresholdPublicKeyUpdate, quorumHashUpdate); err != nil { - panic(err) + return err } } else { s.Set = types.NewValidatorSet(makeVals(update), thresholdPublicKeyUpdate, btcjson.LLMQType_5_60, @@ -175,6 +177,7 @@ } s.Set.IncrementProposerPriority(1) } + return nil } func makeVals(valMap e2e.ValidatorsMap) []*types.Validator { diff --git a/test/fuzz/Makefile b/test/fuzz/Makefile deleted file mode 100644 index 3d34e0a43b..0000000000 --- a/test/fuzz/Makefile +++ /dev/null @@ -1,52 +0,0 @@ -#!/usr/bin/make -f - -.PHONY: fuzz-mempool-v1 -fuzz-mempool-v1: - cd mempool/v1 && \ - rm -f *-fuzz.zip && \ - go-fuzz-build && \ - go-fuzz - -.PHONY: fuzz-mempool-v0 -fuzz-mempool-v0: - cd mempool/v0 && \ - rm -f *-fuzz.zip && \ - go-fuzz-build && \ - go-fuzz - -.PHONY: fuzz-p2p-addrbook -fuzz-p2p-addrbook: - cd p2p/addrbook && \ - rm -f *-fuzz.zip && \ - go run ./init-corpus/main.go && \ - go-fuzz-build && \ - go-fuzz - -.PHONY: fuzz-p2p-pex -fuzz-p2p-pex: - cd p2p/pex && \ - rm -f *-fuzz.zip && \ - go run ./init-corpus/main.go && \ - go-fuzz-build && \ - go-fuzz - -.PHONY: fuzz-p2p-sc -fuzz-p2p-sc: - cd p2p/secret_connection && \ - rm -f *-fuzz.zip && \ - go run ./init-corpus/main.go && \ - go-fuzz-build && \ - go-fuzz - -.PHONY: fuzz-rpc-server -fuzz-rpc-server: - cd rpc/jsonrpc/server && \ - rm -f *-fuzz.zip && \ - go-fuzz-build && \ - go-fuzz - -clean: - find . -name corpus -type d -exec rm -rf {} +; - find . -name crashers -type d -exec rm -rf {} +; - find . -name suppressions -type d -exec rm -rf {} +; - find . 
-name *\.zip -type f -delete diff --git a/test/fuzz/README.md b/test/fuzz/README.md index 707217afd4..11ec9d5216 100644 --- a/test/fuzz/README.md +++ b/test/fuzz/README.md @@ -5,68 +5,18 @@ Fuzzing for various packages in Tendermint using [go-fuzz](https://github.com/dv Inputs: - mempool `CheckTx` (using kvstore in-process ABCI app) -- p2p `Addrbook#AddAddress` -- p2p `pex.Reactor#Receive` - p2p `SecretConnection#Read` and `SecretConnection#Write` - rpc jsonrpc server -## Directory structure - -``` -| test -| |- corpus/ -| |- crashers/ -| |- init-corpus/ -| |- suppressions/ -| |- testdata/ -| |- .go -``` - -`/corpus` directory contains corpus data. The idea is to help the fuzzier to -understand what bytes sequences are semantically valid (e.g. if we're testing -PNG decoder, then we would put black-white PNG into corpus directory; with -blockchain reactor - we would put blockchain messages into corpus). - -`/init-corpus` (if present) contains a script for generating corpus data. - -`/testdata` directory may contain an additional data (like `addrbook.json`). - -Upon running the fuzzier, `/crashers` and `/suppressions` dirs will be created, -along with .zip archive. `/crashers` will show any inputs, which have -lead to panics (plus a trace). `/suppressions` will show any suppressed inputs. - ## Running -```sh -make fuzz-mempool -make fuzz-p2p-addrbook -make fuzz-p2p-pex -make fuzz-p2p-sc -make fuzz-rpc-server -``` - -Each command will create corpus data (if needed), generate a fuzz archive and -call `go-fuzz` executable. - -Then watch out for the respective outputs in the fuzzer output to announce new -crashers which can be found in the directory `crashers`. - -For example if we find +The fuzz tests are in native Go fuzzing format. Use the `go` +tool to run them: ```sh -ls crashers/ -61bde465f47c93254d64d643c3b2480e0a54666e -61bde465f47c93254d64d643c3b2480e0a54666e.output -61bde465f47c93254d64d643c3b2480e0a54666e.quoted -da39a3ee5e6b4b0d3255bfef95601890afd80709 -da39a3ee5e6b4b0d3255bfef95601890afd80709.output -da39a3ee5e6b4b0d3255bfef95601890afd80709.quoted +go test -fuzz Mempool ./tests +go test -fuzz P2PSecretConnection ./tests +go test -fuzz RPCJSONRPCServer ./tests ``` -the crashing bytes generated by the fuzzer will be in -`61bde465f47c93254d64d643c3b2480e0a54666e` the respective crash report in -`61bde465f47c93254d64d643c3b2480e0a54666e.output` - -and the bug report can be created by retrieving the bytes in -`61bde465f47c93254d64d643c3b2480e0a54666e` and feeding those back into the -`Fuzz` function. +See [the Go Fuzzing introduction](https://go.dev/doc/fuzz/) for more information. 
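For readers unfamiliar with the native format the README now points to, a minimal Go 1.18+ fuzz target looks like the sketch below; the target name and body are illustrative, not the repository's actual tests:

```go
package tests

import "testing"

func FuzzExample(f *testing.F) {
	f.Add([]byte("seed")) // optional seed corpus entry
	f.Fuzz(func(t *testing.T, data []byte) {
		// Exercise the component under test with data; panics and test
		// failures are reported as crashers by `go test -fuzz`.
		if len(data) > 1<<20 {
			t.Skip("input too large")
		}
	})
}
```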
diff --git a/test/fuzz/mempool/v0/checktx.go b/test/fuzz/mempool/v0/checktx.go deleted file mode 100644 index 62eda97295..0000000000 --- a/test/fuzz/mempool/v0/checktx.go +++ /dev/null @@ -1,37 +0,0 @@ -package v0 - -import ( - "context" - - abciclient "github.com/tendermint/tendermint/abci/client" - "github.com/tendermint/tendermint/abci/example/kvstore" - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/mempool" - mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0" -) - -var mp mempool.Mempool - -func init() { - app := kvstore.NewApplication() - cc := abciclient.NewLocalCreator(app) - appConnMem, _ := cc() - err := appConnMem.Start() - if err != nil { - panic(err) - } - - cfg := config.DefaultMempoolConfig() - cfg.Broadcast = false - - mp = mempoolv0.NewCListMempool(cfg, appConnMem, 0) -} - -func Fuzz(data []byte) int { - err := mp.CheckTx(context.Background(), data, nil, mempool.TxInfo{}) - if err != nil { - return 0 - } - - return 1 -} diff --git a/test/fuzz/mempool/v0/fuzz_test.go b/test/fuzz/mempool/v0/fuzz_test.go deleted file mode 100644 index 4f8f1e9c8e..0000000000 --- a/test/fuzz/mempool/v0/fuzz_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package v0_test - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/require" - mempoolv0 "github.com/tendermint/tendermint/test/fuzz/mempool/v0" -) - -const testdataCasesDir = "testdata/cases" - -func TestMempoolTestdataCases(t *testing.T) { - entries, err := os.ReadDir(testdataCasesDir) - require.NoError(t, err) - - for _, e := range entries { - entry := e - t.Run(entry.Name(), func(t *testing.T) { - defer func() { - r := recover() - require.Nilf(t, r, "testdata/cases test panic") - }() - f, err := os.Open(filepath.Join(testdataCasesDir, entry.Name())) - require.NoError(t, err) - input, err := ioutil.ReadAll(f) - require.NoError(t, err) - mempoolv0.Fuzz(input) - }) - } -} diff --git a/test/fuzz/mempool/v0/testdata/cases/empty b/test/fuzz/mempool/v0/testdata/cases/empty deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/fuzz/mempool/v1/checktx.go b/test/fuzz/mempool/v1/checktx.go deleted file mode 100644 index 2ed0b97ff5..0000000000 --- a/test/fuzz/mempool/v1/checktx.go +++ /dev/null @@ -1,37 +0,0 @@ -package v1 - -import ( - "context" - - abciclient "github.com/tendermint/tendermint/abci/client" - "github.com/tendermint/tendermint/abci/example/kvstore" - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/mempool" - mempoolv1 "github.com/tendermint/tendermint/internal/mempool/v0" -) - -var mp mempool.Mempool - -func init() { - app := kvstore.NewApplication() - cc := abciclient.NewLocalCreator(app) - appConnMem, _ := cc() - err := appConnMem.Start() - if err != nil { - panic(err) - } - - cfg := config.DefaultMempoolConfig() - cfg.Broadcast = false - - mp = mempoolv1.NewCListMempool(cfg, appConnMem, 0) -} - -func Fuzz(data []byte) int { - err := mp.CheckTx(context.Background(), data, nil, mempool.TxInfo{}) - if err != nil { - return 0 - } - - return 1 -} diff --git a/test/fuzz/mempool/v1/fuzz_test.go b/test/fuzz/mempool/v1/fuzz_test.go deleted file mode 100644 index 863697a0af..0000000000 --- a/test/fuzz/mempool/v1/fuzz_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package v1_test - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/require" - mempoolv1 "github.com/tendermint/tendermint/test/fuzz/mempool/v1" -) - -const testdataCasesDir = "testdata/cases" - -func 
TestMempoolTestdataCases(t *testing.T) { - entries, err := os.ReadDir(testdataCasesDir) - require.NoError(t, err) - - for _, e := range entries { - entry := e - t.Run(entry.Name(), func(t *testing.T) { - defer func() { - r := recover() - require.Nilf(t, r, "testdata/cases test panic") - }() - f, err := os.Open(filepath.Join(testdataCasesDir, entry.Name())) - require.NoError(t, err) - input, err := ioutil.ReadAll(f) - require.NoError(t, err) - mempoolv1.Fuzz(input) - }) - } -} diff --git a/test/fuzz/mempool/v1/testdata/cases/empty b/test/fuzz/mempool/v1/testdata/cases/empty deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/fuzz/oss-fuzz-build.sh b/test/fuzz/oss-fuzz-build.sh index 8eb30f5108..836253d4d1 100755 --- a/test/fuzz/oss-fuzz-build.sh +++ b/test/fuzz/oss-fuzz-build.sh @@ -1,14 +1,22 @@ -#!/bin/bash -eu +#!/bin/bash + +set -euo pipefail export FUZZ_ROOT="github.com/tendermint/tendermint" -(cd test/fuzz/p2p/addrbook; go run ./init-corpus/main.go) -compile_go_fuzzer "$FUZZ_ROOT"/test/fuzz/p2p/addrbook Fuzz fuzz_p2p_addrbook fuzz -(cd test/fuzz/p2p/pex; go run ./init-corpus/main.go) -compile_go_fuzzer "$FUZZ_ROOT"/test/fuzz/p2p/pex Fuzz fuzz_p2p_pex fuzz -(cd test/fuzz/p2p/secret_connection; go run ./init-corpus/main.go) -compile_go_fuzzer "$FUZZ_ROOT"/test/fuzz/p2p/secret_connection Fuzz fuzz_p2p_secret_connection fuzz +build_go_fuzzer() { + local function="$1" + local fuzzer="$2" + + gotip run github.com/orijtech/otils/corpus2ossfuzz@latest -o "$OUT"/"$fuzzer"_seed_corpus.zip -corpus test/fuzz/tests/testdata/fuzz/"$function" + compile_native_go_fuzzer "$FUZZ_ROOT"/test/fuzz/tests "$function" "$fuzzer" +} + +gotip get github.com/AdamKorcz/go-118-fuzz-build/utils +gotip get github.com/prometheus/common/expfmt@v0.32.1 + +build_go_fuzzer FuzzP2PSecretConnection fuzz_p2p_secretconnection -compile_go_fuzzer "$FUZZ_ROOT"/test/fuzz/mempool Fuzz fuzz_mempool fuzz +build_go_fuzzer FuzzMempool fuzz_mempool -compile_go_fuzzer "$FUZZ_ROOT"/test/fuzz/rpc/jsonrpc/server Fuzz fuzz_rpc_jsonrpc_server fuzz +build_go_fuzzer FuzzRPCJSONRPCServer fuzz_rpc_jsonrpc_server diff --git a/test/fuzz/p2p/addrbook/fuzz.go b/test/fuzz/p2p/addrbook/fuzz.go deleted file mode 100644 index 6d5548fc73..0000000000 --- a/test/fuzz/p2p/addrbook/fuzz.go +++ /dev/null @@ -1,35 +0,0 @@ -// nolint: gosec -package addrbook - -import ( - "encoding/json" - "fmt" - "math/rand" - - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/p2p/pex" -) - -var addrBook = pex.NewAddrBook("./testdata/addrbook.json", true) - -func Fuzz(data []byte) int { - addr := new(p2p.NetAddress) - if err := json.Unmarshal(data, addr); err != nil { - return -1 - } - - // Fuzz AddAddress. - err := addrBook.AddAddress(addr, addr) - if err != nil { - return 0 - } - - // Also, make sure PickAddress always returns a non-nil address. 
- bias := rand.Intn(100) - if p := addrBook.PickAddress(bias); p == nil { - panic(fmt.Sprintf("picked a nil address (bias: %d, addrBook size: %v)", - bias, addrBook.Size())) - } - - return 1 -} diff --git a/test/fuzz/p2p/addrbook/fuzz_test.go b/test/fuzz/p2p/addrbook/fuzz_test.go deleted file mode 100644 index 4ec7aebd98..0000000000 --- a/test/fuzz/p2p/addrbook/fuzz_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package addrbook_test - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/test/fuzz/p2p/addrbook" -) - -const testdataCasesDir = "testdata/cases" - -func TestAddrbookTestdataCases(t *testing.T) { - entries, err := os.ReadDir(testdataCasesDir) - require.NoError(t, err) - - for _, e := range entries { - entry := e - t.Run(entry.Name(), func(t *testing.T) { - defer func() { - r := recover() - require.Nilf(t, r, "testdata/cases test panic") - }() - f, err := os.Open(filepath.Join(testdataCasesDir, entry.Name())) - require.NoError(t, err) - input, err := ioutil.ReadAll(f) - require.NoError(t, err) - addrbook.Fuzz(input) - }) - } -} diff --git a/test/fuzz/p2p/addrbook/init-corpus/main.go b/test/fuzz/p2p/addrbook/init-corpus/main.go deleted file mode 100644 index 1166f9bd74..0000000000 --- a/test/fuzz/p2p/addrbook/init-corpus/main.go +++ /dev/null @@ -1,59 +0,0 @@ -// nolint: gosec -package main - -import ( - "encoding/json" - "flag" - "fmt" - "io/ioutil" - "log" - "net" - "os" - "path/filepath" - - "github.com/tendermint/tendermint/crypto/ed25519" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/types" -) - -func main() { - baseDir := flag.String("base", ".", `where the "corpus" directory will live`) - flag.Parse() - - initCorpus(*baseDir) -} - -func initCorpus(baseDir string) { - log.SetFlags(0) - - // create "corpus" directory - corpusDir := filepath.Join(baseDir, "corpus") - if err := os.MkdirAll(corpusDir, 0755); err != nil { - log.Fatalf("Creating %q err: %v", corpusDir, err) - } - - // create corpus - privKey := ed25519.GenPrivKey() - addrs := []*p2p.NetAddress{ - {ID: types.NodeIDFromPubKey(privKey.PubKey()), IP: net.IPv4(0, 0, 0, 0), Port: 0}, - {ID: types.NodeIDFromPubKey(privKey.PubKey()), IP: net.IPv4(127, 0, 0, 0), Port: 80}, - {ID: types.NodeIDFromPubKey(privKey.PubKey()), IP: net.IPv4(213, 87, 10, 200), Port: 8808}, - {ID: types.NodeIDFromPubKey(privKey.PubKey()), IP: net.IPv4(111, 111, 111, 111), Port: 26656}, - {ID: types.NodeIDFromPubKey(privKey.PubKey()), IP: net.ParseIP("2001:db8::68"), Port: 26656}, - } - - for i, addr := range addrs { - filename := filepath.Join(corpusDir, fmt.Sprintf("%d.json", i)) - - bz, err := json.Marshal(addr) - if err != nil { - log.Fatalf("can't marshal %v: %v", addr, err) - } - - if err := ioutil.WriteFile(filename, bz, 0644); err != nil { - log.Fatalf("can't write %v to %q: %v", addr, filename, err) - } - - log.Printf("wrote %q", filename) - } -} diff --git a/test/fuzz/p2p/addrbook/testdata/cases/empty b/test/fuzz/p2p/addrbook/testdata/cases/empty deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/test/fuzz/p2p/pex/fuzz_test.go b/test/fuzz/p2p/pex/fuzz_test.go deleted file mode 100644 index 8a194e730c..0000000000 --- a/test/fuzz/p2p/pex/fuzz_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package pex_test - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/test/fuzz/p2p/pex" -) - -const testdataCasesDir = "testdata/cases" - 
-func TestPexTestdataCases(t *testing.T) { - entries, err := os.ReadDir(testdataCasesDir) - require.NoError(t, err) - - for _, e := range entries { - entry := e - t.Run(entry.Name(), func(t *testing.T) { - defer func() { - r := recover() - require.Nilf(t, r, "testdata/cases test panic") - }() - f, err := os.Open(filepath.Join(testdataCasesDir, entry.Name())) - require.NoError(t, err) - input, err := ioutil.ReadAll(f) - require.NoError(t, err) - pex.Fuzz(input) - }) - } -} diff --git a/test/fuzz/p2p/pex/init-corpus/main.go b/test/fuzz/p2p/pex/init-corpus/main.go deleted file mode 100644 index e902168647..0000000000 --- a/test/fuzz/p2p/pex/init-corpus/main.go +++ /dev/null @@ -1,84 +0,0 @@ -// nolint: gosec -package main - -import ( - "flag" - "fmt" - "io/ioutil" - "log" - "math/rand" - "os" - "path/filepath" - - "github.com/tendermint/tendermint/crypto/ed25519" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/p2p/pex" - tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p" - "github.com/tendermint/tendermint/types" -) - -func main() { - baseDir := flag.String("base", ".", `where the "corpus" directory will live`) - flag.Parse() - - initCorpus(*baseDir) -} - -func initCorpus(rootDir string) { - log.SetFlags(0) - - corpusDir := filepath.Join(rootDir, "corpus") - if err := os.MkdirAll(corpusDir, 0755); err != nil { - log.Fatalf("Creating %q err: %v", corpusDir, err) - } - sizes := []int{0, 1, 2, 17, 5, 31} - - // Make the PRNG predictable - rand.Seed(10) - - for _, n := range sizes { - var addrs []*p2p.NetAddress - - // IPv4 addresses - for i := 0; i < n; i++ { - privKey := ed25519.GenPrivKey() - addr := fmt.Sprintf( - "%s@%v.%v.%v.%v:26656", - types.NodeIDFromPubKey(privKey.PubKey()), - rand.Int()%256, - rand.Int()%256, - rand.Int()%256, - rand.Int()%256, - ) - netAddr, _ := types.NewNetAddressString(addr) - addrs = append(addrs, netAddr) - } - - // IPv6 addresses - privKey := ed25519.GenPrivKey() - ipv6a, err := types.NewNetAddressString( - fmt.Sprintf("%s@[ff02::1:114]:26656", types.NodeIDFromPubKey(privKey.PubKey()))) - if err != nil { - log.Fatalf("can't create a new netaddress: %v", err) - } - addrs = append(addrs, ipv6a) - - msg := tmp2p.PexMessage{ - Sum: &tmp2p.PexMessage_PexResponse{ - PexResponse: &tmp2p.PexResponse{Addresses: pex.NetAddressesToProto(addrs)}, - }, - } - bz, err := msg.Marshal() - if err != nil { - log.Fatalf("unable to marshal: %v", err) - } - - filename := filepath.Join(rootDir, "corpus", fmt.Sprintf("%d", n)) - - if err := ioutil.WriteFile(filename, bz, 0644); err != nil { - log.Fatalf("can't write %X to %q: %v", bz, filename, err) - } - - log.Printf("wrote %q", filename) - } -} diff --git a/test/fuzz/p2p/pex/reactor_receive.go b/test/fuzz/p2p/pex/reactor_receive.go deleted file mode 100644 index 9582fe813e..0000000000 --- a/test/fuzz/p2p/pex/reactor_receive.go +++ /dev/null @@ -1,95 +0,0 @@ -package pex - -import ( - "net" - - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/crypto/ed25519" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/p2p/pex" - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/types" - "github.com/tendermint/tendermint/version" -) - -var ( - pexR *pex.Reactor - peer p2p.Peer - logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false) -) - -func init() { - addrB := pex.NewAddrBook("./testdata/addrbook1", false) - 
pexR = pex.NewReactor(addrB, &pex.ReactorConfig{SeedMode: false}) - pexR.SetLogger(logger) - peer = newFuzzPeer() - pexR.AddPeer(peer) - - cfg := config.DefaultP2PConfig() - cfg.PexReactor = true - sw := p2p.MakeSwitch(cfg, 0, "127.0.0.1", "123.123.123", nil, func(i int, sw *p2p.Switch) *p2p.Switch { - return sw - }, logger) - pexR.SetSwitch(sw) -} - -func Fuzz(data []byte) int { - if len(data) == 0 { - return -1 - } - - pexR.Receive(pex.PexChannel, peer, data) - - if !peer.IsRunning() { - // do not increase priority for msgs which lead to peer being stopped - return 0 - } - - return 1 -} - -type fuzzPeer struct { - *service.BaseService - m map[string]interface{} -} - -var _ p2p.Peer = (*fuzzPeer)(nil) - -func newFuzzPeer() *fuzzPeer { - fp := &fuzzPeer{m: make(map[string]interface{})} - fp.BaseService = service.NewBaseService(nil, "fuzzPeer", fp) - return fp -} - -var privKey = ed25519.GenPrivKey() -var nodeID = types.NodeIDFromPubKey(privKey.PubKey()) -var defaultNodeInfo = types.NodeInfo{ - ProtocolVersion: types.ProtocolVersion{ - P2P: version.P2PProtocol, - Block: version.BlockProtocol, - App: 0, - }, - NodeID: nodeID, - ListenAddr: "127.0.0.1:0", - Moniker: "foo1", -} - -func (fp *fuzzPeer) FlushStop() {} -func (fp *fuzzPeer) ID() types.NodeID { return nodeID } -func (fp *fuzzPeer) RemoteIP() net.IP { return net.IPv4(198, 163, 190, 214) } -func (fp *fuzzPeer) RemoteAddr() net.Addr { - return &net.TCPAddr{IP: fp.RemoteIP(), Port: 26656, Zone: ""} -} -func (fp *fuzzPeer) IsOutbound() bool { return false } -func (fp *fuzzPeer) IsPersistent() bool { return false } -func (fp *fuzzPeer) CloseConn() error { return nil } -func (fp *fuzzPeer) NodeInfo() types.NodeInfo { return defaultNodeInfo } -func (fp *fuzzPeer) Status() p2p.ConnectionStatus { var cs p2p.ConnectionStatus; return cs } -func (fp *fuzzPeer) SocketAddr() *p2p.NetAddress { - return types.NewNetAddress(fp.ID(), fp.RemoteAddr()) -} -func (fp *fuzzPeer) Send(byte, []byte) bool { return true } -func (fp *fuzzPeer) TrySend(byte, []byte) bool { return true } -func (fp *fuzzPeer) Set(key string, value interface{}) { fp.m[key] = value } -func (fp *fuzzPeer) Get(key string) interface{} { return fp.m[key] } diff --git a/test/fuzz/p2p/pex/testdata/addrbook1 b/test/fuzz/p2p/pex/testdata/addrbook1 deleted file mode 100644 index acf3e721d9..0000000000 --- a/test/fuzz/p2p/pex/testdata/addrbook1 +++ /dev/null @@ -1,1705 +0,0 @@ -{ - "Key": "badd73ebd4eeafbaefc01e0c", - "Addrs": [ - { - "Addr": { - "IP": "233.174.138.192", - "Port": 48186 - }, - "Src": { - "IP": "198.37.90.115", - "Port": 29492 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692278-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 55 - ] - }, - { - "Addr": { - "IP": "181.28.96.104", - "Port": 26776 - }, - "Src": { - "IP": "183.12.35.241", - "Port": 26794 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692289-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 18 - ] - }, - { - "Addr": { - "IP": "141.85.194.118", - "Port": 39768 - }, - "Src": { - "IP": "120.130.90.63", - "Port": 61750 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692383-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 185 - ] - }, - { - "Addr": { - "IP": "167.72.9.155", - "Port": 9542 - }, - "Src": { - "IP": "95.158.40.108", - "Port": 14929 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692604-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, 
- "Buckets": [ - 250 - ] - }, - { - "Addr": { - "IP": "124.118.94.27", - "Port": 50333 - }, - "Src": { - "IP": "208.169.57.96", - "Port": 19754 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692046-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 0 - ] - }, - { - "Addr": { - "IP": "158.197.4.226", - "Port": 25979 - }, - "Src": { - "IP": "3.129.219.107", - "Port": 50374 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692211-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 174 - ] - }, - { - "Addr": { - "IP": "170.42.135.37", - "Port": 34524 - }, - "Src": { - "IP": "73.125.53.212", - "Port": 49691 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692241-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 14 - ] - }, - { - "Addr": { - "IP": "234.69.254.147", - "Port": 31885 - }, - "Src": { - "IP": "167.106.61.34", - "Port": 22187 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692609-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 213 - ] - }, - { - "Addr": { - "IP": "32.176.173.90", - "Port": 17250 - }, - "Src": { - "IP": "118.91.243.12", - "Port": 26781 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692273-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 35 - ] - }, - { - "Addr": { - "IP": "162.154.114.145", - "Port": 13875 - }, - "Src": { - "IP": "198.178.108.166", - "Port": 59623 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692373-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 216 - ] - }, - { - "Addr": { - "IP": "67.128.167.93", - "Port": 50513 - }, - "Src": { - "IP": "104.93.115.28", - "Port": 48298 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692399-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 18 - ] - }, - { - "Addr": { - "IP": "132.175.221.206", - "Port": 61037 - }, - "Src": { - "IP": "112.49.189.65", - "Port": 56186 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692422-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 31 - ] - }, - { - "Addr": { - "IP": "155.49.24.238", - "Port": 26261 - }, - "Src": { - "IP": "97.10.121.246", - "Port": 8694 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692473-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 23 - ] - }, - { - "Addr": { - "IP": "22.215.7.233", - "Port": 32487 - }, - "Src": { - "IP": "214.236.105.23", - "Port": 26870 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692572-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 255 - ] - }, - { - "Addr": { - "IP": "253.170.228.231", - "Port": 5002 - }, - "Src": { - "IP": "225.49.137.209", - "Port": 16908 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692619-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 161 - ] - }, - { - "Addr": { - "IP": "162.126.204.39", - "Port": 62618 - }, - "Src": { - "IP": "250.214.168.131", - "Port": 3237 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.69203-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 7 - ] - }, - { - "Addr": { - "IP": "83.154.228.215", - "Port": 23508 - }, - "Src": { - "IP": "66.33.77.170", - "Port": 52207 - }, - "Attempts": 0, - "LastAttempt": 
"2017-11-07T01:11:34.692153-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 8 - ] - }, - { - "Addr": { - "IP": "132.49.63.65", - "Port": 53651 - }, - "Src": { - "IP": "250.164.163.212", - "Port": 8612 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692253-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 98 - ] - }, - { - "Addr": { - "IP": "200.168.34.12", - "Port": 61901 - }, - "Src": { - "IP": "133.185.186.115", - "Port": 14186 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692488-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 214 - ] - }, - { - "Addr": { - "IP": "31.93.45.219", - "Port": 61036 - }, - "Src": { - "IP": "176.191.214.170", - "Port": 33402 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692024-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 68 - ] - }, - { - "Addr": { - "IP": "250.189.27.93", - "Port": 51665 - }, - "Src": { - "IP": "93.161.116.107", - "Port": 53482 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692196-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 42 - ] - }, - { - "Addr": { - "IP": "50.7.17.126", - "Port": 64300 - }, - "Src": { - "IP": "233.234.64.214", - "Port": 61061 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692444-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 13 - ] - }, - { - "Addr": { - "IP": "88.85.81.64", - "Port": 34834 - }, - "Src": { - "IP": "4.240.150.250", - "Port": 63064 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692248-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 195 - ] - }, - { - "Addr": { - "IP": "242.117.244.198", - "Port": 4363 - }, - "Src": { - "IP": "149.29.34.42", - "Port": 62567 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692263-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 174 - ] - }, - { - "Addr": { - "IP": "245.155.175.114", - "Port": 37262 - }, - "Src": { - "IP": "75.85.36.49", - "Port": 7101 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692313-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 135 - ] - }, - { - "Addr": { - "IP": "224.184.241.26", - "Port": 55870 - }, - "Src": { - "IP": "52.15.194.216", - "Port": 4733 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692327-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 74 - ] - }, - { - "Addr": { - "IP": "43.178.26.188", - "Port": 55914 - }, - "Src": { - "IP": "103.250.250.35", - "Port": 1566 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692577-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 65 - ] - }, - { - "Addr": { - "IP": "102.117.172.117", - "Port": 35855 - }, - "Src": { - "IP": "114.152.204.187", - "Port": 21156 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692158-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 80 - ] - }, - { - "Addr": { - "IP": "39.33.41.199", - "Port": 51600 - }, - "Src": { - "IP": "119.65.88.38", - "Port": 41239 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692188-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 24 - ] - }, - { - "Addr": { - "IP": "63.164.56.227", - "Port": 1660 - }, - "Src": { - "IP": 
"169.54.47.92", - "Port": 2818 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692227-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 10 - ] - }, - { - "Addr": { - "IP": "50.183.223.115", - "Port": 26910 - }, - "Src": { - "IP": "115.98.199.4", - "Port": 8767 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692201-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 65 - ] - }, - { - "Addr": { - "IP": "132.94.203.167", - "Port": 53156 - }, - "Src": { - "IP": "17.195.234.168", - "Port": 29405 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692294-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 240 - ] - }, - { - "Addr": { - "IP": "135.194.230.212", - "Port": 14340 - }, - "Src": { - "IP": "160.2.241.10", - "Port": 36553 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692363-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 63 - ] - }, - { - "Addr": { - "IP": "116.53.200.25", - "Port": 27092 - }, - "Src": { - "IP": "219.104.163.247", - "Port": 50476 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692543-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 201 - ] - }, - { - "Addr": { - "IP": "125.77.44.185", - "Port": 55291 - }, - "Src": { - "IP": "77.15.232.117", - "Port": 6934 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692589-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 8 - ] - }, - { - "Addr": { - "IP": "27.221.35.172", - "Port": 26418 - }, - "Src": { - "IP": "252.18.49.70", - "Port": 9835 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692068-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 80 - ] - }, - { - "Addr": { - "IP": "133.225.167.135", - "Port": 59468 - }, - "Src": { - "IP": "110.223.163.74", - "Port": 22576 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.69213-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 164 - ] - }, - { - "Addr": { - "IP": "155.131.178.240", - "Port": 60476 - }, - "Src": { - "IP": "143.82.157.1", - "Port": 43821 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692173-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 34 - ] - }, - { - "Addr": { - "IP": "207.13.48.52", - "Port": 28549 - }, - "Src": { - "IP": "238.224.177.29", - "Port": 44100 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692594-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 113 - ] - }, - { - "Addr": { - "IP": "91.137.2.184", - "Port": 44887 - }, - "Src": { - "IP": "72.131.70.84", - "Port": 29960 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692627-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 199 - ] - }, - { - "Addr": { - "IP": "169.59.252.76", - "Port": 57711 - }, - "Src": { - "IP": "194.132.91.119", - "Port": 18037 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692478-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 124 - ] - }, - { - "Addr": { - "IP": "25.174.143.229", - "Port": 41540 - }, - "Src": { - "IP": "58.215.132.148", - "Port": 64950 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692534-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 88 - ] - }, - { - "Addr": 
{ - "IP": "71.239.78.239", - "Port": 46938 - }, - "Src": { - "IP": "156.98.186.169", - "Port": 32046 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692116-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 154 - ] - }, - { - "Addr": { - "IP": "94.137.107.61", - "Port": 20756 - }, - "Src": { - "IP": "101.201.138.179", - "Port": 22877 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692414-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 233 - ] - }, - { - "Addr": { - "IP": "216.62.174.112", - "Port": 60162 - }, - "Src": { - "IP": "225.114.119.144", - "Port": 1575 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692464-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 132 - ] - }, - { - "Addr": { - "IP": "65.183.81.125", - "Port": 17511 - }, - "Src": { - "IP": "12.96.14.61", - "Port": 42308 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692308-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 153 - ] - }, - { - "Addr": { - "IP": "142.26.87.52", - "Port": 41967 - }, - "Src": { - "IP": "60.124.157.139", - "Port": 20727 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692321-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 29 - ] - }, - { - "Addr": { - "IP": "13.77.198.44", - "Port": 54508 - }, - "Src": { - "IP": "142.73.70.174", - "Port": 19525 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692553-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 170 - ] - }, - { - "Addr": { - "IP": "63.192.219.12", - "Port": 46603 - }, - "Src": { - "IP": "26.136.66.29", - "Port": 38924 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692558-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 203 - ] - }, - { - "Addr": { - "IP": "120.82.251.151", - "Port": 43723 - }, - "Src": { - "IP": "136.104.122.219", - "Port": 47452 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692599-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 103 - ] - }, - { - "Addr": { - "IP": "74.79.96.159", - "Port": 46646 - }, - "Src": { - "IP": "218.60.242.116", - "Port": 5361 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692145-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 121 - ] - }, - { - "Addr": { - "IP": "194.65.211.174", - "Port": 43464 - }, - "Src": { - "IP": "87.5.112.153", - "Port": 56348 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692163-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 96 - ] - }, - { - "Addr": { - "IP": "237.158.179.80", - "Port": 32231 - }, - "Src": { - "IP": "210.240.52.244", - "Port": 29142 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692183-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 27 - ] - }, - { - "Addr": { - "IP": "81.157.122.4", - "Port": 9917 - }, - "Src": { - "IP": "213.226.144.152", - "Port": 29950 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692614-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 33 - ] - }, - { - "Addr": { - "IP": "180.147.73.220", - "Port": 367 - }, - "Src": { - "IP": "32.229.253.215", - "Port": 62165 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692529-07:00", - "LastSuccess": 
"0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 240 - ] - }, - { - "Addr": { - "IP": "83.110.235.17", - "Port": 33231 - }, - "Src": { - "IP": "230.54.162.85", - "Port": 51569 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692563-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 234 - ] - }, - { - "Addr": { - "IP": "100.252.20.2", - "Port": 1633 - }, - "Src": { - "IP": "52.136.47.198", - "Port": 31916 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692644-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 254 - ] - }, - { - "Addr": { - "IP": "74.5.247.79", - "Port": 18703 - }, - "Src": { - "IP": "200.247.68.128", - "Port": 55844 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692378-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 160 - ] - }, - { - "Addr": { - "IP": "17.220.231.87", - "Port": 59015 - }, - "Src": { - "IP": "54.207.49.4", - "Port": 17877 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692404-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 21 - ] - }, - { - "Addr": { - "IP": "156.194.57.127", - "Port": 18944 - }, - "Src": { - "IP": "154.94.235.84", - "Port": 61610 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692439-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 32 - ] - }, - { - "Addr": { - "IP": "137.57.172.158", - "Port": 32031 - }, - "Src": { - "IP": "144.160.225.126", - "Port": 43225 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692568-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 4 - ] - }, - { - "Addr": { - "IP": "101.220.101.200", - "Port": 26480 - }, - "Src": { - "IP": "130.225.42.1", - "Port": 2522 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692637-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 173 - ] - }, - { - "Addr": { - "IP": "136.233.185.164", - "Port": 34011 - }, - "Src": { - "IP": "112.127.216.43", - "Port": 55317 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692649-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 106 - ] - }, - { - "Addr": { - "IP": "101.189.107.148", - "Port": 28671 - }, - "Src": { - "IP": "213.55.140.235", - "Port": 2547 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692178-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 72 - ] - }, - { - "Addr": { - "IP": "61.190.60.64", - "Port": 58467 - }, - "Src": { - "IP": "206.86.120.31", - "Port": 54422 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692358-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 191 - ] - }, - { - "Addr": { - "IP": "227.51.127.223", - "Port": 52754 - }, - "Src": { - "IP": "124.24.12.47", - "Port": 59878 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692393-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 122 - ] - }, - { - "Addr": { - "IP": "101.19.152.238", - "Port": 47491 - }, - "Src": { - "IP": "211.30.216.184", - "Port": 17610 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692135-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 251 - ] - }, - { - "Addr": { - "IP": "182.198.35.238", - "Port": 15065 - }, - "Src": { - "IP": "239.67.104.149", - "Port": 43039 - }, - 
"Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692268-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 179 - ] - }, - { - "Addr": { - "IP": "233.12.68.51", - "Port": 47544 - }, - "Src": { - "IP": "203.224.119.48", - "Port": 23337 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692454-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 51 - ] - }, - { - "Addr": { - "IP": "181.30.35.80", - "Port": 500 - }, - "Src": { - "IP": "174.200.32.161", - "Port": 10174 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692503-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 69 - ] - }, - { - "Addr": { - "IP": "49.104.89.21", - "Port": 54774 - }, - "Src": { - "IP": "245.95.238.161", - "Port": 14339 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692654-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 120 - ] - }, - { - "Addr": { - "IP": "65.150.169.199", - "Port": 11589 - }, - "Src": { - "IP": "150.110.183.207", - "Port": 17694 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692041-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 182 - ] - }, - { - "Addr": { - "IP": "84.203.198.48", - "Port": 47122 - }, - "Src": { - "IP": "141.209.147.221", - "Port": 26085 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692056-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 74 - ] - }, - { - "Addr": { - "IP": "220.10.106.180", - "Port": 27439 - }, - "Src": { - "IP": "124.170.244.46", - "Port": 5249 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692125-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 166 - ] - }, - { - "Addr": { - "IP": "120.208.32.34", - "Port": 27224 - }, - "Src": { - "IP": "64.194.118.103", - "Port": 24388 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.69251-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 149 - ] - }, - { - "Addr": { - "IP": "245.182.67.231", - "Port": 58067 - }, - "Src": { - "IP": "62.108.238.220", - "Port": 41851 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692522-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 201 - ] - }, - { - "Addr": { - "IP": "50.81.160.105", - "Port": 8113 - }, - "Src": { - "IP": "129.187.68.121", - "Port": 58612 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692284-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 94 - ] - }, - { - "Addr": { - "IP": "101.116.47.155", - "Port": 20287 - }, - "Src": { - "IP": "94.34.167.170", - "Port": 41821 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692299-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 134 - ] - }, - { - "Addr": { - "IP": "159.253.213.86", - "Port": 5222 - }, - "Src": { - "IP": "124.47.162.125", - "Port": 45742 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692429-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 25 - ] - }, - { - "Addr": { - "IP": "124.72.81.213", - "Port": 35723 - }, - "Src": { - "IP": "201.65.186.55", - "Port": 26602 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692493-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 199 - ] - }, - { - "Addr": { - "IP": "77.216.197.130", - 
"Port": 49129 - }, - "Src": { - "IP": "245.160.14.27", - "Port": 38908 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692517-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 58 - ] - }, - { - "Addr": { - "IP": "175.46.154.0", - "Port": 15297 - }, - "Src": { - "IP": "6.10.7.13", - "Port": 9657 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692583-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 89 - ] - }, - { - "Addr": { - "IP": "176.71.131.235", - "Port": 14342 - }, - "Src": { - "IP": "1.36.215.198", - "Port": 21709 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692206-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 135 - ] - }, - { - "Addr": { - "IP": "34.211.134.186", - "Port": 31608 - }, - "Src": { - "IP": "187.87.12.183", - "Port": 32977 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692221-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 71 - ] - }, - { - "Addr": { - "IP": "238.63.227.107", - "Port": 49502 - }, - "Src": { - "IP": "185.51.127.143", - "Port": 22728 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692483-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 61 - ] - }, - { - "Addr": { - "IP": "160.65.76.45", - "Port": 27307 - }, - "Src": { - "IP": "170.175.198.16", - "Port": 44759 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692051-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 36 - ] - }, - { - "Addr": { - "IP": "152.22.79.90", - "Port": 25861 - }, - "Src": { - "IP": "216.183.31.190", - "Port": 9185 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692409-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 163 - ] - }, - { - "Addr": { - "IP": "200.2.175.37", - "Port": 57270 - }, - "Src": { - "IP": "108.20.254.94", - "Port": 32812 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692434-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 96 - ] - }, - { - "Addr": { - "IP": "111.16.237.10", - "Port": 45200 - }, - "Src": { - "IP": "215.82.246.115", - "Port": 42333 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692469-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 21 - ] - }, - { - "Addr": { - "IP": "166.217.195.221", - "Port": 4579 - }, - "Src": { - "IP": "148.153.131.183", - "Port": 13848 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692498-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 78 - ] - }, - { - "Addr": { - "IP": "1.226.156.147", - "Port": 61660 - }, - "Src": { - "IP": "169.138.16.69", - "Port": 23455 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692548-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 121 - ] - }, - { - "Addr": { - "IP": "108.209.27.58", - "Port": 59102 - }, - "Src": { - "IP": "140.27.139.90", - "Port": 52154 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692014-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 53 - ] - }, - { - "Addr": { - "IP": "221.244.202.95", - "Port": 5032 - }, - "Src": { - "IP": "230.152.141.80", - "Port": 19457 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692168-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - 
"Buckets": [ - 106 - ] - }, - { - "Addr": { - "IP": "55.87.1.138", - "Port": 39686 - }, - "Src": { - "IP": "55.22.167.132", - "Port": 35663 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692258-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 174 - ] - }, - { - "Addr": { - "IP": "209.53.148.74", - "Port": 18502 - }, - "Src": { - "IP": "195.108.121.25", - "Port": 16730 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692304-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 180 - ] - }, - { - "Addr": { - "IP": "21.66.206.236", - "Port": 10771 - }, - "Src": { - "IP": "236.195.50.16", - "Port": 30697 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692368-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 22 - ] - }, - { - "Addr": { - "IP": "190.87.236.91", - "Port": 58378 - }, - "Src": { - "IP": "72.224.218.34", - "Port": 44817 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692459-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 127 - ] - }, - { - "Addr": { - "IP": "197.172.79.170", - "Port": 24958 - }, - "Src": { - "IP": "71.22.4.12", - "Port": 28558 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692036-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 251 - ] - }, - { - "Addr": { - "IP": "160.176.234.94", - "Port": 47013 - }, - "Src": { - "IP": "212.172.24.59", - "Port": 29594 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692062-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 99 - ] - }, - { - "Addr": { - "IP": "170.206.180.18", - "Port": 26212 - }, - "Src": { - "IP": "228.135.62.18", - "Port": 26164 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692234-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 34 - ] - } - ] -} diff --git a/test/fuzz/p2p/secretconnection/fuzz_test.go b/test/fuzz/p2p/secretconnection/fuzz_test.go deleted file mode 100644 index d48dc42670..0000000000 --- a/test/fuzz/p2p/secretconnection/fuzz_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package secretconnection_test - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/test/fuzz/p2p/secretconnection" -) - -const testdataCasesDir = "testdata/cases" - -func TestSecretConnectionTestdataCases(t *testing.T) { - entries, err := os.ReadDir(testdataCasesDir) - require.NoError(t, err) - - for _, e := range entries { - entry := e - t.Run(entry.Name(), func(t *testing.T) { - defer func() { - r := recover() - require.Nilf(t, r, "testdata/cases test panic") - }() - f, err := os.Open(filepath.Join(testdataCasesDir, entry.Name())) - require.NoError(t, err) - input, err := ioutil.ReadAll(f) - require.NoError(t, err) - secretconnection.Fuzz(input) - }) - } -} diff --git a/test/fuzz/p2p/secretconnection/init-corpus/main.go b/test/fuzz/p2p/secretconnection/init-corpus/main.go deleted file mode 100644 index 635f2d99f9..0000000000 --- a/test/fuzz/p2p/secretconnection/init-corpus/main.go +++ /dev/null @@ -1,48 +0,0 @@ -// nolint: gosec -package main - -import ( - "flag" - "fmt" - "io/ioutil" - "log" - "os" - "path/filepath" -) - -func main() { - baseDir := flag.String("base", ".", `where the "corpus" directory will live`) - flag.Parse() - - initCorpus(*baseDir) -} - -func initCorpus(baseDir string) { - log.SetFlags(0) - - 
corpusDir := filepath.Join(baseDir, "corpus") - if err := os.MkdirAll(corpusDir, 0755); err != nil { - log.Fatal(err) - } - - data := []string{ - "dadc04c2-cfb1-4aa9-a92a-c0bf780ec8b6", - "", - " ", - " a ", - `{"a": 12, "tsp": 999, k: "blue"}`, - `9999.999`, - `""`, - `Tendermint fuzzing`, - } - - for i, datum := range data { - filename := filepath.Join(corpusDir, fmt.Sprintf("%d", i)) - - if err := ioutil.WriteFile(filename, []byte(datum), 0644); err != nil { - log.Fatalf("can't write %v to %q: %v", datum, filename, err) - } - - log.Printf("wrote %q", filename) - } -} diff --git a/test/fuzz/rpc/jsonrpc/server/fuzz_test.go b/test/fuzz/rpc/jsonrpc/server/fuzz_test.go deleted file mode 100644 index 50b9194fec..0000000000 --- a/test/fuzz/rpc/jsonrpc/server/fuzz_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package server_test - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/test/fuzz/rpc/jsonrpc/server" -) - -const testdataCasesDir = "testdata/cases" - -func TestServerTestdataCases(t *testing.T) { - entries, err := os.ReadDir(testdataCasesDir) - require.NoError(t, err) - - for _, e := range entries { - entry := e - t.Run(entry.Name(), func(t *testing.T) { - defer func() { - r := recover() - require.Nilf(t, r, "testdata/cases test panic") - }() - f, err := os.Open(filepath.Join(testdataCasesDir, entry.Name())) - require.NoError(t, err) - input, err := ioutil.ReadAll(f) - require.NoError(t, err) - server.Fuzz(input) - }) - } -} diff --git a/test/fuzz/rpc/jsonrpc/server/handler.go b/test/fuzz/rpc/jsonrpc/server/handler.go deleted file mode 100644 index 08f7e2b6b8..0000000000 --- a/test/fuzz/rpc/jsonrpc/server/handler.go +++ /dev/null @@ -1,63 +0,0 @@ -package server - -import ( - "bytes" - "encoding/json" - "io/ioutil" - "net/http" - "net/http/httptest" - - "github.com/tendermint/tendermint/libs/log" - rs "github.com/tendermint/tendermint/rpc/jsonrpc/server" - types "github.com/tendermint/tendermint/rpc/jsonrpc/types" -) - -var rpcFuncMap = map[string]*rs.RPCFunc{ - "c": rs.NewRPCFunc(func(s string, i int) (string, int) { return "foo", 200 }, "s,i", false), -} -var mux *http.ServeMux - -func init() { - mux = http.NewServeMux() - lgr := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false) - rs.RegisterRPCFuncs(mux, rpcFuncMap, lgr) -} - -func Fuzz(data []byte) int { - if len(data) == 0 { - return -1 - } - - req, _ := http.NewRequest("POST", "http://localhost/", bytes.NewReader(data)) - rec := httptest.NewRecorder() - mux.ServeHTTP(rec, req) - res := rec.Result() - blob, err := ioutil.ReadAll(res.Body) - if err != nil { - panic(err) - } - if err := res.Body.Close(); err != nil { - panic(err) - } - if len(blob) == 0 { - return 1 - } - - if outputJSONIsSlice(blob) { - recv := []types.RPCResponse{} - if err := json.Unmarshal(blob, &recv); err != nil { - panic(err) - } - return 1 - } - recv := &types.RPCResponse{} - if err := json.Unmarshal(blob, recv); err != nil { - panic(err) - } - return 1 -} - -func outputJSONIsSlice(input []byte) bool { - slice := []interface{}{} - return json.Unmarshal(input, &slice) == nil -} diff --git a/test/fuzz/rpc/jsonrpc/server/testdata/1184f5b8d4b6dd08709cf1513f26744167065e0d b/test/fuzz/rpc/jsonrpc/server/testdata/1184f5b8d4b6dd08709cf1513f26744167065e0d deleted file mode 100644 index 6e7ea636ee..0000000000 --- a/test/fuzz/rpc/jsonrpc/server/testdata/1184f5b8d4b6dd08709cf1513f26744167065e0d +++ /dev/null @@ -1 +0,0 @@ -[0] \ No newline at end of file diff --git 
a/test/fuzz/rpc/jsonrpc/server/testdata/cases/1184f5b8d4b6dd08709cf1513f26744167065e0d b/test/fuzz/rpc/jsonrpc/server/testdata/cases/1184f5b8d4b6dd08709cf1513f26744167065e0d deleted file mode 100644 index 6e7ea636ee..0000000000 --- a/test/fuzz/rpc/jsonrpc/server/testdata/cases/1184f5b8d4b6dd08709cf1513f26744167065e0d +++ /dev/null @@ -1 +0,0 @@ -[0] \ No newline at end of file diff --git a/test/fuzz/rpc/jsonrpc/server/testdata/cases/bbcffb1cdb2cea50fd3dd8c1524905551d0b2e79 b/test/fuzz/rpc/jsonrpc/server/testdata/cases/bbcffb1cdb2cea50fd3dd8c1524905551d0b2e79 deleted file mode 100644 index e0be2aa4b8..0000000000 --- a/test/fuzz/rpc/jsonrpc/server/testdata/cases/bbcffb1cdb2cea50fd3dd8c1524905551d0b2e79 +++ /dev/null @@ -1 +0,0 @@ -[0,0] \ No newline at end of file diff --git a/test/fuzz/rpc/jsonrpc/server/testdata/cases/clusterfuzz-testcase-minimized-fuzz_rpc_jsonrpc_server-4738572803506176 b/test/fuzz/rpc/jsonrpc/server/testdata/cases/clusterfuzz-testcase-minimized-fuzz_rpc_jsonrpc_server-4738572803506176 deleted file mode 100644 index 0f7836d2fb..0000000000 --- a/test/fuzz/rpc/jsonrpc/server/testdata/cases/clusterfuzz-testcase-minimized-fuzz_rpc_jsonrpc_server-4738572803506176 +++ /dev/null @@ -1 +0,0 @@ -[{"iD":7},{"iD":7}] \ No newline at end of file diff --git a/test/fuzz/rpc/jsonrpc/server/testdata/clusterfuzz-testcase-minimized-fuzz_rpc_jsonrpc_server-4738572803506176 b/test/fuzz/rpc/jsonrpc/server/testdata/clusterfuzz-testcase-minimized-fuzz_rpc_jsonrpc_server-4738572803506176 deleted file mode 100644 index 0f7836d2fb..0000000000 --- a/test/fuzz/rpc/jsonrpc/server/testdata/clusterfuzz-testcase-minimized-fuzz_rpc_jsonrpc_server-4738572803506176 +++ /dev/null @@ -1 +0,0 @@ -[{"iD":7},{"iD":7}] \ No newline at end of file diff --git a/test/fuzz/tests/mempool_test.go b/test/fuzz/tests/mempool_test.go new file mode 100644 index 0000000000..2c86230366 --- /dev/null +++ b/test/fuzz/tests/mempool_test.go @@ -0,0 +1,33 @@ +//go:build gofuzz || go1.18 + +package tests + +import ( + "context" + "testing" + + abciclient "github.com/tendermint/tendermint/abci/client" + "github.com/tendermint/tendermint/abci/example/kvstore" + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/internal/mempool" + "github.com/tendermint/tendermint/libs/log" +) + +func FuzzMempool(f *testing.F) { + app := kvstore.NewApplication() + logger := log.NewNopLogger() + conn := abciclient.NewLocalClient(logger, app) + err := conn.Start(context.TODO()) + if err != nil { + panic(err) + } + + cfg := config.DefaultMempoolConfig() + cfg.Broadcast = false + + mp := mempool.NewTxMempool(logger, cfg, conn) + + f.Fuzz(func(t *testing.T, data []byte) { + _ = mp.CheckTx(context.Background(), data, nil, mempool.TxInfo{}) + }) +} diff --git a/test/fuzz/p2p/secretconnection/read_write.go b/test/fuzz/tests/p2p_secretconnection_test.go similarity index 92% rename from test/fuzz/p2p/secretconnection/read_write.go rename to test/fuzz/tests/p2p_secretconnection_test.go index 9701460f50..65f268a7bf 100644 --- a/test/fuzz/p2p/secretconnection/read_write.go +++ b/test/fuzz/tests/p2p_secretconnection_test.go @@ -1,19 +1,28 @@ -package secretconnection +//go:build gofuzz || go1.18 + +package tests import ( "bytes" "fmt" "io" "log" + "testing" "github.com/tendermint/tendermint/crypto/ed25519" + "github.com/tendermint/tendermint/internal/libs/async" sc "github.com/tendermint/tendermint/internal/p2p/conn" - "github.com/tendermint/tendermint/libs/async" ) -func Fuzz(data []byte) int { +func FuzzP2PSecretConnection(f 
*testing.F) { + f.Fuzz(func(t *testing.T, data []byte) { + fuzz(data) + }) +} + +func fuzz(data []byte) { if len(data) == 0 { - return -1 + return } fooConn, barConn := makeSecretConnPair() @@ -44,14 +53,11 @@ func Fuzz(data []byte) int { } copy(dataRead[totalRead:], buf[:m]) totalRead += m - log.Printf("total read: %d", totalRead) } if !bytes.Equal(data, dataRead) { panic("bytes written != read") } - - return 1 } type kvstoreConn struct { diff --git a/test/fuzz/tests/rpc_jsonrpc_server_test.go b/test/fuzz/tests/rpc_jsonrpc_server_test.go new file mode 100644 index 0000000000..67dee9ef28 --- /dev/null +++ b/test/fuzz/tests/rpc_jsonrpc_server_test.go @@ -0,0 +1,72 @@ +//go:build gofuzz || go1.18 + +package tests + +import ( + "bytes" + "context" + "encoding/json" + "io" + "net/http" + "net/http/httptest" + "testing" + + "github.com/tendermint/tendermint/libs/log" + rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server" + "github.com/tendermint/tendermint/rpc/jsonrpc/types" +) + +func FuzzRPCJSONRPCServer(f *testing.F) { + type args struct { + S string `json:"s"` + I int `json:"i"` + } + var rpcFuncMap = map[string]*rpcserver.RPCFunc{ + "c": rpcserver.NewRPCFunc(func(context.Context, *args) (string, error) { + return "foo", nil + }), + } + + mux := http.NewServeMux() + rpcserver.RegisterRPCFuncs(mux, rpcFuncMap, log.NewNopLogger()) + f.Fuzz(func(t *testing.T, data []byte) { + if len(data) == 0 { + return + } + + req, err := http.NewRequest("POST", "http://localhost/", bytes.NewReader(data)) + if err != nil { + panic(err) + } + rec := httptest.NewRecorder() + mux.ServeHTTP(rec, req) + res := rec.Result() + blob, err := io.ReadAll(res.Body) + if err != nil { + panic(err) + } + if err := res.Body.Close(); err != nil { + panic(err) + } + if len(blob) == 0 { + return + } + + if outputJSONIsSlice(blob) { + var recv []types.RPCResponse + if err := json.Unmarshal(blob, &recv); err != nil { + panic(err) + } + return + } + var recv types.RPCResponse + if err := json.Unmarshal(blob, &recv); err != nil { + panic(err) + } + }) +} + +func outputJSONIsSlice(input []byte) bool { + var slice []json.RawMessage + return json.Unmarshal(input, &slice) == nil +} diff --git a/test/fuzz/tests/testdata/fuzz/FuzzMempool/1daffc1033a0bfc7f0c2bccb7440674e67a9e2cc0a4531863076254ada059863 b/test/fuzz/tests/testdata/fuzz/FuzzMempool/1daffc1033a0bfc7f0c2bccb7440674e67a9e2cc0a4531863076254ada059863 new file mode 100644 index 0000000000..88467017a6 --- /dev/null +++ b/test/fuzz/tests/testdata/fuzz/FuzzMempool/1daffc1033a0bfc7f0c2bccb7440674e67a9e2cc0a4531863076254ada059863 @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("S1") diff --git a/test/fuzz/tests/testdata/fuzz/FuzzMempool/582528ddfad69eb57775199a43e0f9fd5c94bba343ce7bb6724d4ebafe311ed4 b/test/fuzz/tests/testdata/fuzz/FuzzMempool/582528ddfad69eb57775199a43e0f9fd5c94bba343ce7bb6724d4ebafe311ed4 new file mode 100644 index 0000000000..a96f5599e6 --- /dev/null +++ b/test/fuzz/tests/testdata/fuzz/FuzzMempool/582528ddfad69eb57775199a43e0f9fd5c94bba343ce7bb6724d4ebafe311ed4 @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("0") diff --git a/test/fuzz/tests/testdata/fuzz/FuzzMempool/d40a98862ed393eb712e47a91bcef18e6f24cf368bb4bd248c7a7101ef8e178d b/test/fuzz/tests/testdata/fuzz/FuzzMempool/d40a98862ed393eb712e47a91bcef18e6f24cf368bb4bd248c7a7101ef8e178d new file mode 100644 index 0000000000..e0f2da225c --- /dev/null +++ b/test/fuzz/tests/testdata/fuzz/FuzzMempool/d40a98862ed393eb712e47a91bcef18e6f24cf368bb4bd248c7a7101ef8e178d @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("") \ No newline 
at end of file diff --git a/test/fuzz/tests/testdata/fuzz/FuzzP2PSecretConnection/0f1a3d10e4d642e42a3ccd9bad652d355431f5824327271aca6f648e8cd4e786 b/test/fuzz/tests/testdata/fuzz/FuzzP2PSecretConnection/0f1a3d10e4d642e42a3ccd9bad652d355431f5824327271aca6f648e8cd4e786 new file mode 100644 index 0000000000..f0b8ea88be --- /dev/null +++ b/test/fuzz/tests/testdata/fuzz/FuzzP2PSecretConnection/0f1a3d10e4d642e42a3ccd9bad652d355431f5824327271aca6f648e8cd4e786 @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte(" ") \ No newline at end of file diff --git a/test/fuzz/tests/testdata/fuzz/FuzzP2PSecretConnection/172c521d1c5e7a5cce55e39b235928fc1c8c4adbb4635913c204c4724cf47d20 b/test/fuzz/tests/testdata/fuzz/FuzzP2PSecretConnection/172c521d1c5e7a5cce55e39b235928fc1c8c4adbb4635913c204c4724cf47d20 new file mode 100644 index 0000000000..a3668a6db3 --- /dev/null +++ b/test/fuzz/tests/testdata/fuzz/FuzzP2PSecretConnection/172c521d1c5e7a5cce55e39b235928fc1c8c4adbb4635913c204c4724cf47d20 @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("{\"a\": 12, \"tsp\": 999, k: \"blue\"}") \ No newline at end of file diff --git a/test/fuzz/tests/testdata/fuzz/FuzzP2PSecretConnection/a9481542b8154bfe8fe868c8907cb66557347cb9b45709b17da861997d7cabea b/test/fuzz/tests/testdata/fuzz/FuzzP2PSecretConnection/a9481542b8154bfe8fe868c8907cb66557347cb9b45709b17da861997d7cabea new file mode 100644 index 0000000000..98241189c1 --- /dev/null +++ b/test/fuzz/tests/testdata/fuzz/FuzzP2PSecretConnection/a9481542b8154bfe8fe868c8907cb66557347cb9b45709b17da861997d7cabea @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("\"\"") \ No newline at end of file diff --git a/test/fuzz/tests/testdata/fuzz/FuzzP2PSecretConnection/ba3758980fe724f83bdf1cb97caa73657b4a78d48e5fd6fc3b1590d24799e803 b/test/fuzz/tests/testdata/fuzz/FuzzP2PSecretConnection/ba3758980fe724f83bdf1cb97caa73657b4a78d48e5fd6fc3b1590d24799e803 new file mode 100644 index 0000000000..c479f26049 --- /dev/null +++ b/test/fuzz/tests/testdata/fuzz/FuzzP2PSecretConnection/ba3758980fe724f83bdf1cb97caa73657b4a78d48e5fd6fc3b1590d24799e803 @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("9999.999") \ No newline at end of file diff --git a/test/fuzz/tests/testdata/fuzz/FuzzP2PSecretConnection/c22ff3cdf5145a03ecc6a2c18a7ec4eb3c9e1384af92cfa14cf50951535b6c85 b/test/fuzz/tests/testdata/fuzz/FuzzP2PSecretConnection/c22ff3cdf5145a03ecc6a2c18a7ec4eb3c9e1384af92cfa14cf50951535b6c85 new file mode 100644 index 0000000000..280f15bf79 --- /dev/null +++ b/test/fuzz/tests/testdata/fuzz/FuzzP2PSecretConnection/c22ff3cdf5145a03ecc6a2c18a7ec4eb3c9e1384af92cfa14cf50951535b6c85 @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte(" a ") \ No newline at end of file diff --git a/test/fuzz/tests/testdata/fuzz/FuzzP2PSecretConnection/d40a98862ed393eb712e47a91bcef18e6f24cf368bb4bd248c7a7101ef8e178d b/test/fuzz/tests/testdata/fuzz/FuzzP2PSecretConnection/d40a98862ed393eb712e47a91bcef18e6f24cf368bb4bd248c7a7101ef8e178d new file mode 100644 index 0000000000..e0f2da225c --- /dev/null +++ b/test/fuzz/tests/testdata/fuzz/FuzzP2PSecretConnection/d40a98862ed393eb712e47a91bcef18e6f24cf368bb4bd248c7a7101ef8e178d @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("") \ No newline at end of file diff --git a/test/fuzz/tests/testdata/fuzz/FuzzP2PSecretConnection/dc7304b2cddeadd08647d30b1d027f749960376c338e14a81e0396ffc6e6d6bd b/test/fuzz/tests/testdata/fuzz/FuzzP2PSecretConnection/dc7304b2cddeadd08647d30b1d027f749960376c338e14a81e0396ffc6e6d6bd new file mode 100644 index 0000000000..017f8d03fc --- /dev/null +++ 
b/test/fuzz/tests/testdata/fuzz/FuzzP2PSecretConnection/dc7304b2cddeadd08647d30b1d027f749960376c338e14a81e0396ffc6e6d6bd @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("Tendermint fuzzing") \ No newline at end of file diff --git a/test/fuzz/tests/testdata/fuzz/FuzzRPCJSONRPCServer/058ae08103537df220789dea46edb8b7cf7368e90da0cb35888a1452f4d114a2 b/test/fuzz/tests/testdata/fuzz/FuzzRPCJSONRPCServer/058ae08103537df220789dea46edb8b7cf7368e90da0cb35888a1452f4d114a2 new file mode 100644 index 0000000000..53742f182c --- /dev/null +++ b/test/fuzz/tests/testdata/fuzz/FuzzRPCJSONRPCServer/058ae08103537df220789dea46edb8b7cf7368e90da0cb35888a1452f4d114a2 @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("[{\"iD\":7},{\"iD\":7}]") \ No newline at end of file diff --git a/test/fuzz/tests/testdata/fuzz/FuzzRPCJSONRPCServer/2ab633cb322fca9e76fc965b430076844ebd0b3c4f30f5263b94a3d50f00bce6 b/test/fuzz/tests/testdata/fuzz/FuzzRPCJSONRPCServer/2ab633cb322fca9e76fc965b430076844ebd0b3c4f30f5263b94a3d50f00bce6 new file mode 100644 index 0000000000..ef2bd593af --- /dev/null +++ b/test/fuzz/tests/testdata/fuzz/FuzzRPCJSONRPCServer/2ab633cb322fca9e76fc965b430076844ebd0b3c4f30f5263b94a3d50f00bce6 @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("[0,0]") \ No newline at end of file diff --git a/test/fuzz/tests/testdata/fuzz/FuzzRPCJSONRPCServer/aadb440fa55da05c1185e3e64b33c804d994cce06781e8c39481411793a8a73f b/test/fuzz/tests/testdata/fuzz/FuzzRPCJSONRPCServer/aadb440fa55da05c1185e3e64b33c804d994cce06781e8c39481411793a8a73f new file mode 100644 index 0000000000..fb9f339634 --- /dev/null +++ b/test/fuzz/tests/testdata/fuzz/FuzzRPCJSONRPCServer/aadb440fa55da05c1185e3e64b33c804d994cce06781e8c39481411793a8a73f @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("[0]") \ No newline at end of file diff --git a/test/fuzz/tests/testdata/fuzz/FuzzRPCJSONRPCServer/d40a98862ed393eb712e47a91bcef18e6f24cf368bb4bd248c7a7101ef8e178d b/test/fuzz/tests/testdata/fuzz/FuzzRPCJSONRPCServer/d40a98862ed393eb712e47a91bcef18e6f24cf368bb4bd248c7a7101ef8e178d new file mode 100644 index 0000000000..e0f2da225c --- /dev/null +++ b/test/fuzz/tests/testdata/fuzz/FuzzRPCJSONRPCServer/d40a98862ed393eb712e47a91bcef18e6f24cf368bb4bd248c7a7101ef8e178d @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("") \ No newline at end of file diff --git a/third_party/proto/gogoproto/gogo.proto b/third_party/proto/gogoproto/gogo.proto deleted file mode 100644 index 27960ecfbe..0000000000 --- a/third_party/proto/gogoproto/gogo.proto +++ /dev/null @@ -1,147 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copied from https://github.com/gogo/protobuf/blob/master/gogoproto/gogo.proto -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto2"; -package gogoproto; - -import "google/protobuf/descriptor.proto"; - -option java_package = "com.google.protobuf"; -option java_outer_classname = "GoGoProtos"; -option go_package = "github.com/gogo/protobuf/gogoproto"; - -extend google.protobuf.EnumOptions { - optional bool goproto_enum_prefix = 62001; - optional bool goproto_enum_stringer = 62021; - optional bool enum_stringer = 62022; - optional string enum_customname = 62023; - optional bool enumdecl = 62024; -} - -extend google.protobuf.EnumValueOptions { - optional string enumvalue_customname = 66001; -} - -extend google.protobuf.FileOptions { - optional bool goproto_getters_all = 63001; - optional bool goproto_enum_prefix_all = 63002; - optional bool goproto_stringer_all = 63003; - optional bool verbose_equal_all = 63004; - optional bool face_all = 63005; - optional bool gostring_all = 63006; - optional bool populate_all = 63007; - optional bool stringer_all = 63008; - optional bool onlyone_all = 63009; - - optional bool equal_all = 63013; - optional bool description_all = 63014; - optional bool testgen_all = 63015; - optional bool benchgen_all = 63016; - optional bool marshaler_all = 63017; - optional bool unmarshaler_all = 63018; - optional bool stable_marshaler_all = 63019; - - optional bool sizer_all = 63020; - - optional bool goproto_enum_stringer_all = 63021; - optional bool enum_stringer_all = 63022; - - optional bool unsafe_marshaler_all = 63023; - optional bool unsafe_unmarshaler_all = 63024; - - optional bool goproto_extensions_map_all = 63025; - optional bool goproto_unrecognized_all = 63026; - optional bool gogoproto_import = 63027; - optional bool protosizer_all = 63028; - optional bool compare_all = 63029; - optional bool typedecl_all = 63030; - optional bool enumdecl_all = 63031; - - optional bool goproto_registration = 63032; - optional bool messagename_all = 63033; - - optional bool goproto_sizecache_all = 63034; - optional bool goproto_unkeyed_all = 63035; -} - -extend google.protobuf.MessageOptions { - optional bool goproto_getters = 64001; - optional bool goproto_stringer = 64003; - optional bool verbose_equal = 64004; - optional bool face = 64005; - optional bool gostring = 64006; - optional bool populate = 64007; - optional bool stringer = 67008; - optional bool onlyone = 64009; - - optional bool equal = 64013; - optional bool description = 64014; - optional bool testgen = 64015; - optional bool benchgen = 64016; - optional bool marshaler = 64017; - optional bool unmarshaler = 64018; - optional bool stable_marshaler = 64019; - - optional bool sizer = 64020; - - optional bool unsafe_marshaler = 64023; - optional bool unsafe_unmarshaler = 64024; - - optional bool goproto_extensions_map = 64025; - optional bool goproto_unrecognized = 64026; - - optional bool protosizer = 64028; - optional bool compare = 64029; - - optional bool typedecl = 64030; - - optional bool messagename = 64033; - - optional bool goproto_sizecache = 64034; - optional bool goproto_unkeyed = 
64035; -} - -extend google.protobuf.FieldOptions { - optional bool nullable = 65001; - optional bool embed = 65002; - optional string customtype = 65003; - optional string customname = 65004; - optional string jsontag = 65005; - optional string moretags = 65006; - optional string casttype = 65007; - optional string castkey = 65008; - optional string castvalue = 65009; - - optional bool stdtime = 65010; - optional bool stdduration = 65011; - optional bool wktpointer = 65012; - - optional string castrepeated = 65013; -} \ No newline at end of file diff --git a/tools/proto/Dockerfile b/tools/proto/Dockerfile deleted file mode 100644 index 5008226904..0000000000 --- a/tools/proto/Dockerfile +++ /dev/null @@ -1,27 +0,0 @@ -FROM bufbuild/buf:latest as buf - -FROM golang:1.14-alpine3.11 as builder - -RUN apk add --update --no-cache build-base curl git upx && \ - rm -rf /var/cache/apk/* - -ENV GOLANG_PROTOBUF_VERSION=1.3.1 \ - GOGO_PROTOBUF_VERSION=1.3.2 - -RUN GO111MODULE=on go get \ - github.com/golang/protobuf/protoc-gen-go@v${GOLANG_PROTOBUF_VERSION} \ - github.com/gogo/protobuf/protoc-gen-gogo@v${GOGO_PROTOBUF_VERSION} \ - github.com/gogo/protobuf/protoc-gen-gogofaster@v${GOGO_PROTOBUF_VERSION} && \ - mv /go/bin/protoc-gen-go* /usr/local/bin/ - - -FROM alpine:edge - -WORKDIR /work - -RUN echo 'http://dl-cdn.alpinelinux.org/alpine/edge/testing' >> /etc/apk/repositories && \ - apk add --update --no-cache clang && \ - rm -rf /var/cache/apk/* - -COPY --from=builder /usr/local/bin /usr/local/bin -COPY --from=buf /usr/local/bin /usr/local/bin diff --git a/tools/tm-signer-harness/Dockerfile b/tools/tm-signer-harness/Dockerfile deleted file mode 100644 index c1df615722..0000000000 --- a/tools/tm-signer-harness/Dockerfile +++ /dev/null @@ -1,4 +0,0 @@ -ARG TENDERMINT_VERSION=latest -FROM dashpay/tenderdash:${TENDERMINT_VERSION} - -COPY tm-signer-harness /usr/bin/tm-signer-harness diff --git a/tools/tm-signer-harness/Makefile b/tools/tm-signer-harness/Makefile deleted file mode 100644 index 37aefbb40c..0000000000 --- a/tools/tm-signer-harness/Makefile +++ /dev/null @@ -1,21 +0,0 @@ -.PHONY: build install docker-image - -TENDERMINT_VERSION?=latest -BUILD_TAGS?='tenderdash' -VERSION := $(shell git describe --always) -BUILD_FLAGS = -ldflags "-X github.com/dashevo/tenderdash/version.TMCoreSemVer=$(VERSION)" - -.DEFAULT_GOAL := build - -build: - CGO_ENABLED=0 go build $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o ../../build/tm-signer-harness main.go - -install: - CGO_ENABLED=0 go install $(BUILD_FLAGS) -tags $(BUILD_TAGS) . - -docker-image: - GOOS=linux GOARCH=amd64 go build $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o tm-signer-harness main.go - docker build \ - --build-arg TENDERMINT_VERSION=$(TENDERMINT_VERSION) \ - -t tendermint/tm-signer-harness:$(TENDERMINT_VERSION) . - rm -rf tm-signer-harness diff --git a/tools/tm-signer-harness/README.md b/tools/tm-signer-harness/README.md deleted file mode 100644 index 7add3a9977..0000000000 --- a/tools/tm-signer-harness/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# tm-signer-harness - -See the [`tm-signer-harness` -documentation](https://tendermint.com/docs/tools/remote-signer-validation.html) -for more details.
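Taken together, the fuzzing hunks earlier in this diff replace the standalone go-fuzz style harnesses (test/fuzz/p2p/secretconnection and test/fuzz/rpc/jsonrpc/server, with their init-corpus and testdata trees) by native Go 1.18 fuzz targets under test/fuzz/tests, whose seed corpora are committed under testdata/fuzz/<TargetName>/ in the "go test fuzz v1" file format. A minimal sketch of the pattern these targets follow; the target name, seed values, and command shown here are illustrative, not part of this change:

//go:build gofuzz || go1.18

package tests

import "testing"

// FuzzExample is a hypothetical target mirroring FuzzMempool above.
func FuzzExample(f *testing.F) {
	// Seeds registered here complement the committed corpus files, whose
	// first line is "go test fuzz v1" followed by a []byte(...) literal.
	f.Add([]byte("Tendermint fuzzing"))

	f.Fuzz(func(t *testing.T, data []byte) {
		// Exercise the code under test with arbitrary input; a panic or
		// test failure makes the toolchain save data as a new corpus entry.
		_ = data
	})
}

During an ordinary go test run the seed corpus is replayed as regular test cases; continuous fuzzing is opt-in, e.g. go test -run='^$' -fuzz=FuzzExample -fuzztime=30s ./test/fuzz/tests.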
diff --git a/tools/tm-signer-harness/internal/test_harness.go b/tools/tm-signer-harness/internal/test_harness.go deleted file mode 100644 index 3dcfd72fa9..0000000000 --- a/tools/tm-signer-harness/internal/test_harness.go +++ /dev/null @@ -1,443 +0,0 @@ -package internal - -import ( - "bytes" - "context" - "fmt" - "net" - "os" - "os/signal" - "time" - - "github.com/dashevo/dashd-go/btcjson" - - "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/crypto/ed25519" - "github.com/tendermint/tendermint/crypto/tmhash" - "github.com/tendermint/tendermint/internal/state" - "github.com/tendermint/tendermint/libs/log" - tmnet "github.com/tendermint/tendermint/libs/net" - tmos "github.com/tendermint/tendermint/libs/os" - "github.com/tendermint/tendermint/privval" - tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - "github.com/tendermint/tendermint/types" -) - -// Test harness error codes (which act as exit codes when the test harness fails). -const ( - NoError int = iota // 0 - ErrInvalidParameters // 1 - ErrMaxAcceptRetriesReached // 2 - ErrFailedToLoadGenesisFile // 3 - ErrFailedToCreateListener // 4 - ErrFailedToStartListener // 5 - ErrInterrupted // 6 - ErrOther // 7 - ErrTestPublicKeyFailed // 8 - ErrTestSignProposalFailed // 9 - ErrTestSignVoteFailed // 10 -) - -var voteTypes = []tmproto.SignedMsgType{tmproto.PrevoteType, tmproto.PrecommitType} - -// TestHarnessError allows us to keep track of which exit code should be used -// when exiting the main program. -type TestHarnessError struct { - Code int // The exit code to return - Err error // The original error - Info string // Any additional information -} - -var _ error = (*TestHarnessError)(nil) - -// TestHarness allows for testing of a remote signer to ensure compatibility -// with this version of Tendermint. -type TestHarness struct { - addr string - signerClient *privval.SignerClient - fpv *privval.FilePV - chainID string - quorumHash crypto.QuorumHash - acceptRetries int - logger log.Logger - exitWhenComplete bool - exitCode int -} - -// TestHarnessConfig provides configuration to set up a remote signer test -// harness. -type TestHarnessConfig struct { - BindAddr string - - KeyFile string - StateFile string - GenesisFile string - - AcceptDeadline time.Duration - ConnDeadline time.Duration - AcceptRetries int - - SecretConnKey ed25519.PrivKey - - ExitWhenComplete bool // Whether or not to call os.Exit when the harness has completed. -} - -// timeoutError can be used to check if an error returned from the net package -// was due to a timeout. -type timeoutError interface { - Timeout() bool -} - -// NewTestHarness will load Tendermint data from the given files (including -// validator public/private keypairs and chain details) and create a new -// harness. -func NewTestHarness(logger log.Logger, cfg TestHarnessConfig) (*TestHarness, error) { - keyFile := ExpandPath(cfg.KeyFile) - stateFile := ExpandPath(cfg.StateFile) - logger.Info("Loading private validator configuration", "keyFile", keyFile, "stateFile", stateFile) - // NOTE: LoadFilePV ultimately calls os.Exit on failure. No error will be - // returned if this call fails.
- fpv, err := privval.LoadFilePV(keyFile, stateFile) - if err != nil { - return nil, err - } - - genesisFile := ExpandPath(cfg.GenesisFile) - logger.Info("Loading chain ID from genesis file", "genesisFile", genesisFile) - st, err := state.MakeGenesisDocFromFile(genesisFile) - if err != nil { - return nil, newTestHarnessError(ErrFailedToLoadGenesisFile, err, genesisFile) - } - logger.Info("Loaded genesis file", "chainID", st.ChainID) - - spv, err := newTestHarnessListener(logger, cfg) - if err != nil { - return nil, newTestHarnessError(ErrFailedToCreateListener, err, "") - } - - signerClient, err := privval.NewSignerClient(spv, st.ChainID) - if err != nil { - return nil, newTestHarnessError(ErrFailedToCreateListener, err, "") - } - - return &TestHarness{ - addr: cfg.BindAddr, - signerClient: signerClient, - fpv: fpv, - chainID: st.ChainID, - quorumHash: st.QuorumHash, - acceptRetries: cfg.AcceptRetries, - logger: logger, - exitWhenComplete: cfg.ExitWhenComplete, - exitCode: 0, - }, nil -} - -// Run will execute the tests associated with this test harness. The intention -// here is to call this from one's `main` function, as the way it succeeds or -// fails at present is to call os.Exit() with an exit code related to the error -// that caused the tests to fail, or exit code 0 on success. -func (th *TestHarness) Run() { - c := make(chan os.Signal, 1) - signal.Notify(c, os.Interrupt) - go func() { - for sig := range c { - th.logger.Info("Caught interrupt, terminating...", "sig", sig) - th.Shutdown(newTestHarnessError(ErrInterrupted, nil, "")) - } - }() - - th.logger.Info("Starting test harness") - accepted := false - var startErr error - - for acceptRetries := th.acceptRetries; acceptRetries > 0; acceptRetries-- { - th.logger.Info("Attempting to accept incoming connection", "acceptRetries", acceptRetries) - - if err := th.signerClient.WaitForConnection(10 * time.Millisecond); err != nil { - // if it wasn't a timeout error - if _, ok := err.(timeoutError); !ok { - th.logger.Error("Failed to start listener", "err", err) - th.Shutdown(newTestHarnessError(ErrFailedToStartListener, err, "")) - // we need the return statements in case this is being run - // from a unit test - otherwise this function will just die - // when os.Exit is called - return - } - startErr = err - } else { - th.logger.Info("Accepted external connection") - accepted = true - break - } - } - if !accepted { - th.logger.Error("Maximum accept retries reached", "acceptRetries", th.acceptRetries) - th.Shutdown(newTestHarnessError(ErrMaxAcceptRetriesReached, startErr, "")) - return - } - - // Run the tests - if err := th.TestPublicKey(); err != nil { - th.Shutdown(err) - return - } - if err := th.TestSignProposal(); err != nil { - th.Shutdown(err) - return - } - if err := th.TestSignVote(); err != nil { - th.Shutdown(err) - return - } - th.logger.Info("SUCCESS! All tests passed.") - th.Shutdown(nil) -} - -// TestPublicKey just validates that we can (1) fetch the public key from the -// remote signer, and (2) it matches the public key we've configured for our -// local Tendermint version. 
-func (th *TestHarness) TestPublicKey() error { - th.logger.Info("TEST: Public key of remote signer") - fpvk, err := th.fpv.GetPubKey(context.Background(), th.quorumHash) - if err != nil { - return err - } - th.logger.Info("Local", "pubKey", fpvk) - sck, err := th.signerClient.GetPubKey(context.Background(), th.quorumHash) - if err != nil { - return err - } - th.logger.Info("Remote", "pubKey", sck) - if !bytes.Equal(fpvk.Bytes(), sck.Bytes()) { - th.logger.Error("FAILED: Local and remote public keys do not match") - return newTestHarnessError(ErrTestPublicKeyFailed, nil, "") - } - return nil -} - -// TestSignProposal makes sure the remote signer can successfully sign -// proposals. -func (th *TestHarness) TestSignProposal() error { - th.logger.Info("TEST: Signing of proposals") - // sha256 hash of "hash" - hash := tmhash.Sum([]byte("hash")) - prop := &types.Proposal{ - Type: tmproto.ProposalType, - Height: 100, - CoreChainLockedHeight: 1, - Round: 0, - POLRound: -1, - BlockID: types.BlockID{ - Hash: hash, - PartSetHeader: types.PartSetHeader{ - Hash: hash, - Total: 1000000, - }, - }, - Timestamp: time.Now(), - } - p := prop.ToProto() - propSignID := types.ProposalBlockSignID(th.chainID, p, btcjson.LLMQType_5_60, th.quorumHash) - if _, err := th.signerClient.SignProposal(context.Background(), th.chainID, btcjson.LLMQType_5_60, th.quorumHash, p); err != nil { - th.logger.Error("FAILED: Signing of proposal", "err", err) - return newTestHarnessError(ErrTestSignProposalFailed, err, "") - } - prop.Signature = p.Signature - th.logger.Debug("Signed proposal", "prop", prop) - // first check that it's a basically valid proposal - if err := prop.ValidateBasic(); err != nil { - th.logger.Error("FAILED: Signed proposal is invalid", "err", err) - return newTestHarnessError(ErrTestSignProposalFailed, err, "") - } - sck, err := th.signerClient.GetPubKey(context.Background(), th.quorumHash) - if err != nil { - return err - } - // now validate the signature on the proposal - if sck.VerifySignatureDigest(propSignID, prop.Signature) { - th.logger.Info("Successfully validated proposal signature") - } else { - th.logger.Error("FAILED: Proposal signature validation failed") - return newTestHarnessError(ErrTestSignProposalFailed, nil, "signature validation failed") - } - return nil -} - -// TestSignVote makes sure the remote signer can successfully sign all kinds of -// votes. 
-func (th *TestHarness) TestSignVote() error { - th.logger.Info("TEST: Signing of votes") - for _, voteType := range voteTypes { - th.logger.Info("Testing vote type", "type", voteType) - hash := tmhash.Sum([]byte("hash")) - vote := &types.Vote{ - Type: voteType, - Height: 101, - Round: 0, - BlockID: types.BlockID{ - Hash: hash, - PartSetHeader: types.PartSetHeader{ - Hash: hash, - Total: 1000000, - }, - }, - ValidatorIndex: 0, - ValidatorProTxHash: tmhash.Sum([]byte("pro_tx_hash")), - } - - stateID := types.RandStateID().WithHeight(vote.Height - 1) - - v := vote.ToProto() - - voteBlockID := types.VoteBlockSignID(th.chainID, v, btcjson.LLMQType_5_60, th.quorumHash) - stateIDSignID := stateID.SignID(th.chainID, btcjson.LLMQType_5_60, th.quorumHash) - // sign the vote - if err := th.signerClient.SignVote(context.Background(), th.chainID, btcjson.LLMQType_5_60, th.quorumHash, v, stateID, nil); err != nil { - th.logger.Error("FAILED: Signing of vote", "err", err) - return newTestHarnessError(ErrTestSignVoteFailed, err, fmt.Sprintf("voteType=%d", voteType)) - } - vote.BlockSignature = v.BlockSignature - vote.StateSignature = v.StateSignature - th.logger.Debug("Signed vote", "vote", vote) - // validate the contents of the vote - if err := vote.ValidateBasic(); err != nil { - th.logger.Error("FAILED: Signed vote is invalid", "err", err) - return newTestHarnessError(ErrTestSignVoteFailed, err, fmt.Sprintf("voteType=%d", voteType)) - } - sck, err := th.signerClient.GetPubKey(context.Background(), th.quorumHash) - if err != nil { - return err - } - - // now validate the signature on the vote - if sck.VerifySignatureDigest(voteBlockID, vote.BlockSignature) { - th.logger.Info("Successfully validated vote signature", "type", voteType) - } else { - th.logger.Error("FAILED: Vote signature validation failed", "type", voteType) - return newTestHarnessError(ErrTestSignVoteFailed, nil, "signature validation failed") - } - - if sck.VerifySignatureDigest(stateIDSignID, vote.StateSignature) { - th.logger.Info("Successfully validated vote signature", "type", voteType) - } else { - th.logger.Error("FAILED: Vote signature validation failed", "type", voteType) - return newTestHarnessError(ErrTestSignVoteFailed, nil, "signature validation failed") - } - } - return nil -} - -// Shutdown will kill the test harness and attempt to close all open sockets -// gracefully. If the supplied error is nil, it is assumed that the exit code -// should be 0. If err is not nil, it will exit with an exit code related to the -// error. -func (th *TestHarness) Shutdown(err error) { - var exitCode int - - if err == nil { - exitCode = NoError - } else if therr, ok := err.(*TestHarnessError); ok { - exitCode = therr.Code - } else { - exitCode = ErrOther - } - th.exitCode = exitCode - - // in case sc.Stop() takes too long - if th.exitWhenComplete { - go func() { - time.Sleep(time.Duration(5) * time.Second) - th.logger.Error("Forcibly exiting program after timeout") - os.Exit(exitCode) - }() - } - - err = th.signerClient.Close() - if err != nil { - th.logger.Error("Failed to cleanly stop listener: %s", err.Error()) - } - - if th.exitWhenComplete { - os.Exit(exitCode) - } -} - -// newTestHarnessListener creates our client instance which we will use for testing.
-func newTestHarnessListener(logger log.Logger, cfg TestHarnessConfig) (*privval.SignerListenerEndpoint, error) { - proto, addr := tmnet.ProtocolAndAddress(cfg.BindAddr) - if proto == "unix" { - // make sure the socket doesn't exist - if so, try to delete it - if tmos.FileExists(addr) { - if err := os.Remove(addr); err != nil { - logger.Error("Failed to remove existing Unix domain socket", "addr", addr) - return nil, err - } - } - } - ln, err := net.Listen(proto, addr) - if err != nil { - return nil, err - } - logger.Info("Listening", "proto", proto, "addr", addr) - var svln net.Listener - switch proto { - case "unix": - unixLn := privval.NewUnixListener(ln) - privval.UnixListenerTimeoutAccept(cfg.AcceptDeadline)(unixLn) - privval.UnixListenerTimeoutReadWrite(cfg.ConnDeadline)(unixLn) - svln = unixLn - case "tcp": - tcpLn := privval.NewTCPListener(ln, cfg.SecretConnKey) - privval.TCPListenerTimeoutAccept(cfg.AcceptDeadline)(tcpLn) - privval.TCPListenerTimeoutReadWrite(cfg.ConnDeadline)(tcpLn) - logger.Info("Resolved TCP address for listener", "addr", tcpLn.Addr()) - svln = tcpLn - default: - _ = ln.Close() - logger.Error("Unsupported protocol (must be unix:// or tcp://)", "proto", proto) - return nil, newTestHarnessError(ErrInvalidParameters, nil, fmt.Sprintf("Unsupported protocol: %s", proto)) - } - return privval.NewSignerListenerEndpoint(logger, svln), nil -} - -func newTestHarnessError(code int, err error, info string) *TestHarnessError { - return &TestHarnessError{ - Code: code, - Err: err, - Info: info, - } -} - -func (e *TestHarnessError) Error() string { - var msg string - switch e.Code { - case ErrInvalidParameters: - msg = "Invalid parameters supplied to application" - case ErrMaxAcceptRetriesReached: - msg = "Maximum accept retries reached" - case ErrFailedToLoadGenesisFile: - msg = "Failed to load genesis file" - case ErrFailedToCreateListener: - msg = "Failed to create listener" - case ErrFailedToStartListener: - msg = "Failed to start listener" - case ErrInterrupted: - msg = "Interrupted" - case ErrTestPublicKeyFailed: - msg = "Public key validation test failed" - case ErrTestSignProposalFailed: - msg = "Proposal signing validation test failed" - case ErrTestSignVoteFailed: - msg = "Vote signing validation test failed" - default: - msg = "Unknown error" - } - if len(e.Info) > 0 { - msg = fmt.Sprintf("%s: %s", msg, e.Info) - } - if e.Err != nil { - msg = fmt.Sprintf("%s (original error: %s)", msg, e.Err.Error()) - } - return msg -} diff --git a/tools/tm-signer-harness/internal/test_harness_test.go b/tools/tm-signer-harness/internal/test_harness_test.go deleted file mode 100644 index 349314fa07..0000000000 --- a/tools/tm-signer-harness/internal/test_harness_test.go +++ /dev/null @@ -1,256 +0,0 @@ -package internal - -import ( - "context" - "fmt" - "io/ioutil" - "os" - "testing" - "time" - - "github.com/tendermint/tendermint/crypto/bls12381" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/crypto/ed25519" - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/privval" - "github.com/tendermint/tendermint/types" -) - -const ( - keyFileContents = `{ - "private_keys" : { - "28405D978AE15B97876411212E3ABD66515A285D901ACE06758DC1012030DA07" : { - "pub_key": { - "type": "tendermint/PubKeyBLS12381", - "value": "F5BjXeh0DppqaxX7a3LzoWr6CXPZcZeba6VHYdbiUCxQ23b00mFD8FRZpCz9Ug1E" - }, - "priv_key": { - "type": "tendermint/PrivKeyBLS12381", - "value": 
"RokcLOxJWTyBkh5HPbdIACng/B65M8a5PYH1Nw6xn70=" - }, - "threshold_public_key": { - "type": "tendermint/PubKeyBLS12381", - "value": "F5BjXeh0DppqaxX7a3LzoWr6CXPZcZeba6VHYdbiUCxQ23b00mFD8FRZpCz9Ug1E" - } - } - }, - "update_heights":{}, - "first_height_of_quorums":{}, - "pro_tx_hash": "51BF39CC1F41B9FC63DFA5B1EDF3F0CA3AD5CAFAE4B12B4FE9263B08BB50C45F" -}` - - stateFileContents = `{ - "height": "0", - "round": 0, - "step": 0 -}` - - genesisFileContents = `{ - "genesis_time": "2019-01-15T11:56:34.8963Z", - "chain_id": "test-chain-0XwP5E", - "consensus_params": { - "block": { - "max_bytes": "22020096", - "max_gas": "-1", - "time_iota_ms": "1000" - }, - "evidence": { - "max_age_num_blocks": "100000", - "max_age_duration": "172800000000000", - "max_num": 50 - }, - "validator": { - "pub_key_types": [ - "bls12381" - ] - } - }, - "validators": [ - { - "pub_key": { - "type": "tendermint/PubKeyBLS12381", - "value": "F5BjXeh0DppqaxX7a3LzoWr6CXPZcZeba6VHYdbiUCxQ23b00mFD8FRZpCz9Ug1E" - }, - "power": "100", - "name": "", - "pro_tx_hash": "51BF39CC1F41B9FC63DFA5B1EDF3F0CA3AD5CAFAE4B12B4FE9263B08BB50C45F" - } - ], - "quorum_hash": "28405D978AE15B97876411212E3ABD66515A285D901ACE06758DC1012030DA07", - "threshold_public_key": { - "type": "tendermint/PubKeyBLS12381", - "value": "F5BjXeh0DppqaxX7a3LzoWr6CXPZcZeba6VHYdbiUCxQ23b00mFD8FRZpCz9Ug1E" - }, - "app_hash": "" -}` - - defaultConnDeadline = 100 -) - -func TestRemoteSignerTestHarnessMaxAcceptRetriesReached(t *testing.T) { - cfg := makeConfig(t, 1, 2) - defer cleanup(cfg) - - th, err := NewTestHarness(log.TestingLogger(), cfg) - require.NoError(t, err) - th.Run() - assert.Equal(t, ErrMaxAcceptRetriesReached, th.exitCode) -} - -func TestRemoteSignerTestHarnessSuccessfulRun(t *testing.T) { - harnessTest( - t, - func(th *TestHarness) *privval.SignerServer { - privKey, err := th.fpv.GetPrivateKey(context.TODO(), th.quorumHash) - if err != nil { - panic(err) - } - thresholdPublicKey, err := th.fpv.Key.ThresholdPublicKeyForQuorumHash(th.quorumHash) - if err != nil { - panic(err) - } - return newMockSignerServer(t, th, privKey, th.fpv.Key.ProTxHash, th.quorumHash, thresholdPublicKey, - false, false) - }, - NoError, - ) -} - -func TestRemoteSignerPublicKeyCheckFailed(t *testing.T) { - harnessTest( - t, - func(th *TestHarness) *privval.SignerServer { - thresholdPublicKey, err := th.fpv.Key.ThresholdPublicKeyForQuorumHash(th.quorumHash) - if err != nil { - panic(err) - } - return newMockSignerServer(t, th, bls12381.GenPrivKey(), crypto.RandProTxHash(), th.quorumHash, - thresholdPublicKey, false, false) - }, - ErrTestPublicKeyFailed, - ) -} - -func TestRemoteSignerProposalSigningFailed(t *testing.T) { - harnessTest( - t, - func(th *TestHarness) *privval.SignerServer { - privKey, err := th.fpv.GetPrivateKey(context.TODO(), th.quorumHash) - if err != nil { - panic(err) - } - thresholdPublicKey, err := th.fpv.Key.ThresholdPublicKeyForQuorumHash(th.quorumHash) - if err != nil { - panic(err) - } - return newMockSignerServer(t, th, privKey, th.fpv.Key.ProTxHash, th.quorumHash, thresholdPublicKey, - true, false) - }, - ErrTestSignProposalFailed, - ) -} - -func TestRemoteSignerVoteSigningFailed(t *testing.T) { - harnessTest( - t, - func(th *TestHarness) *privval.SignerServer { - privKey, err := th.fpv.GetPrivateKey(context.TODO(), th.quorumHash) - if err != nil { - panic(err) - } - thresholdPublicKey, err := th.fpv.Key.ThresholdPublicKeyForQuorumHash(th.quorumHash) - if err != nil { - panic(err) - } - return newMockSignerServer(t, th, privKey, th.fpv.Key.ProTxHash, th.quorumHash, 
thresholdPublicKey, - false, true) - }, - ErrTestSignVoteFailed, - ) -} - -func newMockSignerServer( - t *testing.T, - th *TestHarness, - privKey crypto.PrivKey, - proTxHash crypto.ProTxHash, - quorumHash crypto.QuorumHash, - thresholdPublicKey crypto.PubKey, - breakProposalSigning bool, - breakVoteSigning bool, -) *privval.SignerServer { - mockPV := types.NewMockPVWithParams(privKey, proTxHash, quorumHash, thresholdPublicKey, - breakProposalSigning, breakVoteSigning) - - dialerEndpoint := privval.NewSignerDialerEndpoint( - th.logger, - privval.DialTCPFn( - th.addr, - time.Duration(defaultConnDeadline)*time.Millisecond, - ed25519.GenPrivKey(), - ), - ) - - return privval.NewSignerServer(dialerEndpoint, th.chainID, mockPV) -} - -// For running relatively standard tests. -func harnessTest(t *testing.T, signerServerMaker func(th *TestHarness) *privval.SignerServer, expectedExitCode int) { - cfg := makeConfig(t, 100, 3) - defer cleanup(cfg) - - th, err := NewTestHarness(log.TestingLogger(), cfg) - require.NoError(t, err) - donec := make(chan struct{}) - go func() { - defer close(donec) - th.Run() - }() - - ss := signerServerMaker(th) - require.NoError(t, ss.Start()) - assert.True(t, ss.IsRunning()) - defer ss.Stop() //nolint:errcheck // ignore for tests - - <-donec - assert.Equal(t, expectedExitCode, th.exitCode) -} - -func makeConfig(t *testing.T, acceptDeadline, acceptRetries int) TestHarnessConfig { - return TestHarnessConfig{ - BindAddr: privval.GetFreeLocalhostAddrPort(), - KeyFile: makeTempFile("tm-testharness-keyfile", keyFileContents), - StateFile: makeTempFile("tm-testharness-statefile", stateFileContents), - GenesisFile: makeTempFile("tm-testharness-genesisfile", genesisFileContents), - AcceptDeadline: time.Duration(acceptDeadline) * time.Millisecond, - ConnDeadline: time.Duration(defaultConnDeadline) * time.Millisecond, - AcceptRetries: acceptRetries, - SecretConnKey: ed25519.GenPrivKey(), - ExitWhenComplete: false, - } -} - -func cleanup(cfg TestHarnessConfig) { - os.Remove(cfg.KeyFile) - os.Remove(cfg.StateFile) - os.Remove(cfg.GenesisFile) -} - -func makeTempFile(name, content string) string { - tempFile, err := ioutil.TempFile("", fmt.Sprintf("%s-*", name)) - if err != nil { - panic(err) - } - if _, err := tempFile.Write([]byte(content)); err != nil { - tempFile.Close() - panic(err) - } - if err := tempFile.Close(); err != nil { - panic(err) - } - return tempFile.Name() -} diff --git a/tools/tm-signer-harness/internal/utils.go b/tools/tm-signer-harness/internal/utils.go deleted file mode 100644 index 9783ca95b3..0000000000 --- a/tools/tm-signer-harness/internal/utils.go +++ /dev/null @@ -1,25 +0,0 @@ -package internal - -import ( - "os/user" - "path/filepath" - "strings" -) - -// ExpandPath will check if the given path begins with a "~" symbol, and if so, -// will expand it to become the user's home directory. If it fails to expand the -// path it will automatically return the original path itself. 
-func ExpandPath(path string) string { - usr, err := user.Current() - if err != nil { - return path - } - - if path == "~" { - return usr.HomeDir - } else if strings.HasPrefix(path, "~/") { - return filepath.Join(usr.HomeDir, path[2:]) - } - - return path -} diff --git a/tools/tm-signer-harness/main.go b/tools/tm-signer-harness/main.go deleted file mode 100644 index 28d81d97f8..0000000000 --- a/tools/tm-signer-harness/main.go +++ /dev/null @@ -1,203 +0,0 @@ -package main - -import ( - "context" - "flag" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "time" - - "github.com/tendermint/tendermint/crypto/ed25519" - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/privval" - "github.com/tendermint/tendermint/tools/tm-signer-harness/internal" - "github.com/tendermint/tendermint/version" -) - -const ( - defaultAcceptRetries = 100 - defaultBindAddr = "tcp://127.0.0.1:0" - defaultAcceptDeadline = 1 - defaultConnDeadline = 3 - defaultExtractKeyOutput = "./signing.key" -) - -var defaultTMHome string - -var logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false) - -// Command line flags -var ( - flagAcceptRetries int - flagBindAddr string - flagTMHome string - flagKeyOutputPath string -) - -// Command line commands -var ( - rootCmd *flag.FlagSet - runCmd *flag.FlagSet - extractKeyCmd *flag.FlagSet - versionCmd *flag.FlagSet -) - -func init() { - rootCmd = flag.NewFlagSet("root", flag.ExitOnError) - rootCmd.Usage = func() { - fmt.Println(`Remote signer test harness for Tendermint. - -Usage: - tm-signer-harness [flags] - -Available Commands: - extract_key Extracts a signing key from a local Tendermint instance - help Help on the available commands - run Runs the test harness - version Display version information and exit - -Use "tm-signer-harness help " for more information about that command.`) - fmt.Println("") - } - - hd, err := os.UserHomeDir() - if err != nil { - fmt.Println("The UserHomeDir is not defined, setting the default TM Home PATH to \"~/.tendermint\"") - defaultTMHome = "~/.tendermint" - } else { - defaultTMHome = fmt.Sprintf("%s/.tendermint", hd) - } - - runCmd = flag.NewFlagSet("run", flag.ExitOnError) - runCmd.IntVar(&flagAcceptRetries, - "accept-retries", - defaultAcceptRetries, - "The number of attempts to listen for incoming connections") - runCmd.StringVar(&flagBindAddr, "addr", defaultBindAddr, "Bind to this address for the testing") - runCmd.StringVar(&flagTMHome, "tmhome", defaultTMHome, "Path to the Tendermint home directory") - runCmd.Usage = func() { - fmt.Println(`Runs the remote signer test harness for Tendermint. - -Usage: - tm-signer-harness run [flags] - -Flags:`) - runCmd.PrintDefaults() - fmt.Println("") - } - - extractKeyCmd = flag.NewFlagSet("extract_key", flag.ExitOnError) - extractKeyCmd.StringVar(&flagKeyOutputPath, - "output", - defaultExtractKeyOutput, - "Path to which signing key should be written") - extractKeyCmd.StringVar(&flagTMHome, "tmhome", defaultTMHome, "Path to the Tendermint home directory") - extractKeyCmd.Usage = func() { - fmt.Println(`Extracts a signing key from a local Tendermint instance for use in the remote -signer under test. - -Usage: - tm-signer-harness extract_key [flags] - -Flags:`) - extractKeyCmd.PrintDefaults() - fmt.Println("") - } - - versionCmd = flag.NewFlagSet("version", flag.ExitOnError) - versionCmd.Usage = func() { - fmt.Println(` -Prints the Tendermint version for which this remote signer harness was built. 
- -Usage: - tm-signer-harness version`) - fmt.Println("") - } -} - -func runTestHarness(acceptRetries int, bindAddr, tmhome string) { - tmhome = internal.ExpandPath(tmhome) - cfg := internal.TestHarnessConfig{ - BindAddr: bindAddr, - KeyFile: filepath.Join(tmhome, "config", "priv_validator_key.json"), - StateFile: filepath.Join(tmhome, "data", "priv_validator_state.json"), - GenesisFile: filepath.Join(tmhome, "config", "genesis.json"), - AcceptDeadline: time.Duration(defaultAcceptDeadline) * time.Second, - AcceptRetries: acceptRetries, - ConnDeadline: time.Duration(defaultConnDeadline) * time.Second, - SecretConnKey: ed25519.GenPrivKey(), - ExitWhenComplete: true, - } - harness, err := internal.NewTestHarness(logger, cfg) - if err != nil { - logger.Error(err.Error()) - if therr, ok := err.(*internal.TestHarnessError); ok { - os.Exit(therr.Code) - } - os.Exit(internal.ErrOther) - } - harness.Run() -} - -func extractKey(tmhome, outputPath string) { - keyFile := filepath.Join(internal.ExpandPath(tmhome), "config", "priv_validator_key.json") - stateFile := filepath.Join(internal.ExpandPath(tmhome), "data", "priv_validator_state.json") - fpv, err := privval.LoadFilePV(keyFile, stateFile) - if err != nil { - logger.Error("Can't load file pv", "err", err) - os.Exit(1) - } - quorumHash, _ := fpv.GetFirstQuorumHash(context.Background()) - privKey, _ := fpv.GetPrivateKey(context.TODO(), quorumHash) - pkb := privKey.Bytes() - if err := ioutil.WriteFile(internal.ExpandPath(outputPath), pkb[:32], 0600); err != nil { - logger.Info("Failed to write private key", "output", outputPath, "err", err) - os.Exit(1) - } - logger.Info("Successfully wrote private key", "output", outputPath) -} - -func main() { - if err := rootCmd.Parse(os.Args[1:]); err != nil { - fmt.Printf("Error parsing flags: %v\n", err) - os.Exit(1) - } - if rootCmd.NArg() == 0 || (rootCmd.NArg() == 1 && rootCmd.Arg(0) == "help") { - rootCmd.Usage() - os.Exit(0) - } - - switch rootCmd.Arg(0) { - case "help": - switch rootCmd.Arg(1) { - case "run": - runCmd.Usage() - case "extract_key": - extractKeyCmd.Usage() - case "version": - versionCmd.Usage() - default: - fmt.Printf("Unrecognized command: %s\n", rootCmd.Arg(1)) - os.Exit(1) - } - case "run": - if err := runCmd.Parse(os.Args[2:]); err != nil { - fmt.Printf("Error parsing flags: %v\n", err) - os.Exit(1) - } - runTestHarness(flagAcceptRetries, flagBindAddr, flagTMHome) - case "extract_key": - if err := extractKeyCmd.Parse(os.Args[2:]); err != nil { - fmt.Printf("Error parsing flags: %v\n", err) - os.Exit(1) - } - extractKey(flagTMHome, flagKeyOutputPath) - case "version": - fmt.Println(version.TMCoreSemVer) - default: - fmt.Printf("Unrecognized command: %s\n", flag.Arg(0)) - os.Exit(1) - } -} diff --git a/types/block.go b/types/block.go index 7cb4c956f4..eb722e247b 100644 --- a/types/block.go +++ b/types/block.go @@ -2,24 +2,23 @@ package types import ( "bytes" + "crypto/sha256" "encoding/base64" "encoding/binary" "encoding/hex" "errors" "fmt" "strings" + "sync" "time" "github.com/dashevo/dashd-go/btcjson" - "github.com/rs/zerolog" - "github.com/gogo/protobuf/proto" gogotypes "github.com/gogo/protobuf/types" + "github.com/rs/zerolog" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/merkle" - "github.com/tendermint/tendermint/crypto/tmhash" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" tmbytes "github.com/tendermint/tendermint/libs/bytes" tmmath "github.com/tendermint/tendermint/libs/math" tmcons 
"github.com/tendermint/tendermint/proto/tendermint/consensus" @@ -48,12 +47,12 @@ const ( // Block defines the atomic unit of a Tendermint blockchain. type Block struct { - mtx tmsync.Mutex + mtx sync.Mutex Header `json:"header"` Data `json:"data"` CoreChainLock *CoreChainLock `json:"core_chain_lock"` - Evidence EvidenceData `json:"evidence"` + Evidence EvidenceList `json:"evidence"` LastCommit *Commit `json:"last_commit"` } @@ -63,8 +62,12 @@ func (b *Block) StateID() StateID { } // BlockID returns a block ID of this block -func (b *Block) BlockID() BlockID { - return BlockID{Hash: b.Hash(), PartSetHeader: b.MakePartSet(BlockPartSizeBytes).Header()} +func (b *Block) BlockID() (BlockID, error) { + parSet, err := b.MakePartSet(BlockPartSizeBytes) + if err != nil { + return BlockID{}, err + } + return BlockID{Hash: b.Hash(), PartSetHeader: parSet.Header()}, nil } // ValidateBasic performs basic validation that doesn't involve state data. @@ -93,7 +96,7 @@ func (b *Block) ValidateBasic() error { return errors.New("nil LastPrecommits") } if err := b.LastCommit.ValidateBasic(); err != nil { - return fmt.Errorf("wrong LastPrecommits: %v", err) + return fmt.Errorf("wrong LastPrecommits: %w", err) } if w, g := b.LastCommit.Hash(), b.LastCommitHash; !bytes.Equal(w, g) { @@ -105,8 +108,8 @@ func (b *Block) ValidateBasic() error { return fmt.Errorf("wrong Header.DataHash. Expected %X, got %X", w, g) } - // NOTE: b.Evidence.Evidence may be nil, but we're just looping. - for i, ev := range b.Evidence.Evidence { + // NOTE: b.Evidence may be nil, but we're just looping. + for i, ev := range b.Evidence { if err := ev.ValidateBasic(); err != nil { return fmt.Errorf("invalid evidence (#%d): %v", i, err) } @@ -151,22 +154,22 @@ func (b *Block) Hash() tmbytes.HexBytes { // MakePartSet returns a PartSet containing parts of a serialized block. // This is the form in which the block is gossipped to peers. // CONTRACT: partSize is greater than zero. -func (b *Block) MakePartSet(partSize uint32) *PartSet { +func (b *Block) MakePartSet(partSize uint32) (*PartSet, error) { if b == nil { - return nil + return nil, errors.New("nil block") } b.mtx.Lock() defer b.mtx.Unlock() pbb, err := b.ToProto() if err != nil { - panic(err) + return nil, err } bz, err := proto.Marshal(pbb) if err != nil { - panic(err) + return nil, err } - return NewPartSetFromData(bz, partSize) + return NewPartSetFromData(bz, partSize), nil } // HashesTo is a convenience function that checks if a block hashes to the given argument. 
@@ -358,7 +361,7 @@ func MakeBlock(height int64, coreHeight uint32, coreChainLock *CoreChainLock, tx Txs: txs, }, CoreChainLock: coreChainLock, - Evidence: EvidenceData{Evidence: evidence}, + Evidence: evidence, LastCommit: lastCommit, } block.fillHeader() @@ -371,12 +374,12 @@ func MakeBlock(height int64, coreHeight uint32, coreChainLock *CoreChainLock, tx // NOTE: changes to the Header should be duplicated in: // - header.Hash() // - abci.Header -// - https://github.com/tendermint/spec/blob/master/spec/blockchain/blockchain.md +// - https://github.com/tendermint/tendermint/blob/master/spec/core/data_structures.md type Header struct { // basic block info Version version.Consensus `json:"version"` ChainID string `json:"chain_id"` - Height int64 `json:"height"` + Height int64 `json:"height,string"` CoreChainLockedHeight uint32 `json:"core_chain_locked_height"` Time time.Time `json:"time"` @@ -447,15 +450,15 @@ func (h Header) ValidateBasic() error { } if err := ValidateHash(h.LastCommitHash); err != nil { - return fmt.Errorf("wrong LastCommitHash: %v", err) + return fmt.Errorf("wrong LastCommitHash: %w", err) } if err := ValidateHash(h.DataHash); err != nil { - return fmt.Errorf("wrong DataHash: %v", err) + return fmt.Errorf("wrong DataHash: %w", err) } if err := ValidateHash(h.EvidenceHash); err != nil { - return fmt.Errorf("wrong EvidenceHash: %v", err) + return fmt.Errorf("wrong EvidenceHash: %w", err) } if len(h.ProposerProTxHash) != crypto.DefaultHashSize { @@ -468,17 +471,17 @@ func (h Header) ValidateBasic() error { // Basic validation of hashes related to application data. // Will validate fully against state in state#ValidateBlock. if err := ValidateHash(h.ValidatorsHash); err != nil { - return fmt.Errorf("wrong ValidatorsHash: %v", err) + return fmt.Errorf("wrong ValidatorsHash: %w", err) } if err := ValidateHash(h.NextValidatorsHash); err != nil { - return fmt.Errorf("wrong NextValidatorsHash: %v", err) + return fmt.Errorf("wrong NextValidatorsHash: %w", err) } if err := ValidateHash(h.ConsensusHash); err != nil { - return fmt.Errorf("wrong ConsensusHash: %v", err) + return fmt.Errorf("wrong ConsensusHash: %w", err) } // NOTE: AppHash is arbitrary length if err := ValidateHash(h.LastResultsHash); err != nil { - return fmt.Errorf("wrong LastResultsHash: %v", err) + return fmt.Errorf("wrong LastResultsHash: %w", err) } return nil @@ -569,7 +572,8 @@ func (h *Header) StringIndented(indent string) string { indent, h.EvidenceHash, indent, h.ProposerProTxHash, indent, h.ProposedAppVersion, - indent, h.Hash()) + indent, h.Hash(), + ) } // ToProto converts Header to protobuf @@ -691,7 +695,6 @@ func NewCommit(height int64, round int32, blockID BlockID, stateID StateID, quor } // GetCanonicalVote returns the message that is being voted on in the form of a vote without signatures. -// func (commit *Commit) GetCanonicalVote() *Vote { return &Vote{ Type: tmproto.PrecommitType, @@ -716,7 +719,8 @@ func (commit *Commit) VoteBlockRequestID() []byte { requestIDMessage = append(requestIDMessage, heightByteArray...) requestIDMessage = append(requestIDMessage, roundByteArray...) - return crypto.Sha256(requestIDMessage) + hash := sha256.Sum256(requestIDMessage) + return hash[:] } // CanonicalVoteVerifySignBytes returns the bytes of the Canonical Vote that is threshold signed. 
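One behavioral change that is easy to miss in the hunk above: Header.Height gains the ",string" option in its JSON tag (BlockMeta below gets the same treatment). With that option, encoding/json emits the int64 as a quoted decimal string, which keeps 64-bit values exact for consumers that decode JSON numbers as IEEE-754 doubles. A self-contained sketch; the header struct is a reduced stand-in, not the real type:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // header is a stand-in for types.Header, reduced to the field in question.
    type header struct {
        Height int64 `json:"height,string"`
    }

    func main() {
        out, err := json.Marshal(header{Height: 9007199254740993}) // 2^53 + 1: not exactly representable as a double
        if err != nil {
            panic(err)
        }
        fmt.Println(string(out)) // {"height":"9007199254740993"}

        var h header
        if err := json.Unmarshal(out, &h); err != nil { // the tag also accepts the quoted form on decode
            panic(err)
        }
        fmt.Println(h.Height) // 9007199254740993
    }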
@@ -773,7 +777,7 @@ func (commit *Commit) ValidateBasic() error { } if commit.Height >= 1 { - if commit.BlockID.IsZero() { + if commit.BlockID.IsNil() { return errors.New("commit cannot be for nil block") } if len(commit.ThresholdBlockSignature) != SignatureSize { @@ -1004,97 +1008,6 @@ func DataFromProto(dp *tmproto.Data) (Data, error) { return *data, nil } -//----------------------------------------------------------------------------- - -// EvidenceData contains any evidence of malicious wrong-doing by validators -type EvidenceData struct { - Evidence EvidenceList `json:"evidence"` - - // Volatile. Used as cache - hash tmbytes.HexBytes - byteSize int64 -} - -// Hash returns the hash of the data. -func (data *EvidenceData) Hash() tmbytes.HexBytes { - if data.hash == nil { - data.hash = data.Evidence.Hash() - } - return data.hash -} - -// ByteSize returns the total byte size of all the evidence -func (data *EvidenceData) ByteSize() int64 { - if data.byteSize == 0 && len(data.Evidence) != 0 { - pb, err := data.ToProto() - if err != nil { - panic(err) - } - data.byteSize = int64(pb.Size()) - } - return data.byteSize -} - -// StringIndented returns a string representation of the evidence. -func (data *EvidenceData) StringIndented(indent string) string { - if data == nil { - return "nil-Evidence" - } - evStrings := make([]string, tmmath.MinInt(len(data.Evidence), 21)) - for i, ev := range data.Evidence { - if i == 20 { - evStrings[i] = fmt.Sprintf("... (%v total)", len(data.Evidence)) - break - } - evStrings[i] = fmt.Sprintf("Evidence:%v", ev) - } - return fmt.Sprintf(`EvidenceData{ -%s %v -%s}#%v`, - indent, strings.Join(evStrings, "\n"+indent+" "), - indent, data.hash) -} - -// ToProto converts EvidenceData to protobuf -func (data *EvidenceData) ToProto() (*tmproto.EvidenceList, error) { - if data == nil { - return nil, errors.New("nil evidence data") - } - - evi := new(tmproto.EvidenceList) - eviBzs := make([]tmproto.Evidence, len(data.Evidence)) - for i := range data.Evidence { - protoEvi, err := EvidenceToProto(data.Evidence[i]) - if err != nil { - return nil, err - } - eviBzs[i] = *protoEvi - } - evi.Evidence = eviBzs - - return evi, nil -} - -// FromProto sets a protobuf EvidenceData to the given pointer. -func (data *EvidenceData) FromProto(eviData *tmproto.EvidenceList) error { - if eviData == nil { - return errors.New("nil evidenceData") - } - - eviBzs := make(EvidenceList, len(eviData.Evidence)) - for i := range eviData.Evidence { - evi, err := EvidenceFromProto(&eviData.Evidence[i]) - if err != nil { - return err - } - eviBzs[i] = evi - } - data.Evidence = eviBzs - data.byteSize = int64(eviData.Size()) - - return nil -} - //-------------------------------------------------------------------------------- // BlockID @@ -1132,17 +1045,17 @@ func (blockID BlockID) ValidateBasic() error { return nil } -// IsZero returns true if this is the BlockID of a nil block. -func (blockID BlockID) IsZero() bool { +// IsNil returns true if this is the BlockID of a nil block. +func (blockID BlockID) IsNil() bool { return len(blockID.Hash) == 0 && blockID.PartSetHeader.IsZero() } // IsComplete returns true if this is a valid BlockID of a non-nil block. func (blockID BlockID) IsComplete() bool { - return len(blockID.Hash) == tmhash.Size && + return len(blockID.Hash) == crypto.HashSize && blockID.PartSetHeader.Total > 0 && - len(blockID.PartSetHeader.Hash) == tmhash.Size + len(blockID.PartSetHeader.Hash) == crypto.HashSize } // String returns a human readable string representation of the BlockID. 
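That closes the types/block.go diff; two of its changes ripple out to every caller. First, MakePartSet and BlockID now report marshalling failures as errors instead of panicking (or returning a bare nil for a nil block). A caller-side sketch of the new contract; proposalParts is a hypothetical helper, not part of this changeset, and it assumes the exported BlockPartSizeBytes constant used in the hunks above:

    package example

    import (
        "fmt"

        "github.com/tendermint/tendermint/types"
    )

    // proposalParts shows the post-change contract: marshalling failures
    // surface as errors rather than panics.
    func proposalParts(block *types.Block) (types.BlockID, *types.PartSet, error) {
        partSet, err := block.MakePartSet(types.BlockPartSizeBytes)
        if err != nil { // previously a panic inside MakePartSet
            return types.BlockID{}, nil, fmt.Errorf("make part set: %w", err)
        }
        blockID, err := block.BlockID() // builds a part set internally, so it can fail the same way
        if err != nil {
            return types.BlockID{}, nil, err
        }
        return blockID, partSet, nil
    }

Second, the EvidenceData wrapper is gone, so Block.Evidence is an EvidenceList and the extra .Evidence hop disappears (BlockID.IsZero is likewise renamed to IsNil with unchanged semantics). A sketch of the flattened access pattern, assuming the Hash method the Evidence interface already exposes in this package; evidenceHashes is again hypothetical:

    package example

    import "github.com/tendermint/tendermint/types"

    // evidenceHashes illustrates the flattened field: before this change
    // the loop had to read `range b.Evidence.Evidence`.
    func evidenceHashes(b *types.Block) [][]byte {
        hashes := make([][]byte, 0, len(b.Evidence))
        for _, ev := range b.Evidence {
            hashes = append(hashes, ev.Hash())
        }
        return hashes
    }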
diff --git a/types/block_meta.go b/types/block_meta.go index 0c503e750f..8f23425411 100644 --- a/types/block_meta.go +++ b/types/block_meta.go @@ -10,11 +10,13 @@ import ( // BlockMeta contains meta information. type BlockMeta struct { - BlockID BlockID `json:"block_id"` - BlockSize int `json:"block_size"` - Header Header `json:"header"` - HasCoreChainLock bool `json:"has_core_chain_lock"` - NumTxs int `json:"num_txs"` + BlockID BlockID `json:"block_id"` + BlockSize int `json:"block_size,string"` + Header Header `json:"header"` + NumTxs int `json:"num_txs,string"` + + // dash fields + HasCoreChainLock bool `json:"has_core_chain_lock"` } // NewBlockMeta returns a new BlockMeta. diff --git a/types/block_meta_test.go b/types/block_meta_test.go index a1a382ffa2..0ce90c40b4 100644 --- a/types/block_meta_test.go +++ b/types/block_meta_test.go @@ -5,13 +5,13 @@ import ( "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/crypto/tmhash" + "github.com/tendermint/tendermint/crypto" tmrand "github.com/tendermint/tendermint/libs/rand" ) func TestBlockMeta_ToProto(t *testing.T) { h := MakeRandHeader() - bi := BlockID{Hash: h.Hash(), PartSetHeader: PartSetHeader{Total: 123, Hash: tmrand.Bytes(tmhash.Size)}} + bi := BlockID{Hash: h.Hash(), PartSetHeader: PartSetHeader{Total: 123, Hash: tmrand.Bytes(crypto.HashSize)}} bm := &BlockMeta{ BlockID: bi, @@ -48,9 +48,9 @@ func TestBlockMeta_ToProto(t *testing.T) { func TestBlockMeta_ValidateBasic(t *testing.T) { h := MakeRandHeader() - bi := BlockID{Hash: h.Hash(), PartSetHeader: PartSetHeader{Total: 123, Hash: tmrand.Bytes(tmhash.Size)}} - bi2 := BlockID{Hash: tmrand.Bytes(tmhash.Size), - PartSetHeader: PartSetHeader{Total: 123, Hash: tmrand.Bytes(tmhash.Size)}} + bi := BlockID{Hash: h.Hash(), PartSetHeader: PartSetHeader{Total: 123, Hash: tmrand.Bytes(crypto.HashSize)}} + bi2 := BlockID{Hash: tmrand.Bytes(crypto.HashSize), + PartSetHeader: PartSetHeader{Total: 123, Hash: tmrand.Bytes(crypto.HashSize)}} bi3 := BlockID{Hash: []byte("incorrect hash"), PartSetHeader: PartSetHeader{Total: 123, Hash: []byte("incorrect hash")}} diff --git a/types/block_test.go b/types/block_test.go index 9fe5ae30be..ad78941d6a 100644 --- a/types/block_test.go +++ b/types/block_test.go @@ -23,7 +23,6 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/bls12381" "github.com/tendermint/tendermint/crypto/merkle" - "github.com/tendermint/tendermint/crypto/tmhash" tmbytes "github.com/tendermint/tendermint/libs/bytes" tmrand "github.com/tendermint/tendermint/libs/rand" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" @@ -37,6 +36,9 @@ func TestMain(m *testing.M) { } func TestBlockAddEvidence(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + txs := []Tx{Tx("foo"), Tx("bar")} lastID := makeBlockIDRandom() @@ -45,22 +47,25 @@ func TestBlockAddEvidence(t *testing.T) { coreChainLock := NewMockChainLock(1) - voteSet, valSet, vals := randVoteSet(h-1, 1, tmproto.PrecommitType, 10, stateID) - commit, err := MakeCommit(lastID, stateID, h-1, 1, voteSet, vals) + voteSet, valSet, vals := randVoteSet(ctx, t, h-1, 1, tmproto.PrecommitType, 10, stateID) + commit, err := makeCommit(ctx, lastID, stateID, h-1, 1, voteSet, vals) require.NoError(t, err) - ev, err := NewMockDuplicateVoteEvidenceWithValidator(h, time.Now(), vals[0], "block-test-chain", valSet.QuorumType, + ev, err := NewMockDuplicateVoteEvidenceWithValidator(ctx, h, time.Now(), vals[0], "block-test-chain", 
valSet.QuorumType, valSet.QuorumHash) require.NoError(t, err) evList := []Evidence{ev} block := MakeBlock(h, coreChainLock.CoreBlockHeight, &coreChainLock, txs, commit, evList, 0) require.NotNil(t, block) - require.Equal(t, 1, len(block.Evidence.Evidence)) + require.Equal(t, 1, len(block.Evidence)) require.NotNil(t, block.EvidenceHash) } func TestBlockValidateBasic(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + require.Error(t, (*Block)(nil).ValidateBasic()) txs := []Tx{Tx("foo"), Tx("bar")} @@ -69,11 +74,11 @@ func TestBlockValidateBasic(t *testing.T) { stateID := RandStateID().WithHeight(h - 2) - voteSet, valSet, vals := randVoteSet(h-1, 1, tmproto.PrecommitType, 10, stateID) - commit, err := MakeCommit(lastID, stateID, h-1, 1, voteSet, vals) + voteSet, valSet, vals := randVoteSet(ctx, t, h-1, 1, tmproto.PrecommitType, 10, stateID) + commit, err := makeCommit(ctx, lastID, stateID, h-1, 1, voteSet, vals) require.NoError(t, err) - ev, err := NewMockDuplicateVoteEvidenceWithValidator(h, time.Now(), vals[0], "block-test-chain", valSet.QuorumType, + ev, err := NewMockDuplicateVoteEvidenceWithValidator(ctx, h, time.Now(), vals[0], "block-test-chain", valSet.QuorumType, valSet.QuorumHash) require.NoError(t, err) evList := []Evidence{ev} @@ -118,7 +123,7 @@ func TestBlockValidateBasic(t *testing.T) { }, true}, {"Invalid Evidence", func(blk *Block) { emptyEv := &DuplicateVoteEvidence{} - blk.Evidence = EvidenceData{Evidence: []Evidence{emptyEv}} + blk.Evidence = []Evidence{emptyEv} }, true}, } @@ -141,49 +146,61 @@ func TestBlockHash(t *testing.T) { } func TestBlockMakePartSet(t *testing.T) { - assert.Nil(t, (*Block)(nil).MakePartSet(2)) + bps, err := (*Block)(nil).MakePartSet(2) + assert.Error(t, err) + assert.Nil(t, bps) + + partSet, err := MakeBlock(int64(3), 0, nil, []Tx{Tx("Hello World")}, nil, nil, 0).MakePartSet(1024) + require.NoError(t, err) - partSet := MakeBlock(int64(3), 0, nil, []Tx{Tx("Hello World")}, nil, nil, 0).MakePartSet(1024) assert.NotNil(t, partSet) assert.EqualValues(t, 1, partSet.Total()) } func TestBlockMakePartSetWithEvidence(t *testing.T) { - assert.Nil(t, (*Block)(nil).MakePartSet(2)) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + bps, err := (*Block)(nil).MakePartSet(2) + assert.Error(t, err) + assert.Nil(t, bps) lastID := makeBlockIDRandom() h := int64(3) stateID := RandStateID().WithHeight(h - 2) - voteSet, valSet, vals := randVoteSet(h-1, 1, tmproto.PrecommitType, 10, stateID) - commit, err := MakeCommit(lastID, stateID, h-1, 1, voteSet, vals) + voteSet, valSet, vals := randVoteSet(ctx, t, h-1, 1, tmproto.PrecommitType, 10, stateID) + commit, err := makeCommit(ctx, lastID, stateID, h-1, 1, voteSet, vals) require.NoError(t, err) - ev, err := NewMockDuplicateVoteEvidenceWithValidator(h, time.Now(), vals[0], "block-test-chain", valSet.QuorumType, + ev, err := NewMockDuplicateVoteEvidenceWithValidator(ctx, h, time.Now(), vals[0], "block-test-chain", valSet.QuorumType, valSet.QuorumHash) require.NoError(t, err) evList := []Evidence{ev} - block := MakeBlock(h, 0, nil, []Tx{Tx("Hello World")}, commit, evList, 0) - partSet := block.MakePartSet(512) - assert.NotNil(t, partSet) + partSet, err := MakeBlock(h, 0, nil, []Tx{Tx("Hello World")}, commit, evList, 0).MakePartSet(512) + require.NoError(t, err) + // The part set can be either 3 or 4 parts; this is because of variance in sizes due to the sub-second part of // timestamps marshaling to different sizes assert.True(t, partSet.Total() == 3) } func 
TestBlockHashesTo(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + assert.False(t, (*Block)(nil).HashesTo(nil)) lastID := makeBlockIDRandom() h := int64(3) stateID := RandStateID().WithHeight(h - 2) - voteSet, valSet, vals := randVoteSet(h-1, 1, tmproto.PrecommitType, 10, stateID) - commit, err := MakeCommit(lastID, stateID, h-1, 1, voteSet, vals) + voteSet, valSet, vals := randVoteSet(ctx, t, h-1, 1, tmproto.PrecommitType, 10, stateID) + commit, err := makeCommit(ctx, lastID, stateID, h-1, 1, voteSet, vals) require.NoError(t, err) - ev, err := NewMockDuplicateVoteEvidenceWithValidator(h, time.Now(), vals[0], "block-test-chain", valSet.QuorumType, + ev, err := NewMockDuplicateVoteEvidenceWithValidator(ctx, h, time.Now(), vals[0], "block-test-chain", valSet.QuorumType, valSet.QuorumHash) require.NoError(t, err) evList := []Evidence{ev} @@ -215,8 +232,8 @@ func TestBlockString(t *testing.T) { func makeBlockIDRandom() BlockID { var ( - blockHash = make([]byte, tmhash.Size) - partSetHash = make([]byte, tmhash.Size) + blockHash = make([]byte, crypto.HashSize) + partSetHash = make([]byte, crypto.HashSize) ) rand.Read(blockHash) //nolint: errcheck // ignore errcheck for read rand.Read(partSetHash) //nolint: errcheck // ignore errcheck for read @@ -225,8 +242,8 @@ func makeBlockIDRandom() BlockID { func makeBlockID(hash []byte, partSetSize uint32, partSetHash []byte) BlockID { var ( - h = make([]byte, tmhash.Size) - psH = make([]byte, tmhash.Size) + h = make([]byte, crypto.HashSize) + psH = make([]byte, crypto.HashSize) ) copy(h, hash) copy(psH, partSetHash) @@ -257,12 +274,15 @@ func TestNilDataHashDoesntCrash(t *testing.T) { } func TestCommit(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + lastID := makeBlockIDRandom() h := int64(3) stateID := RandStateID().WithHeight(h - 2) - voteSet, _, vals := randVoteSet(h-1, 1, tmproto.PrecommitType, 10, stateID) - commit, err := MakeCommit(lastID, stateID, h-1, 1, voteSet, vals) + voteSet, _, vals := randVoteSet(ctx, t, h-1, 1, tmproto.PrecommitType, 10, stateID) + commit, err := makeCommit(ctx, lastID, stateID, h-1, 1, voteSet, vals) require.NoError(t, err) assert.Equal(t, h-1, commit.Height) @@ -271,6 +291,8 @@ func TestCommit(t *testing.T) { require.NotNil(t, commit.ThresholdBlockSignature) require.NotNil(t, commit.ThresholdStateSignature) + // TODO replace an assertion with a correct one + //assert.Equal(t, voteWithoutExtension(voteSet.GetByIndex(0)), commit.GetByIndex(0)) assert.True(t, commit.IsCommit()) } @@ -288,11 +310,16 @@ func TestCommitValidateBasic(t *testing.T) { {"Incorrect round", func(com *Commit) { com.Round = -100 }, true}, } for _, tc := range testCases { - tcRun := tc - t.Run(tcRun.testName, func(t *testing.T) { - com := randCommit(RandStateID().WithHeight(height - 1)) - tcRun.malleateCommit(com) - assert.Equal(t, tcRun.expectErr, com.ValidateBasic() != nil, "Validate Basic had an unexpected result") + tc := tc + t.Run(tc.testName, func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + stateID := RandStateID().WithHeight(height - 1) + + com := randCommit(ctx, t, stateID) + + tc.malleateCommit(com) + assert.Equal(t, tc.expectErr, com.ValidateBasic() != nil, "Validate Basic had an unexpected result") }) } } @@ -303,14 +330,14 @@ func TestMaxCommitBytes(t *testing.T) { Height: math.MaxInt64, Round: math.MaxInt32, BlockID: BlockID{ - Hash: tmhash.Sum([]byte("blockID_hash")), + Hash: 
crypto.Checksum([]byte("blockID_hash")), PartSetHeader: PartSetHeader{ Total: math.MaxInt32, - Hash: tmhash.Sum([]byte("blockID_part_set_header_hash")), + Hash: crypto.Checksum([]byte("blockID_part_set_header_hash")), }, }, StateID: StateID{ - LastAppHash: tmhash.Sum([]byte("stateID_hash")), + LastAppHash: crypto.Checksum([]byte("stateID_hash")), }, ThresholdBlockSignature: crypto.CRandBytes(SignatureSize), ThresholdStateSignature: crypto.CRandBytes(SignatureSize), @@ -337,18 +364,18 @@ func TestHeaderHash(t *testing.T) { Height: 3, CoreChainLockedHeight: 1, Time: time.Date(2019, 10, 13, 16, 14, 44, 0, time.UTC), - LastBlockID: makeBlockID(make([]byte, tmhash.Size), 6, make([]byte, tmhash.Size)), - LastCommitHash: tmhash.Sum([]byte("last_commit_hash")), - DataHash: tmhash.Sum([]byte("data_hash")), - ValidatorsHash: tmhash.Sum([]byte("validators_hash")), - NextValidatorsHash: tmhash.Sum([]byte("next_validators_hash")), - ConsensusHash: tmhash.Sum([]byte("consensus_hash")), - AppHash: tmhash.Sum([]byte("app_hash")), - LastResultsHash: tmhash.Sum([]byte("last_results_hash")), - EvidenceHash: tmhash.Sum([]byte("evidence_hash")), + LastBlockID: makeBlockID(make([]byte, crypto.HashSize), 6, make([]byte, crypto.HashSize)), + LastCommitHash: crypto.Checksum([]byte("last_commit_hash")), + DataHash: crypto.Checksum([]byte("data_hash")), + ValidatorsHash: crypto.Checksum([]byte("validators_hash")), + NextValidatorsHash: crypto.Checksum([]byte("next_validators_hash")), + ConsensusHash: crypto.Checksum([]byte("consensus_hash")), + AppHash: crypto.Checksum([]byte("app_hash")), + LastResultsHash: crypto.Checksum([]byte("last_results_hash")), + EvidenceHash: crypto.Checksum([]byte("evidence_hash")), ProposerProTxHash: crypto.ProTxHashFromSeedBytes([]byte("proposer_pro_tx_hash")), ProposedAppVersion: 1, - }, hexBytesFromString("74EEFDA2F09ACE19D46DE191EC2745CE14B42F7DE48AF86E6D65B17939B08D3E")}, + }, hexBytesFromString(t, "74EEFDA2F09ACE19D46DE191EC2745CE14B42F7DE48AF86E6D65B17939B08D3E")}, {"nil header yields nil", nil, nil}, {"nil ValidatorsHash yields nil", &Header{ Version: version.Consensus{Block: 1, App: 2}, @@ -356,15 +383,15 @@ func TestHeaderHash(t *testing.T) { Height: 3, CoreChainLockedHeight: 1, Time: time.Date(2019, 10, 13, 16, 14, 44, 0, time.UTC), - LastBlockID: makeBlockID(make([]byte, tmhash.Size), 6, make([]byte, tmhash.Size)), - LastCommitHash: tmhash.Sum([]byte("last_commit_hash")), - DataHash: tmhash.Sum([]byte("data_hash")), + LastBlockID: makeBlockID(make([]byte, crypto.HashSize), 6, make([]byte, crypto.HashSize)), + LastCommitHash: crypto.Checksum([]byte("last_commit_hash")), + DataHash: crypto.Checksum([]byte("data_hash")), ValidatorsHash: nil, - NextValidatorsHash: tmhash.Sum([]byte("next_validators_hash")), - ConsensusHash: tmhash.Sum([]byte("consensus_hash")), - AppHash: tmhash.Sum([]byte("app_hash")), - LastResultsHash: tmhash.Sum([]byte("last_results_hash")), - EvidenceHash: tmhash.Sum([]byte("evidence_hash")), + NextValidatorsHash: crypto.Checksum([]byte("next_validators_hash")), + ConsensusHash: crypto.Checksum([]byte("consensus_hash")), + AppHash: crypto.Checksum([]byte("app_hash")), + LastResultsHash: crypto.Checksum([]byte("last_results_hash")), + EvidenceHash: crypto.Checksum([]byte("evidence_hash")), ProposerProTxHash: crypto.ProTxHashFromSeedBytes([]byte("proposer_pro_tx_hash")), ProposedAppVersion: 1, }, nil}, @@ -432,20 +459,21 @@ func TestMaxHeaderBytes(t *testing.T) { timestamp := time.Date(math.MaxInt64, 0, 0, 0, 0, 0, math.MaxInt64, time.UTC) h := Header{ - 
Version: version.Consensus{Block: math.MaxInt64, App: math.MaxInt64}, - ChainID: maxChainID, - Height: math.MaxInt64, + Version: version.Consensus{Block: math.MaxInt64, App: math.MaxInt64}, + ChainID: maxChainID, + Height: math.MaxInt64, + Time: timestamp, + LastBlockID: makeBlockID(make([]byte, crypto.HashSize), math.MaxInt32, make([]byte, crypto.HashSize)), + LastCommitHash: crypto.Checksum([]byte("last_commit_hash")), + DataHash: crypto.Checksum([]byte("data_hash")), + ValidatorsHash: crypto.Checksum([]byte("validators_hash")), + NextValidatorsHash: crypto.Checksum([]byte("next_validators_hash")), + ConsensusHash: crypto.Checksum([]byte("consensus_hash")), + AppHash: crypto.Checksum([]byte("app_hash")), + LastResultsHash: crypto.Checksum([]byte("last_results_hash")), + EvidenceHash: crypto.Checksum([]byte("evidence_hash")), + CoreChainLockedHeight: math.MaxUint32, - Time: timestamp, - LastBlockID: makeBlockID(make([]byte, tmhash.Size), math.MaxInt32, make([]byte, tmhash.Size)), - LastCommitHash: tmhash.Sum([]byte("last_commit_hash")), - DataHash: tmhash.Sum([]byte("data_hash")), - ValidatorsHash: tmhash.Sum([]byte("validators_hash")), - NextValidatorsHash: tmhash.Sum([]byte("next_validators_hash")), - ConsensusHash: tmhash.Sum([]byte("consensus_hash")), - AppHash: tmhash.Sum([]byte("app_hash")), - LastResultsHash: tmhash.Sum([]byte("last_results_hash")), - EvidenceHash: tmhash.Sum([]byte("evidence_hash")), ProposerProTxHash: crypto.ProTxHashFromSeedBytes([]byte("proposer_pro_tx_hash")), } @@ -455,24 +483,25 @@ func TestMaxHeaderBytes(t *testing.T) { assert.EqualValues(t, MaxHeaderBytes, int64(len(bz))) } -func randCommit(stateID StateID) *Commit { +func randCommit(ctx context.Context, t *testing.T, stateID StateID) *Commit { + t.Helper() lastID := makeBlockIDRandom() height := stateID.Height + 1 + voteSet, _, vals := randVoteSet(ctx, t, height, 1, tmproto.PrecommitType, 10, stateID) + commit, err := makeCommit(ctx, lastID, stateID, height, 1, voteSet, vals) + + require.NoError(t, err) - voteSet, _, vals := randVoteSet(height, 1, tmproto.PrecommitType, 10, stateID) - commit, err := MakeCommit(lastID, stateID, height, 1, voteSet, vals) - if err != nil { - panic(err) - } return commit } -func hexBytesFromString(s string) tmbytes.HexBytes { +func hexBytesFromString(t *testing.T, s string) tmbytes.HexBytes { + t.Helper() + b, err := hex.DecodeString(s) - if err != nil { - panic(err) - } - return tmbytes.HexBytes(b) + require.NoError(t, err) + + return b } func TestBlockMaxDataBytes(t *testing.T) { @@ -554,6 +583,9 @@ func TestCommitToVoteSetWithVotesForNilBlock(t *testing.T) { round = 0 ) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // all votes below use height - 1, so state is at height - 2 stateID := RandStateID().WithHeight(height - 2) @@ -569,12 +601,12 @@ func TestCommitToVoteSetWithVotesForNilBlock(t *testing.T) { } for _, tc := range testCases { - voteSet, valSet, vals := randVoteSet(height-1, round, tmproto.PrecommitType, tc.numValidators, stateID) + voteSet, valSet, vals := randVoteSet(ctx, t, height-1, round, tmproto.PrecommitType, tc.numValidators, stateID) vi := int32(0) for n := range tc.blockIDs { for i := 0; i < tc.numVotes[n]; i++ { - proTxHash, err := vals[vi].GetProTxHash(context.Background()) + proTxHash, err := vals[vi].GetProTxHash(ctx) require.NoError(t, err) vote := &Vote{ ValidatorProTxHash: proTxHash, @@ -585,7 +617,7 @@ func TestCommitToVoteSetWithVotesForNilBlock(t *testing.T) { BlockID: tc.blockIDs[n], } - added, err := 
signAddVote(vals[vi], vote, voteSet) + added, err := signAddVote(ctx, vals[vi], vote, voteSet) assert.NoError(t, err) assert.True(t, added) @@ -597,7 +629,7 @@ func TestCommitToVoteSetWithVotesForNilBlock(t *testing.T) { commit := voteSet.MakeCommit() // panics without > 2/3 valid votes assert.NotNil(t, commit) err := valSet.VerifyCommit(voteSet.ChainID(), blockID, stateID, height-1, commit) - assert.Nil(t, err) + assert.NoError(t, err) } else { assert.Panics(t, func() { voteSet.MakeCommit() }) } @@ -645,9 +677,12 @@ func TestBlockIDValidateBasic(t *testing.T) { } func TestBlockProtoBuf(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + h := mrand.Int63() stateID := RandStateID().WithHeight(h - 1) - c1 := randCommit(stateID) + c1 := randCommit(ctx, t, stateID) b1 := MakeBlock(h, 0, nil, []Tx{Tx([]byte{1})}, &Commit{}, []Evidence{}, 0) b1.ProposerProTxHash = tmrand.Bytes(crypto.DefaultHashSize) @@ -655,6 +690,7 @@ func TestBlockProtoBuf(t *testing.T) { b2.ProposerProTxHash = tmrand.Bytes(crypto.DefaultHashSize) evidenceTime := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC) evi, err := NewMockDuplicateVoteEvidence( + ctx, h, evidenceTime, "block-test-chain", @@ -662,7 +698,7 @@ func TestBlockProtoBuf(t *testing.T) { crypto.RandQuorumHash(), ) require.NoError(t, err) - b2.Evidence = EvidenceData{Evidence: EvidenceList{evi}} + b2.Evidence = EvidenceList{evi} b2.EvidenceHash = b2.Evidence.Hash() b3 := MakeBlock(h, 0, nil, []Tx{}, c1, []Evidence{}, 0) @@ -691,7 +727,7 @@ func TestBlockProtoBuf(t *testing.T) { require.NoError(t, err, tc.msg) require.EqualValues(t, tc.b1.Header, block.Header, tc.msg) require.EqualValues(t, tc.b1.Data, block.Data, tc.msg) - require.EqualValues(t, tc.b1.Evidence.Evidence, block.Evidence.Evidence, tc.msg) + require.EqualValues(t, tc.b1.Evidence, block.Evidence, tc.msg) require.EqualValues(t, *tc.b1.LastCommit, *block.LastCommit, tc.msg) } else { require.Error(t, err, tc.msg) @@ -722,55 +758,12 @@ func TestDataProtoBuf(t *testing.T) { } } -// TestEvidenceDataProtoBuf ensures parity in converting to and from proto. 
-func TestEvidenceDataProtoBuf(t *testing.T) { - const chainID = "mychain" - ev, err := NewMockDuplicateVoteEvidence( - math.MaxInt64, - time.Now(), - chainID, - btcjson.LLMQType_5_60, - crypto.RandQuorumHash(), - ) - require.NoError(t, err) - data := &EvidenceData{Evidence: EvidenceList{ev}} - _ = data.ByteSize() - testCases := []struct { - msg string - data1 *EvidenceData - expPass1 bool - expPass2 bool - }{ - {"success", data, true, true}, - {"empty evidenceData", &EvidenceData{Evidence: EvidenceList{}}, true, true}, - {"fail nil Data", nil, false, false}, - } - - for _, tc := range testCases { - protoData, err := tc.data1.ToProto() - if tc.expPass1 { - require.NoError(t, err, tc.msg) - } else { - require.Error(t, err, tc.msg) - } - - eviD := new(EvidenceData) - err = eviD.FromProto(protoData) - if tc.expPass2 { - require.NoError(t, err, tc.msg) - require.Equal(t, tc.data1, eviD, tc.msg) - } else { - require.Error(t, err, tc.msg) - } - } -} - // exposed for testing func MakeRandHeader() Header { chainID := "test" t := time.Now() height := mrand.Int63() - randBytes := tmrand.Bytes(tmhash.Size) + randBytes := tmrand.Bytes(crypto.HashSize) randProTxHash := tmrand.Bytes(crypto.DefaultHashSize) h := Header{ Version: version.Consensus{Block: version.BlockProtocol, App: 1}, @@ -846,9 +839,13 @@ func TestBlockIDProtoBuf(t *testing.T) { } func TestSignedHeaderProtoBuf(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + stateID := RandStateID() - commit := randCommit(stateID) + commit := randCommit(ctx, t, stateID) + h := MakeRandHeader() sh := SignedHeader{Header: &h, Commit: commit} @@ -980,7 +977,7 @@ func TestHeader_ValidateBasic(t *testing.T) { ChainID: string(make([]byte, MaxChainIDLen)), Height: 1, LastBlockID: BlockID{ - Hash: make([]byte, tmhash.Size+1), + Hash: make([]byte, crypto.HashSize+1), }, }, true, "wrong Hash", @@ -992,9 +989,9 @@ func TestHeader_ValidateBasic(t *testing.T) { ChainID: string(make([]byte, MaxChainIDLen)), Height: 1, LastBlockID: BlockID{ - Hash: make([]byte, tmhash.Size), + Hash: make([]byte, crypto.HashSize), PartSetHeader: PartSetHeader{ - Hash: make([]byte, tmhash.Size+1), + Hash: make([]byte, crypto.HashSize+1), }, }, }, @@ -1007,12 +1004,12 @@ func TestHeader_ValidateBasic(t *testing.T) { ChainID: string(make([]byte, MaxChainIDLen)), Height: 1, LastBlockID: BlockID{ - Hash: make([]byte, tmhash.Size), + Hash: make([]byte, crypto.HashSize), PartSetHeader: PartSetHeader{ - Hash: make([]byte, tmhash.Size), + Hash: make([]byte, crypto.HashSize), }, }, - LastCommitHash: make([]byte, tmhash.Size+1), + LastCommitHash: make([]byte, crypto.HashSize+1), }, true, "wrong LastCommitHash", }, @@ -1023,13 +1020,13 @@ func TestHeader_ValidateBasic(t *testing.T) { ChainID: string(make([]byte, MaxChainIDLen)), Height: 1, LastBlockID: BlockID{ - Hash: make([]byte, tmhash.Size), + Hash: make([]byte, crypto.HashSize), PartSetHeader: PartSetHeader{ - Hash: make([]byte, tmhash.Size), + Hash: make([]byte, crypto.HashSize), }, }, - LastCommitHash: make([]byte, tmhash.Size), - DataHash: make([]byte, tmhash.Size+1), + LastCommitHash: make([]byte, crypto.HashSize), + DataHash: make([]byte, crypto.HashSize+1), }, true, "wrong DataHash", }, @@ -1040,14 +1037,14 @@ func TestHeader_ValidateBasic(t *testing.T) { ChainID: string(make([]byte, MaxChainIDLen)), Height: 1, LastBlockID: BlockID{ - Hash: make([]byte, tmhash.Size), + Hash: make([]byte, crypto.HashSize), PartSetHeader: PartSetHeader{ - Hash: make([]byte, tmhash.Size), + Hash: make([]byte, 
crypto.HashSize), }, }, - LastCommitHash: make([]byte, tmhash.Size), - DataHash: make([]byte, tmhash.Size), - EvidenceHash: make([]byte, tmhash.Size+1), + LastCommitHash: make([]byte, crypto.HashSize), + DataHash: make([]byte, crypto.HashSize), + EvidenceHash: make([]byte, crypto.HashSize+1), }, true, "wrong EvidenceHash", }, @@ -1058,14 +1055,15 @@ func TestHeader_ValidateBasic(t *testing.T) { ChainID: string(make([]byte, MaxChainIDLen)), Height: 1, LastBlockID: BlockID{ - Hash: make([]byte, tmhash.Size), + Hash: make([]byte, crypto.HashSize), PartSetHeader: PartSetHeader{ - Hash: make([]byte, tmhash.Size), + Hash: make([]byte, crypto.HashSize), }, }, - LastCommitHash: make([]byte, tmhash.Size), - DataHash: make([]byte, tmhash.Size), - EvidenceHash: make([]byte, tmhash.Size), + LastCommitHash: make([]byte, crypto.HashSize), + DataHash: make([]byte, crypto.HashSize), + EvidenceHash: make([]byte, crypto.HashSize), + ProposerProTxHash: make([]byte, crypto.ProTxHashSize+1), }, true, "invalid ProposerProTxHash length; got: 33, expected: 32", @@ -1077,16 +1075,16 @@ func TestHeader_ValidateBasic(t *testing.T) { ChainID: string(make([]byte, MaxChainIDLen)), Height: 1, LastBlockID: BlockID{ - Hash: make([]byte, tmhash.Size), + Hash: make([]byte, crypto.HashSize), PartSetHeader: PartSetHeader{ - Hash: make([]byte, tmhash.Size), + Hash: make([]byte, crypto.HashSize), }, }, - LastCommitHash: make([]byte, tmhash.Size), - DataHash: make([]byte, tmhash.Size), - EvidenceHash: make([]byte, tmhash.Size), + LastCommitHash: make([]byte, crypto.HashSize), + DataHash: make([]byte, crypto.HashSize), + EvidenceHash: make([]byte, crypto.HashSize), ProposerProTxHash: make([]byte, crypto.ProTxHashSize), - ValidatorsHash: make([]byte, tmhash.Size+1), + ValidatorsHash: make([]byte, crypto.HashSize+1), }, true, "wrong ValidatorsHash", }, @@ -1097,17 +1095,17 @@ func TestHeader_ValidateBasic(t *testing.T) { ChainID: string(make([]byte, MaxChainIDLen)), Height: 1, LastBlockID: BlockID{ - Hash: make([]byte, tmhash.Size), + Hash: make([]byte, crypto.HashSize), PartSetHeader: PartSetHeader{ - Hash: make([]byte, tmhash.Size), + Hash: make([]byte, crypto.HashSize), }, }, - LastCommitHash: make([]byte, tmhash.Size), - DataHash: make([]byte, tmhash.Size), - EvidenceHash: make([]byte, tmhash.Size), + LastCommitHash: make([]byte, crypto.HashSize), + DataHash: make([]byte, crypto.HashSize), + EvidenceHash: make([]byte, crypto.HashSize), ProposerProTxHash: make([]byte, crypto.ProTxHashSize), - ValidatorsHash: make([]byte, tmhash.Size), - NextValidatorsHash: make([]byte, tmhash.Size+1), + ValidatorsHash: make([]byte, crypto.HashSize), + NextValidatorsHash: make([]byte, crypto.HashSize+1), }, true, "wrong NextValidatorsHash", }, @@ -1118,18 +1116,18 @@ func TestHeader_ValidateBasic(t *testing.T) { ChainID: string(make([]byte, MaxChainIDLen)), Height: 1, LastBlockID: BlockID{ - Hash: make([]byte, tmhash.Size), + Hash: make([]byte, crypto.HashSize), PartSetHeader: PartSetHeader{ - Hash: make([]byte, tmhash.Size), + Hash: make([]byte, crypto.HashSize), }, }, - LastCommitHash: make([]byte, tmhash.Size), - DataHash: make([]byte, tmhash.Size), - EvidenceHash: make([]byte, tmhash.Size), + LastCommitHash: make([]byte, crypto.HashSize), + DataHash: make([]byte, crypto.HashSize), + EvidenceHash: make([]byte, crypto.HashSize), ProposerProTxHash: make([]byte, crypto.ProTxHashSize), - ValidatorsHash: make([]byte, tmhash.Size), - NextValidatorsHash: make([]byte, tmhash.Size), - ConsensusHash: make([]byte, tmhash.Size+1), + ValidatorsHash: 
make([]byte, crypto.HashSize), + NextValidatorsHash: make([]byte, crypto.HashSize), + ConsensusHash: make([]byte, crypto.HashSize+1), }, true, "wrong ConsensusHash", }, @@ -1140,19 +1138,19 @@ func TestHeader_ValidateBasic(t *testing.T) { ChainID: string(make([]byte, MaxChainIDLen)), Height: 1, LastBlockID: BlockID{ - Hash: make([]byte, tmhash.Size), + Hash: make([]byte, crypto.HashSize), PartSetHeader: PartSetHeader{ - Hash: make([]byte, tmhash.Size), + Hash: make([]byte, crypto.HashSize), }, }, - LastCommitHash: make([]byte, tmhash.Size), - DataHash: make([]byte, tmhash.Size), - EvidenceHash: make([]byte, tmhash.Size), + LastCommitHash: make([]byte, crypto.HashSize), + DataHash: make([]byte, crypto.HashSize), + EvidenceHash: make([]byte, crypto.HashSize), ProposerProTxHash: make([]byte, crypto.ProTxHashSize), - ValidatorsHash: make([]byte, tmhash.Size), - NextValidatorsHash: make([]byte, tmhash.Size), - ConsensusHash: make([]byte, tmhash.Size), - LastResultsHash: make([]byte, tmhash.Size+1), + ValidatorsHash: make([]byte, crypto.HashSize), + NextValidatorsHash: make([]byte, crypto.HashSize), + ConsensusHash: make([]byte, crypto.HashSize), + LastResultsHash: make([]byte, crypto.HashSize+1), }, true, "wrong LastResultsHash", }, @@ -1164,19 +1162,19 @@ func TestHeader_ValidateBasic(t *testing.T) { Height: 1, CoreChainLockedHeight: 1, LastBlockID: BlockID{ - Hash: make([]byte, tmhash.Size), + Hash: make([]byte, crypto.HashSize), PartSetHeader: PartSetHeader{ - Hash: make([]byte, tmhash.Size), + Hash: make([]byte, crypto.HashSize), }, }, - LastCommitHash: make([]byte, tmhash.Size), - DataHash: make([]byte, tmhash.Size), - EvidenceHash: make([]byte, tmhash.Size), + LastCommitHash: make([]byte, crypto.HashSize), + DataHash: make([]byte, crypto.HashSize), + EvidenceHash: make([]byte, crypto.HashSize), ProposerProTxHash: make([]byte, crypto.ProTxHashSize), - ValidatorsHash: make([]byte, tmhash.Size), - NextValidatorsHash: make([]byte, tmhash.Size), - ConsensusHash: make([]byte, tmhash.Size), - LastResultsHash: make([]byte, tmhash.Size), + ValidatorsHash: make([]byte, crypto.HashSize), + NextValidatorsHash: make([]byte, crypto.HashSize), + ConsensusHash: make([]byte, crypto.HashSize), + LastResultsHash: make([]byte, crypto.HashSize), }, false, "", }, @@ -1264,9 +1262,9 @@ func TestCommit_ValidateBasic(t *testing.T) { Height: 1, Round: 1, BlockID: BlockID{ - Hash: make([]byte, tmhash.Size), + Hash: make([]byte, crypto.HashSize), PartSetHeader: PartSetHeader{ - Hash: make([]byte, tmhash.Size), + Hash: make([]byte, crypto.HashSize), }, }, ThresholdBlockSignature: make([]byte, bls12381.SignatureSize+1), @@ -1280,9 +1278,9 @@ func TestCommit_ValidateBasic(t *testing.T) { Height: 1, Round: 1, BlockID: BlockID{ - Hash: make([]byte, tmhash.Size), + Hash: make([]byte, crypto.HashSize), PartSetHeader: PartSetHeader{ - Hash: make([]byte, tmhash.Size), + Hash: make([]byte, crypto.HashSize), }, }, ThresholdBlockSignature: make([]byte, bls12381.SignatureSize), @@ -1296,9 +1294,9 @@ func TestCommit_ValidateBasic(t *testing.T) { Height: 1, Round: 1, BlockID: BlockID{ - Hash: make([]byte, tmhash.Size), + Hash: make([]byte, crypto.HashSize), PartSetHeader: PartSetHeader{ - Hash: make([]byte, tmhash.Size), + Hash: make([]byte, crypto.HashSize), }, }, ThresholdBlockSignature: make([]byte, bls12381.SignatureSize), diff --git a/types/canonical.go b/types/canonical.go index a43e678d87..9aef9b56aa 100644 --- a/types/canonical.go +++ b/types/canonical.go @@ -21,7 +21,7 @@ func CanonicalizeBlockID(bid tmproto.BlockID) 
*tmproto.CanonicalBlockID { panic(err) } var cbid *tmproto.CanonicalBlockID - if rbid == nil || rbid.IsZero() { + if rbid == nil || rbid.IsNil() { cbid = nil } else { cbid = &tmproto.CanonicalBlockID{ @@ -63,6 +63,18 @@ func CanonicalizeVote(chainID string, vote *tmproto.Vote) tmproto.CanonicalVote } } +// CanonicalizeVoteExtension extracts the vote extension from the given vote +// and constructs a CanonicalVoteExtension struct, whose representation in +// bytes is what is signed in order to produce the vote extension's signature. +func CanonicalizeVoteExtension(chainID string, vote *tmproto.Vote) tmproto.CanonicalVoteExtension { + return tmproto.CanonicalVoteExtension{ + Extension: vote.Extension, + Height: vote.Height, + Round: int64(vote.Round), + ChainId: chainID, + } +} + // CanonicalTime can be used to stringify time in a canonical way. func CanonicalTime(t time.Time) string { // Note that sending time over amino resets it to diff --git a/types/canonical_test.go b/types/canonical_test.go index 53a8ea52fc..2ccb80ff71 100644 --- a/types/canonical_test.go +++ b/types/canonical_test.go @@ -4,13 +4,13 @@ import ( "reflect" "testing" - "github.com/tendermint/tendermint/crypto/tmhash" + "github.com/tendermint/tendermint/crypto" tmrand "github.com/tendermint/tendermint/libs/rand" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) func TestCanonicalizeBlockID(t *testing.T) { - randhash := tmrand.Bytes(tmhash.Size) + randhash := tmrand.Bytes(crypto.HashSize) block1 := tmproto.BlockID{Hash: randhash, PartSetHeader: tmproto.PartSetHeader{Total: 5, Hash: randhash}} block2 := tmproto.BlockID{Hash: randhash, diff --git a/types/core_chainlock.go b/types/core_chainlock.go index 9a18d35166..12d601b138 100644 --- a/types/core_chainlock.go +++ b/types/core_chainlock.go @@ -1,6 +1,7 @@ package types import ( + "crypto/sha256" "encoding/binary" "errors" "fmt" @@ -57,7 +58,8 @@ func (cl CoreChainLock) RequestID() []byte { binary.LittleEndian.PutUint32(coreBlockHeightBytes[:], cl.CoreBlockHeight) s = append(s, coreBlockHeightBytes[:]...) 
- return crypto.Sha256(crypto.Sha256(s)) + hash := sha256.Sum256(s) + return hash[:] } // ValidateBasic performs stateless validation on a Chain Lock returning an error diff --git a/types/errors_p2p.go b/types/errors_p2p.go deleted file mode 100644 index ab166d7d34..0000000000 --- a/types/errors_p2p.go +++ /dev/null @@ -1,33 +0,0 @@ -package types - -import ( - "fmt" -) - -//------------------------------------------------------------------- - -type ErrNetAddressNoID struct { - Addr string -} - -func (e ErrNetAddressNoID) Error() string { - return fmt.Sprintf("address (%s) does not contain ID", e.Addr) -} - -type ErrNetAddressInvalid struct { - Addr string - Err error -} - -func (e ErrNetAddressInvalid) Error() string { - return fmt.Sprintf("invalid address (%s): %v", e.Addr, e.Err) -} - -type ErrNetAddressLookup struct { - Addr string - Err error -} - -func (e ErrNetAddressLookup) Error() string { - return fmt.Sprintf("error looking up host (%s): %v", e.Addr, e.Err) -} diff --git a/types/event_bus.go b/types/event_bus.go deleted file mode 100644 index 4e93fb5c94..0000000000 --- a/types/event_bus.go +++ /dev/null @@ -1,330 +0,0 @@ -package types - -import ( - "context" - "fmt" - "strings" - - "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/libs/log" - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" - "github.com/tendermint/tendermint/libs/service" -) - -const defaultCapacity = 0 - -type EventBusSubscriber interface { - Subscribe(ctx context.Context, subscriber string, query tmpubsub.Query, outCapacity ...int) (Subscription, error) - Unsubscribe(ctx context.Context, args tmpubsub.UnsubscribeArgs) error - UnsubscribeAll(ctx context.Context, subscriber string) error - - NumClients() int - NumClientSubscriptions(clientID string) int -} - -type Subscription interface { - ID() string - Out() <-chan tmpubsub.Message - Canceled() <-chan struct{} - Err() error -} - -// EventBus is a common bus for all events going through the system. All calls -// are proxied to underlying pubsub server. All events must be published using -// EventBus to ensure correct data types. -type EventBus struct { - service.BaseService - pubsub *tmpubsub.Server -} - -// NewEventBus returns a new event bus. -func NewEventBus() *EventBus { - return NewEventBusWithBufferCapacity(defaultCapacity) -} - -// NewEventBusWithBufferCapacity returns a new event bus with the given buffer capacity. -func NewEventBusWithBufferCapacity(cap int) *EventBus { - // capacity could be exposed later if needed - pubsub := tmpubsub.NewServer(tmpubsub.BufferCapacity(cap)) - b := &EventBus{pubsub: pubsub} - b.BaseService = *service.NewBaseService(nil, "EventBus", b) - return b -} - -func (b *EventBus) SetLogger(l log.Logger) { - b.BaseService.SetLogger(l) - b.pubsub.SetLogger(l.With("module", "pubsub")) -} - -func (b *EventBus) OnStart() error { - return b.pubsub.Start() -} - -func (b *EventBus) OnStop() { - if err := b.pubsub.Stop(); err != nil { - b.pubsub.Logger.Error("error trying to stop eventBus", "error", err) - } -} - -func (b *EventBus) NumClients() int { - return b.pubsub.NumClients() -} - -func (b *EventBus) NumClientSubscriptions(clientID string) int { - return b.pubsub.NumClientSubscriptions(clientID) -} - -func (b *EventBus) Subscribe( - ctx context.Context, - subscriber string, - query tmpubsub.Query, - outCapacity ...int, -) (Subscription, error) { - return b.pubsub.Subscribe(ctx, subscriber, query, outCapacity...) 
-} - -// This method can be used for a local consensus explorer and synchronous -// testing. Do not use for for public facing / untrusted subscriptions! -func (b *EventBus) SubscribeUnbuffered( - ctx context.Context, - subscriber string, - query tmpubsub.Query, -) (Subscription, error) { - return b.pubsub.SubscribeUnbuffered(ctx, subscriber, query) -} - -func (b *EventBus) Unsubscribe(ctx context.Context, args tmpubsub.UnsubscribeArgs) error { - return b.pubsub.Unsubscribe(ctx, args) -} - -func (b *EventBus) UnsubscribeAll(ctx context.Context, subscriber string) error { - return b.pubsub.UnsubscribeAll(ctx, subscriber) -} - -func (b *EventBus) Publish(eventValue string, eventData TMEventData) error { - // no explicit deadline for publishing events - ctx := context.Background() - - tokens := strings.Split(EventTypeKey, ".") - event := types.Event{ - Type: tokens[0], - Attributes: []types.EventAttribute{ - { - Key: tokens[1], - Value: eventValue, - }, - }, - } - - return b.pubsub.PublishWithEvents(ctx, eventData, []types.Event{event}) -} - -func (b *EventBus) PublishEventNewBlock(data EventDataNewBlock) error { - // no explicit deadline for publishing events - ctx := context.Background() - events := append(data.ResultBeginBlock.Events, data.ResultEndBlock.Events...) - - // add Tendermint-reserved new block event - events = append(events, EventNewBlock) - - return b.pubsub.PublishWithEvents(ctx, data, events) -} - -func (b *EventBus) PublishEventNewBlockHeader(data EventDataNewBlockHeader) error { - // no explicit deadline for publishing events - ctx := context.Background() - events := append(data.ResultBeginBlock.Events, data.ResultEndBlock.Events...) - - // add Tendermint-reserved new block header event - events = append(events, EventNewBlockHeader) - - return b.pubsub.PublishWithEvents(ctx, data, events) -} - -func (b *EventBus) PublishEventNewEvidence(evidence EventDataNewEvidence) error { - return b.Publish(EventNewEvidenceValue, evidence) -} - -func (b *EventBus) PublishEventVote(data EventDataVote) error { - return b.Publish(EventVoteValue, data) -} - -func (b *EventBus) PublishEventCommit(data EventDataCommit) error { - return b.Publish(EventCommitValue, data) -} - -func (b *EventBus) PublishEventValidBlock(data EventDataRoundState) error { - return b.Publish(EventValidBlockValue, data) -} - -func (b *EventBus) PublishEventBlockSyncStatus(data EventDataBlockSyncStatus) error { - return b.Publish(EventBlockSyncStatusValue, data) -} - -func (b *EventBus) PublishEventStateSyncStatus(data EventDataStateSyncStatus) error { - return b.Publish(EventStateSyncStatusValue, data) -} - -// PublishEventTx publishes tx event with events from Result. Note it will add -// predefined keys (EventTypeKey, TxHashKey). Existing events with the same keys -// will be overwritten. 
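// For example, a transaction published through this method can be matched by
// a subscription query over those reserved keys; the values mirror the tests
// in event_bus_test.go below, and keys such as tx.hash are split on '.' into
// an event type and an attribute key:
//
//	q := tmquery.MustCompile(
//		fmt.Sprintf("tm.event='Tx' AND tx.height=1 AND tx.hash='%X'", tx.Hash()),
//	)
//	sub, err := eventBus.Subscribe(context.Background(), "test", q)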
-func (b *EventBus) PublishEventTx(data EventDataTx) error { - // no explicit deadline for publishing events - ctx := context.Background() - events := data.Result.Events - - // add Tendermint-reserved events - events = append(events, EventTx) - - tokens := strings.Split(TxHashKey, ".") - events = append(events, types.Event{ - Type: tokens[0], - Attributes: []types.EventAttribute{ - { - Key: tokens[1], - Value: fmt.Sprintf("%X", Tx(data.Tx).Hash()), - }, - }, - }) - - tokens = strings.Split(TxHeightKey, ".") - events = append(events, types.Event{ - Type: tokens[0], - Attributes: []types.EventAttribute{ - { - Key: tokens[1], - Value: fmt.Sprintf("%d", data.Height), - }, - }, - }) - - return b.pubsub.PublishWithEvents(ctx, data, events) -} - -func (b *EventBus) PublishEventNewRoundStep(data EventDataRoundState) error { - return b.Publish(EventNewRoundStepValue, data) -} - -func (b *EventBus) PublishEventTimeoutPropose(data EventDataRoundState) error { - return b.Publish(EventTimeoutProposeValue, data) -} - -func (b *EventBus) PublishEventTimeoutWait(data EventDataRoundState) error { - return b.Publish(EventTimeoutWaitValue, data) -} - -func (b *EventBus) PublishEventNewRound(data EventDataNewRound) error { - return b.Publish(EventNewRoundValue, data) -} - -func (b *EventBus) PublishEventCompleteProposal(data EventDataCompleteProposal) error { - return b.Publish(EventCompleteProposalValue, data) -} - -func (b *EventBus) PublishEventPolka(data EventDataRoundState) error { - return b.Publish(EventPolkaValue, data) -} - -func (b *EventBus) PublishEventUnlock(data EventDataRoundState) error { - return b.Publish(EventUnlockValue, data) -} - -func (b *EventBus) PublishEventRelock(data EventDataRoundState) error { - return b.Publish(EventRelockValue, data) -} - -func (b *EventBus) PublishEventLock(data EventDataRoundState) error { - return b.Publish(EventLockValue, data) -} - -func (b *EventBus) PublishEventValidatorSetUpdates(data EventDataValidatorSetUpdate) error { - return b.Publish(EventValidatorSetUpdateValue, data) -} - -//----------------------------------------------------------------------------- -type NopEventBus struct{} - -func (NopEventBus) Subscribe( - ctx context.Context, - subscriber string, - query tmpubsub.Query, - out chan<- interface{}, -) error { - return nil -} - -func (NopEventBus) Unsubscribe(ctx context.Context, args tmpubsub.UnsubscribeArgs) error { - return nil -} - -func (NopEventBus) UnsubscribeAll(ctx context.Context, subscriber string) error { - return nil -} - -func (NopEventBus) PublishEventNewBlock(data EventDataNewBlock) error { - return nil -} - -func (NopEventBus) PublishEventNewBlockHeader(data EventDataNewBlockHeader) error { - return nil -} - -func (NopEventBus) PublishEventNewEvidence(evidence EventDataNewEvidence) error { - return nil -} - -func (NopEventBus) PublishEventVote(data EventDataVote) error { - return nil -} - -func (NopEventBus) PublishEventTx(data EventDataTx) error { - return nil -} - -func (NopEventBus) PublishEventNewRoundStep(data EventDataRoundState) error { - return nil -} - -func (NopEventBus) PublishEventTimeoutPropose(data EventDataRoundState) error { - return nil -} - -func (NopEventBus) PublishEventTimeoutWait(data EventDataRoundState) error { - return nil -} - -func (NopEventBus) PublishEventNewRound(data EventDataRoundState) error { - return nil -} - -func (NopEventBus) PublishEventCompleteProposal(data EventDataRoundState) error { - return nil -} - -func (NopEventBus) PublishEventPolka(data EventDataRoundState) error { - return nil 
-} - -func (NopEventBus) PublishEventUnlock(data EventDataRoundState) error { - return nil -} - -func (NopEventBus) PublishEventRelock(data EventDataRoundState) error { - return nil -} - -func (NopEventBus) PublishEventLock(data EventDataRoundState) error { - return nil -} - -func (NopEventBus) PublishEventValidatorSetUpdates(data EventDataValidatorSetUpdate) error { - return nil -} - -func (NopEventBus) PublishEventBlockSyncStatus(data EventDataBlockSyncStatus) error { - return nil -} - -func (NopEventBus) PublishEventStateSyncStatus(data EventDataStateSyncStatus) error { - return nil -} diff --git a/types/event_bus_test.go b/types/event_bus_test.go deleted file mode 100644 index f1c4067f49..0000000000 --- a/types/event_bus_test.go +++ /dev/null @@ -1,515 +0,0 @@ -package types - -import ( - "context" - "fmt" - mrand "math/rand" - "testing" - "time" - - "github.com/dashevo/dashd-go/btcjson" - - "github.com/tendermint/tendermint/crypto" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - abci "github.com/tendermint/tendermint/abci/types" - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" - tmquery "github.com/tendermint/tendermint/libs/pubsub/query" -) - -func TestEventBusPublishEventTx(t *testing.T) { - eventBus := NewEventBus() - err := eventBus.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := eventBus.Stop(); err != nil { - t.Error(err) - } - }) - - tx := Tx("foo") - result := abci.ResponseDeliverTx{ - Data: []byte("bar"), - Events: []abci.Event{ - {Type: "testType", Attributes: []abci.EventAttribute{{Key: "baz", Value: "1"}}}, - }, - } - - // PublishEventTx adds 3 composite keys, so the query below should work - query := fmt.Sprintf("tm.event='Tx' AND tx.height=1 AND tx.hash='%X' AND testType.baz=1", tx.Hash()) - txsSub, err := eventBus.Subscribe(context.Background(), "test", tmquery.MustCompile(query)) - require.NoError(t, err) - - done := make(chan struct{}) - go func() { - msg := <-txsSub.Out() - edt := msg.Data().(EventDataTx) - assert.Equal(t, int64(1), edt.Height) - assert.Equal(t, uint32(0), edt.Index) - assert.EqualValues(t, tx, edt.Tx) - assert.Equal(t, result, edt.Result) - close(done) - }() - - err = eventBus.PublishEventTx(EventDataTx{abci.TxResult{ - Height: 1, - Index: 0, - Tx: tx, - Result: result, - }}) - assert.NoError(t, err) - - select { - case <-done: - case <-time.After(1 * time.Second): - t.Fatal("did not receive a transaction after 1 sec.") - } -} - -func TestEventBusPublishEventNewBlock(t *testing.T) { - eventBus := NewEventBus() - err := eventBus.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := eventBus.Stop(); err != nil { - t.Error(err) - } - }) - block := MakeBlock(0, 0, nil, []Tx{}, nil, []Evidence{}, 0) - blockID := BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(BlockPartSizeBytes).Header()} - resultBeginBlock := abci.ResponseBeginBlock{ - Events: []abci.Event{ - {Type: "testType", Attributes: []abci.EventAttribute{{Key: "baz", Value: "1"}}}, - }, - } - resultEndBlock := abci.ResponseEndBlock{ - Events: []abci.Event{ - {Type: "testType", Attributes: []abci.EventAttribute{{Key: "foz", Value: "2"}}}, - }, - } - - // PublishEventNewBlock adds the tm.event compositeKey, so the query below should work - query := "tm.event='NewBlock' AND testType.baz=1 AND testType.foz=2" - blocksSub, err := eventBus.Subscribe(context.Background(), "test", tmquery.MustCompile(query)) - require.NoError(t, err) - - done := make(chan struct{}) - go func() { - msg := <-blocksSub.Out() - edt := 
msg.Data().(EventDataNewBlock) - assert.Equal(t, block, edt.Block) - assert.Equal(t, blockID, edt.BlockID) - assert.Equal(t, resultBeginBlock, edt.ResultBeginBlock) - assert.Equal(t, resultEndBlock, edt.ResultEndBlock) - close(done) - }() - - err = eventBus.PublishEventNewBlock(EventDataNewBlock{ - Block: block, - BlockID: blockID, - ResultBeginBlock: resultBeginBlock, - ResultEndBlock: resultEndBlock, - }) - assert.NoError(t, err) - - select { - case <-done: - case <-time.After(1 * time.Second): - t.Fatal("did not receive a block after 1 sec.") - } -} - -func TestEventBusPublishEventTxDuplicateKeys(t *testing.T) { - eventBus := NewEventBus() - err := eventBus.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := eventBus.Stop(); err != nil { - t.Error(err) - } - }) - - tx := Tx("foo") - result := abci.ResponseDeliverTx{ - Data: []byte("bar"), - Events: []abci.Event{ - { - Type: "transfer", - Attributes: []abci.EventAttribute{ - {Key: "sender", Value: "foo"}, - {Key: "recipient", Value: "bar"}, - {Key: "amount", Value: "5"}, - }, - }, - { - Type: "transfer", - Attributes: []abci.EventAttribute{ - {Key: "sender", Value: "baz"}, - {Key: "recipient", Value: "cat"}, - {Key: "amount", Value: "13"}, - }, - }, - { - Type: "withdraw.rewards", - Attributes: []abci.EventAttribute{ - {Key: "address", Value: "bar"}, - {Key: "source", Value: "iceman"}, - {Key: "amount", Value: "33"}, - }, - }, - }, - } - - testCases := []struct { - query string - expectResults bool - }{ - { - "tm.event='Tx' AND tx.height=1 AND transfer.sender='DoesNotExist'", - false, - }, - { - "tm.event='Tx' AND tx.height=1 AND transfer.sender='foo'", - true, - }, - { - "tm.event='Tx' AND tx.height=1 AND transfer.sender='baz'", - true, - }, - { - "tm.event='Tx' AND tx.height=1 AND transfer.sender='foo' AND transfer.sender='baz'", - true, - }, - { - "tm.event='Tx' AND tx.height=1 AND transfer.sender='foo' AND transfer.sender='DoesNotExist'", - false, - }, - } - - for i, tc := range testCases { - sub, err := eventBus.Subscribe(context.Background(), fmt.Sprintf("client-%d", i), tmquery.MustCompile(tc.query)) - require.NoError(t, err) - - done := make(chan struct{}) - - go func() { - select { - case msg := <-sub.Out(): - data := msg.Data().(EventDataTx) - assert.Equal(t, int64(1), data.Height) - assert.Equal(t, uint32(0), data.Index) - assert.EqualValues(t, tx, data.Tx) - assert.Equal(t, result, data.Result) - close(done) - case <-time.After(1 * time.Second): - return - } - }() - - err = eventBus.PublishEventTx(EventDataTx{abci.TxResult{ - Height: 1, - Index: 0, - Tx: tx, - Result: result, - }}) - assert.NoError(t, err) - - select { - case <-done: - if !tc.expectResults { - require.Fail(t, "unexpected transaction result(s) from subscription") - } - case <-time.After(1 * time.Second): - if tc.expectResults { - require.Fail(t, "failed to receive a transaction after 1 second") - } - } - } -} - -func TestEventBusPublishEventNewBlockHeader(t *testing.T) { - eventBus := NewEventBus() - err := eventBus.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := eventBus.Stop(); err != nil { - t.Error(err) - } - }) - - block := MakeBlock(0, 0, nil, []Tx{}, nil, []Evidence{}, 0) - resultBeginBlock := abci.ResponseBeginBlock{ - Events: []abci.Event{ - {Type: "testType", Attributes: []abci.EventAttribute{{Key: "baz", Value: "1"}}}, - }, - } - resultEndBlock := abci.ResponseEndBlock{ - Events: []abci.Event{ - {Type: "testType", Attributes: []abci.EventAttribute{{Key: "foz", Value: "2"}}}, - }, - } - - // 
PublishEventNewBlockHeader adds the tm.event compositeKey, so the query below should work - query := "tm.event='NewBlockHeader' AND testType.baz=1 AND testType.foz=2" - headersSub, err := eventBus.Subscribe(context.Background(), "test", tmquery.MustCompile(query)) - require.NoError(t, err) - - done := make(chan struct{}) - go func() { - msg := <-headersSub.Out() - edt := msg.Data().(EventDataNewBlockHeader) - assert.Equal(t, block.Header, edt.Header) - assert.Equal(t, resultBeginBlock, edt.ResultBeginBlock) - assert.Equal(t, resultEndBlock, edt.ResultEndBlock) - close(done) - }() - - err = eventBus.PublishEventNewBlockHeader(EventDataNewBlockHeader{ - Header: block.Header, - ResultBeginBlock: resultBeginBlock, - ResultEndBlock: resultEndBlock, - }) - assert.NoError(t, err) - - select { - case <-done: - case <-time.After(1 * time.Second): - t.Fatal("did not receive a block header after 1 sec.") - } -} - -func TestEventBusPublishEventNewEvidence(t *testing.T) { - eventBus := NewEventBus() - err := eventBus.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := eventBus.Stop(); err != nil { - t.Error(err) - } - }) - - ev, err := NewMockDuplicateVoteEvidence(1, time.Now(), "test-chain-id", btcjson.LLMQType_5_60, crypto.RandQuorumHash()) - require.NoError(t, err) - - query := "tm.event='NewEvidence'" - evSub, err := eventBus.Subscribe(context.Background(), "test", tmquery.MustCompile(query)) - require.NoError(t, err) - - done := make(chan struct{}) - go func() { - msg := <-evSub.Out() - edt := msg.Data().(EventDataNewEvidence) - assert.Equal(t, ev, edt.Evidence) - assert.Equal(t, int64(4), edt.Height) - close(done) - }() - - err = eventBus.PublishEventNewEvidence(EventDataNewEvidence{ - Evidence: ev, - Height: 4, - }) - assert.NoError(t, err) - - select { - case <-done: - case <-time.After(1 * time.Second): - t.Fatal("did not receive a block header after 1 sec.") - } -} - -func TestEventBusPublish(t *testing.T) { - eventBus := NewEventBus() - err := eventBus.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := eventBus.Stop(); err != nil { - t.Error(err) - } - }) - - const numEventsExpected = 14 - - sub, err := eventBus.Subscribe(context.Background(), "test", tmquery.All, numEventsExpected) - require.NoError(t, err) - - done := make(chan struct{}) - go func() { - numEvents := 0 - for range sub.Out() { - numEvents++ - if numEvents >= numEventsExpected { - close(done) - return - } - } - }() - - err = eventBus.Publish(EventNewBlockHeaderValue, EventDataNewBlockHeader{}) - require.NoError(t, err) - err = eventBus.PublishEventNewBlock(EventDataNewBlock{}) - require.NoError(t, err) - err = eventBus.PublishEventNewBlockHeader(EventDataNewBlockHeader{}) - require.NoError(t, err) - err = eventBus.PublishEventVote(EventDataVote{}) - require.NoError(t, err) - err = eventBus.PublishEventNewRoundStep(EventDataRoundState{}) - require.NoError(t, err) - err = eventBus.PublishEventTimeoutPropose(EventDataRoundState{}) - require.NoError(t, err) - err = eventBus.PublishEventTimeoutWait(EventDataRoundState{}) - require.NoError(t, err) - err = eventBus.PublishEventNewRound(EventDataNewRound{}) - require.NoError(t, err) - err = eventBus.PublishEventCompleteProposal(EventDataCompleteProposal{}) - require.NoError(t, err) - err = eventBus.PublishEventPolka(EventDataRoundState{}) - require.NoError(t, err) - err = eventBus.PublishEventUnlock(EventDataRoundState{}) - require.NoError(t, err) - err = eventBus.PublishEventRelock(EventDataRoundState{}) - require.NoError(t, err) - err = 
eventBus.PublishEventLock(EventDataRoundState{}) - require.NoError(t, err) - err = eventBus.PublishEventValidatorSetUpdates(EventDataValidatorSetUpdate{}) - require.NoError(t, err) - err = eventBus.PublishEventBlockSyncStatus(EventDataBlockSyncStatus{}) - require.NoError(t, err) - err = eventBus.PublishEventStateSyncStatus(EventDataStateSyncStatus{}) - require.NoError(t, err) - - select { - case <-done: - case <-time.After(1 * time.Second): - t.Fatalf("expected to receive %d events after 1 sec.", numEventsExpected) - } -} - -func BenchmarkEventBus(b *testing.B) { - benchmarks := []struct { - name string - numClients int - randQueries bool - randEvents bool - }{ - {"10Clients1Query1Event", 10, false, false}, - {"100Clients", 100, false, false}, - {"1000Clients", 1000, false, false}, - - {"10ClientsRandQueries1Event", 10, true, false}, - {"100Clients", 100, true, false}, - {"1000Clients", 1000, true, false}, - - {"10ClientsRandQueriesRandEvents", 10, true, true}, - {"100Clients", 100, true, true}, - {"1000Clients", 1000, true, true}, - - {"10Clients1QueryRandEvents", 10, false, true}, - {"100Clients", 100, false, true}, - {"1000Clients", 1000, false, true}, - } - - for _, bm := range benchmarks { - bm := bm - b.Run(bm.name, func(b *testing.B) { - benchmarkEventBus(bm.numClients, bm.randQueries, bm.randEvents, b) - }) - } -} - -func benchmarkEventBus(numClients int, randQueries bool, randEvents bool, b *testing.B) { - // for random* functions - mrand.Seed(time.Now().Unix()) - - eventBus := NewEventBusWithBufferCapacity(0) // set buffer capacity to 0 so we are not testing cache - err := eventBus.Start() - if err != nil { - b.Error(err) - } - b.Cleanup(func() { - if err := eventBus.Stop(); err != nil { - b.Error(err) - } - }) - - ctx := context.Background() - q := EventQueryNewBlock - - for i := 0; i < numClients; i++ { - if randQueries { - q = randQuery() - } - sub, err := eventBus.Subscribe(ctx, fmt.Sprintf("client-%d", i), q) - if err != nil { - b.Fatal(err) - } - go func() { - for { - select { - case <-sub.Out(): - case <-sub.Canceled(): - return - } - } - }() - } - - eventValue := EventNewBlockValue - - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - if randEvents { - eventValue = randEventValue() - } - - err := eventBus.Publish(eventValue, EventDataString("Gamora")) - if err != nil { - b.Error(err) - } - } -} - -var events = []string{ - EventNewBlockValue, - EventNewBlockHeaderValue, - EventNewRoundValue, - EventNewRoundStepValue, - EventTimeoutProposeValue, - EventCompleteProposalValue, - EventPolkaValue, - EventUnlockValue, - EventLockValue, - EventRelockValue, - EventTimeoutWaitValue, - EventVoteValue, - EventBlockSyncStatusValue, - EventStateSyncStatusValue, -} - -func randEventValue() string { - - return events[mrand.Intn(len(events))] -} - -var queries = []tmpubsub.Query{ - EventQueryNewBlock, - EventQueryNewBlockHeader, - EventQueryNewRound, - EventQueryNewRoundStep, - EventQueryTimeoutPropose, - EventQueryCompleteProposal, - EventQueryPolka, - EventQueryUnlock, - EventQueryLock, - EventQueryRelock, - EventQueryTimeoutWait, - EventQueryVote, - EventQueryBlockSyncStatus, - EventQueryStateSyncStatus, -} - -func randQuery() tmpubsub.Query { - return queries[mrand.Intn(len(queries))] -} diff --git a/types/events.go b/types/events.go index 924d1b7f82..63d0c80911 100644 --- a/types/events.go +++ b/types/events.go @@ -6,9 +6,8 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto" - tmjson 
"github.com/tendermint/tendermint/libs/json" - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" - tmquery "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/internal/jsontypes" + tmquery "github.com/tendermint/tendermint/internal/pubsub/query" ) // Reserved event types (alphabetically sorted). @@ -39,10 +38,13 @@ const ( EventStateSyncStatusValue = "StateSyncStatus" EventTimeoutProposeValue = "TimeoutPropose" EventTimeoutWaitValue = "TimeoutWait" - EventUnlockValue = "Unlock" EventValidBlockValue = "ValidBlock" EventVoteValue = "Vote" EventCommitValue = "Commit" + + // Events emitted by the evidence reactor when evidence is validated + // and before it is committed + EventEvidenceValidatedValue = "EvidenceValidated" ) // Pre-populated ABCI Tendermint-reserved events @@ -90,24 +92,31 @@ var ( // ENCODING / DECODING -// TMEventData implements events.EventData. -type TMEventData interface { - // empty interface +// EventData is satisfied by types that can be published as event data. +// +// Implementations of this interface that contain ABCI event metadata should +// also implement the eventlog.ABCIEventer extension interface to expose those +// metadata to the event log machinery. Event data that do not contain ABCI +// metadata can safely omit this. +type EventData interface { + // The value must support encoding as a type-tagged JSON object. + jsontypes.Tagged } func init() { - tmjson.RegisterType(EventDataNewBlock{}, "tendermint/event/NewBlock") - tmjson.RegisterType(EventDataNewBlockHeader{}, "tendermint/event/NewBlockHeader") - tmjson.RegisterType(EventDataNewEvidence{}, "tendermint/event/NewEvidence") - tmjson.RegisterType(EventDataTx{}, "tendermint/event/Tx") - tmjson.RegisterType(EventDataRoundState{}, "tendermint/event/RoundState") - tmjson.RegisterType(EventDataNewRound{}, "tendermint/event/NewRound") - tmjson.RegisterType(EventDataCompleteProposal{}, "tendermint/event/CompleteProposal") - tmjson.RegisterType(EventDataVote{}, "tendermint/event/Vote") - tmjson.RegisterType(EventDataValidatorSetUpdate{}, "tendermint/event/ValidatorSetUpdate") - tmjson.RegisterType(EventDataString(""), "tendermint/event/ProposalString") - tmjson.RegisterType(EventDataBlockSyncStatus{}, "tendermint/event/FastSyncStatus") - tmjson.RegisterType(EventDataStateSyncStatus{}, "tendermint/event/StateSyncStatus") + jsontypes.MustRegister(EventDataBlockSyncStatus{}) + jsontypes.MustRegister(EventDataCompleteProposal{}) + jsontypes.MustRegister(EventDataNewBlock{}) + jsontypes.MustRegister(EventDataNewBlockHeader{}) + jsontypes.MustRegister(EventDataNewEvidence{}) + jsontypes.MustRegister(EventDataNewRound{}) + jsontypes.MustRegister(EventDataRoundState{}) + jsontypes.MustRegister(EventDataStateSyncStatus{}) + jsontypes.MustRegister(EventDataTx{}) + jsontypes.MustRegister(EventDataVote{}) + jsontypes.MustRegister(EventDataValidatorSetUpdate{}) + jsontypes.MustRegister(EventDataEvidenceValidated{}) + jsontypes.MustRegister(EventDataString("")) } // Most event messages are basic types (a block, a transaction) @@ -117,87 +126,148 @@ type EventDataNewBlock struct { Block *Block `json:"block"` BlockID BlockID `json:"block_id"` - ResultBeginBlock abci.ResponseBeginBlock `json:"result_begin_block"` - ResultEndBlock abci.ResponseEndBlock `json:"result_end_block"` + ResultFinalizeBlock abci.ResponseFinalizeBlock `json:"result_finalize_block"` } +// TypeTag implements the required method of jsontypes.Tagged. 
+func (EventDataNewBlock) TypeTag() string { return "tendermint/event/NewBlock" } + +// ABCIEvents implements the eventlog.ABCIEventer interface. +func (e EventDataNewBlock) ABCIEvents() []abci.Event { return e.ResultFinalizeBlock.Events } + type EventDataNewBlockHeader struct { Header Header `json:"header"` - NumTxs int64 `json:"num_txs"` // Number of txs in a block - ResultBeginBlock abci.ResponseBeginBlock `json:"result_begin_block"` - ResultEndBlock abci.ResponseEndBlock `json:"result_end_block"` + NumTxs int64 `json:"num_txs,string"` // Number of txs in a block + ResultFinalizeBlock abci.ResponseFinalizeBlock `json:"result_finalize_block"` } +// TypeTag implements the required method of jsontypes.Tagged. +func (EventDataNewBlockHeader) TypeTag() string { return "tendermint/event/NewBlockHeader" } + +// ABCIEvents implements the eventlog.ABCIEventer interface. +func (e EventDataNewBlockHeader) ABCIEvents() []abci.Event { return e.ResultFinalizeBlock.Events } + type EventDataNewEvidence struct { Evidence Evidence `json:"evidence"` - Height int64 `json:"height"` + Height int64 `json:"height,string"` } +// TypeTag implements the required method of jsontypes.Tagged. +func (EventDataNewEvidence) TypeTag() string { return "tendermint/event/NewEvidence" } + // All txs fire EventDataTx type EventDataTx struct { abci.TxResult } +// TypeTag implements the required method of jsontypes.Tagged. +func (EventDataTx) TypeTag() string { return "tendermint/event/Tx" } + +// ABCIEvents implements the eventlog.ABCIEventer interface. +func (e EventDataTx) ABCIEvents() []abci.Event { + base := []abci.Event{ + eventWithAttr(TxHashKey, fmt.Sprintf("%X", Tx(e.Tx).Hash())), + eventWithAttr(TxHeightKey, fmt.Sprintf("%d", e.Height)), + } + return append(base, e.Result.Events...) +} + // NOTE: This goes into the replay WAL type EventDataRoundState struct { - Height int64 `json:"height"` + Height int64 `json:"height,string"` Round int32 `json:"round"` Step string `json:"step"` } +// TypeTag implements the required method of jsontypes.Tagged. +func (EventDataRoundState) TypeTag() string { return "tendermint/event/RoundState" } + type ValidatorInfo struct { ProTxHash ProTxHash `json:"pro_tx_hash"` Index int32 `json:"index"` } type EventDataNewRound struct { - Height int64 `json:"height"` + Height int64 `json:"height,string"` Round int32 `json:"round"` Step string `json:"step"` Proposer ValidatorInfo `json:"proposer"` } +// TypeTag implements the required method of jsontypes.Tagged. +func (EventDataNewRound) TypeTag() string { return "tendermint/event/NewRound" } + type EventDataCompleteProposal struct { - Height int64 `json:"height"` + Height int64 `json:"height,string"` Round int32 `json:"round"` Step string `json:"step"` BlockID BlockID `json:"block_id"` } +// TypeTag implements the required method of jsontypes.Tagged. +func (EventDataCompleteProposal) TypeTag() string { return "tendermint/event/CompleteProposal" } + type EventDataVote struct { Vote *Vote } +// TypeTag implements the required method of jsontypes.Tagged. +func (EventDataVote) TypeTag() string { return "tendermint/event/Vote" } + type EventDataCommit struct { Commit *Commit } +// TypeTag implements the required method of jsontypes.Tagged. +func (EventDataCommit) TypeTag() string { return "tendermint/event/Commit" } + type EventDataString string +// TypeTag implements the required method of jsontypes.Tagged. 
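// The returned tag is embedded in the encoded JSON so a decoder can recover
// the concrete type without knowing it up front. A hedged round-trip sketch
// using the jsontypes helpers seen elsewhere in this package:
//
//	bits, err := jsontypes.Marshal(EventDataString("proposal"))
//	if err != nil {
//		// handle err
//	}
//	var ev EventData
//	err = jsontypes.Unmarshal(bits, &ev) // ev now holds EventDataString again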
+func (EventDataString) TypeTag() string { return "tendermint/event/ProposalString" } + type EventDataValidatorSetUpdate struct { ValidatorSetUpdates []*Validator `json:"validator_updates"` ThresholdPublicKey crypto.PubKey `json:"threshold_public_key"` QuorumHash crypto.QuorumHash `json:"quorum_hash"` } +// TypeTag implements the required method of jsontypes.Tagged. +func (EventDataValidatorSetUpdate) TypeTag() string { return "tendermint/event/ValidatorSetUpdates" } + // EventDataBlockSyncStatus shows the fastsync status and the // height when the node state sync mechanism changes. type EventDataBlockSyncStatus struct { Complete bool `json:"complete"` - Height int64 `json:"height"` + Height int64 `json:"height,string"` } +// TypeTag implements the required method of jsontypes.Tagged. +func (EventDataBlockSyncStatus) TypeTag() string { return "tendermint/event/FastSyncStatus" } + // EventDataStateSyncStatus shows the statesync status and the // height when the node state sync mechanism changes. type EventDataStateSyncStatus struct { Complete bool `json:"complete"` - Height int64 `json:"height"` + Height int64 `json:"height,string"` } +// TypeTag implements the required method of jsontypes.Tagged. +func (EventDataStateSyncStatus) TypeTag() string { return "tendermint/event/StateSyncStatus" } + +type EventDataEvidenceValidated struct { + Evidence Evidence `json:"evidence"` + + Height int64 `json:"height,string"` +} + +// TypeTag implements the required method of jsontypes.Tagged. +func (EventDataEvidenceValidated) TypeTag() string { return "tendermint/event/EvidenceValidated" } + // PUBSUB const ( @@ -210,12 +280,11 @@ const ( // see EventBus#PublishEventTx TxHeightKey = "tx.height" - // BlockHeightKey is a reserved key used for indexing BeginBlock and Endblock - // events. + // BlockHeightKey is a reserved key used for indexing FinalizeBlock events. BlockHeightKey = "block.height" - EventTypeBeginBlock = "begin_block" - EventTypeEndBlock = "end_block" + // EventTypeFinalizeBlock is a reserved key used for indexing FinalizeBlock events. 
+ EventTypeFinalizeBlock = "finalize_block" ) var ( @@ -231,27 +300,27 @@ var ( EventQueryTimeoutPropose = QueryForEvent(EventTimeoutProposeValue) EventQueryTimeoutWait = QueryForEvent(EventTimeoutWaitValue) EventQueryTx = QueryForEvent(EventTxValue) - EventQueryUnlock = QueryForEvent(EventUnlockValue) EventQueryValidatorSetUpdates = QueryForEvent(EventValidatorSetUpdateValue) EventQueryValidBlock = QueryForEvent(EventValidBlockValue) EventQueryVote = QueryForEvent(EventVoteValue) EventQueryBlockSyncStatus = QueryForEvent(EventBlockSyncStatusValue) EventQueryStateSyncStatus = QueryForEvent(EventStateSyncStatusValue) + EventQueryEvidenceValidated = QueryForEvent(EventEvidenceValidatedValue) ) -func EventQueryTxFor(tx Tx) tmpubsub.Query { +func EventQueryTxFor(tx Tx) *tmquery.Query { return tmquery.MustCompile(fmt.Sprintf("%s='%s' AND %s='%X'", EventTypeKey, EventTxValue, TxHashKey, tx.Hash())) } -func QueryForEvent(eventValue string) tmpubsub.Query { +func QueryForEvent(eventValue string) *tmquery.Query { return tmquery.MustCompile(fmt.Sprintf("%s='%s'", EventTypeKey, eventValue)) } // BlockEventPublisher publishes all block related events type BlockEventPublisher interface { - PublishEventNewBlock(block EventDataNewBlock) error - PublishEventNewBlockHeader(header EventDataNewBlockHeader) error - PublishEventNewEvidence(evidence EventDataNewEvidence) error + PublishEventNewBlock(EventDataNewBlock) error + PublishEventNewBlockHeader(EventDataNewBlockHeader) error + PublishEventNewEvidence(EventDataNewEvidence) error PublishEventTx(EventDataTx) error PublishEventValidatorSetUpdates(EventDataValidatorSetUpdate) error } @@ -259,3 +328,16 @@ type BlockEventPublisher interface { type TxEventPublisher interface { PublishEventTx(EventDataTx) error } + +// eventWithAttr constructs a single abci.Event with a single attribute. +// The type of the event and the name of the attribute are obtained by +// splitting the event type on period (e.g., "foo.bar"). +func eventWithAttr(etype, value string) abci.Event { + parts := strings.SplitN(etype, ".", 2) + return abci.Event{ + Type: parts[0], + Attributes: []abci.EventAttribute{{ + Key: parts[1], Value: value, + }}, + } +} diff --git a/types/events_test.go b/types/events_test.go index bd4bde264a..f0498dbd23 100644 --- a/types/events_test.go +++ b/types/events_test.go @@ -7,6 +7,22 @@ import ( "github.com/stretchr/testify/assert" ) +// Verify that the event data types satisfy their shared interface. 
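// These blank-identifier declarations are compile-time checks: each line
// refuses to build if the named type stops implementing EventData, so
// interface drift is caught without running a single test. A minimal
// analogue with a hypothetical type:
//
//	type example struct{}
//	func (example) TypeTag() string { return "example/Example" }
//	var _ EventData = example{} // compiles only while example satisfies EventData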
+var ( + _ EventData = EventDataBlockSyncStatus{} + _ EventData = EventDataCompleteProposal{} + _ EventData = EventDataNewBlock{} + _ EventData = EventDataNewBlockHeader{} + _ EventData = EventDataNewEvidence{} + _ EventData = EventDataNewRound{} + _ EventData = EventDataRoundState{} + _ EventData = EventDataStateSyncStatus{} + _ EventData = EventDataTx{} + _ EventData = EventDataValidatorSetUpdate{} + _ EventData = EventDataVote{} + _ EventData = EventDataString("") +) + func TestQueryTxFor(t *testing.T) { tx := Tx("foo") assert.Equal(t, diff --git a/types/evidence.go b/types/evidence.go index c4ab3a5f12..b081005dcc 100644 --- a/types/evidence.go +++ b/types/evidence.go @@ -3,6 +3,7 @@ package types import ( "bytes" "context" + "encoding/json" "errors" "fmt" "strings" @@ -13,8 +14,8 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/merkle" - "github.com/tendermint/tendermint/crypto/tmhash" - tmjson "github.com/tendermint/tendermint/libs/json" + "github.com/tendermint/tendermint/internal/jsontypes" + tmmath "github.com/tendermint/tendermint/libs/math" tmrand "github.com/tendermint/tendermint/libs/rand" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) @@ -22,13 +23,16 @@ import ( // Evidence represents any provable malicious activity by a validator. // Verification logic for each evidence is part of the evidence module. type Evidence interface { - ABCI() []abci.Evidence // forms individual evidence to be sent to the application - Bytes() []byte // bytes which comprise the evidence - Hash() []byte // hash of the evidence - Height() int64 // height of the infraction - String() string // string format of the evidence - Time() time.Time // time of the infraction - ValidateBasic() error // basic consistency check + ABCI() []abci.Misbehavior // forms individual evidence to be sent to the application + Bytes() []byte // bytes which comprise the evidence + Hash() []byte // hash of the evidence + Height() int64 // height of the infraction + String() string // string format of the evidence + Time() time.Time // time of the infraction + ValidateBasic() error // basic consistency check + + // Implementations must support tagged encoding in JSON. + jsontypes.Tagged } //-------------------------------------------------------------------------------------- @@ -39,11 +43,14 @@ type DuplicateVoteEvidence struct { VoteB *Vote `json:"vote_b"` // abci specific information - TotalVotingPower int64 - ValidatorPower int64 + TotalVotingPower int64 `json:",string"` + ValidatorPower int64 `json:",string"` Timestamp time.Time } +// TypeTag implements the jsontypes.Tagged interface. 
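// (Aside on the struct above: the `json:",string"` options on
// TotalVotingPower and ValidatorPower make encoding/json render those int64
// fields as quoted strings, avoiding precision loss in consumers that parse
// JSON numbers as 64-bit floats. A hedged sketch with a hypothetical struct:
//
//	type ex struct {
//		N int64 `json:",string"`
//	}
//	b, _ := json.Marshal(ex{N: 42}) // produces {"N":"42"}
//
// The same option appears on the height fields in events.go above.)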
+func (*DuplicateVoteEvidence) TypeTag() string { return "tendermint/DuplicateVoteEvidence" } + var _ Evidence = &DuplicateVoteEvidence{} // NewDuplicateVoteEvidence creates DuplicateVoteEvidence with right ordering given @@ -78,9 +85,9 @@ func NewDuplicateVoteEvidence(vote1, vote2 *Vote, blockTime time.Time, valSet *V } // ABCI returns the application relevant representation of the evidence -func (dve *DuplicateVoteEvidence) ABCI() []abci.Evidence { - return []abci.Evidence{{ - Type: abci.EvidenceType_DUPLICATE_VOTE, +func (dve *DuplicateVoteEvidence) ABCI() []abci.Misbehavior { + return []abci.Misbehavior{{ + Type: abci.MisbehaviorType_DUPLICATE_VOTE, Validator: abci.Validator{ ProTxHash: dve.VoteA.ValidatorProTxHash, Power: dve.ValidatorPower, @@ -104,7 +111,7 @@ func (dve *DuplicateVoteEvidence) Bytes() []byte { // Hash returns the hash of the evidence. func (dve *DuplicateVoteEvidence) Hash() []byte { - return tmhash.Sum(dve.Bytes()) + return crypto.Checksum(dve.Bytes()) } // Height returns the height of the infraction @@ -202,14 +209,28 @@ func DuplicateVoteEvidenceFromProto(pb *tmproto.DuplicateVoteEvidence) (*Duplica return nil, errors.New("nil duplicate vote evidence") } - vA, err := VoteFromProto(pb.VoteA) - if err != nil { - return nil, err + var vA *Vote + if pb.VoteA != nil { + var err error + vA, err = VoteFromProto(pb.VoteA) + if err != nil { + return nil, err + } + if err = vA.ValidateBasic(); err != nil { + return nil, err + } } - vB, err := VoteFromProto(pb.VoteB) - if err != nil { - return nil, err + var vB *Vote + if pb.VoteB != nil { + var err error + vB, err = VoteFromProto(pb.VoteB) + if err != nil { + return nil, err + } + if err = vB.ValidateBasic(); err != nil { + return nil, err + } } dve := &DuplicateVoteEvidence{ @@ -228,6 +249,100 @@ func DuplicateVoteEvidenceFromProto(pb *tmproto.DuplicateVoteEvidence) (*Duplica // EvidenceList is a list of Evidence. Evidences is not a word. type EvidenceList []Evidence +// StringIndented returns a string representation of the evidence. +func (evl EvidenceList) StringIndented(indent string) string { + if evl == nil { + return "nil-Evidence" + } + evStrings := make([]string, tmmath.MinInt(len(evl), 21)) + for i, ev := range evl { + if i == 20 { + evStrings[i] = fmt.Sprintf("... (%v total)", len(evl)) + break + } + evStrings[i] = fmt.Sprintf("Evidence:%v", ev) + } + return fmt.Sprintf(`EvidenceList{ +%s %v +%s}#%v`, + indent, strings.Join(evStrings, "\n"+indent+" "), + indent, evl.Hash()) +} + +// ByteSize returns the total byte size of all the evidence +func (evl EvidenceList) ByteSize() int64 { + if len(evl) != 0 { + pb, err := evl.ToProto() + if err != nil { + panic(err) + } + return int64(pb.Size()) + } + return 0 +} + +// FromProto sets a protobuf EvidenceList to the given pointer. 
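// A hedged round-trip sketch pairing this method with ToProto below, in the
// spirit of the TestEvidenceListProtoBuf case added in this patch:
//
//	pb, err := evl.ToProto()
//	if err != nil {
//		// handle err
//	}
//	var out EvidenceList
//	err = out.FromProto(pb) // out now mirrors evl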
+func (evl *EvidenceList) FromProto(eviList *tmproto.EvidenceList) error { + if eviList == nil { + return errors.New("nil evidence list") + } + + eviBzs := make(EvidenceList, len(eviList.Evidence)) + for i := range eviList.Evidence { + evi, err := EvidenceFromProto(&eviList.Evidence[i]) + if err != nil { + return err + } + eviBzs[i] = evi + } + *evl = eviBzs + return nil +} + +// ToProto converts EvidenceList to protobuf +func (evl *EvidenceList) ToProto() (*tmproto.EvidenceList, error) { + if evl == nil { + return nil, errors.New("nil evidence list") + } + + eviBzs := make([]tmproto.Evidence, len(*evl)) + for i, v := range *evl { + protoEvi, err := EvidenceToProto(v) + if err != nil { + return nil, err + } + eviBzs[i] = *protoEvi + } + return &tmproto.EvidenceList{Evidence: eviBzs}, nil +} + +func (evl EvidenceList) MarshalJSON() ([]byte, error) { + lst := make([]json.RawMessage, len(evl)) + for i, ev := range evl { + bits, err := jsontypes.Marshal(ev) + if err != nil { + return nil, err + } + lst[i] = bits + } + return json.Marshal(lst) +} + +func (evl *EvidenceList) UnmarshalJSON(data []byte) error { + var lst []json.RawMessage + if err := json.Unmarshal(data, &lst); err != nil { + return err + } + out := make([]Evidence, len(lst)) + for i, elt := range lst { + if err := jsontypes.Unmarshal(elt, &out[i]); err != nil { + return err + } + } + *evl = EvidenceList(out) + return nil +} + // Hash returns the simple merkle root hash of the EvidenceList. func (evl EvidenceList) Hash() []byte { // These allocations are required because Evidence is not of type Bytes, and @@ -260,6 +375,16 @@ func (evl EvidenceList) Has(evidence Evidence) bool { return false } +// ToABCI converts the evidence list to a slice of the ABCI protobuf messages +// for use when communicating the evidence to an application. +func (evl EvidenceList) ToABCI() []abci.Misbehavior { + var el []abci.Misbehavior + for _, e := range evl { + el = append(el, e.ABCI()...) 
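// (ABCI() returns a slice because a single Evidence value may expand to
// several misbehavior records, so the per-item slices are flattened here.)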
+ } + return el +} + //------------------------------------------ PROTO -------------------------------------- // EvidenceToProto is a generalized function for encoding evidence that conforms to the @@ -299,7 +424,7 @@ func EvidenceFromProto(evidence *tmproto.Evidence) (Evidence, error) { } func init() { - tmjson.RegisterType(&DuplicateVoteEvidence{}, "tendermint/DuplicateVoteEvidence") + jsontypes.MustRegister((*DuplicateVoteEvidence)(nil)) } //-------------------------------------------- ERRORS -------------------------------------- @@ -342,6 +467,7 @@ func (err *ErrEvidenceOverflow) Error() string { // NewMockDuplicateVoteEvidence assumes the round to be 0 and the validator index to be 0 func NewMockDuplicateVoteEvidence( + ctx context.Context, height int64, time time.Time, chainID string, @@ -349,13 +475,14 @@ func NewMockDuplicateVoteEvidence( quorumHash crypto.QuorumHash, ) (*DuplicateVoteEvidence, error) { val := NewMockPVForQuorum(quorumHash) - return NewMockDuplicateVoteEvidenceWithValidator(height, time, val, chainID, quorumType, quorumHash) + return NewMockDuplicateVoteEvidenceWithValidator(ctx, height, time, val, chainID, quorumType, quorumHash) } // NewMockDuplicateVoteEvidenceWithValidator assumes voting power to be DefaultDashVotingPower and // validator to be the only one in the set // TODO: discuss if this might be moved to some *_test.go file func NewMockDuplicateVoteEvidenceWithValidator( + ctx context.Context, height int64, time time.Time, pv PrivValidator, @@ -363,14 +490,14 @@ func NewMockDuplicateVoteEvidenceWithValidator( quorumType btcjson.LLMQType, quorumHash crypto.QuorumHash, ) (*DuplicateVoteEvidence, error) { - pubKey, err := pv.GetPubKey(context.Background(), quorumHash) + pubKey, err := pv.GetPubKey(ctx, quorumHash) if err != nil { panic(err) } stateID := RandStateID().WithHeight(height - 1) - proTxHash, _ := pv.GetProTxHash(context.Background()) + proTxHash, _ := pv.GetProTxHash(ctx) val := NewValidator(pubKey, DefaultDashVotingPower, proTxHash, "") voteA := makeMockVote(height, 0, 0, proTxHash, randBlockID()) @@ -380,7 +507,7 @@ func NewMockDuplicateVoteEvidenceWithValidator( voteA.StateSignature = vA.StateSignature voteB := makeMockVote(height, 0, 0, proTxHash, randBlockID()) vB := voteB.ToProto() - _ = pv.SignVote(context.Background(), chainID, quorumType, quorumHash, vB, stateID, nil) + _ = pv.SignVote(ctx, chainID, quorumType, quorumHash, vB, stateID, nil) voteB.BlockSignature = vB.BlockSignature voteB.StateSignature = vB.StateSignature return NewDuplicateVoteEvidence( @@ -426,10 +553,10 @@ func makeMockVote(height int64, round, index int32, proTxHash crypto.ProTxHash, func randBlockID() BlockID { return BlockID{ - Hash: tmrand.Bytes(tmhash.Size), + Hash: tmrand.Bytes(crypto.HashSize), PartSetHeader: PartSetHeader{ Total: 1, - Hash: tmrand.Bytes(tmhash.Size), + Hash: tmrand.Bytes(crypto.HashSize), }, } } diff --git a/types/evidence_test.go b/types/evidence_test.go index 21d5440ff9..0a0bb75882 100644 --- a/types/evidence_test.go +++ b/types/evidence_test.go @@ -13,14 +13,16 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/bls12381" - "github.com/tendermint/tendermint/crypto/tmhash" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) var defaultVoteTime = time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC) func TestEvidenceList(t *testing.T) { - ev := randomDuplicateVoteEvidence(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ev := randomDuplicateVoteEvidence(ctx, t) 
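// a single duplicate-vote evidence is enough to exercise Hash and Has below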
evl := EvidenceList([]Evidence{ev}) assert.NotNil(t, evl.Hash()) @@ -28,7 +30,47 @@ func TestEvidenceList(t *testing.T) { assert.False(t, evl.Has(&DuplicateVoteEvidence{})) } -func randomDuplicateVoteEvidence(t *testing.T) *DuplicateVoteEvidence { +// TestEvidenceListProtoBuf to ensure parity in protobuf output and input +func TestEvidenceListProtoBuf(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + const chainID = "mychain" + ev, err := NewMockDuplicateVoteEvidence(ctx, math.MaxInt64, time.Now(), chainID, btcjson.LLMQType_5_60, crypto.RandQuorumHash()) + require.NoError(t, err) + data := EvidenceList{ev} + testCases := []struct { + msg string + data1 *EvidenceList + expPass1 bool + expPass2 bool + }{ + {"success", &data, true, true}, + {"empty evidenceData", &EvidenceList{}, true, true}, + {"fail nil Data", nil, false, false}, + } + + for _, tc := range testCases { + protoData, err := tc.data1.ToProto() + if tc.expPass1 { + require.NoError(t, err, tc.msg) + } else { + require.Error(t, err, tc.msg) + } + + eviD := new(EvidenceList) + err = eviD.FromProto(protoData) + if tc.expPass2 { + require.NoError(t, err, tc.msg) + require.Equal(t, tc.data1, eviD, tc.msg) + } else { + require.Error(t, err, tc.msg) + } + } +} + +func randomDuplicateVoteEvidence(ctx context.Context, t *testing.T) *DuplicateVoteEvidence { + t.Helper() quorumHash := crypto.RandQuorumHash() val := NewMockPVForQuorum(quorumHash) blockID := makeBlockID([]byte("blockhash"), 1000, []byte("partshash")) @@ -38,9 +80,8 @@ func randomDuplicateVoteEvidence(t *testing.T) *DuplicateVoteEvidence { const height = int64(10) stateID := RandStateID().WithHeight(height - 1) return &DuplicateVoteEvidence{ - - VoteA: makeVote(t, val, chainID, 0, height, 2, 1, quorumType, quorumHash, blockID, stateID), - VoteB: makeVote(t, val, chainID, 0, height, 2, 1, quorumType, quorumHash, blockID2, stateID), + VoteA: makeVote(ctx, t, val, chainID, 0, height, 2, 1, quorumType, quorumHash, blockID, stateID), + VoteB: makeVote(ctx, t, val, chainID, 0, height, 2, 1, quorumType, quorumHash, blockID2, stateID), TotalVotingPower: 3 * DefaultDashVotingPower, ValidatorPower: DefaultDashVotingPower, Timestamp: defaultVoteTime, @@ -49,10 +90,13 @@ func randomDuplicateVoteEvidence(t *testing.T) *DuplicateVoteEvidence { func TestDuplicateVoteEvidence(t *testing.T) { const height = int64(13) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + quorumType := btcjson.LLMQType_5_60 - ev, err := NewMockDuplicateVoteEvidence(height, time.Now(), "mock-chain-id", quorumType, crypto.RandQuorumHash()) - assert.NoError(t, err) - assert.Equal(t, ev.Hash(), tmhash.Sum(ev.Bytes())) + ev, err := NewMockDuplicateVoteEvidence(ctx, height, time.Now(), "mock-chain-id", quorumType, crypto.RandQuorumHash()) + require.NoError(t, err) + assert.Equal(t, ev.Hash(), crypto.Checksum(ev.Bytes())) assert.NotNil(t, ev.String()) assert.Equal(t, ev.Height(), height) } @@ -60,11 +104,14 @@ func TestDuplicateVoteEvidence(t *testing.T) { func TestDuplicateVoteEvidenceValidation(t *testing.T) { quorumHash := crypto.RandQuorumHash() val := NewMockPVForQuorum(quorumHash) - blockID := makeBlockID(tmhash.Sum([]byte("blockhash")), math.MaxInt32, tmhash.Sum([]byte("partshash"))) - blockID2 := makeBlockID(tmhash.Sum([]byte("blockhash2")), math.MaxInt32, tmhash.Sum([]byte("partshash"))) + blockID := makeBlockID(crypto.Checksum([]byte("blockhash")), math.MaxInt32, crypto.Checksum([]byte("partshash"))) + blockID2 := 
makeBlockID(crypto.Checksum([]byte("blockhash2")), math.MaxInt32, crypto.Checksum([]byte("partshash"))) quorumType := btcjson.LLMQType_5_60 const chainID = "mychain" + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + testCases := []struct { testName string malleateEvidence func(*DuplicateVoteEvidence) @@ -79,7 +126,7 @@ func TestDuplicateVoteEvidenceValidation(t *testing.T) { }, true}, {"Invalid vote type", func(ev *DuplicateVoteEvidence) { ev.VoteA = makeVote( - t, val, chainID, math.MaxInt32, math.MaxInt64, math.MaxInt32, 0, quorumType, + ctx, t, val, chainID, math.MaxInt32, math.MaxInt64, math.MaxInt32, 0, quorumType, quorumHash, blockID2, RandStateID().WithHeight(math.MaxInt64-1), ) }, true}, @@ -94,9 +141,9 @@ func TestDuplicateVoteEvidenceValidation(t *testing.T) { t.Run(tc.testName, func(t *testing.T) { const height int64 = math.MaxInt64 stateID := RandStateID().WithHeight(height - 1) - vote1 := makeVote(t, val, chainID, math.MaxInt32, height, math.MaxInt32, 0x02, quorumType, + vote1 := makeVote(ctx, t, val, chainID, math.MaxInt32, height, math.MaxInt32, 0x02, quorumType, quorumHash, blockID, stateID) - vote2 := makeVote(t, val, chainID, math.MaxInt32, height, math.MaxInt32, 0x02, quorumType, + vote2 := makeVote(ctx, t, val, chainID, math.MaxInt32, height, math.MaxInt32, 0x02, quorumType, quorumHash, blockID2, stateID) thresholdPublicKey, err := val.GetThresholdPublicKey(context.Background(), quorumHash) assert.NoError(t, err) @@ -111,18 +158,30 @@ func TestDuplicateVoteEvidenceValidation(t *testing.T) { } func TestMockEvidenceValidateBasic(t *testing.T) { - goodEvidence, err := NewMockDuplicateVoteEvidence(int64(1), time.Now(), "mock-chain-id", btcjson.LLMQType_5_60, + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + goodEvidence, err := NewMockDuplicateVoteEvidence(ctx, int64(1), time.Now(), "mock-chain-id", btcjson.LLMQType_5_60, crypto.RandQuorumHash()) assert.NoError(t, err) assert.Nil(t, goodEvidence.ValidateBasic()) } func makeVote( - t *testing.T, val PrivValidator, chainID string, - valIndex int32, height int64, round int32, step int, quorumType btcjson.LLMQType, - quorumHash crypto.QuorumHash, blockID BlockID, stateID StateID, + ctx context.Context, + t *testing.T, + val PrivValidator, + chainID string, + valIndex int32, + height int64, + round int32, + step int, + quorumType btcjson.LLMQType, + quorumHash crypto.QuorumHash, + blockID BlockID, + stateID StateID, ) *Vote { - proTxHash, err := val.GetProTxHash(context.Background()) + proTxHash, err := val.GetProTxHash(ctx) require.NoError(t, err) v := &Vote{ ValidatorProTxHash: proTxHash, @@ -134,28 +193,29 @@ func makeVote( } vpb := v.ToProto() - err = val.SignVote(context.Background(), chainID, quorumType, quorumHash, vpb, stateID, nil) - if err != nil { - panic(err) - } + err = val.SignVote(ctx, chainID, quorumType, quorumHash, vpb, stateID, nil) + require.NoError(t, err) v.BlockSignature = vpb.BlockSignature v.StateSignature = vpb.StateSignature return v } func TestEvidenceProto(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // -------- Votes -------- quorumHash := crypto.RandQuorumHash() val := NewMockPVForQuorum(quorumHash) - blockID := makeBlockID(tmhash.Sum([]byte("blockhash")), math.MaxInt32, tmhash.Sum([]byte("partshash"))) - blockID2 := makeBlockID(tmhash.Sum([]byte("blockhash2")), math.MaxInt32, tmhash.Sum([]byte("partshash"))) + blockID := makeBlockID(crypto.Checksum([]byte("blockhash")), math.MaxInt32, 
crypto.Checksum([]byte("partshash"))) + blockID2 := makeBlockID(crypto.Checksum([]byte("blockhash2")), math.MaxInt32, crypto.Checksum([]byte("partshash"))) quorumType := btcjson.LLMQType_5_60 const chainID = "mychain" var height int64 = math.MaxInt64 stateID := RandStateID().WithHeight(height - 1) - v := makeVote(t, val, chainID, math.MaxInt32, height, 1, 0x01, quorumType, quorumHash, blockID, stateID) - v2 := makeVote(t, val, chainID, math.MaxInt32, height, 2, 0x01, quorumType, quorumHash, blockID2, stateID) + v := makeVote(ctx, t, val, chainID, math.MaxInt32, height, 1, 0x01, quorumType, quorumHash, blockID, stateID) + v2 := makeVote(ctx, t, val, chainID, math.MaxInt32, height, 2, 0x01, quorumType, quorumHash, blockID2, stateID) tests := []struct { testName string @@ -190,22 +250,25 @@ func TestEvidenceProto(t *testing.T) { } func TestEvidenceVectors(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Votes for duplicateEvidence quorumType := btcjson.LLMQType_5_60 quorumHash := make([]byte, crypto.QuorumHashSize) - val := NewMockPVForQuorum(make([]byte, crypto.QuorumHashSize)) + val := NewMockPVForQuorum(quorumHash) val.ProTxHash = make([]byte, crypto.ProTxHashSize) key := bls12381.GenPrivKeyFromSecret([]byte("it's a secret")) // deterministic key val.UpdatePrivateKey(context.Background(), key, quorumHash, key.PubKey(), 10) - blockID := makeBlockID(tmhash.Sum([]byte("blockhash")), math.MaxInt32, tmhash.Sum([]byte("partshash"))) - blockID2 := makeBlockID(tmhash.Sum([]byte("blockhash2")), math.MaxInt32, tmhash.Sum([]byte("partshash"))) + blockID := makeBlockID(crypto.Checksum([]byte("blockhash")), math.MaxInt32, crypto.Checksum([]byte("partshash"))) + blockID2 := makeBlockID(crypto.Checksum([]byte("blockhash2")), math.MaxInt32, crypto.Checksum([]byte("partshash"))) const chainID = "mychain" stateID := StateID{ Height: 100, - LastAppHash: make([]byte, tmhash.Size), + LastAppHash: make([]byte, crypto.HashSize), } - v := makeVote(t, val, chainID, math.MaxInt32, math.MaxInt64, 1, 0x01, quorumType, quorumHash, blockID, stateID) - v2 := makeVote(t, val, chainID, math.MaxInt32, math.MaxInt64, 2, 0x01, quorumType, quorumHash, blockID2, stateID) + v := makeVote(ctx, t, val, chainID, math.MaxInt32, math.MaxInt64, 1, 0x01, quorumType, quorumHash, blockID, stateID) + v2 := makeVote(ctx, t, val, chainID, math.MaxInt32, math.MaxInt64, 2, 0x01, quorumType, quorumHash, blockID2, stateID) testCases := []struct { testName string diff --git a/types/genesis.go b/types/genesis.go index fbfdcba993..383fc82fe2 100644 --- a/types/genesis.go +++ b/types/genesis.go @@ -4,15 +4,15 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" + "os" "time" "github.com/dashevo/dashd-go/btcjson" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/bls12381" + "github.com/tendermint/tendermint/internal/jsontypes" tmbytes "github.com/tendermint/tendermint/libs/bytes" - tmjson "github.com/tendermint/tendermint/libs/json" tmtime "github.com/tendermint/tendermint/libs/time" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) @@ -30,36 +30,130 @@ const ( // GenesisValidator is an initial validator. 
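//
// PubKey is an interface, so the type carries custom JSON (un)marshalers
// below that route the key through jsontypes. A hedged sketch of the wire
// form, with the key material and hashes elided:
//
//	{"pub_key":{"type":"tendermint/PubKeyBLS12381","value":"..."},
//	 "power":100,"name":"","pro_tx_hash":"..."}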
type GenesisValidator struct { - PubKey crypto.PubKey `json:"pub_key"` + PubKey crypto.PubKey + Power int64 + Name string + ProTxHash crypto.ProTxHash +} + +type genesisValidatorJSON struct { + PubKey json.RawMessage `json:"pub_key"` Power int64 `json:"power"` Name string `json:"name"` ProTxHash crypto.ProTxHash `json:"pro_tx_hash"` } +func (g GenesisValidator) MarshalJSON() ([]byte, error) { + pk, err := jsontypes.Marshal(g.PubKey) + if err != nil { + return nil, err + } + return json.Marshal(genesisValidatorJSON{ + ProTxHash: g.ProTxHash, PubKey: pk, Power: g.Power, Name: g.Name, + }) +} + +func (g *GenesisValidator) UnmarshalJSON(data []byte) error { + var gv genesisValidatorJSON + if err := json.Unmarshal(data, &gv); err != nil { + return err + } + if err := jsontypes.Unmarshal(gv.PubKey, &g.PubKey); err != nil { + return err + } + g.Power = gv.Power + g.Name = gv.Name + g.ProTxHash = gv.ProTxHash + return nil +} + // GenesisDoc defines the initial conditions for a tendermint blockchain, in particular its validator set. type GenesisDoc struct { - GenesisTime time.Time `json:"genesis_time"` - ChainID string `json:"chain_id"` - InitialHeight int64 `json:"initial_height"` + GenesisTime time.Time `json:"genesis_time"` + ChainID string `json:"chain_id"` + InitialHeight int64 `json:"initial_height,string"` + ConsensusParams *ConsensusParams `json:"consensus_params,omitempty"` + Validators []GenesisValidator `json:"validators,omitempty"` + AppHash tmbytes.HexBytes `json:"app_hash"` + AppState json.RawMessage `json:"app_state,omitempty"` + + // dash fields InitialCoreChainLockedHeight uint32 `json:"initial_core_chain_locked_height"` InitialProposalCoreChainLock *tmproto.CoreChainLock `json:"initial_proposal_core_chain_lock"` - ConsensusParams *ConsensusParams `json:"consensus_params,omitempty"` - Validators []GenesisValidator `json:"validators,omitempty"` ThresholdPublicKey crypto.PubKey `json:"threshold_public_key"` QuorumType btcjson.LLMQType `json:"quorum_type"` QuorumHash crypto.QuorumHash `json:"quorum_hash"` - AppHash tmbytes.HexBytes `json:"app_hash"` - AppState json.RawMessage `json:"app_state,omitempty"` +} + +type genesisDocJSON struct { + GenesisTime time.Time `json:"genesis_time"` + ChainID string `json:"chain_id"` + InitialHeight int64 `json:"initial_height,string"` + ConsensusParams *ConsensusParams `json:"consensus_params,omitempty"` + Validators []GenesisValidator `json:"validators,omitempty"` + AppHash tmbytes.HexBytes `json:"app_hash"` + AppState json.RawMessage `json:"app_state,omitempty"` + + // dash fields + InitialCoreChainLockedHeight uint32 `json:"initial_core_chain_locked_height"` + InitialProposalCoreChainLock *tmproto.CoreChainLock `json:"initial_proposal_core_chain_lock"` + ThresholdPublicKey json.RawMessage `json:"threshold_public_key"` + QuorumType btcjson.LLMQType `json:"quorum_type"` + QuorumHash crypto.QuorumHash `json:"quorum_hash"` +} + +func (genDoc GenesisDoc) MarshalJSON() ([]byte, error) { + tpk, err := jsontypes.Marshal(genDoc.ThresholdPublicKey) + if err != nil { + return nil, err + } + return json.Marshal(genesisDocJSON{ + GenesisTime: genDoc.GenesisTime, + ChainID: genDoc.ChainID, + InitialHeight: genDoc.InitialHeight, + ConsensusParams: genDoc.ConsensusParams, + Validators: genDoc.Validators, + AppHash: genDoc.AppHash, + AppState: genDoc.AppState, + + InitialCoreChainLockedHeight: genDoc.InitialCoreChainLockedHeight, + InitialProposalCoreChainLock: genDoc.InitialProposalCoreChainLock, + ThresholdPublicKey: tpk, + QuorumType: genDoc.QuorumType, + 
QuorumHash: genDoc.QuorumHash, + }) +} + +func (genDoc *GenesisDoc) UnmarshalJSON(data []byte) error { + var g genesisDocJSON + if err := json.Unmarshal(data, &g); err != nil { + return err + } + if err := jsontypes.Unmarshal(g.ThresholdPublicKey, &genDoc.ThresholdPublicKey); err != nil { + return err + } + genDoc.GenesisTime = g.GenesisTime + genDoc.ChainID = g.ChainID + genDoc.InitialHeight = g.InitialHeight + genDoc.ConsensusParams = g.ConsensusParams + genDoc.Validators = g.Validators + genDoc.AppHash = g.AppHash + genDoc.AppState = g.AppState + genDoc.InitialCoreChainLockedHeight = g.InitialCoreChainLockedHeight + genDoc.InitialProposalCoreChainLock = g.InitialProposalCoreChainLock + genDoc.QuorumType = g.QuorumType + genDoc.QuorumHash = g.QuorumHash + return nil } // SaveAs is a utility method for saving GenensisDoc as a JSON file. func (genDoc *GenesisDoc) SaveAs(file string) error { - genDocBytes, err := tmjson.MarshalIndent(genDoc, "", " ") + genDocBytes, err := json.MarshalIndent(genDoc, "", " ") if err != nil { return err } - return ioutil.WriteFile(file, genDocBytes, 0644) // nolint:gosec + return os.WriteFile(file, genDocBytes, 0644) // nolint:gosec } // ValidatorHash returns the hash of the validator set contained in the GenesisDoc @@ -99,10 +193,15 @@ func (genDoc *GenesisDoc) ValidateAndComplete() error { if genDoc.ConsensusParams == nil { genDoc.ConsensusParams = DefaultConsensusParams() - } else if err := genDoc.ConsensusParams.ValidateConsensusParams(); err != nil { + } + genDoc.ConsensusParams.Complete() + + if err := genDoc.ConsensusParams.ValidateConsensusParams(); err != nil { return err } + lenVals := len(genDoc.Validators) + for _, v := range genDoc.Validators { if v.Power == 0 { return fmt.Errorf("the genesis file cannot contain validators with no voting power: %v", v) @@ -112,14 +211,14 @@ func (genDoc *GenesisDoc) ValidateAndComplete() error { } } - if genDoc.Validators != nil && genDoc.ThresholdPublicKey == nil { + if lenVals > 0 && genDoc.ThresholdPublicKey == nil { return fmt.Errorf("the threshold public key must be set if there are validators (%d Validator(s))", len(genDoc.Validators)) } - if genDoc.Validators != nil && len(genDoc.ThresholdPublicKey.Bytes()) != bls12381.PubKeySize { + if lenVals > 0 && len(genDoc.ThresholdPublicKey.Bytes()) != bls12381.PubKeySize { return fmt.Errorf("the threshold public key must be 48 bytes for BLS") } - if genDoc.Validators != nil && len(genDoc.QuorumHash.Bytes()) < crypto.SmallAppHashSize { + if lenVals > 0 && len(genDoc.QuorumHash.Bytes()) < crypto.SmallAppHashSize { return fmt.Errorf("the quorum hash must be at least %d bytes long (%d Validator(s))", crypto.SmallAppHashSize, len(genDoc.Validators)) @@ -142,7 +241,7 @@ func (genDoc *GenesisDoc) ValidateAndComplete() error { // GenesisDocFromJSON unmarshalls JSON data into a GenesisDoc. func GenesisDocFromJSON(jsonBlob []byte) (*GenesisDoc, error) { genDoc := GenesisDoc{} - err := tmjson.Unmarshal(jsonBlob, &genDoc) + err := json.Unmarshal(jsonBlob, &genDoc) if err != nil { return nil, err } @@ -156,7 +255,7 @@ func GenesisDocFromJSON(jsonBlob []byte) (*GenesisDoc, error) { // GenesisDocFromFile reads JSON data from a file and unmarshalls it into a GenesisDoc. 
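//
// A minimal usage sketch (the path is hypothetical):
//
//	genDoc, err := GenesisDocFromFile("config/genesis.json")
//	if err != nil {
//		// handle err
//	}
//	fmt.Println(genDoc.ChainID)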
func GenesisDocFromFile(genDocFile string) (*GenesisDoc, error) { - jsonBlob, err := ioutil.ReadFile(genDocFile) + jsonBlob, err := os.ReadFile(genDocFile) if err != nil { return nil, fmt.Errorf("couldn't read GenesisDoc file: %w", err) } diff --git a/types/genesis_test.go b/types/genesis_test.go index 234f0043c8..999dd7bca6 100644 --- a/types/genesis_test.go +++ b/types/genesis_test.go @@ -2,17 +2,15 @@ package types import ( - "io/ioutil" + "encoding/json" "os" "testing" - "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/crypto/bls12381" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - tmjson "github.com/tendermint/tendermint/libs/json" + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto/bls12381" tmtime "github.com/tendermint/tendermint/libs/time" ) @@ -95,7 +93,7 @@ func TestGenesisBad(t *testing.T) { } } -func TestGenesisGood(t *testing.T) { +func TestBasicGenesisDoc(t *testing.T) { // test a good one by raw json genDocBytes := []byte( `{ @@ -106,7 +104,7 @@ func TestGenesisGood(t *testing.T) { "consensus_params": null, "validators": [{ "pub_key":{"type": "tendermint/PubKeyBLS12381","value": "F5BjXeh0DppqaxX7a3LzoWr6CXPZcZeba6VHYdbiUCxQ23b00mFD8FRZpCz9Ug1E"}, - "power":"100", + "power":100, "name":"", "pro_tx_hash":"51BF39CC1F41B9FC63DFA5B1EDF3F0CA3AD5CAFAE4B12B4FE9263B08BB50C45F" }], @@ -116,7 +114,21 @@ func TestGenesisGood(t *testing.T) { }, "quorum_hash":"43FF39CC1F41B9FC63DFA5B1EDF3F0CA3AD5CAFAE4B12B4FE9263B08BB50C4CC", "app_hash":"", - "app_state":{"account_owner": "Bob"} + "app_state":{"account_owner": "Bob"}, + "consensus_params": { + "synchrony": {"precision": "1", "message_delay": "10"}, + "timeout": { + "propose": "30000000000", + "propose_delta": "50000000", + "vote": "30000000000", + "vote_delta": "50000000", + "commit": "10000000000", + "bypass_commit_timeout": false + }, + "validator": {"pub_key_types":["ed25519"]}, + "block": {"max_bytes": "100"}, + "evidence": {"max_age_num_blocks": "100", "max_age_duration": "10"} + } }`, ) _, err := GenesisDocFromJSON(genDocBytes) @@ -130,7 +142,7 @@ func TestGenesisGood(t *testing.T) { ThresholdPublicKey: pubkey, QuorumHash: crypto.RandQuorumHash(), } - genDocBytes, err = tmjson.Marshal(baseGenDoc) + genDocBytes, err = json.Marshal(baseGenDoc) assert.NoError(t, err, "error marshaling genDoc") // test base gendoc and check consensus params were filled @@ -142,14 +154,14 @@ func TestGenesisGood(t *testing.T) { assert.NotNil(t, genDoc.Validators[0].ProTxHash, "expected validator's proTxHash to be filled in") // create json with consensus params filled - genDocBytes, err = tmjson.Marshal(genDoc) + genDocBytes, err = json.Marshal(genDoc) assert.NoError(t, err, "error marshaling genDoc") genDoc, err = GenesisDocFromJSON(genDocBytes) - assert.NoError(t, err, "expected no error for valid genDoc json") + require.NoError(t, err, "expected no error for valid genDoc json") // test with invalid consensus params genDoc.ConsensusParams.Block.MaxBytes = 0 - genDocBytes, err = tmjson.Marshal(genDoc) + genDocBytes, err = json.Marshal(genDoc) assert.NoError(t, err, "error marshaling genDoc") _, err = GenesisDocFromJSON(genDocBytes) assert.Error(t, err, "expected error for genDoc json with block size of 0") @@ -159,7 +171,6 @@ func TestGenesisGood(t *testing.T) { []byte(`{"chain_id":"mychain"}`), // missing validators []byte(`{"chain_id":"mychain","validators":[]}`), // missing validators []byte(`{"chain_id":"mychain","validators":null}`), // nil validator - 
[]byte(`{"chain_id":"mychain"}`), // missing validators } for _, tc := range missingValidatorsTestCases { @@ -169,7 +180,7 @@ func TestGenesisGood(t *testing.T) { } func TestGenesisSaveAs(t *testing.T) { - tmpfile, err := ioutil.TempFile("", "genesis") + tmpfile, err := os.CreateTemp(t.TempDir(), "genesis") require.NoError(t, err) defer os.Remove(tmpfile.Name()) diff --git a/types/keys.go b/types/keys.go deleted file mode 100644 index 941e82b65b..0000000000 --- a/types/keys.go +++ /dev/null @@ -1,6 +0,0 @@ -package types - -// UNSTABLE -var ( - PeerStateKey = "ConsensusReactor.peerState" -) diff --git a/types/light.go b/types/light.go index d70ed88094..ab36e7012a 100644 --- a/types/light.go +++ b/types/light.go @@ -4,10 +4,26 @@ import ( "bytes" "errors" "fmt" + "time" + tbytes "github.com/tendermint/tendermint/libs/bytes" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) +// Info about the status of the light client +type LightClientInfo struct { + PrimaryID string `json:"primaryID"` + WitnessesID []string `json:"witnessesID"` + NumPeers int `json:"number_of_peers,string"` + LastTrustedHeight int64 `json:"last_trusted_height,string"` + LastTrustedHash tbytes.HexBytes `json:"last_trusted_hash"` + LatestBlockTime time.Time `json:"latest_block_time"` + TrustingPeriod string `json:"trusting_period"` + // Boolean that reflects whether LatestBlockTime + trusting period is before + // time.Now() (time when /status is called) + TrustedBlockExpired bool `json:"trusted_block_expired"` +} + // LightBlock is a SignedHeader and a ValidatorSet. // It is the basis of the light client type LightBlock struct { diff --git a/types/light_test.go b/types/light_test.go index 55a0c0497e..8ea12e7279 100644 --- a/types/light_test.go +++ b/types/light_test.go @@ -1,6 +1,7 @@ package types import ( + "context" "math" "reflect" "testing" @@ -12,9 +13,12 @@ import ( ) func TestLightBlockValidateBasic(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + header := MakeRandHeader() stateID := RandStateID() - commit := randCommit(stateID) + commit := randCommit(ctx, t, stateID) vals, _ := RandValidatorSet(5) header.Height = commit.Height header.LastBlockID = commit.BlockID @@ -39,7 +43,7 @@ func TestLightBlockValidateBasic(t *testing.T) { {"valid light block", sh, vals, false}, {"hashes don't match", sh, vals2, true}, {"invalid validator set", sh, vals3, true}, - {"invalid signed header", &SignedHeader{Header: &header, Commit: randCommit(stateID)}, vals, true}, + {"invalid signed header", &SignedHeader{Header: &header, Commit: randCommit(ctx, t, stateID)}, vals, true}, } for _, tc := range testCases { @@ -58,8 +62,10 @@ func TestLightBlockValidateBasic(t *testing.T) { } func TestLightBlockProtobuf(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() header := MakeRandHeader() - commit := randCommit(RandStateID()) + commit := randCommit(ctx, t, RandStateID()) vals, _ := RandValidatorSet(5) header.Height = commit.Height header.LastBlockID = commit.BlockID @@ -112,7 +118,11 @@ func TestLightBlockProtobuf(t *testing.T) { } func TestSignedHeaderValidateBasic(t *testing.T) { - commit := randCommit(RandStateID()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + commit := randCommit(ctx, t, RandStateID()) + chainID := "𠜎" timestamp := time.Date(math.MaxInt64, 0, 0, 0, 0, 0, math.MaxInt64, time.UTC) h := Header{ @@ -167,6 +177,8 @@ func TestSignedHeaderValidateBasic(t *testing.T) { } func 
TestLightBlock_StateID(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() tests := []struct { name string @@ -176,19 +188,19 @@ func TestLightBlock_StateID(t *testing.T) { }{ { "State ID OK", - randCommit(StateID{12, []byte("12345678901234567890123456789012")}), + randCommit(ctx, t, StateID{12, []byte("12345678901234567890123456789012")}), StateID{12, []byte("12345678901234567890123456789012")}, false, }, { "Short app hash", - randCommit(StateID{12, []byte("12345678901234567890")}), + randCommit(ctx, t, StateID{12, []byte("12345678901234567890")}), StateID{12, []byte("12345678901234567890")}, false, }, { "Nil app hash", - randCommit(StateID{12, nil}), + randCommit(ctx, t, StateID{12, nil}), StateID{12, []byte{}}, false, }, diff --git a/types/netaddress.go b/types/netaddress.go index bc074dca6e..386601fd7f 100644 --- a/types/netaddress.go +++ b/types/netaddress.go @@ -10,7 +10,6 @@ import ( "fmt" "net" "strconv" - "strings" "time" ) @@ -62,58 +61,6 @@ func NewNetAddressIPPort(ip net.IP, port uint16) *NetAddress { } } -// NewNetAddressString returns a new NetAddress using the provided address in -// the form of "ID@IP:Port". -// Also resolves the host if host is not an IP. -// Errors are of type ErrNetAddressXxx where Xxx is in (NoID, Invalid, Lookup) -func NewNetAddressString(addr string) (*NetAddress, error) { - addrWithoutProtocol := removeProtocolIfDefined(addr) - spl := strings.Split(addrWithoutProtocol, "@") - if len(spl) != 2 { - return nil, ErrNetAddressNoID{addr} - } - - id, err := NewNodeID(spl[0]) - if err != nil { - return nil, ErrNetAddressInvalid{addrWithoutProtocol, err} - } - - if err := id.Validate(); err != nil { - return nil, ErrNetAddressInvalid{addrWithoutProtocol, err} - } - - addrWithoutProtocol = spl[1] - - // get host and port - host, portStr, err := net.SplitHostPort(addrWithoutProtocol) - if err != nil { - return nil, ErrNetAddressInvalid{addrWithoutProtocol, err} - } - if len(host) == 0 { - return nil, ErrNetAddressInvalid{ - addrWithoutProtocol, - errors.New("host is empty")} - } - - ip := net.ParseIP(host) - if ip == nil { - ips, err := net.LookupIP(host) - if err != nil { - return nil, ErrNetAddressLookup{host, err} - } - ip = ips[0] - } - - port, err := strconv.ParseUint(portStr, 10, 16) - if err != nil { - return nil, ErrNetAddressInvalid{portStr, err} - } - - na := NewNetAddressIPPort(ip, uint16(port)) - na.ID = id - return na, nil -} - // Equals reports whether na and other are the same addresses, // including their ID, IP, and Port. func (na *NetAddress) Equals(other interface{}) bool { @@ -313,14 +260,6 @@ func (na *NetAddress) RFC6052() bool { return rfc6052.Contains(na.IP) } func (na *NetAddress) RFC6145() bool { return rfc6145.Contains(na.IP) } func (na *NetAddress) OnionCatTor() bool { return onionCatNet.Contains(na.IP) } -func removeProtocolIfDefined(addr string) string { - if strings.Contains(addr, "://") { - return strings.Split(addr, "://")[1] - } - return addr - -} - // ipNet returns a net.IPNet struct given the passed IP address string, number // of one bits to include at the start of the mask, and the total number of bits // for the mask. 
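
The parser removed here is not gone: it reappears below in types/node_info.go as `ParseAddressString`. For orientation, a simplified standalone sketch of the same `ID@host:port` handling, with a hex-length check standing in for `NewNodeID` and no DNS fallback:

```go
package main

import (
	"encoding/hex"
	"errors"
	"fmt"
	"net"
	"strconv"
	"strings"
)

// parseAddress is a simplified stand-in for ParseAddressString: strip an
// optional protocol prefix, split off the node ID, validate host and port.
func parseAddress(addr string) (id string, ip net.IP, port uint16, err error) {
	if i := strings.Index(addr, "://"); i >= 0 {
		addr = addr[i+3:] // removeProtocolIfDefined
	}
	parts := strings.Split(addr, "@")
	if len(parts) != 2 {
		return "", nil, 0, errors.New("invalid address: missing node ID")
	}
	id = parts[0]
	if _, err := hex.DecodeString(id); err != nil || len(id) != 40 {
		return "", nil, 0, errors.New("invalid node ID")
	}
	host, portStr, err := net.SplitHostPort(parts[1])
	if err != nil {
		return "", nil, 0, err
	}
	ip = net.ParseIP(host)
	if ip == nil {
		return "", nil, 0, errors.New("host is not an IP (the real code does a DNS lookup here)")
	}
	p, err := strconv.ParseUint(portStr, 10, 16)
	if err != nil {
		return "", nil, 0, err
	}
	return id, ip, uint16(p), nil
}

func main() {
	id, ip, port, err := parseAddress("tcp://deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080")
	fmt.Println(id, ip, port, err) // deadbeef... 127.0.0.1 8080 <nil>
}
```
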
diff --git a/types/netaddress_test.go b/types/netaddress_test.go index 393d70e0b2..6cf2a302a1 100644 --- a/types/netaddress_test.go +++ b/types/netaddress_test.go @@ -47,83 +47,6 @@ func TestNewNetAddress(t *testing.T) { }, "Calling NewNetAddress with UDPAddr should not panic in testing") } -func TestNewNetAddressString(t *testing.T) { - testCases := []struct { - name string - addr string - expected string - correct bool - }{ - {"no node id and no protocol", "127.0.0.1:8080", "", false}, - {"no node id w/ tcp input", "tcp://127.0.0.1:8080", "", false}, - {"no node id w/ udp input", "udp://127.0.0.1:8080", "", false}, - - { - "no protocol", - "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", - "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", - true, - }, - { - "tcp input", - "tcp://deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", - "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", - true, - }, - { - "udp input", - "udp://deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", - "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", - true, - }, - {"malformed tcp input", "tcp//deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, - {"malformed udp input", "udp//deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, - - // {"127.0.0:8080", false}, - {"invalid host", "notahost", "", false}, - {"invalid port", "127.0.0.1:notapath", "", false}, - {"invalid host w/ port", "notahost:8080", "", false}, - {"just a port", "8082", "", false}, - {"non-existent port", "127.0.0:8080000", "", false}, - - {"too short nodeId", "deadbeef@127.0.0.1:8080", "", false}, - {"too short, not hex nodeId", "this-isnot-hex@127.0.0.1:8080", "", false}, - {"not hex nodeId", "xxxxbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, - - {"too short nodeId w/tcp", "tcp://deadbeef@127.0.0.1:8080", "", false}, - {"too short notHex nodeId w/tcp", "tcp://this-isnot-hex@127.0.0.1:8080", "", false}, - {"notHex nodeId w/tcp", "tcp://xxxxbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, - { - "correct nodeId w/tcp", - "tcp://deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", - "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", - true, - }, - - {"no node id", "tcp://@127.0.0.1:8080", "", false}, - {"no node id or IP", "tcp://@", "", false}, - {"tcp no host, w/ port", "tcp://:26656", "", false}, - {"empty", "", "", false}, - {"node id delimiter 1", "@", "", false}, - {"node id delimiter 2", " @", "", false}, - {"node id delimiter 3", " @ ", "", false}, - } - - for _, tc := range testCases { - tc := tc - t.Run(tc.name, func(t *testing.T) { - addr, err := NewNetAddressString(tc.addr) - if tc.correct { - if assert.Nil(t, err, tc.addr) { - assert.Equal(t, tc.expected, addr.String()) - } - } else { - assert.NotNil(t, err, tc.addr) - } - }) - } -} - func TestNewNetAddressIPPort(t *testing.T) { addr := NewNetAddressIPPort(net.ParseIP("127.0.0.1"), 8080) assert.Equal(t, "127.0.0.1:8080", addr.String()) @@ -142,7 +65,7 @@ func TestNetAddressProperties(t *testing.T) { } for _, tc := range testCases { - addr, err := NewNetAddressString(tc.addr) + addr, err := ParseAddressString(tc.addr) require.Nil(t, err) err = addr.Valid() @@ -172,10 +95,10 @@ func TestNetAddressReachabilityTo(t *testing.T) { } for _, tc := range testCases { - addr, err := NewNetAddressString(tc.addr) + addr, err := ParseAddressString(tc.addr) require.Nil(t, err) - other, err := NewNetAddressString(tc.other) + other, err := ParseAddressString(tc.other) 
require.Nil(t, err) assert.Equal(t, tc.reachability, addr.ReachabilityTo(other)) diff --git a/types/node_id.go b/types/node_id.go index c260aa1172..a5db401598 100644 --- a/types/node_id.go +++ b/types/node_id.go @@ -31,8 +31,7 @@ func NewNodeID(nodeID string) (NodeID, error) { // AddressString returns id@hostPort. It strips the leading // protocol from protocolHostPort if it exists. func (id NodeID) AddressString(protocolHostPort string) string { - hostPort := removeProtocolIfDefined(protocolHostPort) - return fmt.Sprintf("%s@%s", id, hostPort) + return fmt.Sprintf("%s@%s", id, removeProtocolIfDefined(protocolHostPort)) } // NodeIDFromPubKey creates a node ID from a given PubKey address. diff --git a/types/node_info.go b/types/node_info.go index 7e1616e6ce..77e595b15d 100644 --- a/types/node_info.go +++ b/types/node_info.go @@ -3,6 +3,9 @@ package types import ( "errors" "fmt" + "net" + "strconv" + "strings" "github.com/tendermint/tendermint/crypto" @@ -23,9 +26,9 @@ func MaxNodeInfoSize() int { // ProtocolVersion contains the protocol versions for the software. type ProtocolVersion struct { - P2P uint64 `json:"p2p"` - Block uint64 `json:"block"` - App uint64 `json:"app"` + P2P uint64 `json:"p2p,string"` + Block uint64 `json:"block,string"` + App uint64 `json:"app,string"` } //------------------------------------------------------------- @@ -83,22 +86,15 @@ func (info NodeInfo) GetProTxHash() crypto.ProTxHash { // url-encoding), and we just need to be careful with how we handle that in our // clients. (e.g. off by default). func (info NodeInfo) Validate() error { - - // ID is already validated. - - // Validate ListenAddr. - _, err := NewNetAddressString(info.ID().AddressString(info.ListenAddr)) - if err != nil { + if _, err := ParseAddressString(info.ID().AddressString(info.ListenAddr)); err != nil { return err } - // Network is validated in CompatibleWith. - // Validate Version - if len(info.Version) > 0 && - (!tmstrings.IsASCIIText(info.Version) || tmstrings.ASCIITrim(info.Version) == "") { - - return fmt.Errorf("info.Version must be valid ASCII text without tabs, but got %v", info.Version) + if len(info.Version) > 0 { + if ver, err := tmstrings.ASCIITrim(info.Version); err != nil || ver == "" { + return fmt.Errorf("info.Version must be valid ASCII text without tabs, but got %q [%s]", info.Version, ver) + } } // Validate Channels - ensure max and check for duplicates. @@ -114,8 +110,7 @@ func (info NodeInfo) Validate() error { channels[ch] = struct{}{} } - // Validate Moniker. - if !tmstrings.IsASCIIText(info.Moniker) || tmstrings.ASCIITrim(info.Moniker) == "" { + if m, err := tmstrings.ASCIITrim(info.Moniker); err != nil || m == "" { return fmt.Errorf("info.Moniker must be valid non-empty ASCII text without tabs, but got %v", info.Moniker) } @@ -129,8 +124,10 @@ func (info NodeInfo) Validate() error { } // XXX: Should we be more strict about address formats? rpcAddr := other.RPCAddress - if len(rpcAddr) > 0 && (!tmstrings.IsASCIIText(rpcAddr) || tmstrings.ASCIITrim(rpcAddr) == "") { - return fmt.Errorf("info.Other.RPCAddress=%v must be valid ASCII text without tabs", rpcAddr) + if len(rpcAddr) > 0 { + if a, err := tmstrings.ASCIITrim(rpcAddr); err != nil || a == "" { + return fmt.Errorf("info.Other.RPCAddress=%v must be valid ASCII text without tabs", rpcAddr) + } } return nil @@ -172,15 +169,6 @@ OUTER_LOOP: return nil } -// NetAddress returns a NetAddress derived from the NodeInfo - -// it includes the authenticated peer ID and the self-reported -// ListenAddr.
Note that the ListenAddr is not authenticated and -// may not match that address actually dialed if its an outbound peer. -func (info NodeInfo) NetAddress() (*NetAddress, error) { - idAddr := info.ID().AddressString(info.ListenAddr) - return NewNetAddressString(idAddr) -} - // AddChannel is used by the router when a channel is opened to add it to the node info func (info *NodeInfo) AddChannel(channel uint16) { // check that the channel doesn't already exist @@ -255,3 +243,61 @@ func NodeInfoFromProto(pb *tmp2p.NodeInfo) (NodeInfo, error) { } return dni, nil } + +// ParseAddressString reads an address string, and returns the NetAddress struct +// with ip address, port and nodeID information, returning an error for any validation +// errors. +func ParseAddressString(addr string) (*NetAddress, error) { + addrWithoutProtocol := removeProtocolIfDefined(addr) + spl := strings.Split(addrWithoutProtocol, "@") + if len(spl) != 2 { + return nil, errors.New("invalid address") + } + + id, err := NewNodeID(spl[0]) + if err != nil { + return nil, err + } + + if err := id.Validate(); err != nil { + return nil, err + } + + addrWithoutProtocol = spl[1] + + // get host and port + host, portStr, err := net.SplitHostPort(addrWithoutProtocol) + if err != nil { + return nil, err + } + if len(host) == 0 { + return nil, errors.New("host is empty") + } + + ip := net.ParseIP(host) + if ip == nil { + ips, err := net.LookupIP(host) + if err != nil { + return nil, err + } + ip = ips[0] + } + + port, err := strconv.ParseUint(portStr, 10, 16) + if err != nil { + return nil, err + } + + na := NewNetAddressIPPort(ip, uint16(port)) + na.ID = id + + return na, nil +} + +func removeProtocolIfDefined(addr string) string { + if strings.Contains(addr, "://") { + return strings.Split(addr, "://")[1] + } + return addr + +} diff --git a/types/node_info_test.go b/types/node_info_test.go index 253117b24f..f769d0255b 100644 --- a/types/node_info_test.go +++ b/types/node_info_test.go @@ -6,6 +6,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto/ed25519" tmnet "github.com/tendermint/tendermint/libs/net" "github.com/tendermint/tendermint/version" @@ -75,20 +76,23 @@ func TestNodeInfoValidate(t *testing.T) { name := "testing" // test case passes - ni = testNodeInfo(nodeKeyID, name) + ni = testNodeInfo(t, nodeKeyID, name) ni.Channels = channels assert.NoError(t, ni.Validate()) for _, tc := range testCases { - ni := testNodeInfo(nodeKeyID, name) - ni.Channels = channels - tc.malleateNodeInfo(&ni) - err := ni.Validate() - if tc.expectErr { - assert.Error(t, err, tc.testName) - } else { - assert.NoError(t, err, tc.testName) - } + t.Run(tc.testName, func(t *testing.T) { + ni := testNodeInfo(t, nodeKeyID, name) + ni.Channels = channels + tc.malleateNodeInfo(&ni) + err := ni.Validate() + if tc.expectErr { + assert.Error(t, err, tc.testName) + } else { + assert.NoError(t, err, tc.testName) + } + }) + } } @@ -97,11 +101,12 @@ func testNodeID() NodeID { return NodeIDFromPubKey(ed25519.GenPrivKey().PubKey()) } -func testNodeInfo(id NodeID, name string) NodeInfo { - return testNodeInfoWithNetwork(id, name, "testing") +func testNodeInfo(t *testing.T, id NodeID, name string) NodeInfo { + return testNodeInfoWithNetwork(t, id, name, "testing") } -func testNodeInfoWithNetwork(id NodeID, name, network string) NodeInfo { +func testNodeInfoWithNetwork(t *testing.T, id NodeID, name, network string) NodeInfo { + t.Helper() return NodeInfo{ ProtocolVersion: ProtocolVersion{ P2P: version.P2PProtocol, @@
-109,23 +114,22 @@ func testNodeInfoWithNetwork(id NodeID, name, network string) NodeInfo { App: 0, }, NodeID: id, - ListenAddr: fmt.Sprintf("127.0.0.1:%d", getFreePort()), + ListenAddr: fmt.Sprintf("127.0.0.1:%d", getFreePort(t)), Network: network, Version: "1.2.3-rc0-deadbeef", Channels: []byte{testCh}, Moniker: name, Other: NodeInfoOther{ TxIndex: "on", - RPCAddress: fmt.Sprintf("127.0.0.1:%d", getFreePort()), + RPCAddress: fmt.Sprintf("127.0.0.1:%d", getFreePort(t)), }, } } -func getFreePort() int { +func getFreePort(t *testing.T) int { + t.Helper() port, err := tmnet.GetFreePort() - if err != nil { - panic(err) - } + require.NoError(t, err) return port } @@ -137,8 +141,8 @@ func TestNodeInfoCompatible(t *testing.T) { var newTestChannel byte = 0x2 // test NodeInfo is compatible - ni1 := testNodeInfo(nodeKey1ID, name) - ni2 := testNodeInfo(nodeKey2ID, name) + ni1 := testNodeInfo(t, nodeKey1ID, name) + ni2 := testNodeInfo(t, nodeKey2ID, name) assert.NoError(t, ni1.CompatibleWith(ni2)) // add another channel; still compatible @@ -155,14 +159,14 @@ func TestNodeInfoCompatible(t *testing.T) { } for _, tc := range testCases { - ni := testNodeInfo(nodeKey2ID, name) + ni := testNodeInfo(t, nodeKey2ID, name) tc.malleateNodeInfo(&ni) assert.Error(t, ni1.CompatibleWith(ni)) } } func TestNodeInfoAddChannel(t *testing.T) { - nodeInfo := testNodeInfo(testNodeID(), "testing") + nodeInfo := testNodeInfo(t, testNodeID(), "testing") nodeInfo.Channels = []byte{} require.Empty(t, nodeInfo.Channels) @@ -173,3 +177,80 @@ func TestNodeInfoAddChannel(t *testing.T) { nodeInfo.AddChannel(2) require.Contains(t, nodeInfo.Channels, byte(0x02)) } + +func TestParseAddressString(t *testing.T) { + testCases := []struct { + name string + addr string + expected string + correct bool + }{ + {"no node id and no protocol", "127.0.0.1:8080", "", false}, + {"no node id w/ tcp input", "tcp://127.0.0.1:8080", "", false}, + {"no node id w/ udp input", "udp://127.0.0.1:8080", "", false}, + + { + "no protocol", + "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", + "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", + true, + }, + { + "tcp input", + "tcp://deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", + "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", + true, + }, + { + "udp input", + "udp://deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", + "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", + true, + }, + {"malformed tcp input", "tcp//deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, + {"malformed udp input", "udp//deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, + + // {"127.0.0:8080", false}, + {"invalid host", "notahost", "", false}, + {"invalid port", "127.0.0.1:notapath", "", false}, + {"invalid host w/ port", "notahost:8080", "", false}, + {"just a port", "8082", "", false}, + {"non-existent port", "127.0.0:8080000", "", false}, + + {"too short nodeId", "deadbeef@127.0.0.1:8080", "", false}, + {"too short, not hex nodeId", "this-isnot-hex@127.0.0.1:8080", "", false}, + {"not hex nodeId", "xxxxbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, + + {"too short nodeId w/tcp", "tcp://deadbeef@127.0.0.1:8080", "", false}, + {"too short notHex nodeId w/tcp", "tcp://this-isnot-hex@127.0.0.1:8080", "", false}, + {"notHex nodeId w/tcp", "tcp://xxxxbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, + { + "correct nodeId w/tcp", + "tcp://deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", + 
"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", + true, + }, + + {"no node id", "tcp://@127.0.0.1:8080", "", false}, + {"no node id or IP", "tcp://@", "", false}, + {"tcp no host, w/ port", "tcp://:26656", "", false}, + {"empty", "", "", false}, + {"node id delimiter 1", "@", "", false}, + {"node id delimiter 2", " @", "", false}, + {"node id delimiter 3", " @ ", "", false}, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + na, err := ParseAddressString(tc.addr) + if tc.correct { + require.NoError(t, err, tc.addr) + assert.Contains(t, tc.expected, na.IP.String()) + assert.Contains(t, tc.expected, fmt.Sprint(na.Port)) + } else { + assert.Error(t, err, "%v", tc.addr) + } + }) + } +} diff --git a/types/node_key.go b/types/node_key.go index 547fa1696e..927e17065d 100644 --- a/types/node_key.go +++ b/types/node_key.go @@ -1,11 +1,12 @@ package types import ( - "io/ioutil" + "encoding/json" + "os" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" - tmjson "github.com/tendermint/tendermint/libs/json" + "github.com/tendermint/tendermint/internal/jsontypes" tmos "github.com/tendermint/tendermint/libs/os" ) @@ -17,23 +18,51 @@ import ( // It contains the nodes private key for authentication. type NodeKey struct { // Canonical ID - hex-encoded pubkey's address (IDByteLength bytes) - ID NodeID `json:"id"` + ID NodeID // Private key - PrivKey crypto.PrivKey `json:"priv_key"` + PrivKey crypto.PrivKey +} + +type nodeKeyJSON struct { + ID NodeID `json:"id"` + PrivKey json.RawMessage `json:"priv_key"` +} + +func (nk NodeKey) MarshalJSON() ([]byte, error) { + pk, err := jsontypes.Marshal(nk.PrivKey) + if err != nil { + return nil, err + } + return json.Marshal(nodeKeyJSON{ + ID: nk.ID, PrivKey: pk, + }) +} + +func (nk *NodeKey) UnmarshalJSON(data []byte) error { + var nkjson nodeKeyJSON + if err := json.Unmarshal(data, &nkjson); err != nil { + return err + } + var pk crypto.PrivKey + if err := jsontypes.Unmarshal(nkjson.PrivKey, &pk); err != nil { + return err + } + *nk = NodeKey{ID: nkjson.ID, PrivKey: pk} + return nil } // PubKey returns the peer's PubKey -func (nodeKey NodeKey) PubKey() crypto.PubKey { - return nodeKey.PrivKey.PubKey() +func (nk NodeKey) PubKey() crypto.PubKey { + return nk.PrivKey.PubKey() } // SaveAs persists the NodeKey to filePath. -func (nodeKey NodeKey) SaveAs(filePath string) error { - jsonBytes, err := tmjson.Marshal(nodeKey) +func (nk NodeKey) SaveAs(filePath string) error { + jsonBytes, err := json.Marshal(nk) if err != nil { return err } - return ioutil.WriteFile(filePath, jsonBytes, 0600) + return os.WriteFile(filePath, jsonBytes, 0600) } // LoadOrGenNodeKey attempts to load the NodeKey from the given filePath. If @@ -67,12 +96,12 @@ func GenNodeKey() NodeKey { // LoadNodeKey loads NodeKey located in filePath. 
func LoadNodeKey(filePath string) (NodeKey, error) { - jsonBytes, err := ioutil.ReadFile(filePath) + jsonBytes, err := os.ReadFile(filePath) if err != nil { return NodeKey{}, err } nodeKey := NodeKey{} - err = tmjson.Unmarshal(jsonBytes, &nodeKey) + err = json.Unmarshal(jsonBytes, &nodeKey) if err != nil { return NodeKey{}, err } diff --git a/types/node_key_test.go b/types/node_key_test.go index ae6b681012..0dea771eaf 100644 --- a/types/node_key_test.go +++ b/types/node_key_test.go @@ -14,10 +14,10 @@ func TestLoadOrGenNodeKey(t *testing.T) { filePath := filepath.Join(t.TempDir(), "peer_id.json") nodeKey, err := types.LoadOrGenNodeKey(filePath) - require.Nil(t, err) + require.NoError(t, err) nodeKey2, err := types.LoadOrGenNodeKey(filePath) - require.Nil(t, err) + require.NoError(t, err) require.Equal(t, nodeKey, nodeKey2) } diff --git a/types/params.go b/types/params.go index 15f7578a0b..2f54e57da1 100644 --- a/types/params.go +++ b/types/params.go @@ -1,6 +1,7 @@ package types import ( + "crypto/sha256" "errors" "fmt" "time" @@ -8,7 +9,6 @@ import ( "github.com/tendermint/tendermint/crypto/bls12381" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/secp256k1" - "github.com/tendermint/tendermint/crypto/tmhash" tmstrings "github.com/tendermint/tendermint/libs/strings" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) @@ -37,6 +37,8 @@ type ConsensusParams struct { Evidence EvidenceParams `json:"evidence"` Validator ValidatorParams `json:"validator"` Version VersionParams `json:"version"` + Synchrony SynchronyParams `json:"synchrony"` + Timeout TimeoutParams `json:"timeout"` } // HashedParams is a subset of ConsensusParams. @@ -50,15 +52,15 @@ type HashedParams struct { // BlockParams define limits on the block size and gas plus minimum time // between blocks. type BlockParams struct { - MaxBytes int64 `json:"max_bytes"` - MaxGas int64 `json:"max_gas"` + MaxBytes int64 `json:"max_bytes,string"` + MaxGas int64 `json:"max_gas,string"` } // EvidenceParams determine how we handle evidence of malfeasance. type EvidenceParams struct { - MaxAgeNumBlocks int64 `json:"max_age_num_blocks"` // only accept new evidence more recent than this - MaxAgeDuration time.Duration `json:"max_age_duration"` - MaxBytes int64 `json:"max_bytes"` + MaxAgeNumBlocks int64 `json:"max_age_num_blocks,string"` // only accept new evidence more recent than this + MaxAgeDuration time.Duration `json:"max_age_duration,string"` + MaxBytes int64 `json:"max_bytes,string"` } // ValidatorParams restrict the public key types validators can use. @@ -68,7 +70,26 @@ type ValidatorParams struct { } type VersionParams struct { - AppVersion uint64 `json:"app_version"` + AppVersion uint64 `json:"app_version,string"` +} + +// SynchronyParams influence the validity of block timestamps. +// For more information on the relationship of the synchrony parameters to +// block validity, see the Proposer-Based Timestamps specification: +// https://github.com/tendermint/tendermint/blob/master/spec/consensus/proposer-based-timestamp/README.md +type SynchronyParams struct { + Precision time.Duration `json:"precision,string"` + MessageDelay time.Duration `json:"message_delay,string"` +} + +// TimeoutParams configure the timings of the steps of the Tendermint consensus algorithm. 
+type TimeoutParams struct { + Propose time.Duration `json:"propose,string"` + ProposeDelta time.Duration `json:"propose_delta,string"` + Vote time.Duration `json:"vote,string"` + VoteDelta time.Duration `json:"vote_delta,string"` + Commit time.Duration `json:"commit,string"` + BypassCommitTimeout bool `json:"bypass_commit_timeout"` } // DefaultConsensusParams returns a default ConsensusParams. @@ -78,6 +99,8 @@ func DefaultConsensusParams() *ConsensusParams { Evidence: DefaultEvidenceParams(), Validator: DefaultValidatorParams(), Version: DefaultVersionParams(), + Synchrony: DefaultSynchronyParams(), + Timeout: DefaultTimeoutParams(), } } @@ -112,6 +135,89 @@ func DefaultVersionParams() VersionParams { } } +func DefaultSynchronyParams() SynchronyParams { + return SynchronyParams{ + // 505ms was selected as the default to enable chains that have validators in + // mixed leap-second handling environments. + // For more information, see: https://github.com/tendermint/tendermint/issues/7724 + Precision: 505 * time.Millisecond, + MessageDelay: 12 * time.Second, + } +} + +// SynchronyParamsOrDefaults returns the SynchronyParams, filling in any zero values +// with the Tendermint defined default values. +func (s SynchronyParams) SynchronyParamsOrDefaults() SynchronyParams { + // TODO: Remove this method and all uses once development on v0.37 begins. + // See: https://github.com/tendermint/tendermint/issues/8187 + + defaults := DefaultSynchronyParams() + if s.Precision == 0 { + s.Precision = defaults.Precision + } + if s.MessageDelay == 0 { + s.MessageDelay = defaults.MessageDelay + } + return s +} + +func DefaultTimeoutParams() TimeoutParams { + return TimeoutParams{ + Propose: 3000 * time.Millisecond, + ProposeDelta: 500 * time.Millisecond, + Vote: 1000 * time.Millisecond, + VoteDelta: 500 * time.Millisecond, + Commit: 1000 * time.Millisecond, + BypassCommitTimeout: false, + } +} + +// TimeoutParamsOrDefaults returns the TimeoutParams, filling in any zero values +// with the Tendermint defined default values. +func (t TimeoutParams) TimeoutParamsOrDefaults() TimeoutParams { + // TODO: Remove this method and all uses once development on v0.37 begins. + // See: https://github.com/tendermint/tendermint/issues/8187 + + defaults := DefaultTimeoutParams() + if t.Propose == 0 { + t.Propose = defaults.Propose + } + if t.ProposeDelta == 0 { + t.ProposeDelta = defaults.ProposeDelta + } + if t.Vote == 0 { + t.Vote = defaults.Vote + } + if t.VoteDelta == 0 { + t.VoteDelta = defaults.VoteDelta + } + if t.Commit == 0 { + t.Commit = defaults.Commit + } + return t +} + +// ProposeTimeout returns the amount of time to wait for a proposal. +func (t TimeoutParams) ProposeTimeout(round int32) time.Duration { + return time.Duration( + t.Propose.Nanoseconds()+t.ProposeDelta.Nanoseconds()*int64(round), + ) * time.Nanosecond +} + +// VoteTimeout returns the amount of time to wait for remaining votes after receiving any +2/3 votes. +func (t TimeoutParams) VoteTimeout(round int32) time.Duration { + return time.Duration( + t.Vote.Nanoseconds()+t.VoteDelta.Nanoseconds()*int64(round), + ) * time.Nanosecond +} + +// CommitTime accepts ti, the time at which the consensus engine received +2/3 +// precommits for a block and returns the point in time at which the consensus +// engine should begin consensus on the next block.
+func (t TimeoutParams) CommitTime(ti time.Time) time.Time { + return ti.Add(t.Commit) +} + func (val *ValidatorParams) IsValidPubkeyType(pubkeyType string) bool { for i := 0; i < len(val.PubKeyTypes); i++ { if val.PubKeyTypes[i] == pubkeyType { @@ -121,6 +227,15 @@ func (val *ValidatorParams) IsValidPubkeyType(pubkeyType string) bool { return false } +func (params *ConsensusParams) Complete() { + if params.Synchrony == (SynchronyParams{}) { + params.Synchrony = DefaultSynchronyParams() + } + if params.Timeout == (TimeoutParams{}) { + params.Timeout = DefaultTimeoutParams() + } +} + // Validate validates the ConsensusParams to ensure all values are within their // allowed limits, and returns an error if they are not. func (params ConsensusParams) ValidateConsensusParams() error { @@ -144,7 +259,7 @@ func (params ConsensusParams) ValidateConsensusParams() error { } if params.Evidence.MaxAgeDuration <= 0 { - return fmt.Errorf("evidence.MaxAgeDuration must be grater than 0 if provided, Got %v", + return fmt.Errorf("evidence.MaxAgeDuration must be greater than 0 if provided, Got %v", params.Evidence.MaxAgeDuration) } @@ -158,6 +273,36 @@ func (params ConsensusParams) ValidateConsensusParams() error { params.Evidence.MaxBytes) } + if params.Synchrony.MessageDelay <= 0 { + return fmt.Errorf("synchrony.MessageDelay must be greater than 0. Got: %d", + params.Synchrony.MessageDelay) + } + + if params.Synchrony.Precision <= 0 { + return fmt.Errorf("synchrony.Precision must be greater than 0. Got: %d", + params.Synchrony.Precision) + } + + if params.Timeout.Propose <= 0 { + return fmt.Errorf("timeout.Propose must be greater than 0. Got: %d", params.Timeout.Propose) + } + + if params.Timeout.ProposeDelta <= 0 { + return fmt.Errorf("timeout.ProposeDelta must be greater than 0. Got: %d", params.Timeout.ProposeDelta) + } + + if params.Timeout.Vote <= 0 { + return fmt.Errorf("timeout.Vote must be greater than 0. Got: %d", params.Timeout.Vote) + } + + if params.Timeout.VoteDelta <= 0 { + return fmt.Errorf("timeout.VoteDelta must be greater than 0. Got: %d", params.Timeout.VoteDelta) + } + + if params.Timeout.Commit <= 0 { + return fmt.Errorf("timeout.Commit must be greater than 0. Got: %d", params.Timeout.Commit) + } + if len(params.Validator.PubKeyTypes) == 0 { return errors.New("len(Validator.PubKeyTypes) must be greater than 0") } @@ -179,8 +324,6 @@ func (params ConsensusParams) ValidateConsensusParams() error { // This allows the ConsensusParams to evolve more without breaking the block // protocol. No need for a Merkle tree here, just a small struct to hash.
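
The timeout helpers above scale linearly with the round number: ProposeTimeout(r) = Propose + r*ProposeDelta, and likewise for votes. A quick standalone check of that arithmetic with the defaults from DefaultTimeoutParams (3000ms propose, 500ms delta):

```go
package main

import (
	"fmt"
	"time"
)

// scaled restates TimeoutParams.ProposeTimeout/VoteTimeout outside the
// package: base timeout plus one delta per elapsed round.
func scaled(base, delta time.Duration, round int32) time.Duration {
	return time.Duration(base.Nanoseconds()+delta.Nanoseconds()*int64(round)) * time.Nanosecond
}

func main() {
	propose := 3000 * time.Millisecond
	delta := 500 * time.Millisecond
	for _, round := range []int32{0, 1, 3, 10} {
		fmt.Printf("round %d: %v\n", round, scaled(propose, delta, round))
	}
	// round 0: 3s, round 1: 3.5s, round 3: 4.5s, round 10: 8s
}
```
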
func (params ConsensusParams) HashConsensusParams() []byte { - hasher := tmhash.New() - hp := tmproto.HashedParams{ BlockMaxBytes: params.Block.MaxBytes, BlockMaxGas: params.Block.MaxGas, @@ -191,16 +334,17 @@ func (params ConsensusParams) HashConsensusParams() []byte { panic(err) } - _, err = hasher.Write(bz) - if err != nil { - panic(err) - } - return hasher.Sum(nil) + sum := sha256.Sum256(bz) + + return sum[:] } func (params *ConsensusParams) Equals(params2 *ConsensusParams) bool { return params.Block == params2.Block && params.Evidence == params2.Evidence && + params.Version == params2.Version && + params.Synchrony == params2.Synchrony && + params.Timeout == params2.Timeout && tmstrings.StringSliceEqual(params.Validator.PubKeyTypes, params2.Validator.PubKeyTypes) } @@ -231,6 +375,32 @@ func (params ConsensusParams) UpdateConsensusParams(params2 *tmproto.ConsensusPa if params2.Version != nil { res.Version.AppVersion = params2.Version.AppVersion } + if params2.Synchrony != nil { + if params2.Synchrony.MessageDelay != nil { + res.Synchrony.MessageDelay = *params2.Synchrony.GetMessageDelay() + } + if params2.Synchrony.Precision != nil { + res.Synchrony.Precision = *params2.Synchrony.GetPrecision() + } + } + if params2.Timeout != nil { + if params2.Timeout.Propose != nil { + res.Timeout.Propose = *params2.Timeout.GetPropose() + } + if params2.Timeout.ProposeDelta != nil { + res.Timeout.ProposeDelta = *params2.Timeout.GetProposeDelta() + } + if params2.Timeout.Vote != nil { + res.Timeout.Vote = *params2.Timeout.GetVote() + } + if params2.Timeout.VoteDelta != nil { + res.Timeout.VoteDelta = *params2.Timeout.GetVoteDelta() + } + if params2.Timeout.Commit != nil { + res.Timeout.Commit = *params2.Timeout.GetCommit() + } + res.Timeout.BypassCommitTimeout = params2.Timeout.GetBypassCommitTimeout() + } return res } @@ -251,11 +421,23 @@ func (params *ConsensusParams) ToProto() tmproto.ConsensusParams { Version: &tmproto.VersionParams{ AppVersion: params.Version.AppVersion, }, + Synchrony: &tmproto.SynchronyParams{ + MessageDelay: ¶ms.Synchrony.MessageDelay, + Precision: ¶ms.Synchrony.Precision, + }, + Timeout: &tmproto.TimeoutParams{ + Propose: ¶ms.Timeout.Propose, + ProposeDelta: ¶ms.Timeout.ProposeDelta, + Vote: ¶ms.Timeout.Vote, + VoteDelta: ¶ms.Timeout.VoteDelta, + Commit: ¶ms.Timeout.Commit, + BypassCommitTimeout: params.Timeout.BypassCommitTimeout, + }, } } func ConsensusParamsFromProto(pbParams tmproto.ConsensusParams) ConsensusParams { - return ConsensusParams{ + c := ConsensusParams{ Block: BlockParams{ MaxBytes: pbParams.Block.MaxBytes, MaxGas: pbParams.Block.MaxGas, @@ -272,4 +454,31 @@ func ConsensusParamsFromProto(pbParams tmproto.ConsensusParams) ConsensusParams AppVersion: pbParams.Version.AppVersion, }, } + if pbParams.Synchrony != nil { + if pbParams.Synchrony.MessageDelay != nil { + c.Synchrony.MessageDelay = *pbParams.Synchrony.GetMessageDelay() + } + if pbParams.Synchrony.Precision != nil { + c.Synchrony.Precision = *pbParams.Synchrony.GetPrecision() + } + } + if pbParams.Timeout != nil { + if pbParams.Timeout.Propose != nil { + c.Timeout.Propose = *pbParams.Timeout.GetPropose() + } + if pbParams.Timeout.ProposeDelta != nil { + c.Timeout.ProposeDelta = *pbParams.Timeout.GetProposeDelta() + } + if pbParams.Timeout.Vote != nil { + c.Timeout.Vote = *pbParams.Timeout.GetVote() + } + if pbParams.Timeout.VoteDelta != nil { + c.Timeout.VoteDelta = *pbParams.Timeout.GetVoteDelta() + } + if pbParams.Timeout.Commit != nil { + c.Timeout.Commit = *pbParams.Timeout.GetCommit() + } + 
c.Timeout.BypassCommitTimeout = pbParams.Timeout.BypassCommitTimeout + } + return c } diff --git a/types/params_test.go b/types/params_test.go index a05cc039ee..48d8ab3041 100644 --- a/types/params_test.go +++ b/types/params_test.go @@ -17,69 +17,235 @@ var ( func TestConsensusParamsValidation(t *testing.T) { testCases := []struct { + name string params ConsensusParams valid bool }{ // test block params - 0: {makeParams(1, 0, 2, 0, valBLS12381), true}, - 1: {makeParams(0, 0, 2, 0, valBLS12381), false}, - 2: {makeParams(47*1024*1024, 0, 2, 0, valBLS12381), true}, - 3: {makeParams(10, 0, 2, 0, valBLS12381), true}, - 4: {makeParams(100*1024*1024, 0, 2, 0, valBLS12381), true}, - 5: {makeParams(101*1024*1024, 0, 2, 0, valBLS12381), false}, - 6: {makeParams(1024*1024*1024, 0, 2, 0, valBLS12381), false}, - 7: {makeParams(1024*1024*1024, 0, -1, 0, valBLS12381), false}, + { + name: "block params valid", + params: makeParams(makeParamsArgs{ + blockBytes: 1, + evidenceAge: 2, + precision: 1, + messageDelay: 1}), + valid: true, + }, + { + name: "block params invalid MaxBytes", + params: makeParams(makeParamsArgs{ + blockBytes: 0, + evidenceAge: 2, + precision: 1, + messageDelay: 1}), + valid: false, + }, + { + name: "block params large MaxBytes", + params: makeParams(makeParamsArgs{ + blockBytes: 47 * 1024 * 1024, + evidenceAge: 2, + precision: 1, + messageDelay: 1}), + valid: true, + }, + { + name: "block params small MaxBytes", + params: makeParams(makeParamsArgs{ + blockBytes: 10, + evidenceAge: 2, + precision: 1, + messageDelay: 1}), + valid: true, + }, + { + name: "block params 100MB MaxBytes", + params: makeParams(makeParamsArgs{ + blockBytes: 100 * 1024 * 1024, + evidenceAge: 2, + precision: 1, + messageDelay: 1}), + valid: true, + }, + { + name: "block params MaxBytes too large", + params: makeParams(makeParamsArgs{ + blockBytes: 101 * 1024 * 1024, + evidenceAge: 2, + precision: 1, + messageDelay: 1}), + valid: false, + }, + { + name: "block params 1GB MaxBytes", + params: makeParams(makeParamsArgs{ + blockBytes: 1024 * 1024 * 1024, + evidenceAge: 2, + precision: 1, + messageDelay: 1}), + valid: false, + }, // test evidence params - 8: {makeParams(1, 0, 0, 0, valBLS12381), false}, - 9: {makeParams(1, 0, 2, 2, valBLS12381), false}, - 10: {makeParams(1000, 0, 2, 1, valBLS12381), true}, - 11: {makeParams(1, 0, -1, 0, valBLS12381), false}, - // test no pubkey type provided - 12: {makeParams(1, 0, 2, 0, []string{}), false}, - // test invalid pubkey type provided - 13: {makeParams(1, 0, 2, 0, []string{"potatoes make good pubkeys"}), false}, + { + name: "evidence MaxAge and MaxBytes 0", + params: makeParams(makeParamsArgs{ + blockBytes: 1, + evidenceAge: 0, + maxEvidenceBytes: 0, + precision: 1, + messageDelay: 1}), + valid: false, + }, + { + name: "evidence MaxBytes greater than Block.MaxBytes", + params: makeParams(makeParamsArgs{ + blockBytes: 1, + evidenceAge: 2, + maxEvidenceBytes: 2, + precision: 1, + messageDelay: 1}), + valid: false, + }, + { + name: "evidence size below Block.MaxBytes", + params: makeParams(makeParamsArgs{ + blockBytes: 1000, + evidenceAge: 2, + maxEvidenceBytes: 1, + precision: 1, + messageDelay: 1}), + valid: true, + }, + { + name: "evidence MaxAgeDuration < 0", + params: makeParams(makeParamsArgs{ + blockBytes: 1, + evidenceAge: -1, + maxEvidenceBytes: 0, + precision: 1, + messageDelay: 1}), + valid: false, + }, + { + name: "no pubkey types", + params: makeParams(makeParamsArgs{ + evidenceAge: 2, + pubkeyTypes: []string{}, + precision: 1, + messageDelay: 1}), + valid: 
false, + }, + { + name: "invalid pubkey types", + params: makeParams(makeParamsArgs{ + evidenceAge: 2, + pubkeyTypes: []string{"potatoes make good pubkeys"}, + precision: 1, + messageDelay: 1}), + valid: false, + }, + { + name: "negative MessageDelay", + params: makeParams(makeParamsArgs{ + evidenceAge: 2, + precision: 1, + messageDelay: -1}), + valid: false, + }, + { + name: "negative Precision", + params: makeParams(makeParamsArgs{ + evidenceAge: 2, + precision: -1, + messageDelay: 1}), + valid: false, + }, } for i, tc := range testCases { - if tc.valid { - assert.NoErrorf(t, tc.params.ValidateConsensusParams(), "expected no error for valid params (#%d)", i) - } else { - assert.Errorf(t, tc.params.ValidateConsensusParams(), "expected error for non valid params (#%d)", i) - } + t.Run(tc.name, func(t *testing.T) { + if tc.valid { + assert.NoErrorf(t, tc.params.ValidateConsensusParams(), "expected no error for valid params (#%d)", i) + } else { + assert.Errorf(t, tc.params.ValidateConsensusParams(), "expected error for non valid params (#%d)", i) + } + }) } } -func makeParams( - blockBytes, blockGas int64, - evidenceAge int64, - maxEvidenceBytes int64, - pubkeyTypes []string, -) ConsensusParams { +type makeParamsArgs struct { + blockBytes int64 + blockGas int64 + evidenceAge int64 + maxEvidenceBytes int64 + pubkeyTypes []string + precision time.Duration + messageDelay time.Duration + bypassCommitTimeout bool + + propose *time.Duration + proposeDelta *time.Duration + vote *time.Duration + voteDelta *time.Duration + commit *time.Duration +} + +func makeParams(args makeParamsArgs) ConsensusParams { + if args.pubkeyTypes == nil { + args.pubkeyTypes = valBLS12381 + } + if args.propose == nil { + args.propose = durationPtr(1) + } + if args.proposeDelta == nil { + args.proposeDelta = durationPtr(1) + } + if args.vote == nil { + args.vote = durationPtr(1) + } + if args.voteDelta == nil { + args.voteDelta = durationPtr(1) + } + if args.commit == nil { + args.commit = durationPtr(1) + } return ConsensusParams{ Block: BlockParams{ - MaxBytes: blockBytes, - MaxGas: blockGas, + MaxBytes: args.blockBytes, + MaxGas: args.blockGas, }, Evidence: EvidenceParams{ - MaxAgeNumBlocks: evidenceAge, - MaxAgeDuration: time.Duration(evidenceAge), - MaxBytes: maxEvidenceBytes, + MaxAgeNumBlocks: args.evidenceAge, + MaxAgeDuration: time.Duration(args.evidenceAge), + MaxBytes: args.maxEvidenceBytes, }, Validator: ValidatorParams{ - PubKeyTypes: pubkeyTypes, + PubKeyTypes: args.pubkeyTypes, + }, + Synchrony: SynchronyParams{ + Precision: args.precision, + MessageDelay: args.messageDelay, + }, + Timeout: TimeoutParams{ + Propose: *args.propose, + ProposeDelta: *args.proposeDelta, + Vote: *args.vote, + VoteDelta: *args.voteDelta, + Commit: *args.commit, + BypassCommitTimeout: args.bypassCommitTimeout, }, } } func TestConsensusParamsHash(t *testing.T) { params := []ConsensusParams{ - makeParams(4, 2, 3, 1, valBLS12381), - makeParams(1, 4, 3, 1, valBLS12381), - makeParams(1, 2, 4, 1, valBLS12381), - makeParams(2, 5, 7, 1, valBLS12381), - makeParams(1, 7, 6, 1, valBLS12381), - makeParams(9, 5, 4, 1, valBLS12381), - makeParams(7, 8, 9, 1, valBLS12381), - makeParams(4, 6, 5, 1, valBLS12381), + makeParams(makeParamsArgs{blockBytes: 4, blockGas: 2, evidenceAge: 3, maxEvidenceBytes: 1}), + makeParams(makeParamsArgs{blockBytes: 1, blockGas: 4, evidenceAge: 3, maxEvidenceBytes: 1}), + makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 4, maxEvidenceBytes: 1}), + makeParams(makeParamsArgs{blockBytes: 2, blockGas: 5, 
evidenceAge: 7, maxEvidenceBytes: 1}), + makeParams(makeParamsArgs{blockBytes: 1, blockGas: 7, evidenceAge: 6, maxEvidenceBytes: 1}), + makeParams(makeParamsArgs{blockBytes: 9, blockGas: 5, evidenceAge: 4, maxEvidenceBytes: 1}), + makeParams(makeParamsArgs{blockBytes: 7, blockGas: 8, evidenceAge: 9, maxEvidenceBytes: 1}), + makeParams(makeParamsArgs{blockBytes: 4, blockGas: 6, evidenceAge: 5, maxEvidenceBytes: 1}), } hashes := make([][]byte, len(params)) @@ -99,20 +265,60 @@ func TestConsensusParamsHash(t *testing.T) { func TestConsensusParamsUpdate(t *testing.T) { testCases := []struct { - params ConsensusParams + initialParams ConsensusParams updates *tmproto.ConsensusParams updatedParams ConsensusParams }{ // empty updates { - makeParams(1, 2, 3, 0, valBLS12381), - &tmproto.ConsensusParams{}, - makeParams(1, 2, 3, 0, valBLS12381), + initialParams: makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3}), + updates: &tmproto.ConsensusParams{}, + updatedParams: makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3}), + }, + { + // update synchrony params + initialParams: makeParams(makeParamsArgs{evidenceAge: 3, precision: time.Second, messageDelay: 3 * time.Second}), + updates: &tmproto.ConsensusParams{ + Synchrony: &tmproto.SynchronyParams{ + Precision: durationPtr(time.Second * 2), + MessageDelay: durationPtr(time.Second * 4), + }, + }, + updatedParams: makeParams(makeParamsArgs{evidenceAge: 3, precision: 2 * time.Second, messageDelay: 4 * time.Second}), + }, + { + // update timeout params + initialParams: makeParams(makeParamsArgs{ + propose: durationPtr(3 * time.Second), + proposeDelta: durationPtr(500 * time.Millisecond), + vote: durationPtr(time.Second), + voteDelta: durationPtr(500 * time.Millisecond), + commit: durationPtr(time.Second), + bypassCommitTimeout: false, + }), + updates: &tmproto.ConsensusParams{ + Timeout: &tmproto.TimeoutParams{ + Propose: durationPtr(2 * time.Second), + ProposeDelta: durationPtr(400 * time.Millisecond), + Vote: durationPtr(5 * time.Second), + VoteDelta: durationPtr(400 * time.Millisecond), + Commit: durationPtr(time.Minute), + BypassCommitTimeout: true, + }, + }, + updatedParams: makeParams(makeParamsArgs{ + propose: durationPtr(2 * time.Second), + proposeDelta: durationPtr(400 * time.Millisecond), + vote: durationPtr(5 * time.Second), + voteDelta: durationPtr(400 * time.Millisecond), + commit: durationPtr(time.Minute), + bypassCommitTimeout: true, + }), }, // fine updates { - makeParams(1, 2, 3, 0, valBLS12381), - &tmproto.ConsensusParams{ + initialParams: makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3}), + updates: &tmproto.ConsensusParams{ Block: &tmproto.BlockParams{ MaxBytes: 100, MaxGas: 200, @@ -126,11 +332,15 @@ func TestConsensusParamsUpdate(t *testing.T) { PubKeyTypes: valBLS12381, }, }, - makeParams(100, 200, 300, 50, valBLS12381), + updatedParams: makeParams(makeParamsArgs{ + blockBytes: 100, blockGas: 200, + evidenceAge: 300, + maxEvidenceBytes: 50, + }), }, { - makeParams(1, 2, 3, 0, valBLS12381), - &tmproto.ConsensusParams{ + initialParams: makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3}), + updates: &tmproto.ConsensusParams{ Block: &tmproto.BlockParams{ MaxBytes: 100, MaxGas: 200, @@ -143,17 +353,23 @@ func TestConsensusParamsUpdate(t *testing.T) { Validator: &tmproto.ValidatorParams{ PubKeyTypes: valBLS12381, }, - }, makeParams(100, 200, 300, 50, valBLS12381), + }, + updatedParams: makeParams(makeParamsArgs{ + blockBytes: 100, + blockGas: 200, + evidenceAge: 300, +
maxEvidenceBytes: 50, + }), }, } for _, tc := range testCases { - assert.Equal(t, tc.updatedParams, tc.params.UpdateConsensusParams(tc.updates)) + assert.Equal(t, tc.updatedParams, tc.initialParams.UpdateConsensusParams(tc.updates)) } } func TestConsensusParamsUpdate_AppVersion(t *testing.T) { - params := makeParams(1, 2, 3, 0, valBLS12381) + params := makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 3}) assert.EqualValues(t, 0, params.Version.AppVersion) @@ -165,14 +381,16 @@ func TestConsensusParamsUpdate_AppVersion(t *testing.T) { func TestProto(t *testing.T) { params := []ConsensusParams{ - makeParams(4, 2, 3, 1, valBLS12381), - makeParams(1, 4, 3, 1, valBLS12381), - makeParams(1, 2, 4, 1, valBLS12381), - makeParams(2, 5, 7, 1, valBLS12381), - makeParams(1, 7, 6, 1, valBLS12381), - makeParams(9, 5, 4, 1, valBLS12381), - makeParams(7, 8, 9, 1, valBLS12381), - makeParams(4, 6, 5, 1, valBLS12381), + makeParams(makeParamsArgs{blockBytes: 4, blockGas: 2, evidenceAge: 3, maxEvidenceBytes: 1}), + makeParams(makeParamsArgs{blockBytes: 1, blockGas: 4, evidenceAge: 3, maxEvidenceBytes: 1}), + makeParams(makeParamsArgs{blockBytes: 1, blockGas: 2, evidenceAge: 4, maxEvidenceBytes: 1}), + makeParams(makeParamsArgs{blockBytes: 2, blockGas: 5, evidenceAge: 7, maxEvidenceBytes: 1}), + makeParams(makeParamsArgs{blockBytes: 1, blockGas: 7, evidenceAge: 6, maxEvidenceBytes: 1}), + makeParams(makeParamsArgs{blockBytes: 9, blockGas: 5, evidenceAge: 4, maxEvidenceBytes: 1}), + makeParams(makeParamsArgs{blockBytes: 7, blockGas: 8, evidenceAge: 9, maxEvidenceBytes: 1}), + makeParams(makeParamsArgs{blockBytes: 4, blockGas: 6, evidenceAge: 5, maxEvidenceBytes: 1}), + makeParams(makeParamsArgs{precision: time.Second, messageDelay: time.Minute}), + makeParams(makeParamsArgs{precision: time.Nanosecond, messageDelay: time.Millisecond}), } for i := range params { @@ -184,3 +402,7 @@ func TestProto(t *testing.T) { } } + +func durationPtr(t time.Duration) *time.Duration { + return &t +} diff --git a/types/part_set.go b/types/part_set.go index 3a691083f7..9bf36279f7 100644 --- a/types/part_set.go +++ b/types/part_set.go @@ -2,15 +2,15 @@ package types import ( "bytes" + "encoding/json" "errors" "fmt" "io" + "sync" "github.com/tendermint/tendermint/crypto/merkle" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/bits" tmbytes "github.com/tendermint/tendermint/libs/bytes" - tmjson "github.com/tendermint/tendermint/libs/json" tmmath "github.com/tendermint/tendermint/libs/math" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) @@ -151,7 +151,7 @@ type PartSet struct { total uint32 hash []byte - mtx tmsync.Mutex + mtx sync.Mutex parts []*Part partsBitArray *bits.BitArray count uint32 @@ -365,7 +365,7 @@ func (ps *PartSet) MarshalJSON() ([]byte, error) { ps.mtx.Lock() defer ps.mtx.Unlock() - return tmjson.Marshal(struct { + return json.Marshal(struct { CountTotal string `json:"count/total"` PartsBitArray *bits.BitArray `json:"parts_bit_array"` }{ diff --git a/types/part_set_test.go b/types/part_set_test.go index c6ea0f4525..af65ca8db0 100644 --- a/types/part_set_test.go +++ b/types/part_set_test.go @@ -1,7 +1,7 @@ package types import ( - "io/ioutil" + "io" "testing" "github.com/stretchr/testify/assert" @@ -48,7 +48,7 @@ func TestBasicPartSet(t *testing.T) { // adding existing part added, err = partSet2.AddPart(partSet2.GetPart(0)) assert.False(t, added) - assert.Nil(t, err) + assert.NoError(t, err) assert.Equal(t, partSet.Hash(),
partSet2.Hash()) assert.EqualValues(t, nParts, partSet2.Total()) @@ -57,7 +57,7 @@ func TestBasicPartSet(t *testing.T) { // Reconstruct data, assert that they are equal. data2Reader := partSet2.GetReader() - data2, err := ioutil.ReadAll(data2Reader) + data2, err := io.ReadAll(data2Reader) require.NoError(t, err) assert.Equal(t, data, data2) diff --git a/types/priv_validator.go b/types/priv_validator.go index 4348444455..0b34b87f57 100644 --- a/types/priv_validator.go +++ b/types/priv_validator.go @@ -9,12 +9,12 @@ import ( "strconv" "sync" - "github.com/tendermint/tendermint/libs/log" - "github.com/dashevo/dashd-go/btcjson" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/bls12381" + tmbytes "github.com/tendermint/tendermint/libs/bytes" + "github.com/tendermint/tendermint/libs/log" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) @@ -54,7 +54,7 @@ type PrivValidator interface { vote *tmproto.Vote, stateID StateID, logger log.Logger) error SignProposal( ctx context.Context, chainID string, quorumType btcjson.LLMQType, quorumHash crypto.QuorumHash, - proposal *tmproto.Proposal) ([]byte, error) + proposal *tmproto.Proposal) (tmbytes.HexBytes, error) ExtractIntoValidator(ctx context.Context, quorumHash crypto.QuorumHash) *Validator } @@ -222,6 +222,10 @@ func (pv *MockPV) GetPrivateKey(ctx context.Context, quorumHash crypto.QuorumHas return pv.PrivateKeys[quorumHash.String()].PrivKey, nil } +func (pv *MockPV) getPrivateKey(quorumHash crypto.QuorumHash) crypto.PrivKey { + return pv.PrivateKeys[quorumHash.String()].PrivKey +} + // ThresholdPublicKeyForQuorumHash ... func (pv *MockPV) ThresholdPublicKeyForQuorumHash(ctx context.Context, quorumHash crypto.QuorumHash) (crypto.PubKey, error) { pv.mtx.RLock() @@ -257,12 +261,7 @@ func (pv *MockPV) SignVote( blockSignID := VoteBlockSignID(useChainID, vote, quorumType, quorumHash) - var privKey crypto.PrivKey - if quorumKeys, ok := pv.PrivateKeys[quorumHash.String()]; ok { - privKey = quorumKeys.PrivKey - } else { - return fmt.Errorf("file private validator could not sign vote for quorum hash %v", quorumHash) - } + privKey := pv.getPrivateKey(quorumHash) blockSignature, err := privKey.SignDigest(blockSignID) // fmt.Printf("validator %X signing vote of type %d at height %d with key %X blockSignBytes %X stateSignBytes %X\n", @@ -283,6 +282,18 @@ func (pv *MockPV) SignVote( vote.StateSignature = stateSignature } + var extSig []byte + // We only sign vote extensions for precommits + if vote.Type == tmproto.PrecommitType { + extSignID := VoteExtensionSignID(useChainID, vote, quorumType, quorumHash) + extSig, err = privKey.SignDigest(extSignID) + if err != nil { + return err + } + } else if len(vote.Extension) > 0 { + return errors.New("unexpected vote extension - vote extensions are only allowed in precommits") + } + vote.ExtensionSignature = extSig return nil } @@ -293,7 +304,7 @@ func (pv *MockPV) SignProposal( quorumType btcjson.LLMQType, quorumHash crypto.QuorumHash, proposal *tmproto.Proposal, -) ([]byte, error) { +) (tmbytes.HexBytes, error) { pv.mtx.Lock() defer pv.mtx.Unlock() if pv.breakProposalSigning { @@ -353,7 +364,7 @@ func (pv *MockPV) ExtractIntoValidator(ctx context.Context, quorumHash crypto.Qu // String returns a string representation of the MockPV. 
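
The new vote-extension branch in MockPV.SignVote encodes one rule: extensions are signed only on precommits, and a non-empty extension on any other vote type is rejected. A simplified stand-in for that rule (the real code derives the digest with VoteExtensionSignID and signs with the quorum's BLS key):

```go
package main

import (
	"errors"
	"fmt"
)

// signExtension restates the precommit-only rule from SignVote.
// sign is a placeholder for the BLS digest-signing step.
func signExtension(isPrecommit bool, extension []byte, sign func([]byte) []byte) ([]byte, error) {
	if isPrecommit {
		return sign(extension), nil
	}
	if len(extension) > 0 {
		return nil, errors.New("unexpected vote extension - vote extensions are only allowed in precommits")
	}
	return nil, nil
}

func main() {
	fakeSign := func(b []byte) []byte { return append([]byte("sig:"), b...) }

	sig, _ := signExtension(true, []byte("ext"), fakeSign)
	fmt.Printf("%s\n", sig) // sig:ext

	_, err := signExtension(false, []byte("ext"), fakeSign)
	fmt.Println(err) // unexpected vote extension - vote extensions are only allowed in precommits
}
```
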
func (pv *MockPV) String() string {
- proTxHash, _ := pv.GetProTxHash(context.Background()) // mockPV will never return an error, ignored here
+ proTxHash, _ := pv.GetProTxHash(context.TODO()) // mockPV will never return an error, ignored here
return fmt.Sprintf("MockPV{%v}", proTxHash) }
@@ -387,7 +398,7 @@ func (pv *ErroringMockPV) SignProposal( quorumType btcjson.LLMQType, quorumHash crypto.QuorumHash, proposal *tmproto.Proposal,
-) ([]byte, error) {
+) (tmbytes.HexBytes, error) {
return nil, ErroringMockPVErr }
diff --git a/types/proposal.go b/types/proposal.go index ee7c9aeae0..8e6c9a2858 100644 --- a/types/proposal.go +++ b/types/proposal.go
@@ -1,10 +1,12 @@ package types import (
+ "crypto/sha256"
"encoding/binary" "errors" "fmt" "math"
+ "math/bits"
"time" "github.com/dashevo/dashd-go/btcjson"
@@ -29,27 +31,30 @@ var ( // a so-called Proof-of-Lock (POL) round, as noted in the POLRound. // If POLRound >= 0, then BlockID corresponds to the block that is locked in POLRound. type Proposal struct {
- Type tmproto.SignedMsgType
- Height int64 `json:"height"`
- CoreChainLockedHeight uint32 `json:"core_height"`
- Round int32 `json:"round"` // there can not be greater than 2_147_483_647 rounds
- POLRound int32 `json:"pol_round"` // -1 if null.
- BlockID BlockID `json:"block_id"`
- Timestamp time.Time `json:"timestamp"`
- Signature []byte `json:"signature"`
+ Type tmproto.SignedMsgType
+ Height int64 `json:"height,string"`
+ Round int32 `json:"round"` // there cannot be more than 2_147_483_647 rounds
+ POLRound int32 `json:"pol_round"` // -1 if null.
+ BlockID BlockID `json:"block_id"`
+ Timestamp time.Time `json:"timestamp"`
+ Signature []byte `json:"signature"`
+
+ // dash fields
+ CoreChainLockedHeight uint32 `json:"core_height"`
}
// NewProposal returns a new Proposal. // If there is no POLRound, polRound should be -1.
-func NewProposal(height int64, coreChainLockedHeight uint32, round int32, polRound int32, blockID BlockID) *Proposal {
+func NewProposal(height int64, coreChainLockedHeight uint32, round int32, polRound int32, blockID BlockID, ts time.Time) *Proposal {
return &Proposal{
- Type: tmproto.ProposalType,
- Height: height,
+ Type: tmproto.ProposalType,
+ Height: height,
+ Round: round,
+ BlockID: blockID,
+ POLRound: polRound,
+ Timestamp: tmtime.Canonical(ts),
+ CoreChainLockedHeight: coreChainLockedHeight,
- Round: round,
- BlockID: blockID,
- POLRound: polRound,
- Timestamp: tmtime.Now(),
} }
@@ -71,7 +76,7 @@ func (p *Proposal) ValidateBasic() error { return errors.New("negative POLRound (exception: -1)") } if err := p.BlockID.ValidateBasic(); err != nil {
- return fmt.Errorf("wrong BlockID: %v", err)
+ return fmt.Errorf("wrong BlockID: %w", err)
} // ValidateBasic above would pass even if the BlockID was empty: if !p.BlockID.IsComplete() {
@@ -90,6 +95,43 @@ func (p *Proposal) ValidateBasic() error { return nil }
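A brief aside on the `json:"height,string"` tag added to Proposal above (and to Vote later in this diff): the `,string` option makes encoding/json emit the int64 height as a quoted decimal, which protects it from precision loss in consumers that decode JSON numbers as 64-bit floats. A self-contained illustration; the `demo` struct is hypothetical:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type demo struct {
	Height int64 `json:"height,string"` // marshaled as a quoted string
	Round  int32 `json:"round"`         // marshaled as a plain JSON number
}

func main() {
	// 2^53+1 cannot be represented exactly as a float64, so a quoted
	// string is the safe wire format for heights this large.
	b, _ := json.Marshal(demo{Height: 9007199254740993, Round: 1})
	fmt.Println(string(b)) // {"height":"9007199254740993","round":1}

	var d demo
	_ = json.Unmarshal([]byte(`{"height":"42","round":0}`), &d)
	fmt.Println(d.Height) // 42
}
```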
+// IsTimely validates that the block timestamp is 'timely' according to the proposer-based timestamp algorithm.
+// To evaluate if a block is timely, its timestamp is compared to the local time of the validator along with the
+// configured Precision and MsgDelay parameters.
+// Specifically, a proposed block timestamp is considered timely if it satisfies the following inequalities:
+//
+// localtime >= proposedBlockTime - Precision
+// localtime <= proposedBlockTime + MsgDelay + Precision
+//
+// For more information on the meaning of 'timely', see the proposer-based timestamp specification:
+// https://github.com/tendermint/tendermint/tree/master/spec/consensus/proposer-based-timestamp
+func (p *Proposal) IsTimely(recvTime time.Time, sp SynchronyParams, round int32) bool {
+ // The message delay values are scaled as rounds progress.
+ // Every 10 rounds, the message delay is doubled to allow consensus to
+ // proceed in the case that the chosen value was too small for the given network conditions.
+ // For more information and discussion on this mechanism, see the relevant github issue:
+ // https://github.com/tendermint/spec/issues/371
+ maxShift := bits.LeadingZeros64(uint64(sp.MessageDelay)) - 1
+ nShift := int(round / 10)
+
+ if nShift > maxShift {
+ // if the number of 'doublings' would overflow the size of the int, use the
+ // maximum instead.
+ nShift = maxShift
+ }
+ msgDelay := sp.MessageDelay * time.Duration(1<<nShift)
+
+ // lhs is `proposedBlockTime - Precision` in the first inequality.
+ lhs := p.Timestamp.Add(-sp.Precision)
+ // rhs is `proposedBlockTime + MsgDelay + Precision` in the second inequality.
+ rhs := p.Timestamp.Add(msgDelay).Add(sp.Precision)
+
+ if recvTime.Before(lhs) || recvTime.After(rhs) {
+ return false
+ }
+ return true
+}
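To make the timeliness window concrete, here is a small standalone sketch of the same check with illustrative synchrony values; it is a local reimplementation, not the library API, and it omits the maxShift overflow guard above:

```go
package main

import (
	"fmt"
	"time"
)

// isTimely mirrors Proposal.IsTimely: a proposal received at recvTime is timely
// when proposedTime-precision <= recvTime <= proposedTime+msgDelay+precision,
// where msgDelay doubles once per 10 rounds.
func isTimely(recvTime, proposedTime time.Time, precision, msgDelay time.Duration, round int32) bool {
	delay := msgDelay * time.Duration(1<<int(round/10))
	lhs := proposedTime.Add(-precision)           // earliest acceptable receive time
	rhs := proposedTime.Add(delay).Add(precision) // latest acceptable receive time
	return !recvTime.Before(lhs) && !recvTime.After(rhs)
}

func main() {
	proposed := time.Date(2022, 4, 1, 12, 0, 0, 0, time.UTC)
	precision, msgDelay := 500*time.Millisecond, 2*time.Second

	fmt.Println(isTimely(proposed.Add(time.Second), proposed, precision, msgDelay, 0))    // true: within 2s+500ms
	fmt.Println(isTimely(proposed.Add(3*time.Second), proposed, precision, msgDelay, 0))  // false: outside the round-0 window
	fmt.Println(isTimely(proposed.Add(3*time.Second), proposed, precision, msgDelay, 10)) // true: msgDelay has doubled to 4s
}
```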
diff --git a/types/tx.go b/types/tx.go --- a/types/tx.go +++ b/types/tx.go
// Proof returns a simple merkle proof for this node.
// Panics if i < 0 or i >= len(txs)
-// TODO: optimize this!
func (txs Txs) Proof(i int) TxProof {
- l := len(txs)
- bzs := make([][]byte, l)
- for i := 0; i < l; i++ {
- bzs[i] = txs[i].Hash()
- }
- root, proofs := merkle.ProofsFromByteSlices(bzs)
+ hl := txs.hashList()
+ root, proofs := merkle.ProofsFromByteSlices(hl)
return TxProof{ RootHash: root,
@@ -79,6 +69,206 @@ func (txs Txs) Proof(i int) TxProof { } }
+func (txs Txs) hashList() [][]byte {
+ hl := make([][]byte, len(txs))
+ for i := 0; i < len(txs); i++ {
+ hl[i] = txs[i].Hash()
+ }
+ return hl
+}
+
+// Txs is a slice of transactions. Sorting a Txs value orders the transactions
+// lexicographically.
+func (txs Txs) Len() int { return len(txs) }
+func (txs Txs) Swap(i, j int) { txs[i], txs[j] = txs[j], txs[i] }
+func (txs Txs) Less(i, j int) bool {
+ return bytes.Compare(txs[i], txs[j]) == -1
+}
+
+// ToSliceOfBytes converts a Txs to slice of byte slices.
+func (txs Txs) ToSliceOfBytes() [][]byte {
+ txBzs := make([][]byte, len(txs))
+ for i := 0; i < len(txs); i++ {
+ txBzs[i] = txs[i]
+ }
+ return txBzs
+}
+
+// TxRecordSet contains indexes into an underlying set of transactions.
+// These indexes are useful for validating and working with a list of TxRecords
+// from the PrepareProposal response.
+//
+// Only one copy of the original data is referenced by all of the indexes but a
+// transaction may appear in multiple indexes.
+type TxRecordSet struct {
+ // all holds the complete list of all transactions from the original list of
+ // TxRecords.
+ all Txs
+
+ // included is an index of the transactions that will be included in the block
+ // and is constructed from the list of both added and unmodified transactions.
+ // included maintains the original order that the transactions were present
+ // in the list of TxRecords.
+ included Txs
+
+ // added, unmodified, removed, and unknown are indexes for each of the actions
+ // that may be supplied with a transaction.
+ //
+ // Because each transaction only has one action, it can be referenced by
+ // at most 3 indexes in this data structure: the action-specific index, the
+ // included index, and the all index.
+ added Txs
+ unmodified Txs
+ removed Txs
+ unknown Txs
+}
+
+// NewTxRecordSet constructs a new set from the given transaction records.
+// The contents of the input transactions are shared by the set, and must not
+// be modified during the lifetime of the set.
+func NewTxRecordSet(trs []*abci.TxRecord) TxRecordSet {
+ txrSet := TxRecordSet{
+ all: make([]Tx, len(trs)),
+ }
+ for i, tr := range trs {
+
+ txrSet.all[i] = Tx(tr.Tx)
+
+ // The following set of assignments do not allocate new []byte, they create
+ // pointers to the already allocated slice.
+ switch tr.GetAction() {
+ case abci.TxRecord_UNKNOWN:
+ txrSet.unknown = append(txrSet.unknown, txrSet.all[i])
+ case abci.TxRecord_UNMODIFIED:
+ txrSet.unmodified = append(txrSet.unmodified, txrSet.all[i])
+ txrSet.included = append(txrSet.included, txrSet.all[i])
+ case abci.TxRecord_ADDED:
+ txrSet.added = append(txrSet.added, txrSet.all[i])
+ txrSet.included = append(txrSet.included, txrSet.all[i])
+ case abci.TxRecord_REMOVED:
+ txrSet.removed = append(txrSet.removed, txrSet.all[i])
+ }
+ }
+ return txrSet
+}
+
+// IncludedTxs returns the transactions marked for inclusion in a block. This
+// list maintains the order that the transactions were included in the list of
+// TxRecords that were used to construct the TxRecordSet.
+func (t TxRecordSet) IncludedTxs() []Tx {
+ return t.included
+}
+
+// RemovedTxs returns the transactions marked for removal by the application.
+func (t TxRecordSet) RemovedTxs() []Tx {
+ return t.removed
+}
+
+// Validate checks that the record set was correctly constructed from the original
+// list of transactions.
+func (t TxRecordSet) Validate(maxSizeBytes int64, otxs Txs) error {
+ if len(t.unknown) > 0 {
+ return fmt.Errorf("%d transactions marked unknown (first unknown hash: %x)", len(t.unknown), t.unknown[0].Hash())
+ }
+
+ // The following validation logic performs a set of sorts on the data in the TxRecordSet indexes.
+ // It sorts the original transaction list, otxs, once.
+ // It sorts the new transaction list twice: once when sorting 'all', the total list,
+ // and once by sorting the set of the added, removed, and unmodified transaction indexes,
+ // which, when combined, comprise the complete list of modified transactions.
+ //
+ // Each of the added, removed, and unmodified indexes is then iterated once,
+ // and each value is checked against the sorted original list for containment.
+ // Asymptotically, this yields a total runtime of O(N*log(N) + 2*M*log(M) + M*log(N)),
+ // in the input size of the original list, N, and the input size of the new list, M, respectively.
+ // Performance gains are likely possible, but this was preferred for readability and maintainability.
+
+ // Sort a copy of the complete transaction slice so we can check for
+ // duplication. The copy is so we do not change the original ordering.
+ // Only the slices are copied, the transaction contents are shared.
+ allCopy := sortedCopy(t.all)
+
+ for i, cur := range allCopy {
+ // allCopy is sorted, so any duplicated data will be adjacent.
+ if i+1 < len(allCopy) && bytes.Equal(cur, allCopy[i+1]) {
+ return fmt.Errorf("found duplicate transaction with hash: %x", cur.Hash())
+ }
+ }
+
+ // create copies of each of the action-specific indexes so that the order of the original
+ // indexes can be preserved.
+ addedCopy := sortedCopy(t.added)
+ removedCopy := sortedCopy(t.removed)
+ unmodifiedCopy := sortedCopy(t.unmodified)
+
+ var size int64
+ for _, cur := range append(unmodifiedCopy, addedCopy...) {
+ size += int64(len(cur))
+ if size > maxSizeBytes {
+ return fmt.Errorf("transaction data size exceeds maximum %d", maxSizeBytes)
+ }
+ }
+
+ // make a defensive copy of otxs so that the order of
+ // the caller's data is not altered.
+ otxsCopy := sortedCopy(otxs)
+
+ if ix, ok := containsAll(otxsCopy, unmodifiedCopy); !ok {
+ return fmt.Errorf("new transaction incorrectly marked as unmodified, transaction hash: %x", unmodifiedCopy[ix].Hash())
+ }
+
+ if ix, ok := containsAll(otxsCopy, removedCopy); !ok {
+ return fmt.Errorf("new transaction incorrectly marked as removed, transaction hash: %x", removedCopy[ix].Hash())
+ }
+ if ix, ok := containsAny(otxsCopy, addedCopy); ok {
+ return fmt.Errorf("existing transaction incorrectly marked as added, transaction hash: %x", addedCopy[ix].Hash())
+ }
+ return nil
+}
+
+func sortedCopy(txs Txs) Txs {
+ cp := make(Txs, len(txs))
+ copy(cp, txs)
+ sort.Sort(cp)
+ return cp
+}
+
+// containsAny checks that list a contains one of the transactions in list
+// b. If a match is found, the index in b of the matching transaction is returned.
+// Both lists must be sorted.
+func containsAny(a, b []Tx) (int, bool) {
+ for i, cur := range b {
+ if _, ok := contains(a, cur); ok {
+ return i, true
+ }
+ }
+ return -1, false
+}
+
+// containsAll checks that super contains all of the transactions in the sub
+// list. If not all values in sub are present in super, the index in sub of the
+// first Tx absent from super is returned.
+func containsAll(super, sub Txs) (int, bool) {
+ for i, cur := range sub {
+ if _, ok := contains(super, cur); !ok {
+ return i, false
+ }
+ }
+ return -1, true
+}
+
+// contains checks whether the sorted list set contains elem. If set does contain elem, the
+// index of elem in set is returned.
+func contains(set []Tx, elem Tx) (int, bool) {
+ n := sort.Search(len(set), func(i int) bool {
+ return bytes.Compare(elem, set[i]) <= 0
+ })
+ if n == len(set) || !bytes.Equal(elem, set[n]) {
+ return -1, false
+ }
+ return n, true
+}
+
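A usage sketch of the record-set flow above, written as if it sat in the types package beside these helpers; the record values are illustrative and not part of this diff:

```go
// ExampleTxRecordSet shows how a PrepareProposal result might be checked with
// the helpers above. Illustrative only.
func ExampleTxRecordSet() {
	orig := Txs{Tx("tx-a"), Tx("tx-b")} // the transactions handed to the application
	trs := []*abci.TxRecord{
		{Action: abci.TxRecord_UNMODIFIED, Tx: []byte("tx-a")}, // kept as-is
		{Action: abci.TxRecord_REMOVED, Tx: []byte("tx-b")},    // dropped by the app
		{Action: abci.TxRecord_ADDED, Tx: []byte("tx-c")},      // injected by the app
	}
	set := NewTxRecordSet(trs)

	// Validate enforces: no UNKNOWN actions, no duplicates, ADDED transactions
	// are genuinely new, REMOVED and UNMODIFIED ones came from orig, and the
	// included transactions fit under the byte limit.
	if err := set.Validate(1024, orig); err != nil {
		panic(err)
	}
	for _, tx := range set.IncludedTxs() {
		fmt.Println(string(tx)) // in record order
	}
	// Output:
	// tx-a
	// tx-c
}
```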
// TxProof represents a Merkle proof of the presence of a transaction in the Merkle tree.
type TxProof struct { RootHash tmbytes.HexBytes `json:"root_hash"` diff --git a/types/tx_test.go b/types/tx_test.go index 8fe277da82..77afa1b23f 100644 --- a/types/tx_test.go +++ b/types/tx_test.go @@ -2,12 +2,13 @@ package types import ( "bytes" - mrand "math/rand" + "math/rand" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + abci "github.com/tendermint/tendermint/abci/types" ctest "github.com/tendermint/tendermint/internal/libs/test" tmrand "github.com/tendermint/tendermint/libs/rand" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" @@ -21,11 +22,6 @@ func makeTxs(cnt, size int) Txs { return txs } -func randInt(low, high int) int { - off := mrand.Int() % (high - low) - return low + off -} - func TestTxIndex(t *testing.T) { for i := 0; i < 20; i++ { txs := makeTxs(15, 60) @@ -52,6 +48,179 @@ func TestTxIndexByHash(t *testing.T) { } } +func TestValidateTxRecordSet(t *testing.T) { + t.Run("should error on total transaction size exceeding max data size", func(t *testing.T) { + trs := []*abci.TxRecord{ + { + Action: abci.TxRecord_ADDED, + Tx: Tx([]byte{1, 2, 3, 4, 5}), + }, + { + Action: abci.TxRecord_ADDED, + Tx: Tx([]byte{6, 7, 8, 9, 10}), + }, + } + txrSet := NewTxRecordSet(trs) + err := txrSet.Validate(9, []Tx{}) + require.Error(t, err) + }) + t.Run("should not error on removed transaction size exceeding max data size", func(t *testing.T) { + trs := []*abci.TxRecord{ + { + Action: abci.TxRecord_ADDED, + Tx: Tx([]byte{1, 2, 3, 4, 5}), + }, + { + Action: abci.TxRecord_ADDED, + Tx: Tx([]byte{6, 7, 8, 9}), + }, + { + Action: abci.TxRecord_REMOVED, + Tx: Tx([]byte{10}), + }, + } + txrSet := NewTxRecordSet(trs) + err := txrSet.Validate(9, []Tx{[]byte{10}}) + require.NoError(t, err) + }) + t.Run("should error on duplicate transactions with the same action", func(t *testing.T) { + trs := []*abci.TxRecord{ + { + Action: abci.TxRecord_ADDED, + Tx: Tx([]byte{1, 2, 3, 4, 5}), + }, + { + Action: abci.TxRecord_ADDED, + Tx: Tx([]byte{100}), + }, + { + Action: abci.TxRecord_ADDED, + Tx: Tx([]byte{1, 2, 3, 4, 5}), + }, + { + Action: abci.TxRecord_ADDED, + Tx: Tx([]byte{200}), + }, + } + txrSet := NewTxRecordSet(trs) + err := txrSet.Validate(100, []Tx{}) + require.Error(t, err) + }) + t.Run("should error on duplicate transactions with mixed actions", func(t *testing.T) { + trs := []*abci.TxRecord{ + { + Action: abci.TxRecord_ADDED, + Tx: Tx([]byte{1, 2, 3, 4, 5}), + }, + { + Action: abci.TxRecord_ADDED, + Tx: Tx([]byte{100}), + }, + { + Action: abci.TxRecord_REMOVED, + Tx: Tx([]byte{1, 2, 3, 4, 5}), + }, + { + Action: abci.TxRecord_ADDED, + Tx: Tx([]byte{200}), + }, + } + txrSet := NewTxRecordSet(trs) + err := txrSet.Validate(100, []Tx{}) + require.Error(t, err) + }) + t.Run("should error on new transactions marked UNMODIFIED", func(t *testing.T) { + trs := []*abci.TxRecord{ + { + Action: abci.TxRecord_UNMODIFIED, + Tx: Tx([]byte{1, 2, 3, 4, 5}), + }, + } + txrSet := NewTxRecordSet(trs) + err := txrSet.Validate(100, []Tx{}) + require.Error(t, err) + }) + t.Run("should error on new transactions marked REMOVED", func(t *testing.T) { + trs := []*abci.TxRecord{ + { + Action: abci.TxRecord_REMOVED, + Tx: Tx([]byte{1, 2, 3, 4, 5}), + }, + } + txrSet := NewTxRecordSet(trs) + err := txrSet.Validate(100, []Tx{}) + require.Error(t, err) + }) + t.Run("should error on existing transaction marked as ADDED", func(t *testing.T) { + trs := []*abci.TxRecord{ + { + Action: abci.TxRecord_ADDED, + Tx: Tx([]byte{5, 4, 3, 2, 1}), + }, + { + Action: abci.TxRecord_ADDED, + 
Tx: Tx([]byte{6}), + }, + { + Action: abci.TxRecord_ADDED, + Tx: Tx([]byte{1, 2, 3, 4, 5}), + }, + } + txrSet := NewTxRecordSet(trs) + err := txrSet.Validate(100, []Tx{{0}, {1, 2, 3, 4, 5}}) + require.Error(t, err) + }) + t.Run("should error if any transaction marked as UNKNOWN", func(t *testing.T) { + trs := []*abci.TxRecord{ + { + Action: abci.TxRecord_UNKNOWN, + Tx: Tx([]byte{1, 2, 3, 4, 5}), + }, + } + txrSet := NewTxRecordSet(trs) + err := txrSet.Validate(100, []Tx{}) + require.Error(t, err) + }) + t.Run("TxRecordSet preserves order", func(t *testing.T) { + trs := []*abci.TxRecord{ + { + Action: abci.TxRecord_ADDED, + Tx: Tx([]byte{100}), + }, + { + Action: abci.TxRecord_ADDED, + Tx: Tx([]byte{99}), + }, + { + Action: abci.TxRecord_ADDED, + Tx: Tx([]byte{55}), + }, + { + Action: abci.TxRecord_ADDED, + Tx: Tx([]byte{12}), + }, + { + Action: abci.TxRecord_ADDED, + Tx: Tx([]byte{66}), + }, + { + Action: abci.TxRecord_ADDED, + Tx: Tx([]byte{9}), + }, + { + Action: abci.TxRecord_ADDED, + Tx: Tx([]byte{17}), + }, + } + txrSet := NewTxRecordSet(trs) + err := txrSet.Validate(100, []Tx{}) + require.NoError(t, err) + for i, tx := range txrSet.IncludedTxs() { + require.Equal(t, Tx(trs[i].Tx), tx) + } + }) +} + func TestValidTxProof(t *testing.T) { cases := []struct { txs Txs @@ -92,7 +261,7 @@ func TestValidTxProof(t *testing.T) { require.NoError(t, err) p2, err = TxProofFromProto(pb2) - if assert.Nil(t, err, "%d: %d: %+v", h, i, err) { + if assert.NoError(t, err, "%d: %d: %+v", h, i, err) { assert.Nil(t, p2.Validate(root), "%d: %d", h, i) } } @@ -150,3 +319,7 @@ func assertBadProof(t *testing.T, root []byte, bad []byte, good TxProof) { } } } + +func randInt(low, high int) int { + return rand.Intn(high-low) + low +} diff --git a/types/validation.go b/types/validation.go index 5423748ef6..0bdc6e1b92 100644 --- a/types/validation.go +++ b/types/validation.go @@ -6,15 +6,14 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/bls12381" "github.com/tendermint/tendermint/crypto/ed25519" - "github.com/tendermint/tendermint/crypto/tmhash" ) // ValidateHash returns an error if the hash is not empty, but its -// size != tmhash.Size. +// size != crypto.HashSize. 
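To illustrate the contract documented above (a hypothetical snippet, assuming it runs inside the types package and that crypto.HashSize is the 32-byte SHA-256 digest width):

```go
// demoValidateHash is a hypothetical usage of ValidateHash; not part of this diff.
func demoValidateHash() {
	fmt.Println(ValidateHash(nil))                          // <nil>: an empty hash is allowed
	fmt.Println(ValidateHash(crypto.Checksum([]byte("x")))) // <nil>: exactly crypto.HashSize bytes
	fmt.Println(ValidateHash([]byte{0x01, 0x02}))           // error: expected size to be 32 bytes, got 2 bytes
}
```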
func ValidateHash(h []byte) error { - if len(h) > 0 && len(h) != tmhash.Size { + if len(h) > 0 && len(h) != crypto.HashSize { return fmt.Errorf("expected size to be %d bytes, got %d bytes", - tmhash.Size, + crypto.HashSize, len(h), ) } diff --git a/types/validation_test.go b/types/validation_test.go index 3b573ed7cd..7395d0a200 100644 --- a/types/validation_test.go +++ b/types/validation_test.go @@ -27,7 +27,7 @@ func TestValidatorSet_VerifyCommit_All(t *testing.T) { chainID = "Lalande21185" ) - vote := examplePrecommit() + vote := examplePrecommit(t) vote.ValidatorProTxHash = proTxHash v := vote.ToProto() @@ -108,8 +108,11 @@ func TestValidatorSet_VerifyCommit_CheckThresholdSignatures(t *testing.T) { stateID = RandStateID().WithHeight(h - 1) ) - voteSet, valSet, vals := randVoteSet(h, 0, tmproto.PrecommitType, 4, stateID) - commit, err := MakeCommit(blockID, stateID, h, 0, voteSet, vals) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + voteSet, valSet, vals := randVoteSet(ctx, t, h, 0, tmproto.PrecommitType, 4, stateID) + commit, err := makeCommit(ctx, blockID, stateID, h, 0, voteSet, vals) require.NoError(t, err) // malleate threshold sigs signature diff --git a/types/validator.go b/types/validator.go index bb3a9019a0..426d0e0caf 100644 --- a/types/validator.go +++ b/types/validator.go @@ -2,6 +2,7 @@ package types import ( "bytes" + "encoding/json" "errors" "fmt" "strings" @@ -11,6 +12,7 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/bls12381" ce "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/internal/jsontypes" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) @@ -19,12 +21,50 @@ import ( // make sure to update that method if changes are made here // The ProTxHash is part of Dash additions required for BLS threshold signatures type Validator struct { - PubKey crypto.PubKey `json:"pub_key"` - VotingPower int64 `json:"voting_power"` - ProTxHash ProTxHash `json:"pro_tx_hash"` - NodeAddress ValidatorAddress `json:"address"` + ProTxHash ProTxHash + PubKey crypto.PubKey + VotingPower int64 + NodeAddress ValidatorAddress - ProposerPriority int64 `json:"proposer_priority"` + ProposerPriority int64 +} + +type validatorJSON struct { + PubKey json.RawMessage `json:"pub_key,omitempty"` + VotingPower int64 `json:"voting_power,string"` + ProTxHash ProTxHash `json:"pro_tx_hash"` + NodeAddress ValidatorAddress `json:"address"` + ProposerPriority int64 `json:"proposer_priority,string"` +} + +func (v Validator) MarshalJSON() ([]byte, error) { + val := validatorJSON{ + ProTxHash: v.ProTxHash, + VotingPower: v.VotingPower, + ProposerPriority: v.ProposerPriority, + } + if v.PubKey != nil { + pk, err := jsontypes.Marshal(v.PubKey) + if err != nil { + return nil, err + } + val.PubKey = pk + } + return json.Marshal(val) +} + +func (v *Validator) UnmarshalJSON(data []byte) error { + var val validatorJSON + if err := json.Unmarshal(data, &val); err != nil { + return err + } + if err := jsontypes.Unmarshal(val.PubKey, &v.PubKey); err != nil { + return err + } + v.ProTxHash = val.ProTxHash + v.VotingPower = val.VotingPower + v.ProposerPriority = val.ProposerPriority + return nil } func NewTestValidatorGeneratedFromProTxHash(proTxHash crypto.ProTxHash) *Validator { diff --git a/types/validator_address.go b/types/validator_address.go index 4fab1317cf..15ba9b5a03 100644 --- a/types/validator_address.go +++ b/types/validator_address.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "math" + 
"math/rand" "net" "net/url" "regexp" @@ -15,7 +16,6 @@ import ( var ( reSchemeIsHost = regexp.MustCompile(`^[^/:]+:\d+(/|$)`) - rng = tmrand.NewRand() ) // ValidatorAddress is a ValidatorAddress that does not require node ID to be set @@ -136,14 +136,14 @@ func (va ValidatorAddress) String() string { // NetAddress converts ValidatorAddress to a NetAddress object func (va ValidatorAddress) NetAddress() (*NetAddress, error) { - return NewNetAddressString(va.String()) + return ParseAddressString(va.String()) } // RandValidatorAddress generates a random validator address. Used in tests. // It will panic in (very unlikely) case of error. func RandValidatorAddress() ValidatorAddress { nodeID := tmrand.Bytes(20) - port := rng.Int()%math.MaxUint16 + 1 + port := rand.Int()%math.MaxUint16 + 1 // nolint addr, err := ParseValidatorAddress(fmt.Sprintf("tcp://%x@127.0.0.1:%d", nodeID, port)) if err != nil { panic(fmt.Sprintf("cannot generate random validator address: %s", err)) diff --git a/types/validator_set.go b/types/validator_set.go index 9cf2ba963b..ca2d11dae0 100644 --- a/types/validator_set.go +++ b/types/validator_set.go @@ -70,6 +70,12 @@ type ValidatorSet struct { totalVotingPower int64 } +type ValidatorSetUpdate struct { + Validators []*Validator + ThresholdPublicKey crypto.PubKey + QuorumHash crypto.QuorumHash +} + // NewValidatorSet initializes a ValidatorSet by copying over the values from // `valz`, a list of Validators. If valz is nil or empty, the new ValidatorSet // will have an empty list of Validators. @@ -358,7 +364,7 @@ func (vals *ValidatorSet) shiftByAvgProposerPriority() { // Makes a copy of the validator list. func validatorListCopy(valsList []*Validator) []*Validator { - if valsList == nil { + if len(valsList) == 0 { return nil } valsCopy := make([]*Validator, len(valsList)) @@ -370,6 +376,9 @@ func validatorListCopy(valsList []*Validator) []*Validator { // Copy each validator into a new ValidatorSet. func (vals *ValidatorSet) Copy() *ValidatorSet { + if vals == nil { + return nil + } return &ValidatorSet{ Validators: validatorListCopy(vals.Validators), Proposer: vals.Proposer, @@ -384,6 +393,9 @@ func (vals *ValidatorSet) Copy() *ValidatorSet { // HasProTxHash returns true if proTxHash given is in the validator set, false - // otherwise. func (vals *ValidatorSet) HasProTxHash(proTxHash crypto.ProTxHash) bool { + if len(proTxHash) == 0 { + return false + } for _, val := range vals.Validators { if bytes.Equal(val.ProTxHash, proTxHash) { return true diff --git a/types/validator_set_test.go b/types/validator_set_test.go index a4621a9f7f..dd76aa35af 100644 --- a/types/validator_set_test.go +++ b/types/validator_set_test.go @@ -13,7 +13,6 @@ import ( "testing/quick" "github.com/dashevo/dashd-go/btcjson" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -81,8 +80,11 @@ func TestValidatorSetBasic(t *testing.T) { } func TestValidatorSetValidateBasic(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + quorumHash := crypto.RandQuorumHash() - val, _ := randValidatorInQuorum(quorumHash) + val, _ := randValidatorInQuorum(ctx, t, quorumHash) badValNoPublicKey := &Validator{ProTxHash: val.ProTxHash} badValNoProTxHash := &Validator{PubKey: val.PubKey} @@ -257,9 +259,9 @@ func TestCopy(t *testing.T) { // Test that IncrementProposerPriority requires positive times. 
func TestIncrementProposerPriorityPositiveTimes(t *testing.T) { vset := NewValidatorSet([]*Validator{ - NewTestValidatorGeneratedFromProTxHash(crypto.Sha256([]byte("foo"))), - NewTestValidatorGeneratedFromProTxHash(crypto.Sha256([]byte("bar"))), - NewTestValidatorGeneratedFromProTxHash(crypto.Sha256([]byte("baz"))), + NewTestValidatorGeneratedFromProTxHash(crypto.Checksum([]byte("foo"))), + NewTestValidatorGeneratedFromProTxHash(crypto.Checksum([]byte("bar"))), + NewTestValidatorGeneratedFromProTxHash(crypto.Checksum([]byte("baz"))), }, bls12381.GenPrivKey().PubKey(), btcjson.LLMQType_5_60, crypto.RandQuorumHash(), true) assert.Panics(t, func() { vset.IncrementProposerPriority(-1) }) @@ -275,9 +277,7 @@ func BenchmarkValidatorSetCopy(b *testing.B) { pubKey := privKey.PubKey() val := NewValidatorDefaultVotingPower(pubKey, crypto.ProTxHash{}) err := vset.UpdateWithChangeSet([]*Validator{val}, nil, crypto.RandQuorumHash()) - if err != nil { - panic("Failed to add validator") - } + require.NoError(b, err) } b.StartTimer() @@ -289,9 +289,9 @@ func BenchmarkValidatorSetCopy(b *testing.B) { //------------------------------------------------------------------- func TestProposerSelection1(t *testing.T) { - fooProTxHash := crypto.ProTxHash(crypto.Sha256([]byte("foo"))) - barProTxHash := crypto.ProTxHash(crypto.Sha256([]byte("bar"))) - bazProTxHash := crypto.ProTxHash(crypto.Sha256([]byte("baz"))) + fooProTxHash := crypto.ProTxHash(crypto.Checksum([]byte("foo"))) + barProTxHash := crypto.ProTxHash(crypto.Checksum([]byte("bar"))) + bazProTxHash := crypto.ProTxHash(crypto.Checksum([]byte("baz"))) vset := NewValidatorSet([]*Validator{ NewTestValidatorGeneratedFromProTxHash(fooProTxHash), NewTestValidatorGeneratedFromProTxHash(barProTxHash), @@ -348,10 +348,10 @@ func TestProposerSelection2(t *testing.T) { func TestProposerSelection3(t *testing.T) { proTxHashes := make([]crypto.ProTxHash, 4) - proTxHashes[0] = crypto.Sha256([]byte("avalidator_address12")) - proTxHashes[1] = crypto.Sha256([]byte("bvalidator_address12")) - proTxHashes[2] = crypto.Sha256([]byte("cvalidator_address12")) - proTxHashes[3] = crypto.Sha256([]byte("dvalidator_address12")) + proTxHashes[0] = crypto.Checksum([]byte("avalidator_address12")) + proTxHashes[1] = crypto.Checksum([]byte("bvalidator_address12")) + proTxHashes[2] = crypto.Checksum([]byte("cvalidator_address12")) + proTxHashes[3] = crypto.Checksum([]byte("dvalidator_address12")) vset, _ := GenerateValidatorSet(NewValSetParam(proTxHashes)) @@ -376,8 +376,8 @@ func TestProposerSelection3(t *testing.T) { } // serialize, deserialize, check proposer - b := vset.toBytes() - vset = vset.fromBytes(b) + b := vset.toBytes(t) + vset = vset.fromBytes(t, b) computed := vset.GetProposer() // findGetProposer() if i != 0 { @@ -422,47 +422,36 @@ func randModuloValidator(totalVotingPower int64) *Validator { return val } -func randValidatorInQuorum(quorumHash crypto.QuorumHash) (*Validator, PrivValidator) { +func randValidatorInQuorum(ctx context.Context, t *testing.T, quorumHash crypto.QuorumHash) (*Validator, PrivValidator) { privVal := NewMockPVForQuorum(quorumHash) - proTxHash, err := privVal.GetProTxHash(context.Background()) + proTxHash, err := privVal.GetProTxHash(ctx) if err != nil { panic(fmt.Errorf("could not retrieve proTxHash %w", err)) } - pubKey, err := privVal.GetPubKey(context.Background(), quorumHash) - if err != nil { - panic(fmt.Errorf("could not retrieve pubkey %w", err)) - } + pubKey, err := privVal.GetPubKey(ctx, quorumHash) + require.NoError(t, err) address := 
RandValidatorAddress().String() val := NewValidator(pubKey, DefaultDashVotingPower, proTxHash, address) return val, privVal } -func (vals *ValidatorSet) toBytes() []byte { +func (vals *ValidatorSet) toBytes(t *testing.T) []byte { pbvs, err := vals.ToProto() - if err != nil { - panic(err) - } + require.NoError(t, err) bz, err := pbvs.Marshal() - if err != nil { - panic(err) - } + require.NoError(t, err) return bz } -func (vals *ValidatorSet) fromBytes(b []byte) *ValidatorSet { +func (vals *ValidatorSet) fromBytes(t *testing.T, b []byte) *ValidatorSet { pbvs := new(tmproto.ValidatorSet) err := pbvs.Unmarshal(b) - if err != nil { - // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED - panic(err) - } + require.NoError(t, err) vs, err := ValidatorSetFromProto(pbvs) - if err != nil { - panic(err) - } + require.NoError(t, err) return vs } @@ -580,14 +569,14 @@ func TestEmptySet(t *testing.T) { valSet.GetProposer() // Add to empty set - proTxHashes := []crypto.ProTxHash{crypto.Sha256([]byte("v1")), crypto.Sha256([]byte("v2"))} + proTxHashes := []crypto.ProTxHash{crypto.Checksum([]byte("v1")), crypto.Checksum([]byte("v2"))} valSetAdd, _ := GenerateValidatorSet(NewValSetParam(proTxHashes)) assert.NoError(t, valSet.UpdateWithChangeSet(valSetAdd.Validators, valSetAdd.ThresholdPublicKey, crypto.RandQuorumHash())) verifyValidatorSet(t, valSet) // Delete all validators from set - v1 := NewTestRemoveValidatorGeneratedFromProTxHash(crypto.Sha256([]byte("v1"))) - v2 := NewTestRemoveValidatorGeneratedFromProTxHash(crypto.Sha256([]byte("v1"))) + v1 := NewTestRemoveValidatorGeneratedFromProTxHash(crypto.Checksum([]byte("v1"))) + v2 := NewTestRemoveValidatorGeneratedFromProTxHash(crypto.Checksum([]byte("v1"))) delList := []*Validator{v1, v2} assert.Error(t, valSet.UpdateWithChangeSet(delList, bls12381.PubKey{}, crypto.RandQuorumHash())) @@ -598,33 +587,33 @@ func TestEmptySet(t *testing.T) { func TestUpdatesForNewValidatorSet(t *testing.T) { - addresses12 := []crypto.Address{crypto.Sha256([]byte("v1")), crypto.Sha256([]byte("v2"))} + addresses12 := []crypto.Address{crypto.Checksum([]byte("v1")), crypto.Checksum([]byte("v2"))} valSet, _ := GenerateValidatorSet(NewValSetParam(addresses12)) verifyValidatorSet(t, valSet) // Verify duplicates are caught in NewValidatorSet() and it panics - v111 := NewTestValidatorGeneratedFromProTxHash(crypto.Sha256([]byte("v1"))) - v112 := NewTestValidatorGeneratedFromProTxHash(crypto.Sha256([]byte("v1"))) - v113 := NewTestValidatorGeneratedFromProTxHash(crypto.Sha256([]byte("v1"))) + v111 := NewTestValidatorGeneratedFromProTxHash(crypto.Checksum([]byte("v1"))) + v112 := NewTestValidatorGeneratedFromProTxHash(crypto.Checksum([]byte("v1"))) + v113 := NewTestValidatorGeneratedFromProTxHash(crypto.Checksum([]byte("v1"))) valList := []*Validator{v111, v112, v113} assert.Panics(t, func() { NewValidatorSet(valList, bls12381.PubKey{}, btcjson.LLMQType_5_60, crypto.QuorumHash{}, true) }) // Verify set including validator with voting power 0 cannot be created - v1 := NewTestRemoveValidatorGeneratedFromProTxHash(crypto.Sha256([]byte("v1"))) - v2 := NewTestValidatorGeneratedFromProTxHash(crypto.Sha256([]byte("v2"))) - v3 := NewTestValidatorGeneratedFromProTxHash(crypto.Sha256([]byte("v3"))) + v1 := NewTestRemoveValidatorGeneratedFromProTxHash(crypto.Checksum([]byte("v1"))) + v2 := NewTestValidatorGeneratedFromProTxHash(crypto.Checksum([]byte("v2"))) + v3 := NewTestValidatorGeneratedFromProTxHash(crypto.Checksum([]byte("v3"))) valList = []*Validator{v1, v2, v3} assert.Panics(t, func() { 
NewValidatorSet(valList, bls12381.PubKey{}, btcjson.LLMQType_5_60, crypto.QuorumHash{}, true) }) // Verify set including validator with negative voting power cannot be created - v1 = NewTestValidatorGeneratedFromProTxHash(crypto.Sha256([]byte("v1"))) + v1 = NewTestValidatorGeneratedFromProTxHash(crypto.Checksum([]byte("v1"))) v2 = &Validator{ VotingPower: -20, ProposerPriority: 0, - ProTxHash: crypto.Sha256([]byte("v2")), + ProTxHash: crypto.Checksum([]byte("v2")), } - v3 = NewTestValidatorGeneratedFromProTxHash(crypto.Sha256([]byte("v3"))) + v3 = NewTestValidatorGeneratedFromProTxHash(crypto.Checksum([]byte("v3"))) valList = []*Validator{v1, v2, v3} assert.Panics(t, func() { NewValidatorSet(valList, bls12381.PubKey{}, btcjson.LLMQType_5_60, crypto.QuorumHash{}, true) }) @@ -655,7 +644,7 @@ func permutation(valList []testVal) []testVal { func createNewValidatorList(testValList []testVal) []*Validator { valList := make([]*Validator, 0, len(testValList)) for _, val := range testValList { - valList = append(valList, NewTestValidatorGeneratedFromProTxHash(crypto.Sha256([]byte(val.name)))) + valList = append(valList, NewTestValidatorGeneratedFromProTxHash(crypto.Checksum([]byte(val.name)))) } sort.Sort(ValidatorsByProTxHashes(valList)) return valList @@ -665,7 +654,7 @@ func createNewValidatorSet(testValList []testVal) *ValidatorSet { opts := make([]ValSetParam, len(testValList)) for i, val := range testValList { opts[i] = ValSetParam{ - ProTxHash: crypto.Sha256([]byte(val.name)), + ProTxHash: crypto.Checksum([]byte(val.name)), VotingPower: val.power, } } @@ -680,18 +669,18 @@ func addValidatorsToValidatorSet(vals *ValidatorSet, testValList []testVal) ([]* combinedProTxHashes := make([]ProTxHash, 0, len(testValList)+len(vals.Validators)) for _, val := range testValList { if val.power != 0 { - valProTxHash := crypto.Sha256([]byte(val.name)) + valProTxHash := crypto.Checksum([]byte(val.name)) _, value := vals.GetByProTxHash(valProTxHash) if value == nil { addedProTxHashes = append(addedProTxHashes, valProTxHash) } } else { - valProTxHash := crypto.Sha256([]byte(val.name)) + valProTxHash := crypto.Checksum([]byte(val.name)) _, value := vals.GetByProTxHash(valProTxHash) if value != nil { removedProTxHashes = append(removedProTxHashes, valProTxHash) } - removedVals = append(removedVals, NewTestRemoveValidatorGeneratedFromProTxHash(crypto.Sha256([]byte(val.name)))) + removedVals = append(removedVals, NewTestRemoveValidatorGeneratedFromProTxHash(crypto.Checksum([]byte(val.name)))) } } originalProTxHashes := vals.GetProTxHashes() @@ -764,7 +753,7 @@ func toTestProTxHashValList(valList []*Validator) []testProTxHashVal { func switchToTestProTxHashValList(valList []testVal) []testProTxHashVal { testList := make([]testProTxHashVal, len(valList)) for i, val := range valList { - testList[i].proTxHash = crypto.Sha256([]byte(val.name)) + testList[i].proTxHash = crypto.Checksum([]byte(val.name)) testList[i].power = val.power } return testList @@ -1098,7 +1087,7 @@ type testVSetCfg struct { func randTestVSetCfg(t *testing.T, nBase, nAddMax int) testVSetCfg { if nBase <= 0 || nAddMax < 0 { - panic(fmt.Sprintf("bad parameters %v %v", nBase, nAddMax)) + t.Fatalf("bad parameters %v %v", nBase, nAddMax) } var nOld, nDel, nChanged, nAdd int @@ -1367,7 +1356,7 @@ func (tvals testValsByVotingPower) Len() int { // from the name by applying a single SHA256 func (tvals testValsByVotingPower) Less(i, j int) bool { if tvals[i].power == tvals[j].power { - return bytes.Compare(crypto.Sha256([]byte(tvals[i].name)), 
crypto.Sha256([]byte(tvals[j].name))) == -1 + return bytes.Compare(crypto.Checksum([]byte(tvals[i].name)), crypto.Checksum([]byte(tvals[j].name))) == -1 } return tvals[i].power > tvals[j].power } @@ -1387,7 +1376,7 @@ func BenchmarkUpdates(b *testing.B) { // Init with n validators proTxHashes0 := make([]crypto.ProTxHash, n) for j := 0; j < n; j++ { - proTxHashes0[j] = crypto.Sha256([]byte(fmt.Sprintf("v%d", j))) + proTxHashes0[j] = crypto.Checksum([]byte(fmt.Sprintf("v%d", j))) } valSet, _ := GenerateValidatorSet(NewValSetParam(proTxHashes0)) @@ -1396,7 +1385,7 @@ func BenchmarkUpdates(b *testing.B) { for j := 0; j < n+m; j++ { proTxHashes1[j] = []byte(fmt.Sprintf("v%d", j)) if j >= n { - newValList[j-n] = NewTestValidatorGeneratedFromProTxHash(crypto.Sha256([]byte(fmt.Sprintf("v%d", j)))) + newValList[j-n] = NewTestValidatorGeneratedFromProTxHash(crypto.Checksum([]byte(fmt.Sprintf("v%d", j)))) } } valSet2, _ := GenerateValidatorSet(NewValSetParam(proTxHashes1)) @@ -1411,6 +1400,9 @@ func BenchmarkUpdates(b *testing.B) { } func BenchmarkValidatorSet_VerifyCommit_Ed25519(b *testing.B) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for _, n := range []int{1, 8, 64, 1024} { n := n var ( @@ -1422,9 +1414,9 @@ func BenchmarkValidatorSet_VerifyCommit_Ed25519(b *testing.B) { b.ReportAllocs() // generate n validators stateID := RandStateID() - voteSet, valSet, vals := randVoteSet(h, 0, tmproto.PrecommitType, n, stateID) + voteSet, valSet, vals := randVoteSet(ctx, b, h, 0, tmproto.PrecommitType, n, stateID) // create a commit with n validators - commit, err := MakeCommit(blockID, stateID, h, 0, voteSet, vals) + commit, err := makeCommit(ctx, blockID, stateID, h, 0, voteSet, vals) require.NoError(b, err) for i := 0; i < b.N/n; i++ { diff --git a/types/validator_test.go b/types/validator_test.go index e9e26e92e7..fcb377b71a 100644 --- a/types/validator_test.go +++ b/types/validator_test.go @@ -4,14 +4,17 @@ import ( "context" "testing" - "github.com/tendermint/tendermint/crypto" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/crypto" ) func TestValidatorProtoBuf(t *testing.T) { - val, _ := randValidatorInQuorum(crypto.RandQuorumHash()) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + val, _ := randValidatorInQuorum(ctx, t, crypto.RandQuorumHash()) testCases := []struct { msg string v1 *Validator diff --git a/types/vote.go b/types/vote.go index 2b75b8c7a3..fbcba0c231 100644 --- a/types/vote.go +++ b/types/vote.go @@ -38,14 +38,17 @@ func MaxVoteBytesForKeyType(keyType crypto.KeyType) int64 { var ( ErrVoteUnexpectedStep = errors.New("unexpected step") ErrVoteInvalidValidatorIndex = errors.New("invalid validator index") + ErrVoteInvalidValidatorAddress = errors.New("invalid validator address") + ErrVoteInvalidSignature = errors.New("invalid signature") + ErrVoteInvalidBlockHash = errors.New("invalid block hash") + ErrVoteNonDeterministicSignature = errors.New("non-deterministic signature") + ErrVoteNil = errors.New("nil vote") + ErrVoteInvalidExtension = errors.New("invalid vote extension") ErrVoteInvalidValidatorProTxHash = errors.New("invalid validator pro_tx_hash") ErrVoteInvalidValidatorPubKeySize = errors.New("invalid validator public key size") ErrVoteInvalidBlockSignature = errors.New("invalid block signature") ErrVoteInvalidStateSignature = errors.New("invalid state signature") ErrVoteStateSignatureShouldBeNil = errors.New("state signature when voting for nil block") - 
ErrVoteInvalidBlockHash = errors.New("invalid block hash") - ErrVoteNonDeterministicSignature = errors.New("non-deterministic signature") - ErrVoteNil = errors.New("nil vote") ) type ErrVoteConflictingVotes struct { @@ -73,13 +76,39 @@ type ProTxHash = crypto.ProTxHash // consensus. type Vote struct { Type tmproto.SignedMsgType `json:"type"` - Height int64 `json:"height"` + Height int64 `json:"height,string"` Round int32 `json:"round"` // assume there will not be greater than 2^32 rounds BlockID BlockID `json:"block_id"` // zero if vote is nil. ValidatorProTxHash ProTxHash `json:"validator_pro_tx_hash"` ValidatorIndex int32 `json:"validator_index"` BlockSignature tmbytes.HexBytes `json:"block_signature"` StateSignature tmbytes.HexBytes `json:"state_signature"` + Extension []byte `json:"extension"` + ExtensionSignature []byte `json:"extension_signature"` +} + +// VoteFromProto attempts to convert the given serialization (Protobuf) type to +// our Vote domain type. No validation is performed on the resulting vote - +// this is left up to the caller to decide whether to call ValidateBasic or +// ValidateWithExtension. +func VoteFromProto(pv *tmproto.Vote) (*Vote, error) { + blockID, err := BlockIDFromProto(&pv.BlockID) + if err != nil { + return nil, err + } + + return &Vote{ + Type: pv.Type, + Height: pv.Height, + Round: pv.Round, + BlockID: *blockID, + ValidatorProTxHash: pv.ValidatorProTxHash, + ValidatorIndex: pv.ValidatorIndex, + BlockSignature: pv.BlockSignature, + StateSignature: pv.StateSignature, + Extension: pv.Extension, + ExtensionSignature: pv.ExtensionSignature, + }, nil } // VoteBlockSignBytes returns the proto-encoding of the canonicalized Vote, for @@ -100,22 +129,36 @@ func VoteBlockSignBytes(chainID string, vote *tmproto.Vote) []byte { return bz } -// VoteBlockSignID returns signID that should be signed for the block -func VoteBlockSignID(chainID string, vote *tmproto.Vote, quorumType btcjson.LLMQType, quorumHash []byte) []byte { - blockSignBytes := VoteBlockSignBytes(chainID, vote) +// VoteExtensionSignBytes returns the proto-encoding of the canonicalized vote +// extension for signing. Panics if the marshaling fails. +// +// Similar to VoteSignBytes, the encoded Protobuf message is varint +// length-prefixed for backwards-compatibility with the Amino encoding. 
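The block and extension sign IDs introduced in this file are derived through one shared pipeline, differing only in their request-ID prefix and sign bytes. A hypothetical in-package sketch, using VoteExtensionSignBytes (defined just below) and the unexported helpers voteHeightRoundRequestID and makeSignID (defined later in this file):

```go
// signIDForVote is illustrative only: it shows that the block and extension
// sign IDs differ solely in their prefix and sign bytes.
func signIDForVote(chainID string, v *tmproto.Vote, quorumType btcjson.LLMQType, quorumHash []byte, forExtension bool) []byte {
	prefix, signBytes := "dpbvote", VoteBlockSignBytes(chainID, v)
	if forExtension {
		prefix, signBytes = "dpevote", VoteExtensionSignBytes(chainID, v)
	}
	reqID := voteHeightRoundRequestID(prefix, v.Height, v.Round) // sha256(prefix || LE(height) || LE(round))
	return makeSignID(signBytes, reqID, quorumType, quorumHash)  // hashes signBytes, reverses inputs for crypto.SignID
}
```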
+func VoteExtensionSignBytes(chainID string, vote *tmproto.Vote) []byte { + pb := CanonicalizeVoteExtension(chainID, vote) + bz, err := protoio.MarshalDelimited(&pb) + if err != nil { + panic(err) + } - blockMessageHash := crypto.Sha256(blockSignBytes) + return bz +} - blockRequestID := VoteBlockRequestIDProto(vote) +// VoteExtensionSignID returns vote extension signature ID +func VoteExtensionSignID(chainID string, vote *tmproto.Vote, quorumType btcjson.LLMQType, quorumHash []byte) []byte { + reqID := voteHeightRoundRequestID("dpevote", vote.Height, vote.Round) + return makeSignID(VoteExtensionSignBytes(chainID, vote), reqID, quorumType, quorumHash) +} - blockSignID := crypto.SignID( - quorumType, - tmbytes.Reverse(quorumHash), - tmbytes.Reverse(blockRequestID), - tmbytes.Reverse(blockMessageHash), - ) +// VoteExtensionRequestID returns vote extension request ID +func VoteExtensionRequestID(vote *tmproto.Vote) []byte { + return voteHeightRoundRequestID("dpevote", vote.Height, vote.Round) +} - return blockSignID +// VoteBlockSignID returns signID that should be signed for the block +func VoteBlockSignID(chainID string, vote *tmproto.Vote, quorumType btcjson.LLMQType, quorumHash []byte) []byte { + reqID := voteHeightRoundRequestID("dpbvote", vote.Height, vote.Round) + return makeSignID(VoteBlockSignBytes(chainID, vote), reqID, quorumType, quorumHash) } func (vote *Vote) Copy() *Vote { @@ -133,7 +176,8 @@ func (vote *Vote) Copy() *Vote { // 6. type string // 7. first 6 bytes of block hash // 8. first 6 bytes of signature -// 9. timestamp +// 9. first 6 bytes of vote extension +// 10. timestamp func (vote *Vote) String() string { if vote == nil { return nilVoteStr @@ -149,7 +193,7 @@ func (vote *Vote) String() string { panic("Unknown vote type") } - return fmt.Sprintf("Vote{%v:%X %v/%02d/%v(%v) %X %X %X}", + return fmt.Sprintf("Vote{%v:%X %v/%02d/%v(%v) %X %X %X %X}", vote.ValidatorIndex, tmbytes.Fingerprint(vote.ValidatorProTxHash), vote.Height, @@ -159,38 +203,47 @@ func (vote *Vote) String() string { tmbytes.Fingerprint(vote.BlockID.Hash), tmbytes.Fingerprint(vote.BlockSignature), tmbytes.Fingerprint(vote.StateSignature), + tmbytes.Fingerprint(vote.Extension), ) } -func VoteBlockRequestID(vote *Vote) []byte { - requestIDMessage := []byte("dpbvote") - heightByteArray := make([]byte, 8) - binary.LittleEndian.PutUint64(heightByteArray, uint64(vote.Height)) - roundByteArray := make([]byte, 4) - binary.LittleEndian.PutUint32(roundByteArray, uint32(vote.Round)) - - requestIDMessage = append(requestIDMessage, heightByteArray...) - requestIDMessage = append(requestIDMessage, roundByteArray...) - - return crypto.Sha256(requestIDMessage) -} - -func VoteBlockRequestIDProto(vote *tmproto.Vote) []byte { - requestIDMessage := []byte("dpbvote") - heightByteArray := make([]byte, 8) - binary.LittleEndian.PutUint64(heightByteArray, uint64(vote.Height)) - roundByteArray := make([]byte, 4) - binary.LittleEndian.PutUint32(roundByteArray, uint32(vote.Round)) - - requestIDMessage = append(requestIDMessage, heightByteArray...) - requestIDMessage = append(requestIDMessage, roundByteArray...) - - return crypto.Sha256(requestIDMessage) +// VerifyWithExtension performs the same verification as Verify, but +// additionally checks whether the vote extension signature corresponds to the +// given chain ID and public key. We only verify vote extension signatures for +// precommits. 
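Before a vote is counted, VerifyWithExtension (below) re-derives these sign IDs and checks the signatures; upstream of that, callers decoding a wire vote pair VoteFromProto with the structural check that suits their context, since VoteFromProto no longer validates. A hypothetical sketch:

```go
// decodeVote is a hypothetical wrapper showing the intended division of labor;
// it is not part of this diff.
func decodeVote(pv *tmproto.Vote) (*Vote, error) {
	vote, err := VoteFromProto(pv)
	if err != nil {
		return nil, err
	}
	// Precommits are expected to carry signed extensions, so use the stricter check.
	if vote.Type == tmproto.PrecommitType {
		if err := vote.ValidateWithExtension(); err != nil {
			return nil, err
		}
		return vote, nil
	}
	return vote, vote.ValidateBasic()
}
```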
+func (vote *Vote) VerifyWithExtension(
+ chainID string,
+ quorumType btcjson.LLMQType,
+ quorumHash crypto.QuorumHash,
+ pubKey crypto.PubKey,
+ proTxHash ProTxHash,
+ stateID StateID,
+) ([]byte, []byte, error) {
+ v := vote.ToProto()
+ signID, stateSignID, err := vote.Verify(chainID, quorumType, quorumHash, pubKey, proTxHash, stateID)
+ if err != nil {
+ return nil, nil, err
+ }
+ // We only verify vote extension signatures for precommits.
+ if vote.Type == tmproto.PrecommitType {
+ extSignID := VoteExtensionSignID(chainID, v, quorumType, quorumHash)
+ // TODO: Remove extension signature nil check to enforce vote extension
+ // signing once we resolve https://github.com/tendermint/tendermint/issues/8272
+ if vote.ExtensionSignature != nil && !pubKey.VerifySignatureDigest(extSignID, vote.ExtensionSignature) {
+ return nil, nil, ErrVoteInvalidSignature
+ }
+ }
+ return signID, stateSignID, nil
+}
func (vote *Vote) Verify(
- chainID string, quorumType btcjson.LLMQType, quorumHash []byte,
- pubKey crypto.PubKey, proTxHash crypto.ProTxHash, stateID StateID) ([]byte, []byte, error) {
+ chainID string,
+ quorumType btcjson.LLMQType,
+ quorumHash []byte,
+ pubKey crypto.PubKey,
+ proTxHash crypto.ProTxHash,
+ stateID StateID,
+) ([]byte, []byte, error) {
if !bytes.Equal(proTxHash, vote.ValidatorProTxHash) { return nil, nil, ErrVoteInvalidValidatorProTxHash }
@@ -200,7 +253,7 @@ func (vote *Vote) Verify( v := vote.ToProto() voteBlockSignBytes := VoteBlockSignBytes(chainID, v)
- blockMessageHash := crypto.Sha256(voteBlockSignBytes)
+ blockMessageHash := crypto.Checksum(voteBlockSignBytes)
blockRequestID := VoteBlockRequestID(vote)
@@ -224,7 +277,7 @@ func (vote *Vote) Verify( // we must verify the stateID but only if the blockID isn't nil if vote.BlockID.Hash != nil { voteStateSignBytes := stateID.SignBytes(chainID)
- stateMessageHash := crypto.Sha256(voteStateSignBytes)
+ stateMessageHash := crypto.Checksum(voteStateSignBytes)
stateRequestID := stateID.SignRequestID()
@@ -247,7 +300,9 @@ func (vote *Vote) Verify( return signID, stateSignID, nil }
-// ValidateBasic performs basic validation.
+// ValidateBasic checks whether the vote is well-formed. It does not, however,
+// check vote extensions; to validate a vote together with its vote extensions,
+// use ValidateWithExtension.
func (vote *Vote) ValidateBasic() error { if !IsVoteTypeValid(vote.Type) { return errors.New("invalid Type") }
@@ -264,12 +319,12 @@ func (vote *Vote) ValidateBasic() error { // NOTE: Timestamp validation is subtle and handled elsewhere. if err := vote.BlockID.ValidateBasic(); err != nil {
- return fmt.Errorf("wrong BlockID: %v", err)
+ return fmt.Errorf("wrong BlockID: %w", err)
} // BlockID.ValidateBasic would not err if we for instance have an empty hash but a // non-empty PartsSetHeader:
- if !vote.BlockID.IsZero() && !vote.BlockID.IsComplete() {
+ if !vote.BlockID.IsNil() && !vote.BlockID.IsComplete() {
return fmt.Errorf("blockID must be either empty or complete, got: %v", vote.BlockID) }
@@ -299,6 +354,40 @@ func (vote *Vote) ValidateBasic() error { return fmt.Errorf("state signature is too big (max: %d)", SignatureSize) }
+ // We should only ever see vote extensions in precommits.
+ if vote.Type != tmproto.PrecommitType { + if len(vote.Extension) > 0 { + return errors.New("unexpected vote extension") + } + if len(vote.ExtensionSignature) > 0 { + return errors.New("unexpected vote extension signature") + } + } + + return nil +} + +// ValidateWithExtension performs the same validations as ValidateBasic, but +// additionally checks whether a vote extension signature is present. This +// function is used in places where vote extension signatures are expected. +func (vote *Vote) ValidateWithExtension() error { + if err := vote.ValidateBasic(); err != nil { + return err + } + + // We should always see vote extension signatures in precommits + if vote.Type == tmproto.PrecommitType { + // TODO(thane): Remove extension length check once + // https://github.com/tendermint/tendermint/issues/8272 is + // resolved. + if len(vote.Extension) > 0 && len(vote.ExtensionSignature) == 0 { + return errors.New("vote extension signature is missing") + } + if len(vote.ExtensionSignature) > SignatureSize { + return fmt.Errorf("vote extension signature is too big (max: %d)", SignatureSize) + } + } + return nil } @@ -318,11 +407,16 @@ func (vote *Vote) ToProto() *tmproto.Vote { ValidatorIndex: vote.ValidatorIndex, BlockSignature: vote.BlockSignature, StateSignature: vote.StateSignature, + Extension: vote.Extension, + ExtensionSignature: vote.ExtensionSignature, } } // MarshalZerologObject formats this object for logging purposes func (vote *Vote) MarshalZerologObject(e *zerolog.Event) { + if vote == nil { + return + } e.Str("vote", vote.String()) e.Int64("height", vote.Height) e.Int32("round", vote.Round) @@ -332,7 +426,7 @@ func (vote *Vote) MarshalZerologObject(e *zerolog.Event) { e.Str("state_signature", vote.StateSignature.ShortString()) e.Str("val_proTxHash", vote.ValidatorProTxHash.ShortString()) e.Int32("val_index", vote.ValidatorIndex) - e.Bool("nil", vote.BlockID.IsZero()) + e.Bool("nil", vote.BlockID.IsNil()) } func (vote *Vote) HasVoteMessage() *tmcons.HasVote { @@ -344,27 +438,31 @@ func (vote *Vote) HasVoteMessage() *tmcons.HasVote { } } -// FromProto converts a proto generetad type to a handwritten type -// return type, nil if everything converts safely, otherwise nil, error -func VoteFromProto(pv *tmproto.Vote) (*Vote, error) { - if pv == nil { - return nil, errors.New("nil vote") - } +func VoteBlockRequestID(vote *Vote) []byte { + return voteHeightRoundRequestID("dpbvote", vote.Height, vote.Round) +} - blockID, err := BlockIDFromProto(&pv.BlockID) - if err != nil { - return nil, err - } +func VoteBlockRequestIDProto(vote *tmproto.Vote) []byte { + return voteHeightRoundRequestID("dpbvote", vote.Height, vote.Round) +} - vote := new(Vote) - vote.Type = pv.Type - vote.Height = pv.Height - vote.Round = pv.Round - vote.BlockID = *blockID - vote.ValidatorProTxHash = pv.ValidatorProTxHash - vote.ValidatorIndex = pv.ValidatorIndex - vote.BlockSignature = pv.BlockSignature - vote.StateSignature = pv.StateSignature - - return vote, vote.ValidateBasic() +func voteHeightRoundRequestID(prefix string, height int64, round int32) []byte { + reqID := []byte(prefix) + heightBytes := make([]byte, 8) + binary.LittleEndian.PutUint64(heightBytes, uint64(height)) + roundBytes := make([]byte, 4) + binary.LittleEndian.PutUint32(roundBytes, uint32(round)) + reqID = append(reqID, heightBytes...) + reqID = append(reqID, roundBytes...) 
+ return crypto.Checksum(reqID) +} + +func makeSignID(signBytes, reqID []byte, quorumType btcjson.LLMQType, quorumHash []byte) []byte { + msgHash := crypto.Checksum(signBytes) + return crypto.SignID( + quorumType, + tmbytes.Reverse(quorumHash), + tmbytes.Reverse(reqID), + tmbytes.Reverse(msgHash), + ) } diff --git a/types/vote_set.go b/types/vote_set.go index 099b5f6021..0b93dc4bc2 100644 --- a/types/vote_set.go +++ b/types/vote_set.go @@ -2,14 +2,13 @@ package types import ( "bytes" + "encoding/json" "fmt" - "runtime/debug" "strings" + "sync" "github.com/tendermint/tendermint/crypto/bls12381" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/bits" - tmjson "github.com/tendermint/tendermint/libs/json" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) @@ -20,12 +19,6 @@ const ( MaxVotesCount = 10000 ) -// UNSTABLE -// XXX: duplicate of types.NodeID to avoid dependence between packages. -// Perhaps we can have a minimal types package containing this (and other things?) -// that both `types` and `p2p` import ? -type P2PID string - /* VoteSet helps collect signatures from validators at each height+round for a predefined vote type. @@ -68,15 +61,17 @@ type VoteSet struct { signedMsgType tmproto.SignedMsgType valSet *ValidatorSet - mtx tmsync.Mutex - votesBitArray *bits.BitArray - votes []*Vote // Primary votes to share - sum int64 // Sum of voting power for seen votes, discounting conflicts - maj23 *BlockID // First 2/3 majority seen - thresholdBlockSig []byte // If a 2/3 majority is seen, recover the block sig - thresholdStateSig []byte // If a 2/3 majority is seen, recover the state sig - votesByBlock map[string]*blockVotes // string(blockHash|blockParts) -> blockVotes - peerMaj23s map[P2PID]BlockID // Maj23 for each peer + mtx sync.Mutex + votesBitArray *bits.BitArray + votes []*Vote // Primary votes to share + sum int64 // Sum of voting power for seen votes, discounting conflicts + maj23 *BlockID // First 2/3 majority seen + votesByBlock map[string]*blockVotes // string(blockHash|blockParts) -> blockVotes + peerMaj23s map[string]BlockID // Maj23 for each peer + + // dash fields + thresholdBlockSig []byte // If a 2/3 majority is seen, recover the block sig + thresholdStateSig []byte // If a 2/3 majority is seen, recover the state sig } // NewVoteSet constructs a new VoteSet struct used to accumulate votes for given height/round. @@ -101,7 +96,7 @@ func NewVoteSet(chainID string, height int64, round int32, sum: 0, maj23: nil, votesByBlock: make(map[string]*blockVotes, valSet.Size()), - peerMaj23s: make(map[P2PID]BlockID), + peerMaj23s: make(map[string]BlockID), } } @@ -215,8 +210,14 @@ func (voteSet *VoteSet) addVote(vote *Vote) (added bool, err error) { // Check signature. - signID, stateSignID, err := vote.Verify( - voteSet.chainID, voteSet.valSet.QuorumType, voteSet.valSet.QuorumHash, val.PubKey, val.ProTxHash, voteSet.stateID) + signID, stateSignID, err := vote.VerifyWithExtension( + voteSet.chainID, + voteSet.valSet.QuorumType, + voteSet.valSet.QuorumHash, + val.PubKey, + val.ProTxHash, + voteSet.stateID, + ) if err != nil { return false, ErrInvalidVoteSignature( fmt.Errorf("failed to verify vote with ChainID %s and PubKey %s ProTxHash %s: %w", @@ -226,9 +227,6 @@ func (voteSet *VoteSet) addVote(vote *Vote) (added bool, err error) { // Add vote and get conflicting vote if any. 
added, conflicting := voteSet.addVerifiedVote(vote, blockKey, val.VotingPower, signID, stateSignID) if conflicting != nil { - fmt.Printf("-----\n") - debug.PrintStack() - fmt.Printf("-----\n") return added, NewConflictingVoteError(conflicting, vote) } if !added { @@ -248,6 +246,13 @@ func (voteSet *VoteSet) getVote(valIndex int32, blockKey string) (vote *Vote, ok return nil, false } +func (voteSet *VoteSet) GetVotes() []*Vote { + if voteSet == nil { + return nil + } + return voteSet.votes +} + // Assumes signature is valid. // If conflicting vote exists, returns it. func (voteSet *VoteSet) addVerifiedVote( @@ -402,7 +407,7 @@ func (voteSet *VoteSet) recoverThresholdSigs(blockVotes *blockVotes) error { // this can cause memory issues. // TODO: implement ability to remove peers too // NOTE: VoteSet must not be nil -func (voteSet *VoteSet) SetPeerMaj23(peerID P2PID, blockID BlockID) error { +func (voteSet *VoteSet) SetPeerMaj23(peerID string, blockID BlockID) error { if voteSet == nil { panic("SetPeerMaj23() on nil VoteSet") } @@ -609,7 +614,7 @@ func (voteSet *VoteSet) StringIndented(indent string) string { func (voteSet *VoteSet) MarshalJSON() ([]byte, error) { voteSet.mtx.Lock() defer voteSet.mtx.Unlock() - return tmjson.Marshal(VoteSetJSON{ + return json.Marshal(VoteSetJSON{ voteSet.voteStrings(), voteSet.bitArrayString(), voteSet.peerMaj23s, @@ -620,9 +625,9 @@ func (voteSet *VoteSet) MarshalJSON() ([]byte, error) { // NOTE: insufficient for unmarshaling from (compressed votes) // TODO: make the peerMaj23s nicer to read (eg just the block hash) type VoteSetJSON struct { - Votes []string `json:"votes"` - VotesBitArray string `json:"votes_bit_array"` - PeerMaj23s map[P2PID]BlockID `json:"peer_maj_23s"` + Votes []string `json:"votes"` + VotesBitArray string `json:"votes_bit_array"` + PeerMaj23s map[string]BlockID `json:"peer_maj_23s"` } // Return the bit-array of votes including diff --git a/types/vote_set_test.go b/types/vote_set_test.go index 6cee4fc05a..1615f8fb9b 100644 --- a/types/vote_set_test.go +++ b/types/vote_set_test.go @@ -19,18 +19,21 @@ import ( ) func TestVoteSet_AddVote_Good(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + height, round := int64(1), int32(0) - voteSet, _, privValidators := randVoteSet(height, round, tmproto.PrevoteType, 10, + voteSet, _, privValidators := randVoteSet(ctx, t, height, round, tmproto.PrevoteType, 10, RandStateID().WithHeight(height-1)) val0 := privValidators[0] - val0ProTxHash, err := val0.GetProTxHash(context.Background()) + val0ProTxHash, err := val0.GetProTxHash(ctx) require.NoError(t, err) assert.Nil(t, voteSet.GetByProTxHash(val0ProTxHash)) assert.False(t, voteSet.BitArray().GetIndex(0)) blockID, ok := voteSet.TwoThirdsMajority() - assert.False(t, ok || !blockID.IsZero(), "there should be no 2/3 majority") + assert.False(t, ok || !blockID.IsNil(), "there should be no 2/3 majority") vote := &Vote{ ValidatorProTxHash: val0ProTxHash, @@ -40,18 +43,21 @@ func TestVoteSet_AddVote_Good(t *testing.T) { Type: tmproto.PrevoteType, BlockID: BlockID{nil, PartSetHeader{}}, } - _, err = signAddVote(val0, vote, voteSet) + _, err = signAddVote(ctx, val0, vote, voteSet) require.NoError(t, err) assert.NotNil(t, voteSet.GetByProTxHash(val0ProTxHash)) assert.True(t, voteSet.BitArray().GetIndex(0)) blockID, ok = voteSet.TwoThirdsMajority() - assert.False(t, ok || !blockID.IsZero(), "there should be no 2/3 majority") + assert.False(t, ok || !blockID.IsNil(), "there should be no 2/3 majority") } func 
TestVoteSet_AddVote_Bad(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + height, round := int64(1), int32(0) - voteSet, _, privValidators := randVoteSet(height, round, tmproto.PrevoteType, 10, + voteSet, _, privValidators := randVoteSet(ctx, t, height, round, tmproto.PrevoteType, 10, RandStateID().WithHeight(height-1)) voteProto := &Vote{ @@ -65,10 +71,10 @@ func TestVoteSet_AddVote_Bad(t *testing.T) { // val0 votes for nil. { - proTxHash, err := privValidators[0].GetProTxHash(context.Background()) + proTxHash, err := privValidators[0].GetProTxHash(ctx) require.NoError(t, err) vote := withValidator(voteProto, proTxHash, 0) - added, err := signAddVote(privValidators[0], vote, voteSet) + added, err := signAddVote(ctx, privValidators[0], vote, voteSet) if !added || err != nil { t.Errorf("expected VoteSet.Add to succeed") } @@ -76,10 +82,10 @@ func TestVoteSet_AddVote_Bad(t *testing.T) { // val0 votes again for some block. { - proTxHash, err := privValidators[0].GetProTxHash(context.Background()) + proTxHash, err := privValidators[0].GetProTxHash(ctx) require.NoError(t, err) vote := withValidator(voteProto, proTxHash, 0) - added, err := signAddVote(privValidators[0], withBlockHash(vote, tmrand.Bytes(32)), voteSet) + added, err := signAddVote(ctx, privValidators[0], withBlockHash(vote, tmrand.Bytes(32)), voteSet) if added || err == nil { t.Errorf("expected VoteSet.Add to fail, conflicting vote.") } @@ -87,10 +93,10 @@ func TestVoteSet_AddVote_Bad(t *testing.T) { // val1 votes on another height { - proTxHash, err := privValidators[1].GetProTxHash(context.Background()) + proTxHash, err := privValidators[1].GetProTxHash(ctx) require.NoError(t, err) vote := withValidator(voteProto, proTxHash, 1) - added, err := signAddVote(privValidators[1], withHeight(vote, height+1), voteSet) + added, err := signAddVote(ctx, privValidators[1], withHeight(vote, height+1), voteSet) if added || err == nil { t.Errorf("expected VoteSet.Add to fail, wrong height") } @@ -98,10 +104,10 @@ func TestVoteSet_AddVote_Bad(t *testing.T) { // val2 votes on another round { - proTxHash, err := privValidators[2].GetProTxHash(context.Background()) + proTxHash, err := privValidators[2].GetProTxHash(ctx) require.NoError(t, err) vote := withValidator(voteProto, proTxHash, 2) - added, err := signAddVote(privValidators[2], withRound(vote, round+1), voteSet) + added, err := signAddVote(ctx, privValidators[2], withRound(vote, round+1), voteSet) if added || err == nil { t.Errorf("expected VoteSet.Add to fail, wrong round") } @@ -109,18 +115,22 @@ func TestVoteSet_AddVote_Bad(t *testing.T) { // val3 votes of another type. 
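+ // (A VoteSet is bound to a single signedMsgType, prevotes here, so val3's precommit must be rejected.)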
{ - proTxHash, err := privValidators[3].GetProTxHash(context.Background()) + proTxHash, err := privValidators[3].GetProTxHash(ctx) require.NoError(t, err) vote := withValidator(voteProto, proTxHash, 3) - added, err := signAddVote(privValidators[3], withType(vote, byte(tmproto.PrecommitType)), voteSet) + added, err := signAddVote(ctx, privValidators[3], withType(vote, byte(tmproto.PrecommitType)), voteSet) if added || err == nil { t.Errorf("expected VoteSet.Add to fail, wrong type") } } + } // TestVoteSet_AddVote_StateID checks if state signature is verified correctly when adding votes to voteSet func TestVoteSet_AddVote_StateID(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + height, round := int64(10), int32(0) randStateID1 := RandStateID().WithHeight(height - 1) @@ -140,21 +150,21 @@ func TestVoteSet_AddVote_StateID(t *testing.T) { //nolint:scopelint for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - voteSet, _, privValidators := randVoteSet(height, round, tmproto.PrevoteType, 10, + voteSet, _, privValidators := randVoteSet(ctx, t, height, round, tmproto.PrevoteType, 10, tc.voteSetStateID) val0 := privValidators[0] - val0ProTxHash, err := val0.GetProTxHash(context.Background()) + val0ProTxHash, err := val0.GetProTxHash(ctx) require.NoError(t, err) val1 := privValidators[1] - val1ProTxHash, err := val1.GetProTxHash(context.Background()) + val1ProTxHash, err := val1.GetProTxHash(ctx) require.NoError(t, err) assert.Nil(t, voteSet.GetByProTxHash(val0ProTxHash)) assert.False(t, voteSet.BitArray().GetIndex(0)) majorityBlockID, ok := voteSet.TwoThirdsMajority() - assert.False(t, ok || !majorityBlockID.IsZero(), "there should be no 2/3 majority") + assert.False(t, ok || !majorityBlockID.IsNil(), "there should be no 2/3 majority") blockID := randBlockID() vote1 := &Vote{ ValidatorProTxHash: val0ProTxHash, @@ -164,7 +174,7 @@ func TestVoteSet_AddVote_StateID(t *testing.T) { Type: tmproto.PrevoteType, BlockID: blockID, } - _, err = signAddVote(val0, vote1, voteSet) + _, err = signAddVote(ctx, val0, vote1, voteSet) require.NoError(t, err) vote2 := &Vote{ @@ -175,7 +185,7 @@ func TestVoteSet_AddVote_StateID(t *testing.T) { Type: tmproto.PrevoteType, BlockID: blockID, } - _, err = signAddVoteForStateID(val1, vote2, voteSet, tc.wrongStateID) + _, err = signAddVoteForStateID(ctx, val1, vote2, voteSet, tc.wrongStateID) if tc.shouldFail { require.Error(t, err) assert.Contains(t, err.Error(), "invalid state signature") @@ -186,14 +196,17 @@ func TestVoteSet_AddVote_StateID(t *testing.T) { assert.NotNil(t, voteSet.GetByProTxHash(val0ProTxHash)) assert.True(t, voteSet.BitArray().GetIndex(0)) majorityBlockID, ok = voteSet.TwoThirdsMajority() - assert.False(t, ok || !majorityBlockID.IsZero(), "there should be no 2/3 majority") + assert.False(t, ok || !majorityBlockID.IsNil(), "there should be no 2/3 majority") }) } } func TestVoteSet_2_3Majority(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + height, round := int64(1), int32(0) - voteSet, _, privValidators := randVoteSet(height, round, tmproto.PrevoteType, 10, + voteSet, _, privValidators := randVoteSet(ctx, t, height, round, tmproto.PrevoteType, 10, RandStateID().WithHeight(height-1)) voteProto := &Vote{ @@ -206,41 +219,44 @@ func TestVoteSet_2_3Majority(t *testing.T) { } // 6 out of 10 voted for nil. 
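+ // With 10 validators of equal weight, more than 2/3 of the voting power means at least 7 matching votes, so the 6 nil votes below leave the set one vote short of a majority.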
for i := int32(0); i < 6; i++ { - proTxHash, err := privValidators[i].GetProTxHash(context.Background()) + proTxHash, err := privValidators[i].GetProTxHash(ctx) require.NoError(t, err) vote := withValidator(voteProto, proTxHash, i) - _, err = signAddVote(privValidators[i], vote, voteSet) + _, err = signAddVote(ctx, privValidators[i], vote, voteSet) require.NoError(t, err) } blockID, ok := voteSet.TwoThirdsMajority() - assert.False(t, ok || !blockID.IsZero(), "there should be no 2/3 majority") + assert.False(t, ok || !blockID.IsNil(), "there should be no 2/3 majority") // 7th validator voted for some blockhash { - proTxHash, err := privValidators[6].GetProTxHash(context.Background()) + proTxHash, err := privValidators[6].GetProTxHash(ctx) require.NoError(t, err) vote := withValidator(voteProto, proTxHash, 6) - _, err = signAddVote(privValidators[6], withBlockHash(vote, tmrand.Bytes(32)), voteSet) + _, err = signAddVote(ctx, privValidators[6], withBlockHash(vote, tmrand.Bytes(32)), voteSet) require.NoError(t, err) blockID, ok = voteSet.TwoThirdsMajority() - assert.False(t, ok || !blockID.IsZero(), "there should be no 2/3 majority") + assert.False(t, ok || !blockID.IsNil(), "there should be no 2/3 majority") } // 8th validator voted for nil. { - proTxHash, err := privValidators[7].GetProTxHash(context.Background()) + proTxHash, err := privValidators[7].GetProTxHash(ctx) require.NoError(t, err) vote := withValidator(voteProto, proTxHash, 7) - _, err = signAddVote(privValidators[7], vote, voteSet) + _, err = signAddVote(ctx, privValidators[7], vote, voteSet) require.NoError(t, err) blockID, ok = voteSet.TwoThirdsMajority() - assert.True(t, ok || blockID.IsZero(), "there should be 2/3 majority for nil") + assert.True(t, ok || blockID.IsNil(), "there should be 2/3 majority for nil") } } func TestVoteSet_2_3MajorityRedux(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + height, round := int64(1), int32(0) - voteSet, _, privValidators := randVoteSet(height, round, tmproto.PrevoteType, 100, + voteSet, _, privValidators := randVoteSet(ctx, t, height, round, tmproto.PrevoteType, 100, RandStateID().WithHeight(height-1)) blockHash := crypto.CRandBytes(32) @@ -258,72 +274,72 @@ func TestVoteSet_2_3MajorityRedux(t *testing.T) { // 66 out of 100 voted for the block.
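+ // (A 2/3 majority of 100 equal-weight validators needs at least 67 matching votes; the mismatched and nil votes from validators 67-70 below do not count toward this block, so the majority only appears once the 71st validator votes for it.)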
for i := int32(0); i < 66; i++ { - proTxHash, err := privValidators[i].GetProTxHash(context.Background()) + proTxHash, err := privValidators[i].GetProTxHash(ctx) require.NoError(t, err) vote := withValidator(voteProto, proTxHash, i) - _, err = signAddVote(privValidators[i], vote, voteSet) + _, err = signAddVote(ctx, privValidators[i], vote, voteSet) require.NoError(t, err) } blockID, ok := voteSet.TwoThirdsMajority() - assert.False(t, ok || !blockID.IsZero(), + assert.False(t, ok || !blockID.IsNil(), "there should be no 2/3 majority") // 67th validator voted for nil { - proTxHash, err := privValidators[66].GetProTxHash(context.Background()) + proTxHash, err := privValidators[66].GetProTxHash(ctx) require.NoError(t, err) vote := withValidator(voteProto, proTxHash, 66) - _, err = signAddVote(privValidators[66], withBlockHash(vote, nil), voteSet) + _, err = signAddVote(ctx, privValidators[66], withBlockHash(vote, nil), voteSet) require.NoError(t, err) blockID, ok = voteSet.TwoThirdsMajority() - assert.False(t, ok || !blockID.IsZero(), + assert.False(t, ok || !blockID.IsNil(), "there should be no 2/3 majority: last vote added was nil") } // 68th validator voted for a different BlockParts PartSetHeader { - proTxHash, err := privValidators[67].GetProTxHash(context.Background()) + proTxHash, err := privValidators[67].GetProTxHash(ctx) require.NoError(t, err) vote := withValidator(voteProto, proTxHash, 67) blockPartsHeader := PartSetHeader{blockPartsTotal, crypto.CRandBytes(32)} - _, err = signAddVote(privValidators[67], withBlockPartSetHeader(vote, blockPartsHeader), voteSet) + _, err = signAddVote(ctx, privValidators[67], withBlockPartSetHeader(vote, blockPartsHeader), voteSet) require.NoError(t, err) blockID, ok = voteSet.TwoThirdsMajority() - assert.False(t, ok || !blockID.IsZero(), + assert.False(t, ok || !blockID.IsNil(), "there should be no 2/3 majority: last vote added had different PartSetHeader Hash") } // 69th validator voted for different BlockParts Total { - proTxHash, err := privValidators[68].GetProTxHash(context.Background()) + proTxHash, err := privValidators[68].GetProTxHash(ctx) require.NoError(t, err) vote := withValidator(voteProto, proTxHash, 68) blockPartsHeader := PartSetHeader{blockPartsTotal + 1, blockPartSetHeader.Hash} - _, err = signAddVote(privValidators[68], withBlockPartSetHeader(vote, blockPartsHeader), voteSet) + _, err = signAddVote(ctx, privValidators[68], withBlockPartSetHeader(vote, blockPartsHeader), voteSet) require.NoError(t, err) blockID, ok = voteSet.TwoThirdsMajority() - assert.False(t, ok || !blockID.IsZero(), + assert.False(t, ok || !blockID.IsNil(), "there should be no 2/3 majority: last vote added had different PartSetHeader Total") } // 70th validator voted for different CoreBlockHash { - proTxHash, err := privValidators[69].GetProTxHash(context.Background()) + proTxHash, err := privValidators[69].GetProTxHash(ctx) require.NoError(t, err) vote := withValidator(voteProto, proTxHash, 69) - _, err = signAddVote(privValidators[69], withBlockHash(vote, tmrand.Bytes(32)), voteSet) + _, err = signAddVote(ctx, privValidators[69], withBlockHash(vote, tmrand.Bytes(32)), voteSet) require.NoError(t, err) blockID, ok = voteSet.TwoThirdsMajority() - assert.False(t, ok || !blockID.IsZero(), + assert.False(t, ok || !blockID.IsNil(), "there should be no 2/3 majority: last vote added had different CoreBlockHash") } // 71st validator voted for the right CoreBlockHash & BlockPartSetHeader { - proTxHash, err := privValidators[70].GetProTxHash(context.Background()) + 
proTxHash, err := privValidators[70].GetProTxHash(ctx) require.NoError(t, err) vote := withValidator(voteProto, proTxHash, 70) - _, err = signAddVote(privValidators[70], vote, voteSet) + _, err = signAddVote(ctx, privValidators[70], vote, voteSet) require.NoError(t, err) blockID, ok = voteSet.TwoThirdsMajority() assert.True(t, ok && blockID.Equals(BlockID{blockHash, blockPartSetHeader}), @@ -332,8 +348,11 @@ func TestVoteSet_2_3MajorityRedux(t *testing.T) { } func TestVoteSet_Conflicts(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + height, round := int64(1), int32(0) - voteSet, _, privValidators := randVoteSet(height, round, tmproto.PrevoteType, 4, + voteSet, _, privValidators := randVoteSet(ctx, t, height, round, tmproto.PrevoteType, 4, RandStateID().WithHeight(height-1)) blockHash1 := tmrand.Bytes(32) blockHash2 := tmrand.Bytes(32) @@ -347,13 +366,13 @@ func TestVoteSet_Conflicts(t *testing.T) { BlockID: BlockID{nil, PartSetHeader{}}, } - val0ProTxHash, err := privValidators[0].GetProTxHash(context.Background()) + val0ProTxHash, err := privValidators[0].GetProTxHash(ctx) require.NoError(t, err) // val0 votes for nil. { vote := withValidator(voteProto, val0ProTxHash, 0) - added, err := signAddVote(privValidators[0], vote, voteSet) + added, err := signAddVote(ctx, privValidators[0], vote, voteSet) if !added || err != nil { t.Errorf("expected VoteSet.Add to succeed") } } // val0 votes again for blockHash1. { vote := withValidator(voteProto, val0ProTxHash, 0) - added, err := signAddVote(privValidators[0], withBlockHash(vote, blockHash1), voteSet) + added, err := signAddVote(ctx, privValidators[0], withBlockHash(vote, blockHash1), voteSet) assert.False(t, added, "conflicting vote") assert.Error(t, err, "conflicting vote") } @@ -374,7 +393,7 @@ func TestVoteSet_Conflicts(t *testing.T) { // val0 votes again for blockHash1. { vote := withValidator(voteProto, val0ProTxHash, 0) - added, err := signAddVote(privValidators[0], withBlockHash(vote, blockHash1), voteSet) + added, err := signAddVote(ctx, privValidators[0], withBlockHash(vote, blockHash1), voteSet) assert.True(t, added, "called SetPeerMaj23()") assert.Error(t, err, "conflicting vote") } @@ -386,17 +405,17 @@ func TestVoteSet_Conflicts(t *testing.T) { // val0 votes again, this time for blockHash2. { vote := withValidator(voteProto, val0ProTxHash, 0) - added, err := signAddVote(privValidators[0], withBlockHash(vote, blockHash2), voteSet) + added, err := signAddVote(ctx, privValidators[0], withBlockHash(vote, blockHash2), voteSet) assert.False(t, added, "duplicate SetPeerMaj23() from peerA") assert.Error(t, err, "conflicting vote") } // val1 votes for blockHash1.
{ - pvProTxHash, err := privValidators[1].GetProTxHash(context.Background()) + pvProTxHash, err := privValidators[1].GetProTxHash(ctx) assert.NoError(t, err) vote := withValidator(voteProto, pvProTxHash, 1) - added, err := signAddVote(privValidators[1], withBlockHash(vote, blockHash1), voteSet) + added, err := signAddVote(ctx, privValidators[1], withBlockHash(vote, blockHash1), voteSet) if !added || err != nil { t.Errorf("expected VoteSet.Add to succeed") } @@ -415,7 +434,7 @@ func TestVoteSet_Conflicts(t *testing.T) { pvProTxHash, err := privValidators[2].GetProTxHash(context.Background()) assert.NoError(t, err) vote := withValidator(voteProto, pvProTxHash, 2) - added, err := signAddVote(privValidators[2], withBlockHash(vote, blockHash2), voteSet) + added, err := signAddVote(ctx, privValidators[2], withBlockHash(vote, blockHash2), voteSet) if !added || err != nil { t.Errorf("expected VoteSet.Add to succeed") } @@ -435,10 +454,10 @@ func TestVoteSet_Conflicts(t *testing.T) { // val2 votes for blockHash1. { - pvProTxHash, err := privValidators[2].GetProTxHash(context.Background()) + pvProTxHash, err := privValidators[2].GetProTxHash(ctx) assert.NoError(t, err) vote := withValidator(voteProto, pvProTxHash, 2) - added, err := signAddVote(privValidators[2], withBlockHash(vote, blockHash1), voteSet) + added, err := signAddVote(ctx, privValidators[2], withBlockHash(vote, blockHash1), voteSet) assert.True(t, added) assert.Error(t, err, "conflicting vote") } @@ -457,8 +476,11 @@ func TestVoteSet_Conflicts(t *testing.T) { } func TestVoteSet_MakeCommit(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + height, round := int64(1), int32(0) - voteSet, _, privValidators := randVoteSet(height, round, tmproto.PrecommitType, 10, + voteSet, _, privValidators := randVoteSet(ctx, t, height, round, tmproto.PrecommitType, 10, RandStateID().WithHeight(height-1)) blockHash, blockPartSetHeader := crypto.CRandBytes(32), PartSetHeader{123, crypto.CRandBytes(32)} @@ -473,10 +495,10 @@ func TestVoteSet_MakeCommit(t *testing.T) { // 6 out of 10 voted for some block. for i := int32(0); i < 6; i++ { - pvProTxHash, err := privValidators[i].GetProTxHash(context.Background()) + pvProTxHash, err := privValidators[i].GetProTxHash(ctx) assert.NoError(t, err) vote := withValidator(voteProto, pvProTxHash, i) - _, err = signAddVote(privValidators[i], vote, voteSet) + _, err = signAddVote(ctx, privValidators[i], vote, voteSet) if err != nil { t.Error(err) } @@ -487,33 +509,33 @@ func TestVoteSet_MakeCommit(t *testing.T) { // 7th voted for some other block. { - pvProTxHash, err := privValidators[6].GetProTxHash(context.Background()) + pvProTxHash, err := privValidators[6].GetProTxHash(ctx) assert.NoError(t, err) vote := withValidator(voteProto, pvProTxHash, 6) vote = withBlockHash(vote, tmrand.Bytes(32)) vote = withBlockPartSetHeader(vote, PartSetHeader{123, tmrand.Bytes(32)}) - _, err = signAddVote(privValidators[6], vote, voteSet) + _, err = signAddVote(ctx, privValidators[6], vote, voteSet) require.NoError(t, err) } // The 8th voted like everyone else. { - pvProTxHash, err := privValidators[7].GetProTxHash(context.Background()) + pvProTxHash, err := privValidators[7].GetProTxHash(ctx) assert.NoError(t, err) vote := withValidator(voteProto, pvProTxHash, 7) - _, err = signAddVote(privValidators[7], vote, voteSet) + _, err = signAddVote(ctx, privValidators[7], vote, voteSet) require.NoError(t, err) } // The 9th voted for nil. 
{ - pvProTxHash, err := privValidators[8].GetProTxHash(context.Background()) + pvProTxHash, err := privValidators[8].GetProTxHash(ctx) assert.NoError(t, err) vote := withValidator(voteProto, pvProTxHash, 8) vote.BlockID = BlockID{} - _, err = signAddVote(privValidators[8], vote, voteSet) + _, err = signAddVote(ctx, privValidators[8], vote, voteSet) require.NoError(t, err) } @@ -619,26 +641,30 @@ func castVote( Type: tmproto.PrevoteType, BlockID: blockID, } - proTxHash, err := privValidators[validatorID].GetProTxHash(context.Background()) + ctx := context.Background() + proTxHash, err := privValidators[validatorID].GetProTxHash(ctx) require.NoError(t, err) vote := withValidator(voteProto, proTxHash, validatorID) - signed, err := signAddVote(privValidators[validatorID], vote, voteSet) + signed, err := signAddVote(ctx, privValidators[validatorID], vote, voteSet) require.True(t, signed) require.NoError(t, err) majorityBlock, twoThirdsMajority := voteSet.TwoThirdsMajority() - assert.EqualValues(t, twoThirdsMajority, !majorityBlock.IsZero()) + assert.EqualValues(t, twoThirdsMajority, !majorityBlock.IsNil()) return twoThirdsMajority, voteSet.HasTwoThirdsAny() } // NOTE: privValidators are in order func randVoteSet( + ctx context.Context, + t testing.TB, height int64, round int32, signedMsgType tmproto.SignedMsgType, numValidators int, stateID StateID, ) (*VoteSet, *ValidatorSet, []PrivValidator) { + t.Helper() valSet, mockPVs := RandValidatorSet(numValidators) return NewVoteSet("test_chain_id", height, round, signedMsgType, valSet, stateID), valSet, diff --git a/types/vote_test.go b/types/vote_test.go index b4ff79d1c0..e859965667 100644 --- a/types/vote_test.go +++ b/types/vote_test.go @@ -6,37 +6,41 @@ import ( "testing" "github.com/dashevo/dashd-go/btcjson" - "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/crypto/bls12381" - "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/crypto/tmhash" + "github.com/tendermint/tendermint/crypto/bls12381" "github.com/tendermint/tendermint/internal/libs/protoio" + "github.com/tendermint/tendermint/libs/log" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) -func examplePrevote() *Vote { - return exampleVote(byte(tmproto.PrevoteType)) +func examplePrevote(t *testing.T) *Vote { + t.Helper() + return exampleVote(t, byte(tmproto.PrevoteType)) } -func examplePrecommit() *Vote { - return exampleVote(byte(tmproto.PrecommitType)) +func examplePrecommit(t testing.TB) *Vote { + t.Helper() + vote := exampleVote(t, byte(tmproto.PrecommitType)) + vote.ExtensionSignature = []byte("signature") + return vote } -func exampleVote(t byte) *Vote { +func exampleVote(tb testing.TB, t byte) *Vote { + tb.Helper() + return &Vote{ Type: tmproto.SignedMsgType(t), Height: 12345, Round: 2, BlockID: BlockID{ - Hash: tmhash.Sum([]byte("blockID_hash")), + Hash: crypto.Checksum([]byte("blockID_hash")), PartSetHeader: PartSetHeader{ Total: 1000000, - Hash: tmhash.Sum([]byte("blockID_part_set_header_hash")), + Hash: crypto.Checksum([]byte("blockID_part_set_header_hash")), }, }, ValidatorProTxHash: crypto.ProTxHashFromSeedBytes([]byte("validator_pro_tx_hash")), @@ -45,7 +49,7 @@ func exampleVote(t byte) *Vote { } func TestVoteSignable(t *testing.T) { - vote := examplePrecommit() + vote := examplePrecommit(t) v := vote.ToProto() signBytes := VoteBlockSignBytes("test_chain_id", v) pb := CanonicalizeVote("test_chain_id", v) @@ -117,6 +121,25 @@ func 
TestVoteSignBytesTestVectors(t *testing.T) { 0x32, 0xd, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64}, // chainID }, + // containing vote extension + 5: { + "test_chain_id", &Vote{ + Height: 1, + Round: 1, + Extension: []byte("extension"), + }, + []byte{ + 0x21, // length + 0x11, // (field_number << 3) | wire_type + 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // height + 0x19, // (field_number << 3) | wire_type + 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // round + // remaining fields: + // (field_number << 3) | wire_type + 0x32, + 0xd, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, // chainID + }, + }, } for i, tc := range tests { v := tc.vote.ToProto() @@ -163,12 +186,15 @@ func TestVoteProposalNotEq(t *testing.T) { } func TestVoteVerifySignature(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + quorumHash := crypto.RandQuorumHash() privVal := NewMockPVForQuorum(quorumHash) pubkey, err := privVal.GetPubKey(context.Background(), quorumHash) require.NoError(t, err) - vote := examplePrecommit() + vote := examplePrecommit(t) v := vote.ToProto() stateID := RandStateID().WithHeight(vote.Height - 1) quorumType := btcjson.LLMQType_5_60 @@ -176,7 +202,7 @@ func TestVoteVerifySignature(t *testing.T) { signStateID := stateID.SignID("test_chain_id", quorumType, quorumHash) // sign it - err = privVal.SignVote(context.Background(), "test_chain_id", quorumType, quorumHash, v, stateID, nil) + err = privVal.SignVote(ctx, "test_chain_id", quorumType, quorumHash, v, stateID, nil) require.NoError(t, err) // verify the same vote @@ -205,6 +231,89 @@ func TestVoteVerifySignature(t *testing.T) { require.True(t, valid) } +// TestVoteExtension tests that vote verification behaves correctly in each case +// of a vote extension being set on the vote. +func TestVoteExtension(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testCases := []struct { + name string + extension []byte + includeSignature bool + expectError bool + }{ + { + name: "all fields present", + extension: []byte("extension"), + includeSignature: true, + expectError: false, + }, + // TODO(thane): Re-enable once + // https://github.com/tendermint/tendermint/issues/8272 is resolved + //{ + // name: "no extension signature", + // extension: []byte("extension"), + // includeSignature: false, + // expectError: true, + //}, + { + name: "empty extension", + includeSignature: true, + expectError: false, + }, + // TODO: Re-enable once + // https://github.com/tendermint/tendermint/issues/8272 is resolved.
+ //{ + // name: "no extension and no signature", + // includeSignature: false, + // expectError: true, + //}, + } + + logger := log.NewTestingLogger(t) + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + height, round := int64(1), int32(0) + quorumHash := crypto.RandQuorumHash() + privVal := NewMockPVForQuorum(quorumHash) + proTxHash, err := privVal.GetProTxHash(ctx) + require.NoError(t, err) + pk, err := privVal.GetPubKey(ctx, quorumHash) + require.NoError(t, err) + blk := Block{} + blockID, err := blk.BlockID() + require.NoError(t, err) + stateID := RandStateID().WithHeight(height - 1) + vote := &Vote{ + ValidatorProTxHash: proTxHash, + ValidatorIndex: 0, + Height: height, + Round: round, + Type: tmproto.PrecommitType, + BlockID: blockID, + Extension: tc.extension, + } + + v := vote.ToProto() + err = privVal.SignVote(ctx, "test_chain_id", btcjson.LLMQType_5_60, quorumHash, v, stateID, logger) + require.NoError(t, err) + vote.BlockSignature = v.BlockSignature + vote.StateSignature = v.StateSignature + if tc.includeSignature { + vote.ExtensionSignature = v.ExtensionSignature + } + _, _, err = vote.VerifyWithExtension("test_chain_id", btcjson.LLMQType_5_60, quorumHash, pk, proTxHash, stateID) + if tc.expectError { + require.Error(t, err) + } else { + require.NoError(t, err) + } + }) + } +} + func TestIsVoteTypeValid(t *testing.T) { tc := []struct { name string @@ -227,9 +336,12 @@ func TestIsVoteTypeValid(t *testing.T) { } func TestVoteVerify(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + quorumHash := crypto.RandQuorumHash() privVal := NewMockPVForQuorum(quorumHash) - proTxHash, err := privVal.GetProTxHash(context.Background()) + proTxHash, err := privVal.GetProTxHash(ctx) require.NoError(t, err) quorumType := btcjson.LLMQType_5_60 @@ -237,7 +349,7 @@ func TestVoteVerify(t *testing.T) { pubkey, err := privVal.GetPubKey(context.Background(), quorumHash) require.NoError(t, err) - vote := examplePrevote() + vote := examplePrevote(t) vote.ValidatorProTxHash = proTxHash stateID := RandStateID().WithHeight(vote.Height - 1) @@ -257,84 +369,216 @@ func TestVoteVerify(t *testing.T) { } func TestVoteString(t *testing.T) { - str := examplePrecommit().String() - expected := `Vote{56789:959A8F5EF2BE 12345/02/SIGNED_MSG_TYPE_PRECOMMIT(Precommit) 8B01023386C3 000000000000 000000000000}` + str := examplePrecommit(t).String() + expected := `Vote{56789:959A8F5EF2BE 12345/02/SIGNED_MSG_TYPE_PRECOMMIT(Precommit) 8B01023386C3 000000000000 000000000000 000000000000}` if str != expected { t.Errorf("got unexpected string for Vote. Expected:\n%v\nGot:\n%v", expected, str) } - str2 := examplePrevote().String() - expected = `Vote{56789:959A8F5EF2BE 12345/02/SIGNED_MSG_TYPE_PREVOTE(Prevote) 8B01023386C3 000000000000 000000000000}` + str2 := examplePrevote(t).String() + expected = `Vote{56789:959A8F5EF2BE 12345/02/SIGNED_MSG_TYPE_PREVOTE(Prevote) 8B01023386C3 000000000000 000000000000 000000000000}` if str2 != expected { t.Errorf("got unexpected string for Vote. 
Expected:\n%v\nGot:\n%v", expected, str2) } } -func TestVoteValidateBasic(t *testing.T) { +func signVote( + ctx context.Context, + t *testing.T, + pv PrivValidator, + chainID string, + quorumType btcjson.LLMQType, + quorumHash crypto.QuorumHash, + vote *Vote, + stateID StateID, + logger log.Logger, +) { + t.Helper() + + v := vote.ToProto() + require.NoError(t, pv.SignVote(ctx, chainID, quorumType, quorumHash, v, stateID, logger)) + vote.StateSignature = v.StateSignature + vote.BlockSignature = v.BlockSignature + vote.ExtensionSignature = v.ExtensionSignature +} + +func TestValidVotes(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testCases := []struct { + name string + vote *Vote + malleateVote func(*Vote) + }{ + {"good prevote", examplePrevote(t), func(v *Vote) {}}, + {"good precommit without vote extension", examplePrecommit(t), func(v *Vote) { v.Extension = nil }}, + {"good precommit with vote extension", examplePrecommit(t), func(v *Vote) { v.Extension = []byte("extension") }}, + } + for _, tc := range testCases { + quorumHash := crypto.RandQuorumHash() + privVal := NewMockPVForQuorum(quorumHash) + + v := tc.vote.ToProto() + stateID := RandStateID().WithHeight(v.Height - 1) + signVote(ctx, t, privVal, "test_chain_id", 0, quorumHash, tc.vote, stateID, nil) + tc.malleateVote(tc.vote) + require.NoError(t, tc.vote.ValidateBasic(), "ValidateBasic for %s", tc.name) + require.NoError(t, tc.vote.ValidateWithExtension(), "ValidateWithExtension for %s", tc.name) + } +} + +func TestInvalidVotes(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testCases := []struct { + name string + malleateVote func(*Vote) + }{ + {"negative height", func(v *Vote) { v.Height = -1 }}, + {"negative round", func(v *Vote) { v.Round = -1 }}, + {"invalid block ID", func(v *Vote) { v.BlockID = BlockID{[]byte{1, 2, 3}, PartSetHeader{111, []byte("blockparts")}} }}, + {"Invalid ProTxHash", func(v *Vote) { v.ValidatorProTxHash = make([]byte, 1) }}, + {"Invalid ValidatorIndex", func(v *Vote) { v.ValidatorIndex = -1 }}, + {"Invalid Signature", func(v *Vote) { v.BlockSignature = nil }}, + {"Too big Signature", func(v *Vote) { v.BlockSignature = make([]byte, SignatureSize+1) }}, + } + for _, tc := range testCases { + quorumHash := crypto.RandQuorumHash() + privVal := NewMockPVForQuorum(quorumHash) + prevote := examplePrevote(t) + v := prevote.ToProto() + stateID := RandStateID().WithHeight(v.Height - 1) + signVote(ctx, t, privVal, "test_chain_id", 0, quorumHash, prevote, stateID, nil) + tc.malleateVote(prevote) + require.Error(t, prevote.ValidateBasic(), "ValidateBasic for %s in invalid prevote", tc.name) + require.Error(t, prevote.ValidateWithExtension(), "ValidateWithExtension for %s in invalid prevote", tc.name) + + precommit := examplePrecommit(t) + signVote(ctx, t, privVal, "test_chain_id", 0, quorumHash, precommit, stateID, nil) + tc.malleateVote(precommit) + require.Error(t, precommit.ValidateBasic(), "ValidateBasic for %s in invalid precommit", tc.name) + require.Error(t, precommit.ValidateWithExtension(), "ValidateWithExtension for %s in invalid precommit", tc.name) + } +} + +func TestInvalidPrevotes(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + quorumHash := crypto.RandQuorumHash() privVal := NewMockPVForQuorum(quorumHash) testCases := []struct { - testName string + name string malleateVote func(*Vote) - expectErr bool }{ - {"Good Vote", func(v *Vote) {}, false}, - {"Negative 
Height", func(v *Vote) { v.Height = -1 }, true}, - {"Negative Round", func(v *Vote) { v.Round = -1 }, true}, - {"Invalid BlockID", func(v *Vote) { - v.BlockID = BlockID{[]byte{1, 2, 3}, PartSetHeader{111, []byte("blockparts")}} - }, true}, - {"Invalid ProTxHash", func(v *Vote) { v.ValidatorProTxHash = make([]byte, 1) }, true}, - {"Invalid ValidatorIndex", func(v *Vote) { v.ValidatorIndex = -1 }, true}, - {"Invalid Signature", func(v *Vote) { v.BlockSignature = nil }, true}, - {"Too big Signature", func(v *Vote) { v.BlockSignature = make([]byte, SignatureSize+1) }, true}, + {"vote extension present", func(v *Vote) { v.Extension = []byte("extension") }}, + {"vote extension signature present", func(v *Vote) { v.ExtensionSignature = []byte("signature") }}, } for _, tc := range testCases { - tc := tc - t.Run(tc.testName, func(t *testing.T) { - vote := examplePrecommit() - v := vote.ToProto() - stateID := RandStateID().WithHeight(v.Height - 1) - err := privVal.SignVote(context.Background(), "test_chain_id", 0, quorumHash, v, stateID, nil) - vote.BlockSignature = v.BlockSignature - vote.StateSignature = v.StateSignature - require.NoError(t, err) - tc.malleateVote(vote) - assert.Equal(t, tc.expectErr, vote.ValidateBasic() != nil, "Validate Basic had an unexpected result") - }) + prevote := examplePrevote(t) + v := prevote.ToProto() + stateID := RandStateID().WithHeight(v.Height - 1) + signVote(ctx, t, privVal, "test_chain_id", 0, quorumHash, prevote, stateID, nil) + tc.malleateVote(prevote) + require.Error(t, prevote.ValidateBasic(), "ValidateBasic for %s", tc.name) + require.Error(t, prevote.ValidateWithExtension(), "ValidateWithExtension for %s", tc.name) + } +} + +func TestInvalidPrecommitExtensions(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + quorumHash := crypto.RandQuorumHash() + privVal := NewMockPVForQuorum(quorumHash) + + testCases := []struct { + name string + malleateVote func(*Vote) + }{ + {"vote extension present without signature", func(v *Vote) { + v.Extension = []byte("extension") + v.ExtensionSignature = nil + }}, + // TODO(thane): Re-enable once https://github.com/tendermint/tendermint/issues/8272 is resolved + //{"missing vote extension signature", func(v *Vote) { v.ExtensionSignature = nil }}, + {"oversized vote extension signature", func(v *Vote) { v.ExtensionSignature = make([]byte, SignatureSize+1) }}, + } + for _, tc := range testCases { + precommit := examplePrecommit(t) + v := precommit.ToProto() + stateID := RandStateID().WithHeight(v.Height - 1) + signVote(ctx, t, privVal, "test_chain_id", 0, quorumHash, precommit, stateID, nil) + tc.malleateVote(precommit) + // We don't expect an error from ValidateBasic, because it doesn't + // handle vote extensions. 
+ require.NoError(t, precommit.ValidateBasic(), "ValidateBasic for %s", tc.name) + require.Error(t, precommit.ValidateWithExtension(), "ValidateWithExtension for %s", tc.name) } } func TestVoteProtobuf(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + quorumHash := crypto.RandQuorumHash() privVal := NewMockPVForQuorum(quorumHash) - vote := examplePrecommit() + vote := examplePrecommit(t) v := vote.ToProto() stateID := RandStateID().WithHeight(v.Height - 1) - err := privVal.SignVote(context.Background(), "test_chain_id", 0, quorumHash, v, stateID, nil) + err := privVal.SignVote(ctx, "test_chain_id", 0, quorumHash, v, stateID, nil) vote.BlockSignature = v.BlockSignature vote.StateSignature = v.StateSignature require.NoError(t, err) testCases := []struct { - msg string - v1 *Vote - expPass bool + msg string + vote *Vote + convertsOk bool + passesValidateBasic bool }{ - {"success", vote, true}, - {"fail vote validate basic", &Vote{}, false}, - {"failure nil", nil, false}, + {"success", vote, true, true}, + {"fail vote validate basic", &Vote{}, true, false}, } for _, tc := range testCases { - protoProposal := tc.v1.ToProto() + protoProposal := tc.vote.ToProto() v, err := VoteFromProto(protoProposal) - if tc.expPass { + if tc.convertsOk { require.NoError(t, err) - require.Equal(t, tc.v1, v, tc.msg) } else { require.Error(t, err) } + + err = v.ValidateBasic() + if tc.passesValidateBasic { + require.NoError(t, err) + require.Equal(t, tc.vote, v, tc.msg) + } else { + require.Error(t, err) + } + } +} + +var sink interface{} + +func BenchmarkVoteSignBytes(b *testing.B) { + protoVote := examplePrecommit(b).ToProto() + + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + sink = VoteBlockSignBytes("test_chain_id", protoVote) } + + if sink == nil { + b.Fatal("Benchmark did not run") + } + + // Reset the sink. + sink = (interface{})(nil) } diff --git a/version/version.go b/version/version.go index 47da8ef5dc..a826d99de6 100644 --- a/version/version.go +++ b/version/version.go @@ -27,8 +27,8 @@ var ( ) type Consensus struct { - Block uint64 `json:"block"` - App uint64 `json:"app"` + Block uint64 `json:"block,string"` + App uint64 `json:"app,string"` } func (c Consensus) ToProto() tmversion.Consensus {
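A note on the `,string` tags in the version/version.go hunk above: encoding/json's `,string` option makes the uint64 fields marshal as quoted strings, and accept quoted strings when unmarshaling, which keeps large block/app version numbers exact for JSON clients that decode numbers as 64-bit floats. A minimal, self-contained sketch of that behavior (the values here are illustrative, not taken from the codebase):

package main

import (
	"encoding/json"
	"fmt"
)

// Consensus mirrors the struct in version/version.go; with the
// `,string` option, Block and App are emitted as JSON strings.
type Consensus struct {
	Block uint64 `json:"block,string"`
	App   uint64 `json:"app,string"`
}

func main() {
	out, err := json.Marshal(Consensus{Block: 11, App: 1})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"block":"11","app":"1"}

	// Round-trip: quoted values decode back into the uint64 fields.
	var c Consensus
	if err := json.Unmarshal([]byte(`{"block":"11","app":"1"}`), &c); err != nil {
		panic(err)
	}
	fmt.Println(c.Block, c.App) // 11 1
}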